Merge https://github.com/behdad/fonttools
Seed internal-git with the external git repo above.
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..4fca027
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+MANIFEST
+build
+dist
diff --git a/Doc/changes.txt b/Doc/changes.txt
new file mode 100644
index 0000000..22bcd5b
--- /dev/null
+++ b/Doc/changes.txt
@@ -0,0 +1,161 @@
+TTX/FontTools Version 2.4
+- Option to write to arbitrary files
+- Better dump format for DSIG
+- Better detection of OTF XML
+- Fix issue with Apple's kern table format
+- Fix mangling of TT glyph programs
+- Fix issues related to mona.ttf
+- Fix Windows Installer instructions
+- Fix some modern MacOS issues
+- Fix minor issues and typos
+
+TTX/FontTools Version 2.3
+
+- TrueType Collection (TTC) support
+- Python 2.6 support
+- Update Unicode data to 5.2.0
+- Couple of bug fixes
+
+TTX/FontTools Version 2.2
+
+- ClearType support
+- cmap format 1 support
+- PFA font support
+- Switched from Numeric to numpy
+- Update Unicode data to 5.1.0
+- Update AGLFN data to 1.6
+- Many bug fixes
+
+TTX/FontTools Version 2.1
+
+- Many years worth of fixes and features
+
+TTX/FontTools Version 2.0 beta 2 (released ??? 2002)
+
+- Be "forgiving" when interpreting the maxp table version field:
+  interpret any value as 1.0 if it's not 0.5. Fixes dumping of these
+  GPL fonts: http://www.freebsd.org/cgi/pds.cgi?ports/chinese/wangttf
+- Fixed ttx -l: it turned out this part of the code didn't work with
+  Python 2.2.1 and earlier. My bad to do most of my testing with a
+  different version than I shipped TTX with :-(
+- Fixed bug in ClassDef format 1 subtable (Andreas Seidel bumped into
+  this one).
+
+TTX/FontTools Version 2.0 beta 1 (released September 10 2002)
+
+- Fixed embarrassing bug: the master checksum in the head table is now
+  calculated correctly even on little-endian platforms (such as Intel).
+- Made the cmap format 4 compiler smarter: the binary data it creates is
+  now more or less as compact as possible. TTX now makes more compact
+  data than in any shipping font I've tested it with.
+- Dump glyph names as a separate "GlyphOrder" pseudo table as opposed to
+  as part of the glyf table (obviously needed for CFF-OTF's).
+- Added proper support for the CFF table.
+- Don't barf on empty tables (questionable, but "there are font out there...")
+- When writing TT glyf data, align glyphs on 4-byte boundaries. This seems
+  to be the current recommendation by MS. Also: don't barf on fonts which
+  are already 4-byte aligned.
+- Windows installer contributed by Adam Twardoch! Yay!
+- Changed the command line interface again, now by creating one new tool
+  replacing the old ones: ttx
+  It dumps and compiles, depending on input file types. The options have
+  changed somewhat. 
+  - The -d option is back (output dir)
+  - ttcompile's -i options is now called -m (as in "merge"), to avoid clash
+    with dump's -i.
+  - The -s option ("split tables") no longer creates a directory,
+    but instead outputs a small .ttx file containing references to the
+    individual table files. This is not a true link, it's a simple file
+    name, and the referenced file should be in the same directory so
+    ttcompile can find them.
+  - compile no longer accepts a directory as input argument. Instead it
+    can parse the new "mini-ttx" format as output by "ttx -s".
+  - all arguments are input files
+- Renamed the command line programs and moved them to the Tools
+  subdirectory. They are now installed by the setup.py install script.
+- Added OpenType support. BASE, GDEF, GPOS, GSUB and JSTF are (almost)
+  fully supported. The XML output is not yet final, as I'm still
+  considering to output certain subtables in a more human-friendly
+  manner.
+- Fixed 'kern' table to correctly accept subtables it doesn't know about,
+  as well as interpreting Apple's definition of the 'kern' table headers
+  correctly.
+- Fixed bug where glyphnames were not calculated from 'cmap' if it was
+  (one of the) first tables to be decompiled. More specifically: if cmap
+  was the first to ask for a glyphID -> glyphName mapping.
+- Switched XML parsers: use expat instead of xmlproc. Should be faster.
+- Removed my UnicodeString object: I now require Python 2.0 or up, which
+  has unicode support built in.
+- Removed assert in glyf table: redundant data at the end of the table
+  is now ignored instead of raising an error. Should become a warning.
+- Fixed bug in hmtx/vmtx code that only occurred if all advances were equal.
+- Fixed subtle bug in TT instruction disassembler.
+- Couple of fixes to the 'post' table.
+- Updated OS/2 table to latest spec.
+
+TTX/FontTools Version 1.0 beta 1 (released August 10 2001)
+
+- Reorganized the command line interface for ttDump.py and ttCompile.py,
+  they now behave more like "normal" command line tool, in that they accept
+  multiple input files for batch processing. 
+- ttDump.py and ttCompile.py don't silently override files anymore, but ask
+  before doing so. Can be overridden by -f.
+- Added -d <destination-directory> option to both ttDump.py and ttCompile.py.
+- Installation is now done with distutils. (Needs work for environments without
+  compilers.)
+- Updated installation instructions.
+- Added some workarounds so as to handle certain buggy fonts more gracefully.
+- Updated Unicode table to Unicode 3.0 (Thanks Antoine!)
+- Included a Python script by Adam Twardoch that adds some useful stuff to the
+  Windows registry.
+- Moved the project to SourceForge.
+
+TTX/FontTools Version 1.0 alpha 6 (released March 15 2000)
+
+- Big reorganization: made ttLib a subpackage of the new fontTools package,
+  changed several module names. Called the entire suite "FontTools"
+- Added several submodules to fontTools, some new, some older.
+- Added experimental CFF/GPOS/GSUB support to ttLib, read-only (but XML dumping
+  of GPOS/GSUB is for now disabled)
+- Fixed hdmx endian bug
+- Added -b option to ttCompile.py, it disables recalculation of bounding boxes,
+  as requested by Werner Lemberg.
+- Renamed tt2xml.pt to ttDump.py and xml2tt.py to ttCompile.py
+- Use ".ttx" as file extension instead of ".xml".
+- TTX is now the name of the XML-based *format* for TT fonts, and not just
+  an application.
+
+Version 1.0 alpha 5 (never released)
+
+- More tables supported: hdmx, vhea, vmtx
+
+Version 1.0 alpha 3 & 4 (never released)
+
+- fixed most portability issues
+- retracted the "Euro_or_currency" change from 1.0a2: it was nonsense!
+
+Version 1.0 alpha 2 (released as binary for MacOS, 2 May 1999)
+
+- generates full FOND resources: including width table, PS
+  font name info and kern table if applicable. 
+- added cmap format 4 support. Extra: dumps Unicode char names as XML comments! 
+- added cmap format 6 support 
+- now accepts true type files starting with "true"
+  (instead of just 0x00010000 and "OTTO") 
+- 'glyf' table support is now complete: I added support for composite scale, 
+  xy-scale and two-by-two for the 'glyf' table. For now, component offset scale 
+  behaviour defaults to Apple-style. This only affects the (re)calculation of 
+  the glyph bounding box. 
+- changed "Euro" to "Euro_or_currency" in the Standard Apple Glyph order list, 
+  since we cannot tell from the 'post' table which is meant. I should probably 
+  doublecheck with a Unicode encoding if available. (This does not affect the 
+  output!)
+
+Fixed bugs: 
+- 'hhea' table is now recalculated correctly 
+- fixed wrong assumption about sfnt resource names
+
+Version 1.0 alpha 1 (27 Apr 1999)
+
+- initial binary release for MacOS
+
diff --git a/Doc/documentation.html b/Doc/documentation.html
new file mode 100644
index 0000000..e2821fe
--- /dev/null
+++ b/Doc/documentation.html
@@ -0,0 +1,104 @@
+<HTML>
+<HEAD>
+
+<TITLE>TTX Documentation</TITLE>
+
+
+</HEAD>
+<BODY bgcolor="#FFFFFF">
+
+<H3>TTX -- From OpenType and TrueType to XML and Back</H3>
+
+<A HREF="http://fonttools.sourceforge.net/">TTX</A> is a tool for manipulating TrueType and OpenType fonts. It is written in Python and has a BSD-style, open-source licence -- see LICENSE.txt. Among other things this means you can use it free of charge. It's hosted at <A HREF="http://sourceforge.net/">sourceforge.net</A>.
+
+<P>
+TTX can dump TrueType and OpenType fonts to an XML-based text format, which is also called TTX. TTX files have a .ttx file extension.
+
+<H3>How to use TTX</H3>
+
+The TTX application can be used in two ways, depending on what platform you run it on:
+
+<ul>
+  <li>As a command line tool (Windows/DOS, Unix, MacOSX)</li>
+  <li>By dropping files onto the application (Windows, MacOS)</li>
+</ul>
+
+<P>
+TTX detects what kind of files it is fed: it will output a .ttx file when it sees a .ttf or .otf, and it will compile a .ttf or .otf when the input file is a .ttx file. By default, the output file is created in the same folder as the input file, and will have the same name as the input file but with a different extension. TTX will <I>never</I> overwrite existing files, but if necessary will append a unique number to the output filename (before the extension), eg.: "Arial#1.ttf".
+
+<P>
+When using TTX from the command line there are a bunch of extra options, these are explained in the help text, as displayed when typing "ttx -h" at the command prompt. These additional options include:
+<ul>
+  <li>specifying the folder where the output files are created</li>
+  <li>specifying which tables to dump or which tables to exclude</li>
+  <li>merging partial .ttx files with existing .ttf or .otf files</li>
+  <li>listing brief table info instead of dumping to .ttx</li>
+  <li>splitting tables to separate .ttx files</li>
+  <li>disabling TT instruction disassembly</li>
+</ul>
+
+<H3>The TTX file format</H3>
+
+The following tables are currently supported:
+<BLOCKQUOTE><TT>
+<!-- begin table list -->
+BASE, CBDT, CBLC, CFF, COLR, CPAL, DSIG, EBDT, EBLC, FFTM, GDEF, GMAP, GPKG, GPOS, GSUB, JSTF, LTSH, META, OS/2, SING, SVG, TSI0, TSI1, TSI2, TSI3, TSI5, TSIB, TSID, TSIJ, TSIP, TSIS, TSIV, VORG, cmap, cvt, fpgm, gasp, glyf, hdmx, head, hhea, hmtx, kern, loca, maxp, name, post, prep, sbix, vhea and vmtx
+<!-- end table list -->
+</TT></BLOCKQUOTE>
+Other tables are dumped as hexadecimal data.
+
+<P>
+TrueType fonts use glyph indices (GlyphID's) to refer to glyphs in most places.
+While this is fine in binary form, it is really hard to work with for
+humans. Therefore we use names instead.
+
+<P>The glyph names are either extracted from the 'CFF ' table or the 'post' table,
+or are derived from a Unicode 'cmap' table. In the latter case the Adobe Glyph List
+is used to calculate names based on Unicode values. If all of these methods fail,
+names are invented based on GlyphID (eg. "glyph00142").
+
+<P>It is possible that different glyphs use the same name. If this happens,
+we force the names to be unique by appending "#n" to the name (n being an
+integer number). The original names are being kept, so this has no influence
+on a "round tripped" font.
+
+<P>Because the order in which glyphs are stored inside the TT font is
+important, we maintain an ordered list of glyph names in the font.
+
+
+<H3>Development and feedback</H3>
+
+TTX/FontTools development is ongoing, but often goes in spurts. Feature requests and bug reports are always welcome. The best place for these is currently the fonttools-discussion mailing list at SourceForge. This list is both for discussing TTX from an end-user perspective as well as TTX/FontTools development. Subscription info can be found if you follow the "Mailing Lists" link at the <A HREF="http://sourceforge.net/projects/fonttools/">SourceForge project page</A>. You can also email me directly at <A HREF="mailto:just@letterror.com">just@letterror.com</A>.
+
+<P>
+Let me take this opportunity to mention that if you have special needs (eg. custom font manipulators, different table formats, etc.): I am available for contracting.
+
+<H3>Credits</H3>
+
+Windows setup script: Adam Twardoch
+<BR>Icon: Hannes Famira
+
+<H3>Acknowledgements</H3>
+
+(in alphabetical order) 
+Erik van Blokland, Petr van Blokland, Jelle Bosma, Vincent Connare, 
+Simon Daniels, Hannes Famira, Yannis Haralambous, Greg Hitchcock, John Hudson,
+Jack Jansen, Tom Kacvinsky, Antoine Leca, Werner Lemberg, Tal Leming,
+Peter Lofting, Dave Opstad, Laurence Penney, Read Roberts, Guido van Rossum, Andreas Seidel, Adam Twardoch. 
+
+<H3>Copyrights</H3>
+
+<A HREF="http://fonttools.sourceforge.net/">FontTools/TTX</A>
+<BR>1999-2003 Just van Rossum; LettError (just@letterror.com). See LICENSE.txt for the full license.
+<P>
+<A HREF="http://www.python.org/">Python</A>
+<BR>Copyright (c) 2001-2003 Python Software Foundation. All Rights Reserved.
+<BR>Copyright (c) 2000 BeOpen.com. All Rights Reserved.
+<BR>Copyright (c) 1995-2001 Corporation for National Research Initiatives. All Rights Reserved.
+<BR>Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam. All Rights Reserved.
+<P>
+<A HREF="http://www.pfdubois.com/numpy/">Numeric Python (NumPy)</A>
+<BR>Copyright (c) 1996. The Regents of the University of California. All rights reserved.
+
+</BODY>
+</HTML>
diff --git a/Doc/install.txt b/Doc/install.txt
new file mode 100644
index 0000000..f2f3b0c
--- /dev/null
+++ b/Doc/install.txt
@@ -0,0 +1,114 @@
+TTX/FontTools
+
+TTX/FontTools is a suite of tools for manipulating fonts. It is written in
+Python and has a BSD-style, open-source licence -- see LICENSE.txt.
+It's hosted at http://sourceforge.net/.
+
+The flagship is TTX, a tool to convert OpenType and TrueType font files to
+an XML-based format (also called TTX), and back. This lets you edit TTF or
+OTF files with any text editor.
+
+The FontTools library currently reads and writes TrueType font files, reads
+PostScript Type 1 fonts and more.
+
+
+Scope
+
+TTX/FontTools' functionality is aimed towards font developers and font tool
+developers. It can of course be used to just access fonts (outlines,
+metrics, etc.) but it is not optimized for that. It will be further
+developed so it can be the core of any font editor. And that's exactly
+what it will be for our upcoming major rewrite of RoboFog, our (commercial)
+PythonPowered font editor for MacOS.
+
+
+Installation
+
+For Windows and MacOS there are easy-to-use TTX installers. The rest of this
+document is meant for people who want to use TTX/FontTools from the source.
+
+You need the following software:
+
+Python
+  The fresh versions as well as older versions (You need 2.0 or higher)
+  can be downloaded from
+     http://www.python.org/download/
+  or here
+     http://sourceforge.net/projects/python/
+  
+  Windows: grab the Windows installer, run the full install.
+  Un*x: follow the build instructions.
+  MacOS: grab the installer, run "Easy Install"
+
+The numpy extension
+  See http://numpy.scipy.org/
+
+Now run the "setup.py" script from the FontTools archive. This will install
+all the modules in the right places, as well as tries to compile the one
+(optional) C extension contained in FontTools. On Unix it also installs the
+"ttx" command line tool. This tool can also be used on Windows, but might
+need some fiddling.
+
+For instructions how to build a standalone Windows installer, see
+Windows/README.TXT. Thanks a LOT to Adam Twardoch for this essential
+contribution.
+
+For TTX usage instructions, see the file "documentation.html".
+
+
+Feedback
+
+Please join the fonttools-discussion mailing list at SourceForge. Subscription
+info can be found if you follow the "Mailing Lists" link at the SourceForge
+project page:
+  http://sourceforge.net/projects/fonttools/
+You can also email me directly at just@letterror.com.
+
+If you want to follow the development of FontTools closely, or would like to
+contribute, you can also subscribe to the fonttools-checkins mailing list.
+
+
+Anonymous VCS access
+
+The FontTools sources are also accessible here:
+  http://sourceforge.net/projects/fonttools/
+Let me know if you'd like to become a co-developer.
+
+
+Developer documentation
+
+Sorry, documentation beyond doc strings in the source code is still on my to-do list... 
+Below follows a brief overview of what's there.
+
+
+The library
+
+  Cross-platform
+     fontTools.t1Lib -- Provides a Type 1 font reader. Writing is a planned feature.
+     fontTools.ttLib -- Extensive TrueType tools. Reads and writes. This is the flagship 
+	 of FontTools, it's by far the most mature component. Contains a completely modular
+	 TTF table converter architecture. See ttLib/tables/table_API_readme.txt.
+     fontTools.afmLib -- An AFM file reader/writer.
+     fontTools.cffLib -- Reads CFF fonts. Writing is a planned feature.
+     fontTools.unicode -- A simple (but large) module that translates 
+	 Unicode values to their descriptive names. Still Unicode 2.0.
+     fontTools.agl -- Interface to the Adobe Glyph List: maps unicode values
+	 to glyph names and back.
+
+  Mac-specific
+     fontTools.fondLib -- A reader/writer class for Mac FOND resources.
+     fontTools.nfntLib -- Reads Mac NFNT bitmap font resources.
+
+
+Thank-you's
+
+(in alphabetical order) 
+Erik van Blokland, Petr van Blokland, Jelle Bosma, Vincent Connare, 
+Simon Daniels, Hannes Famira, Greg Hitchcock, John Hudson, Jack Jansen,
+Antoine Leca, Werner Lemberg, Peter Lofting, Dave Opstad, Laurence Penney, 
+Guido van Rossum, Adam Twardoch. 
+
+Copyrights
+
+FontTools/TTX -- 1999-2002 Just van Rossum; Letterror (just@letterror.com) 
+See LICENCE.txt for the full license.
diff --git a/Doc/ttx.1 b/Doc/ttx.1
new file mode 100644
index 0000000..24ce9c1
--- /dev/null
+++ b/Doc/ttx.1
@@ -0,0 +1,225 @@
+.Dd May 18, 2004
+.\" ttx is not specific to any OS, but contrary to what groff_mdoc(7)
+.\" seems to imply, entirely omitting the .Os macro causes 'BSD' to
+.\" be used, so I give a zero-width space as its argument.
+.Os \&
+.\" The "FontTools Manual" argument apparently has no effect in
+.\" groff 1.18.1. I think it is a bug in the -mdoc groff package.
+.Dt TTX 1 "FontTools Manual"
+.Sh NAME
+.Nm ttx
+.Nd tool for manipulating TrueType and OpenType fonts
+.Sh SYNOPSIS
+.Nm
+.Bk
+.Op Ar option ...
+.Ek
+.Bk
+.Ar file ...
+.Ek
+.Sh DESCRIPTION
+.Nm
+is a tool for manipulating TrueType and OpenType fonts.  It can convert
+TrueType and OpenType fonts to and from an
+.Tn XML Ns -based format called
+.Tn TTX .
+.Tn TTX
+files have a
+.Ql .ttx
+extension.
+.Pp
+For each
+.Ar file
+argument it is given,
+.Nm
+detects whether it is a
+.Ql .ttf ,
+.Ql .otf
+or
+.Ql .ttx
+file and acts accordingly: if it is a
+.Ql .ttf
+or
+.Ql .otf
+file, it generates a
+.Ql .ttx
+file; if it is a
+.Ql .ttx
+file, it generates a
+.Ql .ttf
+or
+.Ql .otf
+file.
+.Pp
+By default, every output file is created in the same directory as the
+corresponding input file and with the same name except for the
+extension, which is substituted appropriately.
+.Nm
+never overwrites existing files; if necessary, it appends a suffix to
+the output file name before the extension, as in
+.Pa Arial#1.ttf .
+.Ss "General options"
+.Bl -tag -width ".Fl t Ar table"
+.It Fl h
+Display usage information.
+.It Fl d Ar dir
+Write the output files to directory
+.Ar dir
+instead of writing every output file to the same directory as the
+corresponding input file.
+.It Fl o Ar file
+Write the output to
+.Ar file
+instead of writing it to the same directory as the
+corresponding input file.
+.It Fl v
+Be verbose.  Write more messages to the standard output describing what
+is being done.
+.It Fl a
+Allow virtual glyphs ID's on compile or decompile.
+.El
+.Ss "Dump options"
+The following options control the process of dumping font files
+(TrueType or OpenType) to
+.Tn TTX
+files.
+.Bl -tag -width ".Fl t Ar table"
+.It Fl l
+List table information.  Instead of dumping the font to a
+.Tn TTX
+file, display minimal information about each table.
+.It Fl t Ar table
+Dump table
+.Ar table .
+This option may be given multiple times to dump several tables at
+once.  When not specified, all tables are dumped.
+.It Fl x Ar table
+Exclude table
+.Ar table
+from the list of tables to dump.  This option may be given multiple
+times to exclude several tables from the dump.  The
+.Fl t
+and
+.Fl x
+options are mutually exclusive.
+.It Fl s
+Split tables.  Dump each table to a separate
+.Tn TTX
+file and write (under the name that would have been used for the output
+file if the
+.Fl s
+option had not been given) one small
+.Tn TTX
+file containing references to the individual table dump files.  This
+file can be used as input to
+.Nm
+as long as the referenced files can be found in the same directory.
+.It Fl i
+.\" XXX: I suppose OpenType programs (exist and) are also affected.
+Don't disassemble TrueType instructions.  When this option is specified,
+all TrueType programs (glyph programs, the font program and the
+pre-program) are written to the
+.Tn TTX
+file as hexadecimal data instead of
+assembly.  This saves some time and results in smaller
+.Tn TTX
+files.
+.It Fl y Ar n
+When decompiling a TrueType Collection (TTC) file,
+decompile font number
+.Ar n ,
+starting from 0.
+.El
+.Ss "Compilation options"
+The following options control the process of compiling
+.Tn TTX
+files into font files (TrueType or OpenType):
+.Bl -tag -width ".Fl t Ar table"
+.It Fl m Ar fontfile
+Merge the input
+.Tn TTX
+file
+.Ar file
+with
+.Ar fontfile .
+No more than one
+.Ar file
+argument can be specified when this option is used.
+.It Fl b
+Don't recalculate glyph bounding boxes.  Use the values in the
+.Tn TTX
+file as is.
+.El
+.Sh "THE TTX FILE FORMAT"
+You can find some information about the
+.Tn TTX
+file format in
+.Pa documentation.html .
+In particular, you will find in that file the list of tables understood by
+.Nm
+and the relations between TrueType GlyphIDs and the glyph names used in
+.Tn TTX
+files.
+.Sh EXAMPLES
+In the following examples, all files are read from and written to the
+current directory.  Additionally, the name given for the output file
+assumes in every case that it did not exist before
+.Nm
+was invoked.
+.Pp
+Dump the TrueType font contained in
+.Pa FreeSans.ttf
+to
+.Pa FreeSans.ttx :
+.Pp
+.Dl ttx FreeSans.ttf
+.Pp
+Compile
+.Pa MyFont.ttx
+into a TrueType or OpenType font file:
+.Pp
+.Dl ttx MyFont.ttx
+.Pp
+List the tables in
+.Pa FreeSans.ttf
+along with some information:
+.Pp
+.Dl ttx -l FreeSans.ttf
+.Pp
+Dump the
+.Sq cmap
+table from
+.Pa FreeSans.ttf
+to
+.Pa FreeSans.ttx :
+.Pp
+.Dl ttx -t cmap FreeSans.ttf
+.Sh NOTES
+On MS\-Windows and MacOS,
+.Nm
+is available as a graphical application to which files can be dropped.
+.Sh SEE ALSO
+.Pa documentation.html
+.Pp
+.Xr fontforge 1 ,
+.Xr ftinfo 1 ,
+.Xr gfontview 1 ,
+.Xr xmbdfed 1 ,
+.Xr Font::TTF 3pm
+.Sh AUTHORS
+.Nm
+was written by
+.An -nosplit
+.An "Just van Rossum" Aq just@letterror.com .
+.Pp
+This manual page was written by
+.An "Florent Rougon" Aq f.rougon@free.fr
+for the Debian GNU/Linux system based on the existing FontTools
+documentation.  It may be freely used, modified and distributed without
+restrictions.
+.\" For Emacs:
+.\" Local Variables:
+.\" fill-column: 72
+.\" sentence-end: "[.?!][]\"')}]*\\($\\| $\\|	\\|  \\)[ 	\n]*"
+.\" sentence-end-double-space: t
+.\" End:
diff --git a/LICENSE.txt b/LICENSE.txt
new file mode 100644
index 0000000..2c90134
--- /dev/null
+++ b/LICENSE.txt
@@ -0,0 +1,25 @@
+Copyright 1999-2004
+by Just van Rossum, Letterror, The Netherlands.
+
+                        All Rights Reserved
+
+Permission to use, copy, modify, and distribute this software and 
+its documentation for any purpose and without fee is hereby granted,
+provided that the above copyright notice appear in all copies and 
+that both that copyright notice and this permission notice appear 
+in supporting documentation, and that the names of Just van Rossum 
+or Letterror not be used in advertising or publicity pertaining to
+distribution of the software without specific, written prior
+permission.
+
+JUST VAN ROSSUM AND LETTERROR DISCLAIM ALL WARRANTIES WITH
+REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL JUST VAN ROSSUM OR 
+LETTERROR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
+DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+
+just@letterror.com
diff --git a/Lib/fontTools/__init__.py b/Lib/fontTools/__init__.py
new file mode 100644
index 0000000..ea07e8d
--- /dev/null
+++ b/Lib/fontTools/__init__.py
@@ -0,0 +1 @@
+version = "2.4"
diff --git a/Lib/fontTools/afmLib.py b/Lib/fontTools/afmLib.py
new file mode 100644
index 0000000..e679770
--- /dev/null
+++ b/Lib/fontTools/afmLib.py
@@ -0,0 +1,377 @@
+"""Module for reading and writing AFM files."""
+
+# XXX reads AFM's generated by Fog, not tested with much else.
+# It does not implement the full spec (Adobe Technote 5004, Adobe Font Metrics
+# File Format Specification). Still, it should read most "common" AFM files.
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import re
+
+# every single line starts with a "word"
+identifierRE = re.compile("^([A-Za-z]+).*")
+
+# regular expression to parse char lines
+charRE = re.compile(
+		"(-?\d+)"			# charnum
+		"\s*;\s*WX\s+"		# ; WX 
+		"(-?\d+)"			# width
+		"\s*;\s*N\s+"		# ; N 
+		"([.A-Za-z0-9_]+)"	# charname
+		"\s*;\s*B\s+"		# ; B 
+		"(-?\d+)"			# left
+		"\s+"				# 
+		"(-?\d+)"			# bottom
+		"\s+"				# 
+		"(-?\d+)"			# right
+		"\s+"				# 
+		"(-?\d+)"			# top
+		"\s*;\s*"			# ; 
+		)
+
+# regular expression to parse kerning lines
+kernRE = re.compile(
+		"([.A-Za-z0-9_]+)"	# leftchar
+		"\s+"				# 
+		"([.A-Za-z0-9_]+)"	# rightchar
+		"\s+"				# 
+		"(-?\d+)"			# value
+		"\s*"				# 
+		)
+
+# regular expressions to parse composite info lines of the form:
+# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
+compositeRE = re.compile(
+		"([.A-Za-z0-9_]+)"	# char name
+		"\s+"				# 
+		"(\d+)"				# number of parts
+		"\s*;\s*"			# 
+		)
+componentRE = re.compile(
+		"PCC\s+"			# PPC
+		"([.A-Za-z0-9_]+)"	# base char name
+		"\s+"				# 
+		"(-?\d+)"			# x offset
+		"\s+"				# 
+		"(-?\d+)"			# y offset
+		"\s*;\s*"			# 
+		)
+
+preferredAttributeOrder = [
+		"FontName",
+		"FullName",
+		"FamilyName",
+		"Weight",
+		"ItalicAngle",
+		"IsFixedPitch",
+		"FontBBox",
+		"UnderlinePosition",
+		"UnderlineThickness",
+		"Version",
+		"Notice",
+		"EncodingScheme",
+		"CapHeight",
+		"XHeight",
+		"Ascender",
+		"Descender",
+]
+
+
+class error(Exception): pass
+
+
+class AFM(object):
+	
+	_attrs = None
+	
+	_keywords = ['StartFontMetrics',
+			'EndFontMetrics',
+			'StartCharMetrics',
+			'EndCharMetrics',
+			'StartKernData',
+			'StartKernPairs',
+			'EndKernPairs',
+			'EndKernData',
+			'StartComposites',
+			'EndComposites',
+			]
+	
+	def __init__(self, path=None):
+		self._attrs = {}
+		self._chars = {}
+		self._kerning = {}
+		self._index = {}
+		self._comments = []
+		self._composites = {}
+		if path is not None:
+			self.read(path)
+	
+	def read(self, path):
+		lines = readlines(path)
+		for line in lines:
+			if not line.strip():
+				continue
+			m = identifierRE.match(line)
+			if m is None:
+				raise error("syntax error in AFM file: " + repr(line))
+			
+			pos = m.regs[1][1]
+			word = line[:pos]
+			rest = line[pos:].strip()
+			if word in self._keywords:
+				continue
+			if word == "C":
+				self.parsechar(rest)
+			elif word == "KPX":
+				self.parsekernpair(rest)
+			elif word == "CC":
+				self.parsecomposite(rest)
+			else:
+				self.parseattr(word, rest)
+	
+	def parsechar(self, rest):
+		m = charRE.match(rest)
+		if m is None:
+			raise error("syntax error in AFM file: " + repr(rest))
+		things = []
+		for fr, to in m.regs[1:]:
+			things.append(rest[fr:to])
+		charname = things[2]
+		del things[2]
+		charnum, width, l, b, r, t = (int(thing) for thing in things)
+		self._chars[charname] = charnum, width, (l, b, r, t)
+	
+	def parsekernpair(self, rest):
+		m = kernRE.match(rest)
+		if m is None:
+			raise error("syntax error in AFM file: " + repr(rest))
+		things = []
+		for fr, to in m.regs[1:]:
+			things.append(rest[fr:to])
+		leftchar, rightchar, value = things
+		value = int(value)
+		self._kerning[(leftchar, rightchar)] = value
+	
+	def parseattr(self, word, rest):
+		if word == "FontBBox":
+			l, b, r, t = [int(thing) for thing in rest.split()]
+			self._attrs[word] = l, b, r, t
+		elif word == "Comment":
+			self._comments.append(rest)
+		else:
+			try:
+				value = int(rest)
+			except (ValueError, OverflowError):
+				self._attrs[word] = rest
+			else:
+				self._attrs[word] = value
+	
+	def parsecomposite(self, rest):
+		m = compositeRE.match(rest)
+		if m is None:
+			raise error("syntax error in AFM file: " + repr(rest))
+		charname = m.group(1)
+		ncomponents = int(m.group(2))
+		rest = rest[m.regs[0][1]:]
+		components = []
+		while True:
+			m = componentRE.match(rest)
+			if m is None:
+				raise error("syntax error in AFM file: " + repr(rest))
+			basechar = m.group(1)
+			xoffset = int(m.group(2))
+			yoffset = int(m.group(3))
+			components.append((basechar, xoffset, yoffset))
+			rest = rest[m.regs[0][1]:]
+			if not rest:
+				break
+		assert len(components) == ncomponents
+		self._composites[charname] = components
+	
+	def write(self, path, sep='\r'):
+		import time
+		lines = [	"StartFontMetrics 2.0",
+				"Comment Generated by afmLib; at %s" % (
+						time.strftime("%m/%d/%Y %H:%M:%S", 
+						time.localtime(time.time())))]
+		
+		# write comments, assuming (possibly wrongly!) they should
+		# all appear at the top
+		for comment in self._comments:
+			lines.append("Comment " + comment)
+		
+		# write attributes, first the ones we know about, in
+		# a preferred order
+		attrs = self._attrs
+		for attr in preferredAttributeOrder:
+			if attr in attrs:
+				value = attrs[attr]
+				if attr == "FontBBox":
+					value = "%s %s %s %s" % value
+				lines.append(attr + " " + str(value))
+		# then write the attributes we don't know about,
+		# in alphabetical order
+		items = sorted(attrs.items())
+		for attr, value in items:
+			if attr in preferredAttributeOrder:
+				continue
+			lines.append(attr + " " + str(value))
+		
+		# write char metrics
+		lines.append("StartCharMetrics " + repr(len(self._chars)))
+		items = [(charnum, (charname, width, box)) for charname, (charnum, width, box) in self._chars.items()]
+		
+		def myKey(a):
+			"""Custom key function to make sure unencoded chars (-1) 
+			end up at the end of the list after sorting."""
+			if a[0] == -1:
+				a = (0xffff,) + a[1:]  # 0xffff is an arbitrary large number
+			return a
+		items.sort(key=myKey)
+		
+		for charnum, (charname, width, (l, b, r, t)) in items:
+			lines.append("C %d ; WX %d ; N %s ; B %d %d %d %d ;" %
+					(charnum, width, charname, l, b, r, t))
+		lines.append("EndCharMetrics")
+		
+		# write kerning info
+		lines.append("StartKernData")
+		lines.append("StartKernPairs " + repr(len(self._kerning)))
+		items = sorted(self._kerning.items())
+		for (leftchar, rightchar), value in items:
+			lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
+		lines.append("EndKernPairs")
+		lines.append("EndKernData")
+		
+		if self._composites:
+			composites = sorted(self._composites.items())
+			lines.append("StartComposites %s" % len(self._composites))
+			for charname, components in composites:
+				line = "CC %s %s ;" % (charname, len(components))
+				for basechar, xoffset, yoffset in components:
+					line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
+				lines.append(line)
+			lines.append("EndComposites")
+		
+		lines.append("EndFontMetrics")
+		
+		writelines(path, lines, sep)
+	
+	def has_kernpair(self, pair):
+		return pair in self._kerning
+	
+	def kernpairs(self):
+		return list(self._kerning.keys())
+	
+	def has_char(self, char):
+		return char in self._chars
+	
+	def chars(self):
+		return list(self._chars.keys())
+	
+	def comments(self):
+		return self._comments
+	
+	def addComment(self, comment):
+		self._comments.append(comment)
+	
+	def addComposite(self, glyphName, components):
+		self._composites[glyphName] = components
+	
+	def __getattr__(self, attr):
+		if attr in self._attrs:
+			return self._attrs[attr]
+		else:
+			raise AttributeError(attr)
+	
+	def __setattr__(self, attr, value):
+		# all attrs *not* starting with "_" are consider to be AFM keywords
+		if attr[:1] == "_":
+			self.__dict__[attr] = value
+		else:
+			self._attrs[attr] = value
+	
+	def __delattr__(self, attr):
+		# all attrs *not* starting with "_" are consider to be AFM keywords
+		if attr[:1] == "_":
+			try:
+				del self.__dict__[attr]
+			except KeyError:
+				raise AttributeError(attr)
+		else:
+			try:
+				del self._attrs[attr]
+			except KeyError:
+				raise AttributeError(attr)
+	
+	def __getitem__(self, key):
+		if isinstance(key, tuple):
+			# key is a tuple, return the kernpair
+			return self._kerning[key]
+		else:
+			# return the metrics instead
+			return self._chars[key]
+	
+	def __setitem__(self, key, value):
+		if isinstance(key, tuple):
+			# key is a tuple, set kernpair
+			self._kerning[key] = value
+		else:
+			# set char metrics
+			self._chars[key] = value
+	
+	def __delitem__(self, key):
+		if isinstance(key, tuple):
+			# key is a tuple, del kernpair
+			del self._kerning[key]
+		else:
+			# del char metrics
+			del self._chars[key]
+	
+	def __repr__(self):
+		if hasattr(self, "FullName"):
+			return '<AFM object for %s>' % self.FullName
+		else:
+			return '<AFM object at %x>' % id(self)
+
+
+def readlines(path):
+	f = open(path, 'rb')
+	data = f.read()
+	f.close()
+	# read any text file, regardless whether it's formatted for Mac, Unix or Dos
+	sep = ""
+	if '\r' in data:
+		sep = sep + '\r'	# mac or dos
+	if '\n' in data:
+		sep = sep + '\n'	# unix or dos
+	return data.split(sep)
+
+def writelines(path, lines, sep='\r'):
+	f = open(path, 'wb')
+	for line in lines:
+		f.write(line + sep)
+	f.close()
+	
+	
+
+if __name__ == "__main__":
+	import EasyDialogs
+	path = EasyDialogs.AskFileForOpen()
+	if path:
+		afm = AFM(path)
+		char = 'A'
+		if afm.has_char(char):
+			print(afm[char])	# print charnum, width and boundingbox
+		pair = ('A', 'V')
+		if afm.has_kernpair(pair):
+			print(afm[pair])	# print kerning value for pair
+		print(afm.Version)	# various other afm entries have become attributes
+		print(afm.Weight)
+		# afm.comments() returns a list of all Comment lines found in the AFM
+		print(afm.comments())
+		#print afm.chars()
+		#print afm.kernpairs()
+		print(afm)
+		afm.write(path + ".muck")
+
diff --git a/Lib/fontTools/agl.py b/Lib/fontTools/agl.py
new file mode 100644
index 0000000..5f20f51
--- /dev/null
+++ b/Lib/fontTools/agl.py
@@ -0,0 +1,737 @@
+# The table below is taken from
+# http://www.adobe.com/devnet/opentype/archives/aglfn.txt
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+_aglText = """\
+# -----------------------------------------------------------
+# Copyright 2003, 2005-2008, 2010 Adobe Systems Incorporated.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or
+# without modification, are permitted provided that the
+# following conditions are met:
+#
+# Redistributions of source code must retain the above
+# copyright notice, this list of conditions and the following
+# disclaimer.
+#
+# Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials
+# provided with the distribution.
+#
+# Neither the name of Adobe Systems Incorporated nor the names
+# of its contributors may be used to endorse or promote
+# products derived from this software without specific prior
+# written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
+# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
+# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# -----------------------------------------------------------
+# Name:          Adobe Glyph List For New Fonts
+# Table version: 1.7
+# Date:          November 6, 2008
+# URL:           http://sourceforge.net/adobe/aglfn/
+#
+# Description:
+#
+# AGLFN (Adobe Glyph List For New Fonts) provides a list of base glyph
+# names that are recommended for new fonts, which are compatible with
+# the AGL (Adobe Glyph List) Specification, and which should be used
+# as described in Section 6 of that document. AGLFN comprises the set
+# of glyph names from AGL that map via the AGL Specification rules to
+# the semantically correct UV (Unicode Value). For example, "Asmall"
+# is omitted because AGL maps this glyph name to the PUA (Private Use
+# Area) value U+F761, rather than to the UV that maps from the glyph
+# name "A." Also omitted is "ffi," because AGL maps this to the
+# Alphabetic Presentation Forms value U+FB03, rather than decomposing
+# it into the following sequence of three UVs: U+0066, U+0066, and
+# U+0069. The name "arrowvertex" has been omitted because this glyph
+# now has a real UV, and AGL is now incorrect in mapping it to the PUA
+# value U+F8E6. If you do not find an appropriate name for your glyph
+# in this list, then please refer to Section 6 of the AGL
+# Specification.
+#
+# Format: three semicolon-delimited fields:
+#   (1) Standard UV or CUS UV--four uppercase hexadecimal digits
+#   (2) Glyph name--upper/lowercase letters and digits
+#   (3) Character names: Unicode character names for standard UVs, and
+#       descriptive names for CUS UVs--uppercase letters, hyphen, and
+#       space
+#
+# The records are sorted by glyph name in increasing ASCII order,
+# entries with the same glyph name are sorted in decreasing priority
+# order, the UVs and Unicode character names are provided for
+# convenience, lines starting with "#" are comments, and blank lines
+# should be ignored.
+#
+# Revision History:
+#
+# 1.7 [6 November 2008]
+# - Reverted to the original 1.4 and earlier mappings for Delta,
+#   Omega, and mu.
+# - Removed mappings for "afii" names. These should now be assigned
+#   "uni" names.
+# - Removed mappings for "commaaccent" names. These should now be
+#   assigned "uni" names.
+#
+# 1.6 [30 January 2006]
+# - Completed work intended in 1.5.
+#
+# 1.5 [23 November 2005]
+# - Removed duplicated block at end of file.
+# - Changed mappings:
+#   2206;Delta;INCREMENT changed to 0394;Delta;GREEK CAPITAL LETTER DELTA
+#   2126;Omega;OHM SIGN changed to 03A9;Omega;GREEK CAPITAL LETTER OMEGA
+#   03BC;mu;MICRO SIGN changed to 03BC;mu;GREEK SMALL LETTER MU
+# - Corrected statement above about why "ffi" is omitted.
+#
+# 1.4 [24 September 2003]
+# - Changed version to 1.4, to avoid confusion with the AGL 1.3.
+# - Fixed spelling errors in the header.
+# - Fully removed "arrowvertex," as it is mapped only to a PUA Unicode
+#   value in some fonts.
+#
+# 1.1 [17 April 2003]
+# - Renamed [Tt]cedilla back to [Tt]commaaccent.
+#
+# 1.0 [31 January 2003]
+# - Original version.
+# - Derived from the AGLv1.2 by:
+#   removing the PUA area codes;
+#   removing duplicate Unicode mappings; and
+#   renaming "tcommaaccent" to "tcedilla" and "Tcommaaccent" to "Tcedilla"
+#
+0041;A;LATIN CAPITAL LETTER A
+00C6;AE;LATIN CAPITAL LETTER AE
+01FC;AEacute;LATIN CAPITAL LETTER AE WITH ACUTE
+00C1;Aacute;LATIN CAPITAL LETTER A WITH ACUTE
+0102;Abreve;LATIN CAPITAL LETTER A WITH BREVE
+00C2;Acircumflex;LATIN CAPITAL LETTER A WITH CIRCUMFLEX
+00C4;Adieresis;LATIN CAPITAL LETTER A WITH DIAERESIS
+00C0;Agrave;LATIN CAPITAL LETTER A WITH GRAVE
+0391;Alpha;GREEK CAPITAL LETTER ALPHA
+0386;Alphatonos;GREEK CAPITAL LETTER ALPHA WITH TONOS
+0100;Amacron;LATIN CAPITAL LETTER A WITH MACRON
+0104;Aogonek;LATIN CAPITAL LETTER A WITH OGONEK
+00C5;Aring;LATIN CAPITAL LETTER A WITH RING ABOVE
+01FA;Aringacute;LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
+00C3;Atilde;LATIN CAPITAL LETTER A WITH TILDE
+0042;B;LATIN CAPITAL LETTER B
+0392;Beta;GREEK CAPITAL LETTER BETA
+0043;C;LATIN CAPITAL LETTER C
+0106;Cacute;LATIN CAPITAL LETTER C WITH ACUTE
+010C;Ccaron;LATIN CAPITAL LETTER C WITH CARON
+00C7;Ccedilla;LATIN CAPITAL LETTER C WITH CEDILLA
+0108;Ccircumflex;LATIN CAPITAL LETTER C WITH CIRCUMFLEX
+010A;Cdotaccent;LATIN CAPITAL LETTER C WITH DOT ABOVE
+03A7;Chi;GREEK CAPITAL LETTER CHI
+0044;D;LATIN CAPITAL LETTER D
+010E;Dcaron;LATIN CAPITAL LETTER D WITH CARON
+0110;Dcroat;LATIN CAPITAL LETTER D WITH STROKE
+2206;Delta;INCREMENT
+0045;E;LATIN CAPITAL LETTER E
+00C9;Eacute;LATIN CAPITAL LETTER E WITH ACUTE
+0114;Ebreve;LATIN CAPITAL LETTER E WITH BREVE
+011A;Ecaron;LATIN CAPITAL LETTER E WITH CARON
+00CA;Ecircumflex;LATIN CAPITAL LETTER E WITH CIRCUMFLEX
+00CB;Edieresis;LATIN CAPITAL LETTER E WITH DIAERESIS
+0116;Edotaccent;LATIN CAPITAL LETTER E WITH DOT ABOVE
+00C8;Egrave;LATIN CAPITAL LETTER E WITH GRAVE
+0112;Emacron;LATIN CAPITAL LETTER E WITH MACRON
+014A;Eng;LATIN CAPITAL LETTER ENG
+0118;Eogonek;LATIN CAPITAL LETTER E WITH OGONEK
+0395;Epsilon;GREEK CAPITAL LETTER EPSILON
+0388;Epsilontonos;GREEK CAPITAL LETTER EPSILON WITH TONOS
+0397;Eta;GREEK CAPITAL LETTER ETA
+0389;Etatonos;GREEK CAPITAL LETTER ETA WITH TONOS
+00D0;Eth;LATIN CAPITAL LETTER ETH
+20AC;Euro;EURO SIGN
+0046;F;LATIN CAPITAL LETTER F
+0047;G;LATIN CAPITAL LETTER G
+0393;Gamma;GREEK CAPITAL LETTER GAMMA
+011E;Gbreve;LATIN CAPITAL LETTER G WITH BREVE
+01E6;Gcaron;LATIN CAPITAL LETTER G WITH CARON
+011C;Gcircumflex;LATIN CAPITAL LETTER G WITH CIRCUMFLEX
+0120;Gdotaccent;LATIN CAPITAL LETTER G WITH DOT ABOVE
+0048;H;LATIN CAPITAL LETTER H
+25CF;H18533;BLACK CIRCLE
+25AA;H18543;BLACK SMALL SQUARE
+25AB;H18551;WHITE SMALL SQUARE
+25A1;H22073;WHITE SQUARE
+0126;Hbar;LATIN CAPITAL LETTER H WITH STROKE
+0124;Hcircumflex;LATIN CAPITAL LETTER H WITH CIRCUMFLEX
+0049;I;LATIN CAPITAL LETTER I
+0132;IJ;LATIN CAPITAL LIGATURE IJ
+00CD;Iacute;LATIN CAPITAL LETTER I WITH ACUTE
+012C;Ibreve;LATIN CAPITAL LETTER I WITH BREVE
+00CE;Icircumflex;LATIN CAPITAL LETTER I WITH CIRCUMFLEX
+00CF;Idieresis;LATIN CAPITAL LETTER I WITH DIAERESIS
+0130;Idotaccent;LATIN CAPITAL LETTER I WITH DOT ABOVE
+2111;Ifraktur;BLACK-LETTER CAPITAL I
+00CC;Igrave;LATIN CAPITAL LETTER I WITH GRAVE
+012A;Imacron;LATIN CAPITAL LETTER I WITH MACRON
+012E;Iogonek;LATIN CAPITAL LETTER I WITH OGONEK
+0399;Iota;GREEK CAPITAL LETTER IOTA
+03AA;Iotadieresis;GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
+038A;Iotatonos;GREEK CAPITAL LETTER IOTA WITH TONOS
+0128;Itilde;LATIN CAPITAL LETTER I WITH TILDE
+004A;J;LATIN CAPITAL LETTER J
+0134;Jcircumflex;LATIN CAPITAL LETTER J WITH CIRCUMFLEX
+004B;K;LATIN CAPITAL LETTER K
+039A;Kappa;GREEK CAPITAL LETTER KAPPA
+004C;L;LATIN CAPITAL LETTER L
+0139;Lacute;LATIN CAPITAL LETTER L WITH ACUTE
+039B;Lambda;GREEK CAPITAL LETTER LAMDA
+013D;Lcaron;LATIN CAPITAL LETTER L WITH CARON
+013F;Ldot;LATIN CAPITAL LETTER L WITH MIDDLE DOT
+0141;Lslash;LATIN CAPITAL LETTER L WITH STROKE
+004D;M;LATIN CAPITAL LETTER M
+039C;Mu;GREEK CAPITAL LETTER MU
+004E;N;LATIN CAPITAL LETTER N
+0143;Nacute;LATIN CAPITAL LETTER N WITH ACUTE
+0147;Ncaron;LATIN CAPITAL LETTER N WITH CARON
+00D1;Ntilde;LATIN CAPITAL LETTER N WITH TILDE
+039D;Nu;GREEK CAPITAL LETTER NU
+004F;O;LATIN CAPITAL LETTER O
+0152;OE;LATIN CAPITAL LIGATURE OE
+00D3;Oacute;LATIN CAPITAL LETTER O WITH ACUTE
+014E;Obreve;LATIN CAPITAL LETTER O WITH BREVE
+00D4;Ocircumflex;LATIN CAPITAL LETTER O WITH CIRCUMFLEX
+00D6;Odieresis;LATIN CAPITAL LETTER O WITH DIAERESIS
+00D2;Ograve;LATIN CAPITAL LETTER O WITH GRAVE
+01A0;Ohorn;LATIN CAPITAL LETTER O WITH HORN
+0150;Ohungarumlaut;LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
+014C;Omacron;LATIN CAPITAL LETTER O WITH MACRON
+2126;Omega;OHM SIGN
+038F;Omegatonos;GREEK CAPITAL LETTER OMEGA WITH TONOS
+039F;Omicron;GREEK CAPITAL LETTER OMICRON
+038C;Omicrontonos;GREEK CAPITAL LETTER OMICRON WITH TONOS
+00D8;Oslash;LATIN CAPITAL LETTER O WITH STROKE
+01FE;Oslashacute;LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
+00D5;Otilde;LATIN CAPITAL LETTER O WITH TILDE
+0050;P;LATIN CAPITAL LETTER P
+03A6;Phi;GREEK CAPITAL LETTER PHI
+03A0;Pi;GREEK CAPITAL LETTER PI
+03A8;Psi;GREEK CAPITAL LETTER PSI
+0051;Q;LATIN CAPITAL LETTER Q
+0052;R;LATIN CAPITAL LETTER R
+0154;Racute;LATIN CAPITAL LETTER R WITH ACUTE
+0158;Rcaron;LATIN CAPITAL LETTER R WITH CARON
+211C;Rfraktur;BLACK-LETTER CAPITAL R
+03A1;Rho;GREEK CAPITAL LETTER RHO
+0053;S;LATIN CAPITAL LETTER S
+250C;SF010000;BOX DRAWINGS LIGHT DOWN AND RIGHT
+2514;SF020000;BOX DRAWINGS LIGHT UP AND RIGHT
+2510;SF030000;BOX DRAWINGS LIGHT DOWN AND LEFT
+2518;SF040000;BOX DRAWINGS LIGHT UP AND LEFT
+253C;SF050000;BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
+252C;SF060000;BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
+2534;SF070000;BOX DRAWINGS LIGHT UP AND HORIZONTAL
+251C;SF080000;BOX DRAWINGS LIGHT VERTICAL AND RIGHT
+2524;SF090000;BOX DRAWINGS LIGHT VERTICAL AND LEFT
+2500;SF100000;BOX DRAWINGS LIGHT HORIZONTAL
+2502;SF110000;BOX DRAWINGS LIGHT VERTICAL
+2561;SF190000;BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
+2562;SF200000;BOX DRAWINGS VERTICAL DOUBLE AND LEFT SINGLE
+2556;SF210000;BOX DRAWINGS DOWN DOUBLE AND LEFT SINGLE
+2555;SF220000;BOX DRAWINGS DOWN SINGLE AND LEFT DOUBLE
+2563;SF230000;BOX DRAWINGS DOUBLE VERTICAL AND LEFT
+2551;SF240000;BOX DRAWINGS DOUBLE VERTICAL
+2557;SF250000;BOX DRAWINGS DOUBLE DOWN AND LEFT
+255D;SF260000;BOX DRAWINGS DOUBLE UP AND LEFT
+255C;SF270000;BOX DRAWINGS UP DOUBLE AND LEFT SINGLE
+255B;SF280000;BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
+255E;SF360000;BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
+255F;SF370000;BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
+255A;SF380000;BOX DRAWINGS DOUBLE UP AND RIGHT
+2554;SF390000;BOX DRAWINGS DOUBLE DOWN AND RIGHT
+2569;SF400000;BOX DRAWINGS DOUBLE UP AND HORIZONTAL
+2566;SF410000;BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
+2560;SF420000;BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
+2550;SF430000;BOX DRAWINGS DOUBLE HORIZONTAL
+256C;SF440000;BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
+2567;SF450000;BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
+2568;SF460000;BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
+2564;SF470000;BOX DRAWINGS DOWN SINGLE AND HORIZONTAL DOUBLE
+2565;SF480000;BOX DRAWINGS DOWN DOUBLE AND HORIZONTAL SINGLE
+2559;SF490000;BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
+2558;SF500000;BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
+2552;SF510000;BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
+2553;SF520000;BOX DRAWINGS DOWN DOUBLE AND RIGHT SINGLE
+256B;SF530000;BOX DRAWINGS VERTICAL DOUBLE AND HORIZONTAL SINGLE
+256A;SF540000;BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
+015A;Sacute;LATIN CAPITAL LETTER S WITH ACUTE
+0160;Scaron;LATIN CAPITAL LETTER S WITH CARON
+015E;Scedilla;LATIN CAPITAL LETTER S WITH CEDILLA
+015C;Scircumflex;LATIN CAPITAL LETTER S WITH CIRCUMFLEX
+03A3;Sigma;GREEK CAPITAL LETTER SIGMA
+0054;T;LATIN CAPITAL LETTER T
+03A4;Tau;GREEK CAPITAL LETTER TAU
+0166;Tbar;LATIN CAPITAL LETTER T WITH STROKE
+0164;Tcaron;LATIN CAPITAL LETTER T WITH CARON
+0398;Theta;GREEK CAPITAL LETTER THETA
+00DE;Thorn;LATIN CAPITAL LETTER THORN
+0055;U;LATIN CAPITAL LETTER U
+00DA;Uacute;LATIN CAPITAL LETTER U WITH ACUTE
+016C;Ubreve;LATIN CAPITAL LETTER U WITH BREVE
+00DB;Ucircumflex;LATIN CAPITAL LETTER U WITH CIRCUMFLEX
+00DC;Udieresis;LATIN CAPITAL LETTER U WITH DIAERESIS
+00D9;Ugrave;LATIN CAPITAL LETTER U WITH GRAVE
+01AF;Uhorn;LATIN CAPITAL LETTER U WITH HORN
+0170;Uhungarumlaut;LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
+016A;Umacron;LATIN CAPITAL LETTER U WITH MACRON
+0172;Uogonek;LATIN CAPITAL LETTER U WITH OGONEK
+03A5;Upsilon;GREEK CAPITAL LETTER UPSILON
+03D2;Upsilon1;GREEK UPSILON WITH HOOK SYMBOL
+03AB;Upsilondieresis;GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
+038E;Upsilontonos;GREEK CAPITAL LETTER UPSILON WITH TONOS
+016E;Uring;LATIN CAPITAL LETTER U WITH RING ABOVE
+0168;Utilde;LATIN CAPITAL LETTER U WITH TILDE
+0056;V;LATIN CAPITAL LETTER V
+0057;W;LATIN CAPITAL LETTER W
+1E82;Wacute;LATIN CAPITAL LETTER W WITH ACUTE
+0174;Wcircumflex;LATIN CAPITAL LETTER W WITH CIRCUMFLEX
+1E84;Wdieresis;LATIN CAPITAL LETTER W WITH DIAERESIS
+1E80;Wgrave;LATIN CAPITAL LETTER W WITH GRAVE
+0058;X;LATIN CAPITAL LETTER X
+039E;Xi;GREEK CAPITAL LETTER XI
+0059;Y;LATIN CAPITAL LETTER Y
+00DD;Yacute;LATIN CAPITAL LETTER Y WITH ACUTE
+0176;Ycircumflex;LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
+0178;Ydieresis;LATIN CAPITAL LETTER Y WITH DIAERESIS
+1EF2;Ygrave;LATIN CAPITAL LETTER Y WITH GRAVE
+005A;Z;LATIN CAPITAL LETTER Z
+0179;Zacute;LATIN CAPITAL LETTER Z WITH ACUTE
+017D;Zcaron;LATIN CAPITAL LETTER Z WITH CARON
+017B;Zdotaccent;LATIN CAPITAL LETTER Z WITH DOT ABOVE
+0396;Zeta;GREEK CAPITAL LETTER ZETA
+0061;a;LATIN SMALL LETTER A
+00E1;aacute;LATIN SMALL LETTER A WITH ACUTE
+0103;abreve;LATIN SMALL LETTER A WITH BREVE
+00E2;acircumflex;LATIN SMALL LETTER A WITH CIRCUMFLEX
+00B4;acute;ACUTE ACCENT
+0301;acutecomb;COMBINING ACUTE ACCENT
+00E4;adieresis;LATIN SMALL LETTER A WITH DIAERESIS
+00E6;ae;LATIN SMALL LETTER AE
+01FD;aeacute;LATIN SMALL LETTER AE WITH ACUTE
+00E0;agrave;LATIN SMALL LETTER A WITH GRAVE
+2135;aleph;ALEF SYMBOL
+03B1;alpha;GREEK SMALL LETTER ALPHA
+03AC;alphatonos;GREEK SMALL LETTER ALPHA WITH TONOS
+0101;amacron;LATIN SMALL LETTER A WITH MACRON
+0026;ampersand;AMPERSAND
+2220;angle;ANGLE
+2329;angleleft;LEFT-POINTING ANGLE BRACKET
+232A;angleright;RIGHT-POINTING ANGLE BRACKET
+0387;anoteleia;GREEK ANO TELEIA
+0105;aogonek;LATIN SMALL LETTER A WITH OGONEK
+2248;approxequal;ALMOST EQUAL TO
+00E5;aring;LATIN SMALL LETTER A WITH RING ABOVE
+01FB;aringacute;LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
+2194;arrowboth;LEFT RIGHT ARROW
+21D4;arrowdblboth;LEFT RIGHT DOUBLE ARROW
+21D3;arrowdbldown;DOWNWARDS DOUBLE ARROW
+21D0;arrowdblleft;LEFTWARDS DOUBLE ARROW
+21D2;arrowdblright;RIGHTWARDS DOUBLE ARROW
+21D1;arrowdblup;UPWARDS DOUBLE ARROW
+2193;arrowdown;DOWNWARDS ARROW
+2190;arrowleft;LEFTWARDS ARROW
+2192;arrowright;RIGHTWARDS ARROW
+2191;arrowup;UPWARDS ARROW
+2195;arrowupdn;UP DOWN ARROW
+21A8;arrowupdnbse;UP DOWN ARROW WITH BASE
+005E;asciicircum;CIRCUMFLEX ACCENT
+007E;asciitilde;TILDE
+002A;asterisk;ASTERISK
+2217;asteriskmath;ASTERISK OPERATOR
+0040;at;COMMERCIAL AT
+00E3;atilde;LATIN SMALL LETTER A WITH TILDE
+0062;b;LATIN SMALL LETTER B
+005C;backslash;REVERSE SOLIDUS
+007C;bar;VERTICAL LINE
+03B2;beta;GREEK SMALL LETTER BETA
+2588;block;FULL BLOCK
+007B;braceleft;LEFT CURLY BRACKET
+007D;braceright;RIGHT CURLY BRACKET
+005B;bracketleft;LEFT SQUARE BRACKET
+005D;bracketright;RIGHT SQUARE BRACKET
+02D8;breve;BREVE
+00A6;brokenbar;BROKEN BAR
+2022;bullet;BULLET
+0063;c;LATIN SMALL LETTER C
+0107;cacute;LATIN SMALL LETTER C WITH ACUTE
+02C7;caron;CARON
+21B5;carriagereturn;DOWNWARDS ARROW WITH CORNER LEFTWARDS
+010D;ccaron;LATIN SMALL LETTER C WITH CARON
+00E7;ccedilla;LATIN SMALL LETTER C WITH CEDILLA
+0109;ccircumflex;LATIN SMALL LETTER C WITH CIRCUMFLEX
+010B;cdotaccent;LATIN SMALL LETTER C WITH DOT ABOVE
+00B8;cedilla;CEDILLA
+00A2;cent;CENT SIGN
+03C7;chi;GREEK SMALL LETTER CHI
+25CB;circle;WHITE CIRCLE
+2297;circlemultiply;CIRCLED TIMES
+2295;circleplus;CIRCLED PLUS
+02C6;circumflex;MODIFIER LETTER CIRCUMFLEX ACCENT
+2663;club;BLACK CLUB SUIT
+003A;colon;COLON
+20A1;colonmonetary;COLON SIGN
+002C;comma;COMMA
+2245;congruent;APPROXIMATELY EQUAL TO
+00A9;copyright;COPYRIGHT SIGN
+00A4;currency;CURRENCY SIGN
+0064;d;LATIN SMALL LETTER D
+2020;dagger;DAGGER
+2021;daggerdbl;DOUBLE DAGGER
+010F;dcaron;LATIN SMALL LETTER D WITH CARON
+0111;dcroat;LATIN SMALL LETTER D WITH STROKE
+00B0;degree;DEGREE SIGN
+03B4;delta;GREEK SMALL LETTER DELTA
+2666;diamond;BLACK DIAMOND SUIT
+00A8;dieresis;DIAERESIS
+0385;dieresistonos;GREEK DIALYTIKA TONOS
+00F7;divide;DIVISION SIGN
+2593;dkshade;DARK SHADE
+2584;dnblock;LOWER HALF BLOCK
+0024;dollar;DOLLAR SIGN
+20AB;dong;DONG SIGN
+02D9;dotaccent;DOT ABOVE
+0323;dotbelowcomb;COMBINING DOT BELOW
+0131;dotlessi;LATIN SMALL LETTER DOTLESS I
+22C5;dotmath;DOT OPERATOR
+0065;e;LATIN SMALL LETTER E
+00E9;eacute;LATIN SMALL LETTER E WITH ACUTE
+0115;ebreve;LATIN SMALL LETTER E WITH BREVE
+011B;ecaron;LATIN SMALL LETTER E WITH CARON
+00EA;ecircumflex;LATIN SMALL LETTER E WITH CIRCUMFLEX
+00EB;edieresis;LATIN SMALL LETTER E WITH DIAERESIS
+0117;edotaccent;LATIN SMALL LETTER E WITH DOT ABOVE
+00E8;egrave;LATIN SMALL LETTER E WITH GRAVE
+0038;eight;DIGIT EIGHT
+2208;element;ELEMENT OF
+2026;ellipsis;HORIZONTAL ELLIPSIS
+0113;emacron;LATIN SMALL LETTER E WITH MACRON
+2014;emdash;EM DASH
+2205;emptyset;EMPTY SET
+2013;endash;EN DASH
+014B;eng;LATIN SMALL LETTER ENG
+0119;eogonek;LATIN SMALL LETTER E WITH OGONEK
+03B5;epsilon;GREEK SMALL LETTER EPSILON
+03AD;epsilontonos;GREEK SMALL LETTER EPSILON WITH TONOS
+003D;equal;EQUALS SIGN
+2261;equivalence;IDENTICAL TO
+212E;estimated;ESTIMATED SYMBOL
+03B7;eta;GREEK SMALL LETTER ETA
+03AE;etatonos;GREEK SMALL LETTER ETA WITH TONOS
+00F0;eth;LATIN SMALL LETTER ETH
+0021;exclam;EXCLAMATION MARK
+203C;exclamdbl;DOUBLE EXCLAMATION MARK
+00A1;exclamdown;INVERTED EXCLAMATION MARK
+2203;existential;THERE EXISTS
+0066;f;LATIN SMALL LETTER F
+2640;female;FEMALE SIGN
+2012;figuredash;FIGURE DASH
+25A0;filledbox;BLACK SQUARE
+25AC;filledrect;BLACK RECTANGLE
+0035;five;DIGIT FIVE
+215D;fiveeighths;VULGAR FRACTION FIVE EIGHTHS
+0192;florin;LATIN SMALL LETTER F WITH HOOK
+0034;four;DIGIT FOUR
+2044;fraction;FRACTION SLASH
+20A3;franc;FRENCH FRANC SIGN
+0067;g;LATIN SMALL LETTER G
+03B3;gamma;GREEK SMALL LETTER GAMMA
+011F;gbreve;LATIN SMALL LETTER G WITH BREVE
+01E7;gcaron;LATIN SMALL LETTER G WITH CARON
+011D;gcircumflex;LATIN SMALL LETTER G WITH CIRCUMFLEX
+0121;gdotaccent;LATIN SMALL LETTER G WITH DOT ABOVE
+00DF;germandbls;LATIN SMALL LETTER SHARP S
+2207;gradient;NABLA
+0060;grave;GRAVE ACCENT
+0300;gravecomb;COMBINING GRAVE ACCENT
+003E;greater;GREATER-THAN SIGN
+2265;greaterequal;GREATER-THAN OR EQUAL TO
+00AB;guillemotleft;LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
+00BB;guillemotright;RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
+2039;guilsinglleft;SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+203A;guilsinglright;SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+0068;h;LATIN SMALL LETTER H
+0127;hbar;LATIN SMALL LETTER H WITH STROKE
+0125;hcircumflex;LATIN SMALL LETTER H WITH CIRCUMFLEX
+2665;heart;BLACK HEART SUIT
+0309;hookabovecomb;COMBINING HOOK ABOVE
+2302;house;HOUSE
+02DD;hungarumlaut;DOUBLE ACUTE ACCENT
+002D;hyphen;HYPHEN-MINUS
+0069;i;LATIN SMALL LETTER I
+00ED;iacute;LATIN SMALL LETTER I WITH ACUTE
+012D;ibreve;LATIN SMALL LETTER I WITH BREVE
+00EE;icircumflex;LATIN SMALL LETTER I WITH CIRCUMFLEX
+00EF;idieresis;LATIN SMALL LETTER I WITH DIAERESIS
+00EC;igrave;LATIN SMALL LETTER I WITH GRAVE
+0133;ij;LATIN SMALL LIGATURE IJ
+012B;imacron;LATIN SMALL LETTER I WITH MACRON
+221E;infinity;INFINITY
+222B;integral;INTEGRAL
+2321;integralbt;BOTTOM HALF INTEGRAL
+2320;integraltp;TOP HALF INTEGRAL
+2229;intersection;INTERSECTION
+25D8;invbullet;INVERSE BULLET
+25D9;invcircle;INVERSE WHITE CIRCLE
+263B;invsmileface;BLACK SMILING FACE
+012F;iogonek;LATIN SMALL LETTER I WITH OGONEK
+03B9;iota;GREEK SMALL LETTER IOTA
+03CA;iotadieresis;GREEK SMALL LETTER IOTA WITH DIALYTIKA
+0390;iotadieresistonos;GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
+03AF;iotatonos;GREEK SMALL LETTER IOTA WITH TONOS
+0129;itilde;LATIN SMALL LETTER I WITH TILDE
+006A;j;LATIN SMALL LETTER J
+0135;jcircumflex;LATIN SMALL LETTER J WITH CIRCUMFLEX
+006B;k;LATIN SMALL LETTER K
+03BA;kappa;GREEK SMALL LETTER KAPPA
+0138;kgreenlandic;LATIN SMALL LETTER KRA
+006C;l;LATIN SMALL LETTER L
+013A;lacute;LATIN SMALL LETTER L WITH ACUTE
+03BB;lambda;GREEK SMALL LETTER LAMDA
+013E;lcaron;LATIN SMALL LETTER L WITH CARON
+0140;ldot;LATIN SMALL LETTER L WITH MIDDLE DOT
+003C;less;LESS-THAN SIGN
+2264;lessequal;LESS-THAN OR EQUAL TO
+258C;lfblock;LEFT HALF BLOCK
+20A4;lira;LIRA SIGN
+2227;logicaland;LOGICAL AND
+00AC;logicalnot;NOT SIGN
+2228;logicalor;LOGICAL OR
+017F;longs;LATIN SMALL LETTER LONG S
+25CA;lozenge;LOZENGE
+0142;lslash;LATIN SMALL LETTER L WITH STROKE
+2591;ltshade;LIGHT SHADE
+006D;m;LATIN SMALL LETTER M
+00AF;macron;MACRON
+2642;male;MALE SIGN
+2212;minus;MINUS SIGN
+2032;minute;PRIME
+00B5;mu;MICRO SIGN
+00D7;multiply;MULTIPLICATION SIGN
+266A;musicalnote;EIGHTH NOTE
+266B;musicalnotedbl;BEAMED EIGHTH NOTES
+006E;n;LATIN SMALL LETTER N
+0144;nacute;LATIN SMALL LETTER N WITH ACUTE
+0149;napostrophe;LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
+0148;ncaron;LATIN SMALL LETTER N WITH CARON
+0039;nine;DIGIT NINE
+2209;notelement;NOT AN ELEMENT OF
+2260;notequal;NOT EQUAL TO
+2284;notsubset;NOT A SUBSET OF
+00F1;ntilde;LATIN SMALL LETTER N WITH TILDE
+03BD;nu;GREEK SMALL LETTER NU
+0023;numbersign;NUMBER SIGN
+006F;o;LATIN SMALL LETTER O
+00F3;oacute;LATIN SMALL LETTER O WITH ACUTE
+014F;obreve;LATIN SMALL LETTER O WITH BREVE
+00F4;ocircumflex;LATIN SMALL LETTER O WITH CIRCUMFLEX
+00F6;odieresis;LATIN SMALL LETTER O WITH DIAERESIS
+0153;oe;LATIN SMALL LIGATURE OE
+02DB;ogonek;OGONEK
+00F2;ograve;LATIN SMALL LETTER O WITH GRAVE
+01A1;ohorn;LATIN SMALL LETTER O WITH HORN
+0151;ohungarumlaut;LATIN SMALL LETTER O WITH DOUBLE ACUTE
+014D;omacron;LATIN SMALL LETTER O WITH MACRON
+03C9;omega;GREEK SMALL LETTER OMEGA
+03D6;omega1;GREEK PI SYMBOL
+03CE;omegatonos;GREEK SMALL LETTER OMEGA WITH TONOS
+03BF;omicron;GREEK SMALL LETTER OMICRON
+03CC;omicrontonos;GREEK SMALL LETTER OMICRON WITH TONOS
+0031;one;DIGIT ONE
+2024;onedotenleader;ONE DOT LEADER
+215B;oneeighth;VULGAR FRACTION ONE EIGHTH
+00BD;onehalf;VULGAR FRACTION ONE HALF
+00BC;onequarter;VULGAR FRACTION ONE QUARTER
+2153;onethird;VULGAR FRACTION ONE THIRD
+25E6;openbullet;WHITE BULLET
+00AA;ordfeminine;FEMININE ORDINAL INDICATOR
+00BA;ordmasculine;MASCULINE ORDINAL INDICATOR
+221F;orthogonal;RIGHT ANGLE
+00F8;oslash;LATIN SMALL LETTER O WITH STROKE
+01FF;oslashacute;LATIN SMALL LETTER O WITH STROKE AND ACUTE
+00F5;otilde;LATIN SMALL LETTER O WITH TILDE
+0070;p;LATIN SMALL LETTER P
+00B6;paragraph;PILCROW SIGN
+0028;parenleft;LEFT PARENTHESIS
+0029;parenright;RIGHT PARENTHESIS
+2202;partialdiff;PARTIAL DIFFERENTIAL
+0025;percent;PERCENT SIGN
+002E;period;FULL STOP
+00B7;periodcentered;MIDDLE DOT
+22A5;perpendicular;UP TACK
+2030;perthousand;PER MILLE SIGN
+20A7;peseta;PESETA SIGN
+03C6;phi;GREEK SMALL LETTER PHI
+03D5;phi1;GREEK PHI SYMBOL
+03C0;pi;GREEK SMALL LETTER PI
+002B;plus;PLUS SIGN
+00B1;plusminus;PLUS-MINUS SIGN
+211E;prescription;PRESCRIPTION TAKE
+220F;product;N-ARY PRODUCT
+2282;propersubset;SUBSET OF
+2283;propersuperset;SUPERSET OF
+221D;proportional;PROPORTIONAL TO
+03C8;psi;GREEK SMALL LETTER PSI
+0071;q;LATIN SMALL LETTER Q
+003F;question;QUESTION MARK
+00BF;questiondown;INVERTED QUESTION MARK
+0022;quotedbl;QUOTATION MARK
+201E;quotedblbase;DOUBLE LOW-9 QUOTATION MARK
+201C;quotedblleft;LEFT DOUBLE QUOTATION MARK
+201D;quotedblright;RIGHT DOUBLE QUOTATION MARK
+2018;quoteleft;LEFT SINGLE QUOTATION MARK
+201B;quotereversed;SINGLE HIGH-REVERSED-9 QUOTATION MARK
+2019;quoteright;RIGHT SINGLE QUOTATION MARK
+201A;quotesinglbase;SINGLE LOW-9 QUOTATION MARK
+0027;quotesingle;APOSTROPHE
+0072;r;LATIN SMALL LETTER R
+0155;racute;LATIN SMALL LETTER R WITH ACUTE
+221A;radical;SQUARE ROOT
+0159;rcaron;LATIN SMALL LETTER R WITH CARON
+2286;reflexsubset;SUBSET OF OR EQUAL TO
+2287;reflexsuperset;SUPERSET OF OR EQUAL TO
+00AE;registered;REGISTERED SIGN
+2310;revlogicalnot;REVERSED NOT SIGN
+03C1;rho;GREEK SMALL LETTER RHO
+02DA;ring;RING ABOVE
+2590;rtblock;RIGHT HALF BLOCK
+0073;s;LATIN SMALL LETTER S
+015B;sacute;LATIN SMALL LETTER S WITH ACUTE
+0161;scaron;LATIN SMALL LETTER S WITH CARON
+015F;scedilla;LATIN SMALL LETTER S WITH CEDILLA
+015D;scircumflex;LATIN SMALL LETTER S WITH CIRCUMFLEX
+2033;second;DOUBLE PRIME
+00A7;section;SECTION SIGN
+003B;semicolon;SEMICOLON
+0037;seven;DIGIT SEVEN
+215E;seveneighths;VULGAR FRACTION SEVEN EIGHTHS
+2592;shade;MEDIUM SHADE
+03C3;sigma;GREEK SMALL LETTER SIGMA
+03C2;sigma1;GREEK SMALL LETTER FINAL SIGMA
+223C;similar;TILDE OPERATOR
+0036;six;DIGIT SIX
+002F;slash;SOLIDUS
+263A;smileface;WHITE SMILING FACE
+0020;space;SPACE
+2660;spade;BLACK SPADE SUIT
+00A3;sterling;POUND SIGN
+220B;suchthat;CONTAINS AS MEMBER
+2211;summation;N-ARY SUMMATION
+263C;sun;WHITE SUN WITH RAYS
+0074;t;LATIN SMALL LETTER T
+03C4;tau;GREEK SMALL LETTER TAU
+0167;tbar;LATIN SMALL LETTER T WITH STROKE
+0165;tcaron;LATIN SMALL LETTER T WITH CARON
+2234;therefore;THEREFORE
+03B8;theta;GREEK SMALL LETTER THETA
+03D1;theta1;GREEK THETA SYMBOL
+00FE;thorn;LATIN SMALL LETTER THORN
+0033;three;DIGIT THREE
+215C;threeeighths;VULGAR FRACTION THREE EIGHTHS
+00BE;threequarters;VULGAR FRACTION THREE QUARTERS
+02DC;tilde;SMALL TILDE
+0303;tildecomb;COMBINING TILDE
+0384;tonos;GREEK TONOS
+2122;trademark;TRADE MARK SIGN
+25BC;triagdn;BLACK DOWN-POINTING TRIANGLE
+25C4;triaglf;BLACK LEFT-POINTING POINTER
+25BA;triagrt;BLACK RIGHT-POINTING POINTER
+25B2;triagup;BLACK UP-POINTING TRIANGLE
+0032;two;DIGIT TWO
+2025;twodotenleader;TWO DOT LEADER
+2154;twothirds;VULGAR FRACTION TWO THIRDS
+0075;u;LATIN SMALL LETTER U
+00FA;uacute;LATIN SMALL LETTER U WITH ACUTE
+016D;ubreve;LATIN SMALL LETTER U WITH BREVE
+00FB;ucircumflex;LATIN SMALL LETTER U WITH CIRCUMFLEX
+00FC;udieresis;LATIN SMALL LETTER U WITH DIAERESIS
+00F9;ugrave;LATIN SMALL LETTER U WITH GRAVE
+01B0;uhorn;LATIN SMALL LETTER U WITH HORN
+0171;uhungarumlaut;LATIN SMALL LETTER U WITH DOUBLE ACUTE
+016B;umacron;LATIN SMALL LETTER U WITH MACRON
+005F;underscore;LOW LINE
+2017;underscoredbl;DOUBLE LOW LINE
+222A;union;UNION
+2200;universal;FOR ALL
+0173;uogonek;LATIN SMALL LETTER U WITH OGONEK
+2580;upblock;UPPER HALF BLOCK
+03C5;upsilon;GREEK SMALL LETTER UPSILON
+03CB;upsilondieresis;GREEK SMALL LETTER UPSILON WITH DIALYTIKA
+03B0;upsilondieresistonos;GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
+03CD;upsilontonos;GREEK SMALL LETTER UPSILON WITH TONOS
+016F;uring;LATIN SMALL LETTER U WITH RING ABOVE
+0169;utilde;LATIN SMALL LETTER U WITH TILDE
+0076;v;LATIN SMALL LETTER V
+0077;w;LATIN SMALL LETTER W
+1E83;wacute;LATIN SMALL LETTER W WITH ACUTE
+0175;wcircumflex;LATIN SMALL LETTER W WITH CIRCUMFLEX
+1E85;wdieresis;LATIN SMALL LETTER W WITH DIAERESIS
+2118;weierstrass;SCRIPT CAPITAL P
+1E81;wgrave;LATIN SMALL LETTER W WITH GRAVE
+0078;x;LATIN SMALL LETTER X
+03BE;xi;GREEK SMALL LETTER XI
+0079;y;LATIN SMALL LETTER Y
+00FD;yacute;LATIN SMALL LETTER Y WITH ACUTE
+0177;ycircumflex;LATIN SMALL LETTER Y WITH CIRCUMFLEX
+00FF;ydieresis;LATIN SMALL LETTER Y WITH DIAERESIS
+00A5;yen;YEN SIGN
+1EF3;ygrave;LATIN SMALL LETTER Y WITH GRAVE
+007A;z;LATIN SMALL LETTER Z
+017A;zacute;LATIN SMALL LETTER Z WITH ACUTE
+017E;zcaron;LATIN SMALL LETTER Z WITH CARON
+017C;zdotaccent;LATIN SMALL LETTER Z WITH DOT ABOVE
+0030;zero;DIGIT ZERO
+03B6;zeta;GREEK SMALL LETTER ZETA
+#END
+"""
+
+
# AGLError used to be the plain string "AGLError".  String "exceptions"
# cannot be raised in modern Python, and _builddicts() calls
# AGLError(...), which raises TypeError on a plain str — so define a
# real Exception subclass instead.
class AGLError(Exception):
	"""Raised when the embedded AGL table cannot be parsed."""


AGL2UV = {}  # glyph name -> Unicode value; filled in by _builddicts()
UV2AGL = {}  # Unicode value -> glyph name; filled in by _builddicts()
+
def _builddicts():
	"""Parse the embedded AGLFN table (_aglText) and populate the
	module-level AGL2UV and UV2AGL mappings."""
	import re

	lineRE = re.compile("([0-9A-F]{4});([A-Za-z_0-9.]+);.*?$")

	for line in _aglText.splitlines():
		# skip blank lines and comment lines
		if not line or line.startswith('#'):
			continue
		matched = lineRE.match(line)
		if not matched:
			raise AGLError("syntax error in glyphlist.txt: %s" % repr(line[:20]))
		uvHex, glyphName = matched.group(1), matched.group(2)
		assert len(uvHex) == 4
		uv = int(uvHex, 16)
		if glyphName in AGL2UV:
			# the table contains identical duplicate entries
			assert AGL2UV[glyphName] == uv
		else:
			AGL2UV[glyphName] = uv
		UV2AGL[uv] = glyphName

_builddicts()
diff --git a/Lib/fontTools/cffLib.py b/Lib/fontTools/cffLib.py
new file mode 100644
index 0000000..e371521
--- /dev/null
+++ b/Lib/fontTools/cffLib.py
@@ -0,0 +1,1815 @@
+"""cffLib.py -- read/write tools for Adobe CFF fonts."""
+
+#
+# $Id: cffLib.py,v 1.34 2008-03-07 19:56:17 jvr Exp $
+#
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc import psCharStrings
+from fontTools.misc.textTools import safeEval
+import struct
+
+DEBUG = 0
+
+
+cffHeaderFormat = """
+	major:   B
+	minor:   B
+	hdrSize: B
+	offSize: B
+"""
+
class CFFFontSet(object):
	
	"""A CFF font file: header, font names, top dicts, strings and
	global subroutines.  In OpenType practice it holds exactly one font,
	accessible by name via __getitem__."""
	
	def __init__(self):
		pass
	
	def decompile(self, file, otFont):
		"""Parse the binary CFF data from 'file'.  'otFont' is unused."""
		sstruct.unpack(cffHeaderFormat, file.read(4), self)
		assert self.major == 1 and self.minor == 0, \
				"unknown CFF format: %d.%d" % (self.major, self.minor)
		
		# the header may be longer than the four bytes parsed above
		file.seek(self.hdrSize)
		self.fontNames = list(Index(file))
		self.topDictIndex = TopDictIndex(file)
		self.strings = IndexedStrings(file)
		self.GlobalSubrs = GlobalSubrsIndex(file)
		# the top dicts need the string index and global subrs to resolve
		# their operands lazily
		self.topDictIndex.strings = self.strings
		self.topDictIndex.GlobalSubrs = self.GlobalSubrs
	
	def __len__(self):
		return len(self.fontNames)
	
	def keys(self):
		return list(self.fontNames)
	
	def values(self):
		return self.topDictIndex
	
	def __getitem__(self, name):
		"""Return the top dict of the font called 'name'; KeyError if absent."""
		try:
			index = self.fontNames.index(name)
		except ValueError:
			raise KeyError(name)
		return self.topDictIndex[index]
	
	def compile(self, file, otFont):
		"""Write binary CFF data to 'file'; top dicts without a charset
		get the glyph order of 'otFont'."""
		strings = IndexedStrings()
		writer = CFFWriter()
		writer.add(sstruct.pack(cffHeaderFormat, self))
		fontNames = Index()
		for name in self.fontNames:
			fontNames.append(name)
		writer.add(fontNames.getCompiler(strings, None))
		topCompiler = self.topDictIndex.getCompiler(strings, None)
		writer.add(topCompiler)
		writer.add(strings.getCompiler())
		writer.add(self.GlobalSubrs.getCompiler(strings, None))
		
		for topDict in self.topDictIndex:
			if not hasattr(topDict, "charset") or topDict.charset is None:
				charset = otFont.getGlyphOrder()
				topDict.charset = charset
		
		# child data (charsets, charstrings, private dicts, ...) is laid
		# out after the fixed tables; CFFWriter resolves the offsets
		for child in topCompiler.getChildren(strings):
			writer.add(child)
		
		writer.toFile(file)
	
	def toXML(self, xmlWriter, progress=None):
		"""Dump every font, then the global subrs, as TTX XML."""
		for fontName in self.fontNames:
			xmlWriter.begintag("CFFFont", name=tostr(fontName))
			xmlWriter.newline()
			font = self[fontName]
			font.toXML(xmlWriter, progress)
			xmlWriter.endtag("CFFFont")
			xmlWriter.newline()
		xmlWriter.newline()
		xmlWriter.begintag("GlobalSubrs")
		xmlWriter.newline()
		self.GlobalSubrs.toXML(xmlWriter, progress)
		xmlWriter.endtag("GlobalSubrs")
		xmlWriter.newline()
	
	def fromXML(self, name, attrs, content):
		"""Rebuild the font set from one TTX XML element."""
		if not hasattr(self, "GlobalSubrs"):
			# first element seen: initialize header defaults
			self.GlobalSubrs = GlobalSubrsIndex()
			self.major = 1
			self.minor = 0
			self.hdrSize = 4
			self.offSize = 4  # XXX ??
		if name == "CFFFont":
			if not hasattr(self, "fontNames"):
				self.fontNames = []
				self.topDictIndex = TopDictIndex()
			fontName = attrs["name"]
			topDict = TopDict(GlobalSubrs=self.GlobalSubrs)
			topDict.charset = None  # gets filled in later
			self.fontNames.append(fontName)
			self.topDictIndex.append(topDict)
			for element in content:
				if isinstance(element, basestring):
					continue
				name, attrs, content = element
				topDict.fromXML(name, attrs, content)
		elif name == "GlobalSubrs":
			for element in content:
				if isinstance(element, basestring):
					continue
				name, attrs, content = element
				subr = psCharStrings.T2CharString()
				subr.fromXML(name, attrs, content)
				self.GlobalSubrs.append(subr)
+
+
class CFFWriter(object):
	
	"""Collects data items and writes them out once their positions have
	stabilized: offsets depend on offset sizes and vice versa, so layout
	is iterated to a fixed point before writing."""
	
	def __init__(self):
		self.data = []
	
	def add(self, table):
		"""Append an item: bytes, or an object with getDataLength()/toFile()
		(and optionally setPos())."""
		self.data.append(table)
	
	def toFile(self, file):
		"""Iterate the layout until positions stop changing, then write."""
		previousPositions = None
		iteration = 1
		while True:
			if DEBUG:
				print("CFFWriter.toFile() iteration:", iteration)
			iteration = iteration + 1
			currentPos = 0
			positions = [currentPos]
			for item in self.data:
				if hasattr(item, "getDataLength"):
					nextPos = currentPos + item.getDataLength()
				else:
					nextPos = currentPos + len(item)
				if hasattr(item, "setPos"):
					item.setPos(currentPos, nextPos)
				currentPos = nextPos
				positions.append(currentPos)
			if positions == previousPositions:
				break
			previousPositions = positions
		if DEBUG:
			print("CFFWriter.toFile() writing to file.")
		start = file.tell()
		positions = [0]
		for item in self.data:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(item)
			positions.append(file.tell() - start)
		# what was written must match the stabilized layout
		assert positions == previousPositions
+
+
def calcOffSize(largestOffset):
	"""Return the number of bytes (1-4) needed to represent largestOffset."""
	for size, limit in ((1, 0x100), (2, 0x10000), (3, 0x1000000)):
		if largestOffset < limit:
			return size
	return 4
+
+
class IndexCompiler(object):
	
	"""Base compiler for CFF INDEX structures."""
	
	def __init__(self, items, strings, parent):
		self.items = self.getItems(items, strings)
		self.parent = parent
	
	def getItems(self, items, strings):
		"""Hook for subclasses; by default items are used as-is."""
		return items
	
	def getOffsets(self):
		"""Return the 1-based offset of each item plus the end offset."""
		current = 1
		offsets = [current]
		for item in self.items:
			if hasattr(item, "getDataLength"):
				current = current + item.getDataLength()
			else:
				current = current + len(item)
			offsets.append(current)
		return offsets
	
	def getDataLength(self):
		"""Total byte length of the compiled INDEX."""
		lastOffset = self.getOffsets()[-1]
		offSize = calcOffSize(lastOffset)
		# count (2 bytes) + offSize byte + offset array + object data
		return 2 + 1 + (len(self.items) + 1) * offSize + (lastOffset - 1)
	
	def toFile(self, file):
		offsets = self.getOffsets()
		writeCard16(file, len(self.items))
		offSize = calcOffSize(offsets[-1])
		writeCard8(file, offSize)
		pack = struct.pack
		for offset in offsets:
			# big-endian, truncated to the low offSize bytes
			binOffset = pack(">l", offset)[-offSize:]
			assert len(binOffset) == offSize
			file.write(binOffset)
		for item in self.items:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(tobytes(item, encoding="latin1"))
+
+
class IndexedStringsCompiler(IndexCompiler):
	
	def getItems(self, items, strings):
		"""The items to compile are the raw strings of the IndexedStrings."""
		return items.strings
+
+
class TopDictIndexCompiler(IndexCompiler):
	
	"""Compiles the TopDict INDEX; items are per-dict compilers."""
	
	def getItems(self, items, strings):
		return [item.getCompiler(strings, self) for item in items]
	
	def getChildren(self, strings):
		"""Collect the child compilers of every top dict, in order."""
		children = []
		for topDictCompiler in self.items:
			children.extend(topDictCompiler.getChildren(strings))
		return children
+
+
class FDArrayIndexCompiler(IndexCompiler):
	
	# Compiles the FDArray INDEX of a CID-keyed font.
	
	def getItems(self, items, strings):
		# items are FontDicts; compile each one
		out = []
		for item in items:
			out.append(item.getCompiler(strings, self))
		return out
	
	def getChildren(self, strings):
		# collect the child compilers (e.g. Private dicts) of every FontDict
		children = []
		for fontDict in self.items:
			children.extend(fontDict.getChildren(strings))
		return children

	def toFile(self, file):
		# Same layout as IndexCompiler.toFile(); NOTE(review): unlike the
		# base class this writes items raw, without latin1 re-encoding --
		# presumably items here are always compilers/bytes; confirm.
		offsets = self.getOffsets()
		writeCard16(file, len(self.items))
		offSize = calcOffSize(offsets[-1])
		writeCard8(file, offSize)
		offSize = -offSize
		pack = struct.pack
		for offset in offsets:
			binOffset = pack(">l", offset)[offSize:]
			assert len(binOffset) == -offSize
			file.write(binOffset)
		for item in self.items:
			if hasattr(item, "toFile"):
				item.toFile(file)
			else:
				file.write(item)

	def setPos(self, pos, endPos):
		# record our absolute offset in the owning top dict
		self.parent.rawDict["FDArray"] = pos
+
+
class GlobalSubrsCompiler(IndexCompiler):
	
	"""Compiles an INDEX of charstrings; items are their bytecode strings."""
	
	def getItems(self, items, strings):
		bytecodes = []
		for charString in items:
			charString.compile()  # make sure the bytecode is up to date
			bytecodes.append(charString.bytecode)
		return bytecodes
+
class SubrsCompiler(GlobalSubrsCompiler):
	
	"""Local subrs INDEX; its offset is stored relative to the Private dict."""
	
	def setPos(self, pos, endPos):
		self.parent.rawDict["Subrs"] = pos - self.parent.pos
+
class CharStringsCompiler(GlobalSubrsCompiler):
	
	"""CharStrings INDEX; stores its absolute offset in the top dict."""
	
	def setPos(self, pos, endPos):
		self.parent.rawDict["CharStrings"] = pos
+
+
class Index(object):
	
	"""This class represents what the CFF spec calls an INDEX.
	
	Only the offset array is parsed up front; each item's data is read
	(and cached) on first access via __getitem__.
	"""
	
	compilerClass = IndexCompiler
	
	def __init__(self, file=None):
		name = self.__class__.__name__
		if file is None:
			# building from scratch (e.g. when loading from XML)
			self.items = []
			return
		if DEBUG:
			print("loading %s at %s" % (name, file.tell()))
		self.file = file
		count = readCard16(file)
		self.count = count
		self.items = [None] * count  # lazily-filled cache
		if count == 0:
			self.items = []
			return
		offSize = readCard8(file)
		if DEBUG:
			print("    index count: %s offSize: %s" % (count, offSize))
		assert offSize <= 4, "offSize too large: %s" % offSize
		self.offsets = offsets = []
		# offsets are offSize bytes wide; left-pad to 4 so ">L" can unpack
		pad = b'\0' * (4 - offSize)
		for index in range(count+1):
			chunk = file.read(offSize)
			chunk = pad + chunk
			offset, = struct.unpack(">L", chunk)
			offsets.append(int(offset))
		# INDEX offsets are 1-based relative to the byte before the data
		self.offsetBase = file.tell() - 1
		file.seek(self.offsetBase + offsets[-1])  # pretend we've read the whole lot
		if DEBUG:
			print("    end of %s at %s" % (name, file.tell()))
	
	def __len__(self):
		return len(self.items)
	
	def __getitem__(self, index):
		"""Return item 'index', reading and caching it on first access."""
		item = self.items[index]
		if item is not None:
			return item
		offset = self.offsets[index] + self.offsetBase
		size = self.offsets[index+1] - self.offsets[index]
		file = self.file
		file.seek(offset)
		data = file.read(size)
		assert len(data) == size
		item = self.produceItem(index, data, file, offset, size)
		self.items[index] = item
		return item
	
	def produceItem(self, index, data, file, offset, size):
		# hook for subclasses; by default items are the raw bytes
		return data
	
	def append(self, item):
		self.items.append(item)
	
	def getCompiler(self, strings, parent):
		return self.compilerClass(self, strings, parent)
+
+
class GlobalSubrsIndex(Index):
	
	"""INDEX of (global) subroutines; items decompile lazily into
	T2CharString objects."""
	
	compilerClass = GlobalSubrsCompiler
	
	def __init__(self, file=None, globalSubrs=None, private=None, fdSelect=None, fdArray=None):
		Index.__init__(self, file)
		self.globalSubrs = globalSubrs
		self.private = private
		# fdSelect/fdArray are only passed for CID-keyed fonts
		if fdSelect:
			self.fdSelect = fdSelect
		if fdArray:
			self.fdArray = fdArray
	
	def produceItem(self, index, data, file, offset, size):
		if self.private is not None:
			private = self.private
		elif hasattr(self, 'fdArray') and self.fdArray is not None:
			# CID font: Private dict comes from the FD mapped to this glyph
			private = self.fdArray[self.fdSelect[index]].Private
		else:
			private = None
		return psCharStrings.T2CharString(data, private=private, globalSubrs=self.globalSubrs)
	
	def toXML(self, xmlWriter, progress):
		"""Dump all charstrings; not-yet-decompiled ones are marked raw."""
		xmlWriter.comment("The 'index' attribute is only for humans; it is ignored when parsed.")
		xmlWriter.newline()
		for i in range(len(self)):
			subr = self[i]
			if subr.needsDecompilation():
				xmlWriter.begintag("CharString", index=i, raw=1)
			else:
				xmlWriter.begintag("CharString", index=i)
			xmlWriter.newline()
			subr.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()
	
	def fromXML(self, name, attrs, content):
		if name != "CharString":
			return
		subr = psCharStrings.T2CharString()
		subr.fromXML(name, attrs, content)
		self.append(subr)
	
	def getItemAndSelector(self, index):
		"""Return (charstring, fdSelect value); the selector is None when
		there is no FDSelect table."""
		sel = None
		if hasattr(self, 'fdSelect'):
			sel = self.fdSelect[index]
		return self[index], sel
+
+
class SubrsIndex(GlobalSubrsIndex):
	# Local subrs INDEX; identical to GlobalSubrsIndex apart from the
	# compiler, which stores its offset relative to the Private dict.
	compilerClass = SubrsCompiler
+
+
class TopDictIndex(Index):
	
	"""INDEX of TopDict objects; entries are decompiled lazily."""
	
	compilerClass = TopDictIndexCompiler
	
	def produceItem(self, index, data, file, offset, size):
		topDict = TopDict(self.strings, file, offset, self.GlobalSubrs)
		topDict.decompile(data)
		return topDict
	
	def toXML(self, xmlWriter, progress):
		for index in range(len(self)):
			xmlWriter.begintag("FontDict", index=index)
			xmlWriter.newline()
			self[index].toXML(xmlWriter, progress)
			xmlWriter.endtag("FontDict")
			xmlWriter.newline()
+
+
class FDArrayIndex(TopDictIndex):
	
	"""The FDArray INDEX of a CID-keyed font; entries are FontDicts."""
	
	compilerClass = FDArrayIndexCompiler

	def fromXML(self, name, attrs, content):
		if name != "FontDict":
			return
		fontDict = FontDict()
		for element in content:
			if isinstance(element, basestring):
				continue
			childName, childAttrs, childContent = element
			fontDict.fromXML(childName, childAttrs, childContent)
		self.append(fontDict)
+
+
class FDSelect(object):
	
	"""FDSelect table of a CID-keyed font: maps glyph ID -> FD (font
	dict) index.  Reads formats 0 and 3 from a file, or starts empty
	when built from XML."""
	
	def __init__(self, file=None, numGlyphs=None, format=None):
		if file:
			# read data in from file
			self.format = readCard8(file)
			if self.format == 0:
				# format 0: one FD index byte per glyph
				from array import array
				self.gidArray = array("B", file.read(numGlyphs)).tolist()
			elif self.format == 3:
				# format 3: ranges of glyphs sharing an FD index
				gidArray = [None] * numGlyphs
				nRanges = readCard16(file)
				prev = None
				for i in range(nRanges):
					first = readCard16(file)
					if prev is not None:
						for glyphID in range(prev, first):
							gidArray[glyphID] = fd
					prev = first
					fd = readCard8(file)
				if prev is not None:
					# the sentinel glyph ID closes the last range
					first = readCard16(file)
					for glyphID in range(prev, first):
						gidArray[glyphID] = fd
				self.gidArray = gidArray
			else:
				# fixed: the message used the 'format' parameter (None when
				# reading from a file) instead of the value actually read
				assert False, "unsupported FDSelect format: %s" % self.format
		else:
			# reading from XML. Make an empty gidArray and leave format as
			# passed in; format None means the smallest representation is
			# chosen at compile time.
			self.format = format
			self.gidArray = []

	def __len__(self):
		return len(self.gidArray)
	
	def __getitem__(self, index):
		return self.gidArray[index]
	
	def __setitem__(self, index, fdSelectValue):
		self.gidArray[index] = fdSelectValue

	def append(self, fdSelectValue):
		self.gidArray.append(fdSelectValue)
+	
+
class CharStrings(object):
	
	"""Mapping of glyph name -> charstring.
	
	When loaded from a file, charstrings live in a lazily-decompiled
	SubrsIndex and self.charStrings maps names to indices into it; when
	built from XML, self.charStrings maps names directly to charstring
	objects.
	"""
	
	def __init__(self, file, charset, globalSubrs, private, fdSelect, fdArray):
		if file is not None:
			self.charStringsIndex = SubrsIndex(file, globalSubrs, private, fdSelect, fdArray)
			self.charStrings = charStrings = {}
			for i in range(len(charset)):
				charStrings[charset[i]] = i
			self.charStringsAreIndexed = 1
		else:
			self.charStrings = {}
			self.charStringsAreIndexed = 0
			self.globalSubrs = globalSubrs
			self.private = private
			# fdSelect/fdArray are only present for CID-keyed fonts
			if fdSelect is not None:
				self.fdSelect = fdSelect
			if fdArray is not None:
				self.fdArray = fdArray
	
	def keys(self):
		return list(self.charStrings.keys())
	
	def values(self):
		if self.charStringsAreIndexed:
			return self.charStringsIndex
		else:
			return list(self.charStrings.values())
	
	def has_key(self, name):
		return name in self.charStrings
	
	def __len__(self):
		return len(self.charStrings)
	
	def __getitem__(self, name):
		charString = self.charStrings[name]
		if self.charStringsAreIndexed:
			charString = self.charStringsIndex[charString]
		return charString
	
	def __setitem__(self, name, charString):
		if self.charStringsAreIndexed:
			index = self.charStrings[name]
			self.charStringsIndex[index] = charString
		else:
			self.charStrings[name] = charString
	
	def getItemAndSelector(self, name):
		"""Return (charString, fdSelectIndex); the selector is None for
		name-keyed (non-CID) fonts."""
		if self.charStringsAreIndexed:
			index = self.charStrings[name]
			return self.charStringsIndex.getItemAndSelector(index)
		else:
			# Fixed: this branch used to reference an undefined local
			# ('index') and raised KeyError when no fdSelect was present,
			# which broke toXML() after fromXML().  fromXML() stores
			# fdSelectIndex on the charstring for CID fonts; name-keyed
			# charstrings simply have no selector.
			charString = self.charStrings[name]
			return charString, getattr(charString, "fdSelectIndex", None)
	
	def toXML(self, xmlWriter, progress):
		"""Dump all charstrings in sorted glyph-name order."""
		names = sorted(self.keys())
		i = 0
		step = 10
		numGlyphs = len(names)
		for name in names:
			charStr, fdSelectIndex = self.getItemAndSelector(name)
			if charStr.needsDecompilation():
				raw = [("raw", 1)]
			else:
				raw = []
			if fdSelectIndex is None:
				xmlWriter.begintag("CharString", [('name', name)] + raw)
			else:
				xmlWriter.begintag("CharString",
						[('name', name), ('fdSelectIndex', fdSelectIndex)] + raw)
			xmlWriter.newline()
			charStr.toXML(xmlWriter)
			xmlWriter.endtag("CharString")
			xmlWriter.newline()
			if not i % step and progress is not None:
				progress.setLabel("Dumping 'CFF ' table... (%s)" % name)
				progress.increment(step / numGlyphs)
			i = i + 1
	
	def fromXML(self, name, attrs, content):
		for element in content:
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			if name != "CharString":
				continue
			fdID = -1
			if hasattr(self, "fdArray"):
				# CID font: the Private dict comes from the selected FD
				fdID = safeEval(attrs["fdSelectIndex"])
				private = self.fdArray[fdID].Private
			else:
				private = self.private
				
			glyphName = attrs["name"]
			charString = psCharStrings.T2CharString(
					private=private,
					globalSubrs=self.globalSubrs)
			charString.fromXML(name, attrs, content)
			if fdID >= 0:
				charString.fdSelectIndex = fdID
			self[glyphName] = charString
+
+
def readCard8(file):
	"""Read a single unsigned byte from 'file'."""
	data = file.read(1)
	return byteord(data)
+
def readCard16(file):
	"""Read a big-endian unsigned 16-bit integer from 'file'."""
	data = file.read(2)
	value, = struct.unpack(">H", data)
	return value
+
def writeCard8(file, value):
	"""Write 'value' as a single unsigned byte."""
	data = bytechr(value)
	file.write(data)
+
def writeCard16(file, value):
	"""Write 'value' as a big-endian unsigned 16-bit integer."""
	data = struct.pack(">H", value)
	file.write(data)
+
def packCard8(value):
	"""Return 'value' packed as a single unsigned byte."""
	packed = bytechr(value)
	return packed
+
def packCard16(value):
	"""Return 'value' packed as a big-endian unsigned 16-bit integer."""
	packed = struct.pack(">H", value)
	return packed
+
def buildOperatorDict(table):
	"""Map opcode -> (name, argument type) for an operator table."""
	return {op: (name, arg) for op, name, arg, default, conv in table}
+
def buildOpcodeDict(table):
	"""Map operator name -> (packed opcode bytes, argument type)."""
	d = {}
	for op, name, arg, default, conv in table:
		if isinstance(op, tuple):
			# two-byte (escaped) operator
			packedOp = bytechr(op[0]) + bytechr(op[1])
		else:
			packedOp = bytechr(op)
		d[name] = (packedOp, arg)
	return d
+
def buildOrder(table):
	"""Return the operator names in table order."""
	return [name for op, name, arg, default, conv in table]
+
def buildDefaults(table):
	"""Map name -> default value, for operators that have one."""
	return {name: default for op, name, arg, default, conv in table
			if default is not None}
+
def buildConverters(table):
	"""Map name -> converter object (which may be None)."""
	return {name: conv for op, name, arg, default, conv in table}
+
+
class SimpleConverter(object):
	
	"""Default converter: values pass through unchanged and are dumped
	as a simple XML tag with a 'value' attribute."""
	
	def read(self, parent, value):
		return value
	
	def write(self, parent, value):
		return value
	
	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.simpletag(name, value=value)
		xmlWriter.newline()
	
	def xmlRead(self, name, attrs, content, parent):
		return attrs["value"]
+
class ASCIIConverter(SimpleConverter):
	
	"""Converts between ASCII byte strings in the font and text strings."""
	
	def read(self, parent, value):
		return tostr(value, encoding="ascii")
	
	def write(self, parent, value):
		return tobytes(value, encoding="ascii")
	
	def xmlWrite(self, xmlWriter, name, value, progress):
		text = tostr(value, encoding="ascii")
		xmlWriter.simpletag(name, value=text)
		xmlWriter.newline()
	
	def xmlRead(self, name, attrs, content, parent):
		return tobytes(attrs["value"], encoding="ascii")
+
class Latin1Converter(SimpleConverter):
	
	"""Converts between Latin-1 byte strings in the font and text strings."""
	
	def read(self, parent, value):
		return tostr(value, encoding="latin1")
	
	def write(self, parent, value):
		return tobytes(value, encoding="latin1")
	
	def xmlWrite(self, xmlWriter, name, value, progress):
		text = tostr(value, encoding="latin1")
		xmlWriter.simpletag(name, value=text)
		xmlWriter.newline()
	
	def xmlRead(self, name, attrs, content, parent):
		return tobytes(attrs["value"], encoding="latin1")
+
+
def parseNum(s):
	"""Parse string 's' as an int if possible, otherwise as a float.
	
	Raises ValueError if 's' is not a valid number.  (The old bare
	'except:' also swallowed unrelated errors such as KeyboardInterrupt.)
	"""
	try:
		value = int(s)
	except ValueError:
		# not an integer literal; may still be a float like "0.05" or "1e-4"
		value = float(s)
	return value
+
class NumberConverter(SimpleConverter):
	
	"""Parses the XML 'value' attribute as an int or float."""
	
	def xmlRead(self, name, attrs, content, parent):
		return parseNum(attrs["value"])
+
class ArrayConverter(SimpleConverter):
	
	"""Converts a list of numbers to/from a space-separated XML value."""
	
	def xmlWrite(self, xmlWriter, name, value, progress):
		joined = " ".join(str(num) for num in value)
		xmlWriter.simpletag(name, value=joined)
		xmlWriter.newline()
	
	def xmlRead(self, name, attrs, content, parent):
		return [parseNum(piece) for piece in attrs["value"].split()]
+
class TableConverter(SimpleConverter):
	
	"""Base converter for values that are themselves objects with
	toXML()/fromXML() methods; subclasses supply getClass()."""
	
	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.begintag(name)
		xmlWriter.newline()
		value.toXML(xmlWriter, progress)
		xmlWriter.endtag(name)
		xmlWriter.newline()
	
	def xmlRead(self, name, attrs, content, parent):
		obj = self.getClass()()
		for element in content:
			if isinstance(element, basestring):
				continue
			childName, childAttrs, childContent = element
			obj.fromXML(childName, childAttrs, childContent)
		return obj
+
class PrivateDictConverter(TableConverter):
	def getClass(self):
		return PrivateDict
	def read(self, parent, value):
		# value is the (size, offset) operand pair from the top dict
		size, offset = value
		file = parent.file
		priv = PrivateDict(parent.strings, file, offset)
		file.seek(offset)
		data = file.read(size)
		assert len(data) == size
		priv.decompile(data)
		return priv
	def write(self, parent, value):
		return (0, 0)  # dummy value; the real (size, offset) is set at compile time
+
class SubrsConverter(TableConverter):
	def getClass(self):
		return SubrsIndex
	def read(self, parent, value):
		# value is the Subrs offset, relative to the Private dict
		file = parent.file
		file.seek(parent.offset + value)  # Offset(self)
		return SubrsIndex(file)
	def write(self, parent, value):
		return 0  # dummy value; the compiler fills in the real offset
+
class CharStringsConverter(TableConverter):
	def read(self, parent, value):
		# value is the absolute offset of the CharStrings INDEX
		file = parent.file
		charset = parent.charset
		globalSubrs = parent.GlobalSubrs
		if hasattr(parent, "ROS"):
			# CID-keyed font: per-glyph Private dicts come via FDSelect/FDArray
			fdSelect, fdArray = parent.FDSelect, parent.FDArray
			private = None
		else:
			fdSelect, fdArray = None, None
			private = parent.Private
		file.seek(value)  # Offset(0)
		return CharStrings(file, charset, globalSubrs, private, fdSelect, fdArray)
	def write(self, parent, value):
		return 0  # dummy value; the compiler fills in the real offset
	def xmlRead(self, name, attrs, content, parent):
		if hasattr(parent, "ROS"):
			# if it is a CID-keyed font, then the private Dict is extracted from the parent.FDArray
			private, fdSelect, fdArray = None, parent.FDSelect, parent.FDArray
		else:
			# if it is a name-keyed font, then the private dict is in the top dict, and there is no fdArray.
			private, fdSelect, fdArray = parent.Private, None, None
		charStrings = CharStrings(None, None, parent.GlobalSubrs, private, fdSelect, fdArray)
		charStrings.fromXML(name, attrs, content)
		return charStrings
+
class CharsetConverter(object):
	def read(self, parent, value):
		isCID = hasattr(parent, "ROS")
		if value > 2:
			# value is an absolute offset to charset data
			numGlyphs = parent.numGlyphs
			file = parent.file
			file.seek(value)
			if DEBUG:
				print("loading charset at %s" % value)
			format = readCard8(file)
			if format == 0:
				charset = parseCharset0(numGlyphs, file, parent.strings, isCID)
			elif format == 1 or format == 2:
				charset = parseCharset(numGlyphs, file, parent.strings, isCID, format)
			else:
				raise NotImplementedError
			assert len(charset) == numGlyphs
			if DEBUG:
				print("    charset end at %s" % file.tell())
		else: # offset == 0 -> no charset data.
			# We get here only when processing fontDicts from the FDArray
			# of CFF-CID fonts; only the real topDict references the charset.
			if isCID or "CharStrings" not in parent.rawDict: 
				assert value == 0
				charset = None
			elif value == 0:
				# values 0-2 select one of the predefined charsets
				charset = cffISOAdobeStrings
			elif value == 1:
				charset = cffIExpertStrings
			elif value == 2:
				charset = cffExpertSubsetStrings
		return charset

	def write(self, parent, value):
		return 0  # dummy value; the compiler fills in the real offset
	def xmlWrite(self, xmlWriter, name, value, progress):
		# XXX only write charset when not in OT/TTX context, where we
		# dump charset as a separate "GlyphOrder" table.
		##xmlWriter.simpletag("charset")
		xmlWriter.comment("charset is dumped separately as the 'GlyphOrder' element")
		xmlWriter.newline()
	def xmlRead(self, name, attrs, content, parent):
		# NOTE(review): deliberately dead -- the charset is rebuilt from
		# the GlyphOrder table instead, so this always returns None.
		if 0:
			return safeEval(attrs["value"])
+
+
class CharsetCompiler(object):
	
	"""Compiles a charset to binary, picking the smaller of format 0
	and format 1/2."""
	
	def __init__(self, strings, charset, parent):
		assert charset[0] == '.notdef'
		isCID = hasattr(parent.dictObj, "ROS")
		format0 = packCharset0(charset, isCID, strings)
		formatRanged = packCharset(charset, isCID, strings)
		# prefer format 0 on a tie
		self.data = format0 if len(format0) <= len(formatRanged) else formatRanged
		self.parent = parent
	
	def setPos(self, pos, endPos):
		self.parent.rawDict["charset"] = pos
	
	def getDataLength(self):
		return len(self.data)
	
	def toFile(self, file):
		file.write(self.data)
+
+
def getCIDfromName(name, strings):
	"""Extract the numeric CID from a 'cidXXXXX' glyph name."""
	cidDigits = name[3:]
	return int(cidDigits)
+
def getSIDfromName(name, strings):
	"""Look up (or allocate) the SID for a glyph name in the string index."""
	sid = strings.getSID(name)
	return sid
+
def packCharset0(charset, isCID, strings):
	"""Pack a charset in format 0: one 16-bit SID/CID per glyph."""
	getNameID = getCIDfromName if isCID else getSIDfromName
	data = [packCard8(0)]
	for glyphName in charset[1:]:  # .notdef is implicit
		data.append(packCard16(getNameID(glyphName, strings)))
	return bytesjoin(data)
+
+
def packCharset(charset, isCID, strings):
	# Pack a charset in format 1 (upgraded to format 2 when any range is
	# longer than 255): each range is (first SID/CID, nLeft), covering
	# nLeft + 1 consecutive IDs.
	fmt = 1
	ranges = []
	first = None
	end = 0
	if isCID:
		getNameID = getCIDfromName
	else:
		getNameID = getSIDfromName
	
	for name in charset[1:]:
		SID = getNameID(name, strings)
		if first is None:
			first = SID
		elif end + 1 != SID:
			# gap in the ID sequence: close the current range
			nLeft = end - first
			if nLeft > 255:
				fmt = 2
			ranges.append((first, nLeft))
			first = SID
		end = SID
	# close the final range
	nLeft = end - first
	if nLeft > 255:
		fmt = 2
	ranges.append((first, nLeft))
	
	data = [packCard8(fmt)]
	if fmt == 1:
		nLeftFunc = packCard8
	else:
		nLeftFunc = packCard16
	for first, nLeft in ranges:
		data.append(packCard16(first) + nLeftFunc(nLeft))
	return bytesjoin(data)
+
def parseCharset0(numGlyphs, file, strings, isCID):
	"""Parse a format 0 charset: a flat array of SIDs (or CIDs)."""
	charset = [".notdef"]
	for dummy in range(numGlyphs - 1):
		nameID = readCard16(file)
		if isCID:
			charset.append("cid" + str(nameID).zfill(5))
		else:
			charset.append(strings[nameID])
	return charset
+
def parseCharset(numGlyphs, file, strings, isCID, fmt):
	"""Parse a format 1 or 2 charset: ranges of consecutive SIDs/CIDs."""
	charset = ['.notdef']
	readNLeft = readCard8 if fmt == 1 else readCard16
	count = 1
	while count < numGlyphs:
		first = readCard16(file)
		nLeft = readNLeft(file)
		for nameID in range(first, first + nLeft + 1):
			if isCID:
				charset.append("cid" + str(nameID).zfill(5))
			else:
				charset.append(strings[nameID])
		count = count + nLeft + 1
	return charset
+
+
class EncodingCompiler(object):

	"""Compiles an encoding to binary, picking the smaller of format 0
	and format 1."""

	def __init__(self, strings, encoding, parent):
		assert not isinstance(encoding, basestring)
		format0 = packEncoding0(parent.dictObj.charset, encoding, parent.strings)
		format1 = packEncoding1(parent.dictObj.charset, encoding, parent.strings)
		# prefer format 1 on a tie
		self.data = format1 if len(format1) <= len(format0) else format0
		self.parent = parent

	def setPos(self, pos, endPos):
		self.parent.rawDict["Encoding"] = pos
	
	def getDataLength(self):
		return len(self.data)
	
	def toFile(self, file):
		file.write(self.data)
+
+
class EncodingConverter(SimpleConverter):

	"""Converts the Encoding operand: either one of the standard named
	encodings, or a 256-entry list mapping code -> glyph name."""

	def read(self, parent, value):
		if value == 0:
			return "StandardEncoding"
		elif value == 1:
			return "ExpertEncoding"
		else:
			assert value > 1
			file = parent.file
			file.seek(value)
			if DEBUG:
				print("loading Encoding at %s" % value)
			fmt = readCard8(file)
			haveSupplement = fmt & 0x80
			if haveSupplement:
				raise NotImplementedError("Encoding supplements are not yet supported")
			fmt = fmt & 0x7f
			if fmt == 0:
				encoding = parseEncoding0(parent.charset, file, haveSupplement,
						parent.strings)
			elif fmt == 1:
				encoding = parseEncoding1(parent.charset, file, haveSupplement,
						parent.strings)
			else:
				# previously fell through and crashed with an unbound
				# local 'encoding'; fail explicitly instead
				raise NotImplementedError("unsupported Encoding format: %s" % fmt)
			return encoding

	def write(self, parent, value):
		if value == "StandardEncoding":
			return 0
		elif value == "ExpertEncoding":
			return 1
		return 0  # dummy value; EncodingCompiler sets the real offset

	def xmlWrite(self, xmlWriter, name, value, progress):
		if value in ("StandardEncoding", "ExpertEncoding"):
			xmlWriter.simpletag(name, name=value)
			xmlWriter.newline()
			return
		xmlWriter.begintag(name)
		xmlWriter.newline()
		for code in range(len(value)):
			glyphName = value[code]
			if glyphName != ".notdef":
				xmlWriter.simpletag("map", code=hex(code), name=glyphName)
				xmlWriter.newline()
		xmlWriter.endtag(name)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		if "name" in attrs:
			# one of the standard named encodings
			return attrs["name"]
		encoding = [".notdef"] * 256
		for element in content:
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			code = safeEval(attrs["code"])
			glyphName = attrs["name"]
			encoding[code] = glyphName
		return encoding
+
+
def parseEncoding0(charset, file, haveSupplement, strings):
	"""Parse a format 0 Encoding: one code per glyph, in glyph order."""
	nCodes = readCard8(file)
	encoding = [".notdef"] * 256
	for glyphID in range(1, nCodes + 1):
		code = readCard8(file)
		if code:  # code 0 means the glyph is unencoded
			encoding[code] = charset[glyphID]
	return encoding
+
def parseEncoding1(charset, file, haveSupplement, strings):
	# Format 1: ranges of consecutive codes.  Glyph IDs are implicit and
	# advance across ranges; the loop variable is deliberately reused so
	# that glyphID keeps counting up from one range to the next.
	nRanges = readCard8(file)
	encoding = [".notdef"] * 256
	glyphID = 1
	for i in range(nRanges):
		code = readCard8(file)
		nLeft = readCard8(file)
		for glyphID in range(glyphID, glyphID + nLeft + 1):
			encoding[code] = charset[glyphID]
			code = code + 1
		glyphID = glyphID + 1
	return encoding
+
def packEncoding0(charset, encoding, strings):
	"""Pack an Encoding in format 0: one code byte per glyph, in glyph order."""
	nameToCode = {}
	for code, glyphName in enumerate(encoding):
		if glyphName != ".notdef":
			nameToCode[glyphName] = code
	codes = [nameToCode.get(glyphName) for glyphName in charset[1:]]
	# trailing unencoded glyphs need not be written at all
	while codes and codes[-1] is None:
		codes.pop()

	data = [packCard8(0), packCard8(len(codes))]
	for code in codes:
		data.append(packCard8(code if code is not None else 0))
	return bytesjoin(data)
+
def packEncoding1(charset, encoding, strings):
	# Pack an Encoding in format 1: ranges of consecutive codes, each
	# stored as (first code, nLeft).  Unencoded glyphs get code -1 so
	# they break ranges; such ranges are written with first code 0.
	fmt = 1
	m = {}
	for code in range(len(encoding)):
		name = encoding[code]
		if name != ".notdef":
			m[name] = code
	ranges = []
	first = None
	end = 0
	for name in charset[1:]:
		code = m.get(name, -1)
		if first is None:
			first = code
		elif end + 1 != code:
			# gap in the code sequence: close the current range
			nLeft = end - first
			ranges.append((first, nLeft))
			first = code
		end = code
	nLeft = end - first
	ranges.append((first, nLeft))
	
	# remove unencoded glyphs at the end.
	while ranges and ranges[-1][0] == -1:
		ranges.pop()

	data = [packCard8(fmt), packCard8(len(ranges))]
	for first, nLeft in ranges:
		if first == -1:  # unencoded
			first = 0
		data.append(packCard8(first) + packCard8(nLeft))
	return bytesjoin(data)
+
+
class FDArrayConverter(TableConverter):

	def read(self, parent, value):
		# value is the absolute offset of the FDArray INDEX
		file = parent.file
		file.seek(value)
		fdArray = FDArrayIndex(file)
		# the FontDicts need these to resolve their operands lazily
		fdArray.strings = parent.strings
		fdArray.GlobalSubrs = parent.GlobalSubrs
		return fdArray

	def write(self, parent, value):
		return 0  # dummy value; the compiler fills in the real offset

	def xmlRead(self, name, attrs, content, parent):
		fdArray = FDArrayIndex()
		for element in content:
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			fdArray.fromXML(name, attrs, content)
		return fdArray
+
+
class FDSelectConverter(object):

	def read(self, parent, value):
		# value is the absolute offset of the FDSelect data
		file = parent.file
		file.seek(value)
		fdSelect = FDSelect(file, parent.numGlyphs)
		return 	fdSelect

	def write(self, parent, value):
		return 0  # dummy value; the compiler fills in the real offset

	# The FDSelect glyph data is written out to XML in the charstring keys,
	# so we write out only the format selector
	def xmlWrite(self, xmlWriter, name, value, progress):
		xmlWriter.simpletag(name, [('format', value.format)])
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		# build an empty FDSelect; the gidArray is filled in as the
		# charstrings are parsed
		fmt = safeEval(attrs["format"])
		file = None
		numGlyphs = None
		fdSelect = FDSelect(file, numGlyphs, fmt)
		return fdSelect
+		
+
def packFDSelect0(fdSelectArray):
	"""Pack an FDSelect array in format 0: one FD index byte per glyph."""
	data = [packCard8(0)]
	for fdIndex in fdSelectArray:
		data.append(packCard8(fdIndex))
	return bytesjoin(data)
+
+
def packFDSelect3(fdSelectArray):
	"""Pack an FDSelect array in format 3: ranges of equal FD indices,
	terminated by a sentinel glyph ID."""
	fmt = 3
	fdRanges = []
	lastFDIndex = -1
	for i, fdIndex in enumerate(fdSelectArray):
		if fdIndex != lastFDIndex:
			# a new range starts whenever the FD index changes
			fdRanges.append([i, fdIndex])
			lastFDIndex = fdIndex
	# the sentinel is one past the last glyph ID; the old code derived it
	# from the loop variable and crashed (NameError) on an empty array
	sentinelGID = len(fdSelectArray)
	
	data = [packCard8(fmt)]
	data.append(packCard16(len(fdRanges)))
	for fdRange in fdRanges:
		data.append(packCard16(fdRange[0]))
		data.append(packCard8(fdRange[1]))
	data.append(packCard16(sentinelGID))
	return bytesjoin(data)
+
+
class FDSelectCompiler(object):
	
	"""Compiles an FDSelect table to binary.  When the table's format is
	neither 0 nor 3 (i.e. None), both packings are tried, the smaller one
	is used and the chosen format is recorded on the FDSelect object."""
	
	def __init__(self, fdSelect, parent):
		self.parent = parent
		gidArray = fdSelect.gidArray
		requestedFormat = fdSelect.format
		if requestedFormat == 0:
			self.data = packFDSelect0(gidArray)
		elif requestedFormat == 3:
			self.data = packFDSelect3(gidArray)
		else:
			# no format requested: use whichever representation packs smaller
			data0 = packFDSelect0(gidArray)
			data3 = packFDSelect3(gidArray)
			if len(data0) < len(data3):
				self.data = data0
				fdSelect.format = 0
			else:
				self.data = data3
				fdSelect.format = 3
	
	def setPos(self, pos, endPos):
		self.parent.rawDict["FDSelect"] = pos
	
	def getDataLength(self):
		return len(self.data)
	
	def toFile(self, file):
		file.write(self.data)
+
+
class ROSConverter(SimpleConverter):

	"""Converter for the ROS (Registry, Order, Supplement) operator."""

	def xmlWrite(self, xmlWriter, name, value, progress):
		registry, order, supplement = value
		attrs = [
			('Registry', tostr(registry)),
			('Order', tostr(order)),
			('Supplement', supplement),
		]
		xmlWriter.simpletag(name, attrs)
		xmlWriter.newline()

	def xmlRead(self, name, attrs, content, parent):
		return (attrs['Registry'], attrs['Order'], safeEval(attrs['Supplement']))
+
+
+
# Top DICT operator table. Each entry is
# (opcode, name, argument type, default, converter); entries declared with
# a None converter are filled in by addConverters() below.
topDictOperators = [
#	opcode     name                  argument type   default    converter
	((12, 30), 'ROS',        ('SID','SID','number'), None,      ROSConverter()),
	((12, 20), 'SyntheticBase',      'number',       None,      None),
	(0,        'version',            'SID',          None,      None),
	(1,        'Notice',             'SID',          None,      Latin1Converter()),
	((12, 0),  'Copyright',          'SID',          None,      Latin1Converter()),
	(2,        'FullName',           'SID',          None,      None),
	((12, 38), 'FontName',           'SID',          None,      None),
	(3,        'FamilyName',         'SID',          None,      None),
	(4,        'Weight',             'SID',          None,      None),
	((12, 1),  'isFixedPitch',       'number',       0,         None),
	((12, 2),  'ItalicAngle',        'number',       0,         None),
	((12, 3),  'UnderlinePosition',  'number',       None,      None),
	((12, 4),  'UnderlineThickness', 'number',       50,        None),
	((12, 5),  'PaintType',          'number',       0,         None),
	((12, 6),  'CharstringType',     'number',       2,         None),
	((12, 7),  'FontMatrix',         'array',  [0.001,0,0,0.001,0,0],  None),
	(13,       'UniqueID',           'number',       None,      None),
	(5,        'FontBBox',           'array',  [0,0,0,0],       None),
	((12, 8),  'StrokeWidth',        'number',       0,         None),
	(14,       'XUID',               'array',        None,      None),
	((12, 21), 'PostScript',         'SID',          None,      None),
	((12, 22), 'BaseFontName',       'SID',          None,      None),
	((12, 23), 'BaseFontBlend',      'delta',        None,      None),
	((12, 31), 'CIDFontVersion',     'number',       0,         None),
	((12, 32), 'CIDFontRevision',    'number',       0,         None),
	((12, 33), 'CIDFontType',        'number',       0,         None),
	((12, 34), 'CIDCount',           'number',       8720,      None),
	(15,       'charset',            'number',       0,         CharsetConverter()),
	((12, 35), 'UIDBase',            'number',       None,      None),
	(16,       'Encoding',           'number',       0,         EncodingConverter()),
	(18,       'Private',       ('number','number'), None,      PrivateDictConverter()),
	((12, 37), 'FDSelect',           'number',       None,      FDSelectConverter()),
	((12, 36), 'FDArray',            'number',       None,      FDArrayConverter()),
	(17,       'CharStrings',        'number',       None,      CharStringsConverter()),
]
+
# Note! FDSelect and FDArray must both precede CharStrings in the output XML build order,
# in order for the font to compile back from xml.
+
+
# Private DICT operator table; same layout as topDictOperators.
privateDictOperators = [
#	opcode     name                  argument type   default    converter
	(6,        'BlueValues',         'delta',        None,      None),
	(7,        'OtherBlues',         'delta',        None,      None),
	(8,        'FamilyBlues',        'delta',        None,      None),
	(9,        'FamilyOtherBlues',   'delta',        None,      None),
	((12, 9),  'BlueScale',          'number',       0.039625,  None),
	((12, 10), 'BlueShift',          'number',       7,         None),
	((12, 11), 'BlueFuzz',           'number',       1,         None),
	(10,       'StdHW',              'number',       None,      None),
	(11,       'StdVW',              'number',       None,      None),
	((12, 12), 'StemSnapH',          'delta',        None,      None),
	((12, 13), 'StemSnapV',          'delta',        None,      None),
	((12, 14), 'ForceBold',          'number',       0,         None),
	((12, 15), 'ForceBoldThreshold', 'number',       None,      None),  # deprecated
	((12, 16), 'lenIV',              'number',       None,      None),  # deprecated
	((12, 17), 'LanguageGroup',      'number',       0,         None),
	((12, 18), 'ExpansionFactor',    'number',       0.06,      None),
	((12, 19), 'initialRandomSeed',  'number',       0,         None),
	(20,       'defaultWidthX',      'number',       0,         None),
	(21,       'nominalWidthX',      'number',       0,         None),
	(19,       'Subrs',              'number',       None,      SubrsConverter()),
]
+
def addConverters(table):
	"""Fill in a default converter for every table entry that lacks one.

	The converter is chosen from the entry's argument type; entries that
	already have a converter are left untouched.
	"""
	for i, (op, name, arg, default, conv) in enumerate(table):
		if conv is not None:
			continue
		if arg in ("delta", "array"):
			conv = ArrayConverter()
		elif arg == "number":
			conv = NumberConverter()
		elif arg == "SID":
			conv = ASCIIConverter()
		else:
			assert False
		table[i] = (op, name, arg, default, conv)
+
+addConverters(privateDictOperators)
+addConverters(topDictOperators)
+
+
class TopDictDecompiler(psCharStrings.DictDecompiler):
	"""Decompiler for binary Top DICT data, driven by topDictOperators."""
	operators = buildOperatorDict(topDictOperators)
+
+
class PrivateDictDecompiler(psCharStrings.DictDecompiler):
	"""Decompiler for binary Private DICT data, driven by privateDictOperators."""
	operators = buildOperatorDict(privateDictOperators)
+
+
class DictCompiler(object):

	"""Abstract base for compiling a CFF DICT (Top, Font or Private) to binary.

	At construction time the non-default values of *dictObj* are collected
	into self.rawDict (via each operator's converter); compile() later
	serializes them in the canonical operator order.
	"""

	def __init__(self, dictObj, strings, parent):
		assert isinstance(strings, IndexedStrings)
		self.dictObj = dictObj
		self.strings = strings
		self.parent = parent
		rawDict = {}
		for name in dictObj.order:
			value = getattr(dictObj, name, None)
			if value is None:
				continue
			conv = dictObj.converters[name]
			value = conv.write(dictObj, value)
			# Values equal to the operator's default are omitted entirely.
			if value == dictObj.defaults.get(name):
				continue
			rawDict[name] = value
		self.rawDict = rawDict

	def setPos(self, pos, endPos):
		# Hook: subclasses record their offset in the parent DICT as needed.
		pass

	def getDataLength(self):
		return len(self.compile("getDataLength"))

	def compile(self, reason):
		"""Serialize the collected operands and operators to a byte string.

		*reason* is only used for debug output.
		"""
		if DEBUG:
			print("-- compiling %s for %s" % (self.__class__.__name__, reason))
			print("in baseDict: ", self)
		rawDict = self.rawDict
		data = []
		for name in self.dictObj.order:
			value = rawDict.get(name)
			if value is None:
				continue
			op, argType = self.opcodes[name]
			if isinstance(argType, tuple):
				# Operator takes several operands, each with its own type.
				l = len(argType)
				assert len(value) == l, "value doesn't match arg type"
				for i in range(l):
					arg = argType[i]
					v = value[i]
					arghandler = getattr(self, "arg_" + arg)
					data.append(arghandler(v))
			else:
				arghandler = getattr(self, "arg_" + argType)
				data.append(arghandler(value))
			data.append(op)
		return bytesjoin(data)

	def toFile(self, file):
		file.write(self.compile("toFile"))

	# Per-argument-type operand encoders, dispatched by name from compile().
	def arg_number(self, num):
		return encodeNumber(num)
	def arg_SID(self, s):
		# Strings are stored as SIDs into the font's string index.
		return psCharStrings.encodeIntCFF(self.strings.getSID(s))
	def arg_array(self, value):
		data = []
		for num in value:
			data.append(encodeNumber(num))
		return bytesjoin(data)
	def arg_delta(self, value):
		# Delta arrays are stored as differences from the previous value.
		out = []
		last = 0
		for v in value:
			out.append(v - last)
			last = v
		data = []
		for num in out:
			data.append(encodeNumber(num))
		return bytesjoin(data)
+
+
def encodeNumber(num):
	"""Encode a DICT operand: floats as CFF reals, everything else as CFF ints."""
	if isinstance(num, float):
		return psCharStrings.encodeFloat(num)
	return psCharStrings.encodeIntCFF(num)
+
+
class TopDictCompiler(DictCompiler):

	opcodes = buildOpcodeDict(topDictOperators)

	def getChildren(self, strings):
		"""Return compiler objects for all data referenced from the Top DICT."""
		children = []
		if hasattr(self.dictObj, "charset") and self.dictObj.charset:
			children.append(CharsetCompiler(strings, self.dictObj.charset, self))
		if hasattr(self.dictObj, "Encoding"):
			encoding = self.dictObj.Encoding
			# A string Encoding names a predefined encoding; only custom
			# (non-string) encodings need to be compiled.
			if not isinstance(encoding, basestring):
				children.append(EncodingCompiler(strings, encoding, self))
		if hasattr(self.dictObj, "FDSelect"):
			# I have not yet supported merging a ttx CFF-CID font, as there are interesting
			# issues about merging the FDArrays. Here I assume that
			# either the font was read from XML, and the FDSelect indices are all
			# in the charstring data, or the FDSelect array is already fully defined.
			fdSelect = self.dictObj.FDSelect
			if len(fdSelect) == 0: # probably read in from XML; assume fdIndex in CharString data
				charStrings = self.dictObj.CharStrings
				for name in self.dictObj.charset:
					fdSelect.append(charStrings[name].fdSelectIndex)
			fdSelectComp = FDSelectCompiler(fdSelect, self)
			children.append(fdSelectComp)
		if hasattr(self.dictObj, "CharStrings"):
			items = []
			charStrings = self.dictObj.CharStrings
			# Charstrings are written in charset (glyph) order.
			for name in self.dictObj.charset:
				items.append(charStrings[name])
			charStringsComp = CharStringsCompiler(items, strings, self)
			children.append(charStringsComp)
		if hasattr(self.dictObj, "FDArray"):
			# I have not yet supported merging a ttx CFF-CID font, as there are interesting
			# issues about merging the FDArrays. Here I assume that the FDArray info is correct
			# and complete.
			fdArrayIndexComp = self.dictObj.FDArray.getCompiler(strings, self)
			children.append(fdArrayIndexComp)
			children.extend(fdArrayIndexComp.getChildren(strings))
		if hasattr(self.dictObj, "Private"):
			privComp = self.dictObj.Private.getCompiler(strings, self)
			children.append(privComp)
			children.extend(privComp.getChildren(strings))
		return children
+
+
class FontDictCompiler(DictCompiler):

	"""Compiler for one Font DICT of the FDArray in a CID-keyed font."""

	opcodes = buildOpcodeDict(topDictOperators)

	def getChildren(self, strings):
		"""Return compilers for nested objects (the Private DICT, if any)."""
		children = []
		if hasattr(self.dictObj, "Private"):
			compiler = self.dictObj.Private.getCompiler(strings, self)
			children.append(compiler)
			children.extend(compiler.getChildren(strings))
		return children
+
+
class PrivateDictCompiler(DictCompiler):

	"""Compiler for a Private DICT; records its (size, offset) in the parent."""

	opcodes = buildOpcodeDict(privateDictOperators)

	def setPos(self, pos, endPos):
		# The parent DICT's Private operator takes (size, offset) operands.
		self.parent.rawDict["Private"] = (endPos - pos, pos)
		self.pos = pos

	def getChildren(self, strings):
		"""Return compilers for nested objects (the Subrs index, if any)."""
		children = []
		if hasattr(self.dictObj, "Subrs"):
			children.append(self.dictObj.Subrs.getCompiler(strings, self))
		return children
+
+
class BaseDict(object):

	"""Common behavior for Top, Font and Private DICT objects.

	Subclasses supply the class attributes 'defaults', 'converters',
	'order', 'decompilerClass' and 'compilerClass'.
	"""

	def __init__(self, strings=None, file=None, offset=None):
		self.rawDict = {}
		if DEBUG:
			print("loading %s at %s" % (self.__class__.__name__, offset))
		self.file = file
		self.offset = offset
		self.strings = strings
		self.skipNames = []  # operator names to omit from toXML() output

	def decompile(self, data):
		"""Parse the binary DICT *data* into self.rawDict."""
		if DEBUG:
			print("    length %s is %s" % (self.__class__.__name__, len(data)))
		dec = self.decompilerClass(self.strings)
		dec.decompile(data)
		self.rawDict = dec.getDict()
		self.postDecompile()

	def postDecompile(self):
		# Hook for subclasses; called at the end of decompile().
		pass

	def getCompiler(self, strings, parent):
		return self.compilerClass(self, strings, parent)

	def __getattr__(self, name):
		# Lazily convert raw DICT values (falling back to defaults) on
		# first access, then cache the result as a regular instance
		# attribute so __getattr__ is not hit again for this name.
		value = self.rawDict.get(name)
		if value is None:
			value = self.defaults.get(name)
		if value is None:
			raise AttributeError(name)
		conv = self.converters[name]
		value = conv.read(self, value)
		setattr(self, name, value)
		return value

	def toXML(self, xmlWriter, progress):
		"""Write all present operators (minus self.skipNames) as XML."""
		for name in self.order:
			if name in self.skipNames:
				continue
			value = getattr(self, name, None)
			if value is None:
				continue
			conv = self.converters[name]
			conv.xmlWrite(xmlWriter, name, value, progress)

	def fromXML(self, name, attrs, content):
		conv = self.converters[name]
		value = conv.xmlRead(name, attrs, content, self)
		setattr(self, name, value)
+
+
class TopDict(BaseDict):

	"""The Top DICT of a CFF font."""

	defaults = buildDefaults(topDictOperators)
	converters = buildConverters(topDictOperators)
	order = buildOrder(topDictOperators)
	decompilerClass = TopDictDecompiler
	compilerClass = TopDictCompiler

	def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None):
		BaseDict.__init__(self, strings, file, offset)
		self.GlobalSubrs = GlobalSubrs

	def getGlyphOrder(self):
		return self.charset

	def postDecompile(self):
		offset = self.rawDict.get("CharStrings")
		if offset is None:
			return
		# get the number of glyphs beforehand.
		self.file.seek(offset)
		self.numGlyphs = readCard16(self.file)

	def toXML(self, xmlWriter, progress):
		if hasattr(self, "CharStrings"):
			self.decompileAllCharStrings(progress)
		if hasattr(self, "ROS"):
			self.skipNames = ['Encoding']
		if not hasattr(self, "ROS") or not hasattr(self, "CharStrings"):
			# these values have default values, but we only want them to show up
			# in CID fonts.
			self.skipNames = ['CIDFontVersion', 'CIDFontRevision', 'CIDFontType',
					'CIDCount']
		BaseDict.toXML(self, xmlWriter, progress)

	def decompileAllCharStrings(self, progress):
		# XXX only when doing ttdump -i?
		for i, charString in enumerate(self.CharStrings.values()):
			try:
				charString.decompile()
			except Exception:
				# Report which charstring failed, then re-raise with the
				# original traceback intact. (The old code did
				# 'raise typ(value)', which discarded the traceback and
				# broke on exception classes whose constructor does not
				# take a single argument.)
				print("Error in charstring ", i)
				raise
			if not i % 30 and progress:
				progress.increment(0)  # update
+
+
class FontDict(BaseDict):

	"""A Font DICT, one entry of the FDArray in a CID-keyed font."""

	defaults = buildDefaults(topDictOperators)
	converters = buildConverters(topDictOperators)
	order = buildOrder(topDictOperators)
	decompilerClass = None
	compilerClass = FontDictCompiler

	def __init__(self, strings=None, file=None, offset=None, GlobalSubrs=None):
		BaseDict.__init__(self, strings, file, offset)
		self.GlobalSubrs = GlobalSubrs

	def getGlyphOrder(self):
		"""Glyph order comes from the font-wide charset."""
		return self.charset

	def toXML(self, xmlWriter, progress):
		# The Encoding entry is not written for Font DICTs.
		self.skipNames = ['Encoding']
		BaseDict.toXML(self, xmlWriter, progress)
+	
+
+
class PrivateDict(BaseDict):
	"""The Private DICT of a CFF font (or of one Font DICT in a CID font)."""
	defaults = buildDefaults(privateDictOperators)
	converters = buildConverters(privateDictOperators)
	order = buildOrder(privateDictOperators)
	decompilerClass = PrivateDictDecompiler
	compilerClass = PrivateDictCompiler
+
+
class IndexedStrings(object):

	"""SID -> string mapping."""

	def __init__(self, file=None):
		if file is None:
			self.strings = []
		else:
			self.strings = [tostr(s, encoding="latin1") for s in Index(file)]

	def getCompiler(self):
		return IndexedStringsCompiler(self, None, None)

	def __len__(self):
		return len(self.strings)

	def __getitem__(self, SID):
		# SIDs below the standard count refer to the built-in string table.
		if SID < cffStandardStringCount:
			return cffStandardStrings[SID]
		return self.strings[SID - cffStandardStringCount]

	def getSID(self, s):
		"""Return the SID for string *s*, appending it to the index if new."""
		if not hasattr(self, "stringMapping"):
			self.buildStringMapping()
		SID = cffStandardStringMapping.get(s)
		if SID is None:
			SID = self.stringMapping.get(s)
		if SID is None:
			SID = len(self.strings) + cffStandardStringCount
			self.strings.append(s)
			self.stringMapping[s] = SID
		return SID

	def getStrings(self):
		return self.strings

	def buildStringMapping(self):
		# Lazily-built reverse mapping for the custom (non-standard) strings.
		self.stringMapping = {}
		for index, s in enumerate(self.strings):
			self.stringMapping[s] = index + cffStandardStringCount
+
+
# The 391 Standard Strings as used in the CFF format.
# from Adobe Technical Note #5176, version 1.0, 18 March 1998
+
+cffStandardStrings = ['.notdef', 'space', 'exclam', 'quotedbl', 'numbersign', 
+		'dollar', 'percent', 'ampersand', 'quoteright', 'parenleft', 'parenright', 
+		'asterisk', 'plus', 'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 
+		'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'colon', 
+		'semicolon', 'less', 'equal', 'greater', 'question', 'at', 'A', 'B', 'C', 
+		'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 
+		'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash', 
+		'bracketright', 'asciicircum', 'underscore', 'quoteleft', 'a', 'b', 'c', 
+		'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 
+		's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 'braceright', 
+		'asciitilde', 'exclamdown', 'cent', 'sterling', 'fraction', 'yen', 'florin', 
+		'section', 'currency', 'quotesingle', 'quotedblleft', 'guillemotleft', 
+		'guilsinglleft', 'guilsinglright', 'fi', 'fl', 'endash', 'dagger', 
+		'daggerdbl', 'periodcentered', 'paragraph', 'bullet', 'quotesinglbase', 
+		'quotedblbase', 'quotedblright', 'guillemotright', 'ellipsis', 'perthousand', 
+		'questiondown', 'grave', 'acute', 'circumflex', 'tilde', 'macron', 'breve', 
+		'dotaccent', 'dieresis', 'ring', 'cedilla', 'hungarumlaut', 'ogonek', 'caron', 
+		'emdash', 'AE', 'ordfeminine', 'Lslash', 'Oslash', 'OE', 'ordmasculine', 'ae', 
+		'dotlessi', 'lslash', 'oslash', 'oe', 'germandbls', 'onesuperior', 
+		'logicalnot', 'mu', 'trademark', 'Eth', 'onehalf', 'plusminus', 'Thorn', 
+		'onequarter', 'divide', 'brokenbar', 'degree', 'thorn', 'threequarters', 
+		'twosuperior', 'registered', 'minus', 'eth', 'multiply', 'threesuperior', 
+		'copyright', 'Aacute', 'Acircumflex', 'Adieresis', 'Agrave', 'Aring', 
+		'Atilde', 'Ccedilla', 'Eacute', 'Ecircumflex', 'Edieresis', 'Egrave', 
+		'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Ntilde', 'Oacute', 
+		'Ocircumflex', 'Odieresis', 'Ograve', 'Otilde', 'Scaron', 'Uacute', 
+		'Ucircumflex', 'Udieresis', 'Ugrave', 'Yacute', 'Ydieresis', 'Zcaron', 
+		'aacute', 'acircumflex', 'adieresis', 'agrave', 'aring', 'atilde', 'ccedilla', 
+		'eacute', 'ecircumflex', 'edieresis', 'egrave', 'iacute', 'icircumflex', 
+		'idieresis', 'igrave', 'ntilde', 'oacute', 'ocircumflex', 'odieresis', 
+		'ograve', 'otilde', 'scaron', 'uacute', 'ucircumflex', 'udieresis', 'ugrave', 
+		'yacute', 'ydieresis', 'zcaron', 'exclamsmall', 'Hungarumlautsmall', 
+		'dollaroldstyle', 'dollarsuperior', 'ampersandsmall', 'Acutesmall', 
+		'parenleftsuperior', 'parenrightsuperior', 'twodotenleader', 'onedotenleader', 
+		'zerooldstyle', 'oneoldstyle', 'twooldstyle', 'threeoldstyle', 'fouroldstyle', 
+		'fiveoldstyle', 'sixoldstyle', 'sevenoldstyle', 'eightoldstyle', 
+		'nineoldstyle', 'commasuperior', 'threequartersemdash', 'periodsuperior', 
+		'questionsmall', 'asuperior', 'bsuperior', 'centsuperior', 'dsuperior', 
+		'esuperior', 'isuperior', 'lsuperior', 'msuperior', 'nsuperior', 'osuperior', 
+		'rsuperior', 'ssuperior', 'tsuperior', 'ff', 'ffi', 'ffl', 'parenleftinferior', 
+		'parenrightinferior', 'Circumflexsmall', 'hyphensuperior', 'Gravesmall', 
+		'Asmall', 'Bsmall', 'Csmall', 'Dsmall', 'Esmall', 'Fsmall', 'Gsmall', 'Hsmall', 
+		'Ismall', 'Jsmall', 'Ksmall', 'Lsmall', 'Msmall', 'Nsmall', 'Osmall', 'Psmall', 
+		'Qsmall', 'Rsmall', 'Ssmall', 'Tsmall', 'Usmall', 'Vsmall', 'Wsmall', 'Xsmall', 
+		'Ysmall', 'Zsmall', 'colonmonetary', 'onefitted', 'rupiah', 'Tildesmall', 
+		'exclamdownsmall', 'centoldstyle', 'Lslashsmall', 'Scaronsmall', 'Zcaronsmall', 
+		'Dieresissmall', 'Brevesmall', 'Caronsmall', 'Dotaccentsmall', 'Macronsmall', 
+		'figuredash', 'hypheninferior', 'Ogoneksmall', 'Ringsmall', 'Cedillasmall', 
+		'questiondownsmall', 'oneeighth', 'threeeighths', 'fiveeighths', 'seveneighths', 
+		'onethird', 'twothirds', 'zerosuperior', 'foursuperior', 'fivesuperior', 
+		'sixsuperior', 'sevensuperior', 'eightsuperior', 'ninesuperior', 'zeroinferior', 
+		'oneinferior', 'twoinferior', 'threeinferior', 'fourinferior', 'fiveinferior', 
+		'sixinferior', 'seveninferior', 'eightinferior', 'nineinferior', 'centinferior', 
+		'dollarinferior', 'periodinferior', 'commainferior', 'Agravesmall', 
+		'Aacutesmall', 'Acircumflexsmall', 'Atildesmall', 'Adieresissmall', 'Aringsmall', 
+		'AEsmall', 'Ccedillasmall', 'Egravesmall', 'Eacutesmall', 'Ecircumflexsmall', 
+		'Edieresissmall', 'Igravesmall', 'Iacutesmall', 'Icircumflexsmall', 
+		'Idieresissmall', 'Ethsmall', 'Ntildesmall', 'Ogravesmall', 'Oacutesmall', 
+		'Ocircumflexsmall', 'Otildesmall', 'Odieresissmall', 'OEsmall', 'Oslashsmall', 
+		'Ugravesmall', 'Uacutesmall', 'Ucircumflexsmall', 'Udieresissmall', 
+		'Yacutesmall', 'Thornsmall', 'Ydieresissmall', '001.000', '001.001', '001.002', 
+		'001.003', 'Black', 'Bold', 'Book', 'Light', 'Medium', 'Regular', 'Roman', 
+		'Semibold'
+]
+
# Number of standard strings; must match the table above.
cffStandardStringCount = 391
assert len(cffStandardStrings) == cffStandardStringCount

# Reverse mapping: standard string -> SID.
cffStandardStringMapping = {}
for _sid, _name in enumerate(cffStandardStrings):
	cffStandardStringMapping[_name] = _sid
+
+cffISOAdobeStrings = [".notdef", "space", "exclam", "quotedbl", "numbersign",
+"dollar", "percent", "ampersand", "quoteright", "parenleft", "parenright",
+"asterisk", "plus", "comma", "hyphen", "period", "slash", "zero", "one", "two",
+"three", "four", "five", "six", "seven", "eight", "nine", "colon", "semicolon",
+"less", "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G",
+"H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W",
+"X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum",
+"underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
+"k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
+"braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent",
+"sterling", "fraction", "yen", "florin", "section", "currency", "quotesingle",
+"quotedblleft", "guillemotleft", "guilsinglleft", "guilsinglright", "fi", "fl",
+"endash", "dagger", "daggerdbl", "periodcentered", "paragraph", "bullet",
+"quotesinglbase", "quotedblbase", "quotedblright", "guillemotright", "ellipsis",
+"perthousand", "questiondown", "grave", "acute", "circumflex", "tilde",
+"macron", "breve", "dotaccent", "dieresis", "ring", "cedilla", "hungarumlaut",
+"ogonek", "caron", "emdash", "AE", "ordfeminine", "Lslash", "Oslash", "OE",
+"ordmasculine", "ae", "dotlessi", "lslash", "oslash", "oe", "germandbls",
+"onesuperior", "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus",
+"Thorn", "onequarter", "divide", "brokenbar", "degree", "thorn",
+"threequarters", "twosuperior", "registered", "minus", "eth", "multiply",
+"threesuperior", "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave",
+"Aring", "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave",
+"Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute",
+"Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute",
+"Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", "aacute",
+"acircumflex", "adieresis", "agrave", "aring", "atilde", "ccedilla", "eacute",
+"ecircumflex", "edieresis", "egrave", "iacute", "icircumflex", "idieresis",
+"igrave", "ntilde", "oacute", "ocircumflex", "odieresis", "ograve", "otilde",
+"scaron", "uacute", "ucircumflex", "udieresis", "ugrave", "yacute", "ydieresis",
+"zcaron"]
+
+cffISOAdobeStringCount = 229
+assert len(cffISOAdobeStrings) == cffISOAdobeStringCount
+
+cffIExpertStrings = [".notdef", "space", "exclamsmall", "Hungarumlautsmall",
+"dollaroldstyle", "dollarsuperior", "ampersandsmall", "Acutesmall",
+"parenleftsuperior", "parenrightsuperior", "twodotenleader", "onedotenleader",
+"comma", "hyphen", "period", "fraction", "zerooldstyle", "oneoldstyle",
+"twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle",
+"sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon", "semicolon",
+"commasuperior", "threequartersemdash", "periodsuperior", "questionsmall",
+"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
+"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
+"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
+"parenrightinferior", "Circumflexsmall", "hyphensuperior", "Gravesmall",
+"Asmall", "Bsmall", "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall",
+"Ismall", "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall",
+"Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", "Xsmall",
+"Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", "Tildesmall",
+"exclamdownsmall", "centoldstyle", "Lslashsmall", "Scaronsmall", "Zcaronsmall",
+"Dieresissmall", "Brevesmall", "Caronsmall", "Dotaccentsmall", "Macronsmall",
+"figuredash", "hypheninferior", "Ogoneksmall", "Ringsmall", "Cedillasmall",
+"onequarter", "onehalf", "threequarters", "questiondownsmall", "oneeighth",
+"threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds",
+"zerosuperior", "onesuperior", "twosuperior", "threesuperior", "foursuperior",
+"fivesuperior", "sixsuperior", "sevensuperior", "eightsuperior", "ninesuperior",
+"zeroinferior", "oneinferior", "twoinferior", "threeinferior", "fourinferior",
+"fiveinferior", "sixinferior", "seveninferior", "eightinferior", "nineinferior",
+"centinferior", "dollarinferior", "periodinferior", "commainferior",
+"Agravesmall", "Aacutesmall", "Acircumflexsmall", "Atildesmall",
+"Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", "Egravesmall",
+"Eacutesmall", "Ecircumflexsmall", "Edieresissmall", "Igravesmall",
+"Iacutesmall", "Icircumflexsmall", "Idieresissmall", "Ethsmall", "Ntildesmall",
+"Ogravesmall", "Oacutesmall", "Ocircumflexsmall", "Otildesmall",
+"Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", "Uacutesmall",
+"Ucircumflexsmall", "Udieresissmall", "Yacutesmall", "Thornsmall",
+"Ydieresissmall"]
+
+cffExpertStringCount = 166
+assert len(cffIExpertStrings) == cffExpertStringCount
+
+cffExpertSubsetStrings = [".notdef", "space", "dollaroldstyle",
+"dollarsuperior", "parenleftsuperior", "parenrightsuperior", "twodotenleader",
+"onedotenleader", "comma", "hyphen", "period", "fraction", "zerooldstyle",
+"oneoldstyle", "twooldstyle", "threeoldstyle", "fouroldstyle", "fiveoldstyle",
+"sixoldstyle", "sevenoldstyle", "eightoldstyle", "nineoldstyle", "colon",
+"semicolon", "commasuperior", "threequartersemdash", "periodsuperior",
+"asuperior", "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior",
+"lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior",
+"tsuperior", "ff", "fi", "fl", "ffi", "ffl", "parenleftinferior",
+"parenrightinferior", "hyphensuperior", "colonmonetary", "onefitted", "rupiah",
+"centoldstyle", "figuredash", "hypheninferior", "onequarter", "onehalf",
+"threequarters", "oneeighth", "threeeighths", "fiveeighths", "seveneighths",
+"onethird", "twothirds", "zerosuperior", "onesuperior", "twosuperior",
+"threesuperior", "foursuperior", "fivesuperior", "sixsuperior", "sevensuperior",
+"eightsuperior", "ninesuperior", "zeroinferior", "oneinferior", "twoinferior",
+"threeinferior", "fourinferior", "fiveinferior", "sixinferior", "seveninferior",
+"eightinferior", "nineinferior", "centinferior", "dollarinferior",
+"periodinferior", "commainferior"]
+
+cffExpertSubsetStringCount = 87
+assert len(cffExpertSubsetStrings) == cffExpertSubsetStringCount
diff --git a/Lib/fontTools/encodings/MacRoman.py b/Lib/fontTools/encodings/MacRoman.py
new file mode 100644
index 0000000..bfeb0d5
--- /dev/null
+++ b/Lib/fontTools/encodings/MacRoman.py
@@ -0,0 +1,37 @@
+MacRoman = [
+		'NUL', 'Eth', 'eth', 'Lslash', 'lslash', 'Scaron', 'scaron', 'Yacute', 
+		'yacute', 'HT', 'LF', 'Thorn', 'thorn', 'CR', 'Zcaron', 'zcaron', 'DLE', 'DC1', 
+		'DC2', 'DC3', 'DC4', 'onehalf', 'onequarter', 'onesuperior', 'threequarters', 
+		'threesuperior', 'twosuperior', 'brokenbar', 'minus', 'multiply', 'RS', 'US', 
+		'space', 'exclam', 'quotedbl', 'numbersign', 'dollar', 'percent', 'ampersand', 
+		'quotesingle', 'parenleft', 'parenright', 'asterisk', 'plus', 'comma', 
+		'hyphen', 'period', 'slash', 'zero', 'one', 'two', 'three', 'four', 'five', 
+		'six', 'seven', 'eight', 'nine', 'colon', 'semicolon', 'less', 'equal', 
+		'greater', 'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 
+		'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 
+		'bracketleft', 'backslash', 'bracketright', 'asciicircum', 'underscore', 
+		'grave', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 
+		'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'braceleft', 'bar', 
+		'braceright', 'asciitilde', 'DEL', 'Adieresis', 'Aring', 'Ccedilla', 'Eacute', 
+		'Ntilde', 'Odieresis', 'Udieresis', 'aacute', 'agrave', 'acircumflex', 
+		'adieresis', 'atilde', 'aring', 'ccedilla', 'eacute', 'egrave', 'ecircumflex', 
+		'edieresis', 'iacute', 'igrave', 'icircumflex', 'idieresis', 'ntilde', 
+		'oacute', 'ograve', 'ocircumflex', 'odieresis', 'otilde', 'uacute', 'ugrave', 
+		'ucircumflex', 'udieresis', 'dagger', 'degree', 'cent', 'sterling', 'section', 
+		'bullet', 'paragraph', 'germandbls', 'registered', 'copyright', 'trademark', 
+		'acute', 'dieresis', 'notequal', 'AE', 'Oslash', 'infinity', 'plusminus', 
+		'lessequal', 'greaterequal', 'yen', 'mu', 'partialdiff', 'summation', 
+		'product', 'pi', 'integral', 'ordfeminine', 'ordmasculine', 'Omega', 'ae', 
+		'oslash', 'questiondown', 'exclamdown', 'logicalnot', 'radical', 'florin', 
+		'approxequal', 'Delta', 'guillemotleft', 'guillemotright', 'ellipsis', 
+		'nbspace', 'Agrave', 'Atilde', 'Otilde', 'OE', 'oe', 'endash', 'emdash', 
+		'quotedblleft', 'quotedblright', 'quoteleft', 'quoteright', 'divide', 'lozenge', 
+		'ydieresis', 'Ydieresis', 'fraction', 'currency', 'guilsinglleft', 
+		'guilsinglright', 'fi', 'fl', 'daggerdbl', 'periodcentered', 'quotesinglbase', 
+		'quotedblbase', 'perthousand', 'Acircumflex', 'Ecircumflex', 'Aacute', 
+		'Edieresis', 'Egrave', 'Iacute', 'Icircumflex', 'Idieresis', 'Igrave', 'Oacute', 
+		'Ocircumflex', 'apple', 'Ograve', 'Uacute', 'Ucircumflex', 'Ugrave', 'dotlessi', 
+		'circumflex', 'tilde', 'macron', 'breve', 'dotaccent', 'ring', 'cedilla', 
+		'hungarumlaut', 'ogonek', 'caron'
+		]
+
diff --git a/Lib/fontTools/encodings/StandardEncoding.py b/Lib/fontTools/encodings/StandardEncoding.py
new file mode 100644
index 0000000..810b2a0
--- /dev/null
+++ b/Lib/fontTools/encodings/StandardEncoding.py
@@ -0,0 +1,48 @@
+StandardEncoding = [
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', 'space', 'exclam', 'quotedbl',
+		'numbersign', 'dollar', 'percent', 'ampersand',
+		'quoteright', 'parenleft', 'parenright', 'asterisk', 'plus',
+		'comma', 'hyphen', 'period', 'slash', 'zero', 'one', 'two',
+		'three', 'four', 'five', 'six', 'seven', 'eight', 'nine',
+		'colon', 'semicolon', 'less', 'equal', 'greater',
+		'question', 'at', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
+		'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
+		'U', 'V', 'W', 'X', 'Y', 'Z', 'bracketleft', 'backslash',
+		'bracketright', 'asciicircum', 'underscore', 'quoteleft',
+		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
+		'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
+		'y', 'z', 'braceleft', 'bar', 'braceright', 'asciitilde',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', 'exclamdown',
+		'cent', 'sterling', 'fraction', 'yen', 'florin', 'section',
+		'currency', 'quotesingle', 'quotedblleft', 'guillemotleft',
+		'guilsinglleft', 'guilsinglright', 'fi', 'fl', '.notdef',
+		'endash', 'dagger', 'daggerdbl', 'periodcentered',
+		'.notdef', 'paragraph', 'bullet', 'quotesinglbase',
+		'quotedblbase', 'quotedblright', 'guillemotright',
+		'ellipsis', 'perthousand', '.notdef', 'questiondown',
+		'.notdef', 'grave', 'acute', 'circumflex', 'tilde',
+		'macron', 'breve', 'dotaccent', 'dieresis', '.notdef',
+		'ring', 'cedilla', '.notdef', 'hungarumlaut', 'ogonek',
+		'caron', 'emdash', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', '.notdef',
+		'.notdef', '.notdef', '.notdef', 'AE', '.notdef',
+		'ordfeminine', '.notdef', '.notdef', '.notdef', '.notdef',
+		'Lslash', 'Oslash', 'OE', 'ordmasculine', '.notdef',
+		'.notdef', '.notdef', '.notdef', '.notdef', 'ae', '.notdef',
+		'.notdef', '.notdef', 'dotlessi', '.notdef', '.notdef',
+		'lslash', 'oslash', 'oe', 'germandbls', '.notdef',
+		'.notdef', '.notdef', '.notdef'
+		]
diff --git a/Lib/fontTools/encodings/__init__.py b/Lib/fontTools/encodings/__init__.py
new file mode 100644
index 0000000..e001bb2
--- /dev/null
+++ b/Lib/fontTools/encodings/__init__.py
@@ -0,0 +1,3 @@
+"""Empty __init__.py file to signal Python this directory is a package.
+(It can't be completely empty since WinZip seems to skip empty files.)
+"""
diff --git a/Lib/fontTools/inspect.py b/Lib/fontTools/inspect.py
new file mode 100644
index 0000000..875736d
--- /dev/null
+++ b/Lib/fontTools/inspect.py
@@ -0,0 +1,265 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod
+
+"""GUI font inspector.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools import misc, ttLib, cffLib
+import pygtk
+pygtk.require('2.0')
+import gtk
+import sys
+
+
+
class Row(object):
	"""One row of the inspector tree.

	Wraps a single attribute/item of a fontTools object and lazily
	materializes its children on first access, so that huge fonts can be
	browsed without decompiling everything up front.
	"""

	def __init__(self, parent, index, key, value, font):
		# parent: enclosing Row (None for the root)
		# index:  position of this row within the parent
		# key:    display name (attribute name, dict key, or list index)
		# value:  the wrapped object
		# font:   the owning TTFont (needed e.g. to expand glyf glyphs)
		self._parent = parent
		self._index = index
		self._key = key
		self._value = value
		self._font = font

		# Dispatch on the value's type; order matters: fonts first,
		# then sequences, then generic objects, then dicts, then strings.
		if isinstance(value, ttLib.TTFont):
			self._add_font(value)
			return

		if not isinstance(value, basestring):
			# Try sequences
			is_sequence = True
			try:
				len(value)
				iter(value)
				# It's hard to differentiate list-type sequences
				# from dict-type ones.  Try fetching item 0.
				value[0]
			except (TypeError, AttributeError, KeyError, IndexError):
				is_sequence = False
			if is_sequence:
				self._add_list(key, value)
				return
		if hasattr(value, '__dict__'):
			self._add_object(key, value)
			return
		if hasattr(value, 'items'):
			self._add_dict(key, value)
			return

		if isinstance(value, basestring):
			self._value_str = '"'+value+'"'
			self._children = []
			return

		# Everything else
		self._children = []

	def _filter_items(self):
		# Drop noise: back-references to the font, reader bookkeeping,
		# and private attributes.
		items = []
		for k,v in self._items:
			if isinstance(v, ttLib.TTFont):
				continue
			if k in ['reader', 'file', 'tableTag', 'compileStatus', 'recurse']:
				continue
			if isinstance(k, basestring) and k[0] == '_':
				continue
			items.append((k,v))
		self._items = items

	def _add_font(self, font):
		# Children of a font row are its tables, keyed by tag.
		self._items = [(tag,font[tag]) for tag in font.keys()]

	def _add_object(self, key, value):
		# Make sure item is decompiled
		try:
			value["asdf"]
		except (AttributeError, KeyError, TypeError, ttLib.TTLibError):
			pass
		if isinstance(value, ttLib.getTableModule('glyf').Glyph):
			# Glyph type needs explicit expanding to be useful
			value.expand(self._font['glyf'])
		if isinstance(value, misc.psCharStrings.T2CharString):
			try:
				value.decompile()
			except TypeError:  # Subroutines can't be decompiled
				pass
		if isinstance(value, cffLib.BaseDict):
			# Touch every raw key so lazy attributes get materialized.
			for k in value.rawDict.keys():
				getattr(value, k)
		if isinstance(value, cffLib.Index):
			# Load all items
			for i in range(len(value)):
				value[i]
			# Discard offsets as should not be needed anymore
			if hasattr(value, 'offsets'):
				del value.offsets

		self._value_str = value.__class__.__name__
		if isinstance(value, ttLib.tables.DefaultTable.DefaultTable):
			self._value_str += ' (%d Bytes)' % self._font.reader.tables[key].length
		self._items = sorted(value.__dict__.items())
		self._filter_items()

	def _add_dict(self, key, value):
		self._value_str = '%s of %d items' % (value.__class__.__name__, len(value))
		self._items = sorted(value.items())

	def _add_list(self, key, value):
		# Short lists are shown inline; longer ones only as a summary.
		if len(value) and len(value) <= 32:
			self._value_str = str(value)
		else:
			self._value_str = '%s of %d items' % (value.__class__.__name__,
							      len(value))
		self._items = list(enumerate(value))

	def __len__(self):
		if hasattr(self, '_children'):
			return len(self._children)
		if hasattr(self, '_items'):
			return len(self._items)
		assert False

	def _ensure_children(self):
		# Eagerly build all child rows (unused by __getitem__, which
		# materializes children one at a time instead).
		if hasattr(self, '_children'):
			return
		children = []
		for i,(k,v) in enumerate(self._items):
			children.append(Row(self, i, k, v, self._font))
		self._children = children
		del self._items

	def __getitem__(self, n):
		# Lazily create child n; the source (key, value) pair is dropped
		# once wrapped so each item is only converted once.
		if n >= len(self):
			return None
		if not hasattr(self, '_children'):
			self._children = [None] * len(self)
		c = self._children[n]
		if c is None:
			k,v = self._items[n]
			c = self._children[n] = Row(self, n, k, v, self._font)
			self._items[n] = None
		return c

	def get_parent(self):
		return self._parent

	def get_index(self):
		return self._index

	def get_key(self):
		return self._key

	def get_value(self):
		return self._value

	def get_value_str(self):
		# Prefer the human-readable summary computed in __init__.
		if hasattr(self,'_value_str'):
			return self._value_str
		return str(self._value)
+
class FontTreeModel(gtk.GenericTreeModel):
	"""Custom GTK tree model exposing a TTFont as a two-column
	(key, value) tree of Row objects.

	The on_* methods implement the pygtk GenericTreeModel virtual-method
	protocol; row references ("rowref") are Row instances.
	"""

	__gtype_name__ = 'FontTreeModel'

	def __init__(self, font):
		super(FontTreeModel, self).__init__()
		# Two displayed columns: key and value summary, both strings.
		self._columns = (str, str)
		self.font = font
		self._root = Row(None, 0, "font", font, font)

	def on_get_flags(self):
		return 0

	def on_get_n_columns(self):
		return len(self._columns)

	def on_get_column_type(self, index):
		return self._columns[index]

	def on_get_iter(self, path):
		# Walk the tree following the tuple-of-indices path.
		rowref = self._root
		while path:
			rowref = rowref[path[0]]
			path = path[1:]
		return rowref

	def on_get_path(self, rowref):
		# Inverse of on_get_iter: collect indices up to the root.
		path = []
		while rowref != self._root:
			path.append(rowref.get_index())
			rowref = rowref.get_parent()
		path.reverse()
		return tuple(path)

	def on_get_value(self, rowref, column):
		if column == 0:
			return rowref.get_key()
		else:
			return rowref.get_value_str()

	def on_iter_next(self, rowref):
		# Next sibling: same parent, index + 1 (Row returns None past
		# the end).  NOTE(review): assumes gtk never asks for the next
		# sibling of the root row (whose parent is None) — confirm.
		return rowref.get_parent()[rowref.get_index() + 1]

	def on_iter_children(self, rowref):
		return rowref[0]

	def on_iter_has_child(self, rowref):
		return bool(len(rowref))

	def on_iter_n_children(self, rowref):
		return len(rowref)

	def on_iter_nth_child(self, rowref, n):
		# gtk passes None for the (invisible) root node.
		if not rowref: rowref = self._root
		return rowref[n]

	def on_iter_parent(self, rowref):
		return rowref.get_parent()
+
class Inspect(object):
	"""Top-level inspector window: a scrolled two-column tree view over
	one font file."""

	def _delete_event(self, widget, event, data=None):
		# Closing any inspector window quits the whole application.
		gtk.main_quit()
		return False

	def __init__(self, fontfile):

		self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
		self.window.set_title("%s - pyftinspect" % fontfile)
		self.window.connect("delete_event", self._delete_event)
		self.window.set_size_request(400, 600)

		self.scrolled_window = gtk.ScrolledWindow()
		self.window.add(self.scrolled_window)

		# lazy=True defers table decompilation until rows are expanded.
		self.font = ttLib.TTFont(fontfile, lazy=True)
		self.treemodel = FontTreeModel(self.font)
		self.treeview = gtk.TreeView(self.treemodel)
		#self.treeview.set_reorderable(True)

		# Column 0 shows keys, column 1 shows value summaries.
		for i in range(2):
			col_name = ('Key', 'Value')[i]
			col = gtk.TreeViewColumn(col_name)
			col.set_sort_column_id(-1)
			self.treeview.append_column(col)

			cell = gtk.CellRendererText()
			col.pack_start(cell, True)
			col.add_attribute(cell, 'text', i)

		# Interactive search matches against the value column.
		self.treeview.set_search_column(1)
		self.scrolled_window.add(self.treeview)
		self.window.show_all()
+
def main(args):
	"""Command-line entry point: open one inspector window per font file
	and run the GTK main loop."""
	if not args:
		print("usage: pyftinspect font...", file=sys.stderr)
		sys.exit(1)
	for fontfile in args:
		Inspect(fontfile)
	gtk.main()

if __name__ == "__main__":
	main(sys.argv[1:])
diff --git a/Lib/fontTools/merge.py b/Lib/fontTools/merge.py
new file mode 100644
index 0000000..54e91e3
--- /dev/null
+++ b/Lib/fontTools/merge.py
@@ -0,0 +1,921 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
+
+"""Font merger.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools import ttLib, cffLib
+from fontTools.ttLib.tables import otTables, _h_e_a_d
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+from functools import reduce
+import sys
+import time
+import operator
+
+
+def _add_method(*clazzes, **kwargs):
+	"""Returns a decorator function that adds a new method to one or
+	more classes."""
+	allowDefault = kwargs.get('allowDefaultTable', False)
+	def wrapper(method):
+		for clazz in clazzes:
+			assert allowDefault or clazz != DefaultTable, 'Oops, table class not found.'
+			assert method.__name__ not in clazz.__dict__, \
+				"Oops, class '%s' has method '%s'." % (clazz.__name__,
+								       method.__name__)
+			setattr(clazz, method.__name__, method)
+		return None
+	return wrapper
+
+# General utility functions for merging values from different fonts
+
def equal(lst):
	"""Merge helper: all per-font values must agree; return that value."""
	values = list(lst)
	it = iter(values)
	reference = next(it)
	for other in it:
		assert other == reference, "Expected all items to be equal: %s" % values
	return reference
+
def first(lst):
	"""Merge helper: keep the value from the first font only."""
	iterator = iter(lst)
	return next(iterator)
+
def recalculate(lst):
	"""Merge helper: the field must be recomputed after merging; return
	NotImplemented so the merged table leaves it unset."""
	return NotImplemented
+
def current_time(lst):
	"""Merge helper: stamp the field with the current time expressed in
	the Mac epoch (seconds since 1904) used by the 'head' table."""
	return int(time.time() - _h_e_a_d.mac_epoch_diff)
+
def bitwise_and(lst):
	"""Merge helper: bitwise-AND of all per-font values (flag fields that
	must hold in every input)."""
	return reduce(lambda acc, item: acc & item, lst)
+
def bitwise_or(lst):
	"""Merge helper: bitwise-OR of all per-font values (flag fields that
	hold if any input sets them)."""
	return reduce(lambda acc, item: acc | item, lst)
+
def avg_int(lst):
	"""Merge helper: floored integer average of the values."""
	total = 0
	count = 0
	for value in lst:
		total += value
		count += 1
	return total // count
+
def onlyExisting(func):
	"""Wrap *func* so it only sees values that actually exist.

	The returned filter drops NotImplemented entries (fields absent from
	some fonts) and applies *func* to whatever remains; if nothing
	remains it returns NotImplemented itself.
	"""
	def wrapper(lst):
		present = [value for value in lst if value is not NotImplemented]
		if not present:
			return NotImplemented
		return func(present)
	return wrapper
+
def sumLists(lst):
	"""Merge helper: concatenate the per-font lists into a single list,
	preserving font order."""
	combined = []
	for chunk in lst:
		combined += chunk
	return combined
+
def sumDicts(lst):
	"""Merge helper: union of the per-font dicts; on key clashes the
	later font wins."""
	merged = {}
	for mapping in lst:
		merged.update(mapping)
	return merged
+
def mergeObjects(lst):
	"""Merge a list of homogeneous objects attribute-by-attribute, driven
	by their class's ``mergeMap`` dict.

	NotImplemented entries (absent tables) are ignored; if nothing is
	left the result is NotImplemented, and an all-None list merges to
	None.  Each attribute is merged by the mergeMap rule for its name,
	falling back to the '*' rule.
	"""
	lst = [item for item in lst if item is not NotImplemented]
	if not lst:
		return NotImplemented
	lst = [item for item in lst if item is not None]
	if not lst:
		return None

	clazz = lst[0].__class__
	assert all(type(item) == clazz for item in lst), lst

	logic = clazz.mergeMap
	returnTable = clazz()
	returnDict = {}

	# Union of every attribute name present on any of the objects.
	allKeys = set.union(set(), *(vars(table).keys() for table in lst))
	for key in allKeys:
		try:
			mergeLogic = logic[key]
		except KeyError:
			try:
				mergeLogic = logic['*']
			except KeyError:
				raise Exception("Don't know how to merge key %s of class %s" %
						(key, clazz.__name__))
		if mergeLogic is NotImplemented:
			continue
		# Objects missing the attribute contribute NotImplemented.
		value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
		if value is not NotImplemented:
			returnDict[key] = value

	returnTable.__dict__ = returnDict

	return returnTable
+
def mergeBits(bitmap):
	"""Build a merge function that combines integer bit-fields bit by bit.

	*bitmap* maps bit numbers to per-bit merge functions (each called
	with an iterable of booleans), plus the key 'size' (total number of
	bits) and optionally '*' as the rule for bits not listed explicitly.
	"""
	def wrapper(lst):
		values = list(lst)
		merged = 0
		for bitNumber in range(bitmap['size']):
			try:
				logic = bitmap[bitNumber]
			except KeyError:
				try:
					logic = bitmap['*']
				except KeyError:
					raise Exception("Don't know how to merge bit %s" % bitNumber)
			mask = 1 << bitNumber
			merged |= logic(bool(value & mask) for value in values) << bitNumber
		return merged
	return wrapper
+
+
@_add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
	"""Generic per-table merge driver, dispatching on mergeMap.

	A dict mergeMap merges attribute-by-attribute via Merger.mergeObjects;
	a callable mergeMap is applied to the list of tables directly.
	Tables without a mergeMap cannot be merged and are dropped
	(NotImplemented)."""
	if not hasattr(self, 'mergeMap'):
		m.log("Don't know how to merge '%s'." % self.tableTag)
		return NotImplemented

	logic = self.mergeMap

	if isinstance(logic, dict):
		return m.mergeObjects(self, self.mergeMap, tables)
	else:
		return logic(tables)
+
+
# maxp: limits default to the per-field maximum; the glyph count is
# additive.  Hinting-related limits come from the first font because the
# glyf merger strips hints from all other fonts.
ttLib.getTableClass('maxp').mergeMap = {
	'*': max,
	'tableTag': equal,
	'tableVersion': equal,
	'numGlyphs': sum,
	'maxStorage': first,
	'maxFunctionDefs': first,
	'maxInstructionDefs': first,
	# TODO When we correctly merge hinting data, update these values:
	# maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}
+
# head.flags is merged bit by bit: bits asserting a global property of
# the whole font (baseline at y=0, lsb at x=0, ...) must hold in every
# input and are AND'ed; everything else is OR'ed.
headFlagsMergeBitMap = {
	'size': 16,
	'*': bitwise_or,
	1: bitwise_and, # Baseline at y = 0
	2: bitwise_and, # lsb at x = 0
	3: bitwise_and, # Force ppem to integer values. FIXME?
	5: bitwise_and, # Font is vertical
	6: lambda bit: 0, # Always set to zero
	11: bitwise_and, # Font data is 'lossless'
	13: bitwise_and, # Optimized for ClearType
	14: bitwise_and, # Last resort font. FIXME? equal or first may be better
	15: lambda bit: 0, # Always set to zero
}

# head: bounding box takes the extremes, timestamps are refreshed, the
# checksum is zeroed (recomputed at compile time) and indexToLocFormat
# is left for the compiler to recalculate.
ttLib.getTableClass('head').mergeMap = {
	'tableTag': equal,
	'tableVersion': max,
	'fontRevision': max,
	'checkSumAdjustment': lambda lst: 0, # We need *something* here
	'magicNumber': equal,
	'flags': mergeBits(headFlagsMergeBitMap),
	'unitsPerEm': equal,
	'created': current_time,
	'modified': current_time,
	'xMin': min,
	'yMin': min,
	'xMax': max,
	'yMax': max,
	'macStyle': first,
	'lowestRecPPEM': max,
	'fontDirectionHint': lambda lst: 2,
	'indexToLocFormat': recalculate,
	'glyphDataFormat': equal,
}
+
# hhea: vertical metrics take the extremes; caret fields come from the
# first font; numberOfHMetrics is recomputed at compile time.
ttLib.getTableClass('hhea').mergeMap = {
	'*': equal,
	'tableTag': equal,
	'tableVersion': max,
	'ascent': max,
	'descent': min,
	'lineGap': max,
	'advanceWidthMax': max,
	'minLeftSideBearing': min,
	'minRightSideBearing': min,
	'xMaxExtent': max,
	'caretSlopeRise': first,
	'caretSlopeRun': first,
	'caretOffset': first,
	'numberOfHMetrics': recalculate,
}

# OS/2.fsType embedding bits: restriction bits (1, 8, 9) are OR'ed (any
# font restricting restricts the merge), permission bits (2, 3) are
# AND'ed (every font must allow them); all other bits are cleared.
os2FsTypeMergeBitMap = {
	'size': 16,
	'*': lambda bit: 0,
	1: bitwise_or, # no embedding permitted
	2: bitwise_and, # allow previewing and printing documents
	3: bitwise_and, # allow editing documents
	8: bitwise_or, # no subsetting permitted
	9: bitwise_or, # no embedding of outlines permitted
}
+
def mergeOs2FsType(lst):
	"""Merge OS/2.fsType embedding-permission fields.

	Each input value is first normalized to its least restrictive
	equivalent form, then the values are combined bit-wise via
	os2FsTypeMergeBitMap, and finally contradictory bits are cleaned up.
	Returns the merged fsType value.
	"""
	lst = list(lst)
	if all(item == 0 for item in lst):
		# All inputs are installable-embedding; stay that way.
		return 0

	# Compute least restrictive logic for each fsType value
	for i in range(len(lst)):
		# unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
		if lst[i] & 0x000C:
			lst[i] &= ~0x0002
		# set bit 2 (allow previewing) if bit 3 is set (allow editing).
		# BUGFIX: this used to be an 'elif', which could never trigger
		# because bit 3 being set always satisfies the test above.
		if lst[i] & 0x0008:
			lst[i] |= 0x0004
		# set bits 2 and 3 if everything is allowed (value untouched 0
		# cannot arise from the normalizations above)
		if lst[i] == 0:
			lst[i] = 0x000C

	fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
	# unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
	if fsType & 0x0002:
		fsType &= ~0x000C
	return fsType
+
+
# OS/2: default to the first font's value; Unicode/code-page ranges are
# unioned, character-index bounds and vertical metrics take the extremes.
ttLib.getTableClass('OS/2').mergeMap = {
	'*': first,
	'tableTag': equal,
	'version': max,
	'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this
	'fsType': mergeOs2FsType, # Will be overwritten
	'panose': first, # FIXME: should really be the first Latin font
	'ulUnicodeRange1': bitwise_or,
	'ulUnicodeRange2': bitwise_or,
	'ulUnicodeRange3': bitwise_or,
	'ulUnicodeRange4': bitwise_or,
	'fsFirstCharIndex': min,
	'fsLastCharIndex': max,
	'sTypoAscender': max,
	'sTypoDescender': min,
	'sTypoLineGap': max,
	'usWinAscent': max,
	'usWinDescent': max,
	# Version 2,3,4
	'ulCodePageRange1': onlyExisting(bitwise_or),
	'ulCodePageRange2': onlyExisting(bitwise_or),
	# BUGFIX: the attribute is spelled 'usMaxContext'; the old key
	# 'usMaxContex' never matched and the field silently fell through
	# to the '*' (first) rule instead of taking the maximum.
	'usMaxContext': onlyExisting(max),
	# TODO version 5
}
+
@_add_method(ttLib.getTableClass('OS/2'))
def merge(self, m, tables):
	"""Merge OS/2 tables via mergeMap, then sanitize the merged fsType
	according to the table's version."""
	DefaultTable.merge(self, m, tables)
	if self.version < 2:
		# bits 8 and 9 are reserved and should be set to zero
		self.fsType &= ~0x0300
	if self.version >= 3:
		# Only one of bits 1, 2, and 3 may be set. We already take
		# care of bit 1 implications in mergeOs2FsType. So unset
		# bit 2 if bit 3 is already set.
		if self.fsType & 0x0008:
			self.fsType &= ~0x0004
	return self
+
# post: glyph-name mappings are unioned; memory hints zeroed.
ttLib.getTableClass('post').mergeMap = {
	'*': first,
	'tableTag': equal,
	'formatType': max,
	'isFixedPitch': min,
	'minMemType42': max,
	'maxMemType42': lambda lst: 0,
	'minMemType1': max,
	'maxMemType1': lambda lst: 0,
	'mapping': onlyExisting(sumDicts),
	'extraNames': lambda lst: [],
}

# Metrics tables: per-glyph metrics dicts simply combine (glyph names
# are unique after renaming).
ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
	'tableTag': equal,
	'metrics': sumDicts,
}

ttLib.getTableClass('gasp').mergeMap = {
	'tableTag': equal,
	'version': max,
	'gaspRange': first, # FIXME? Appears irreconcilable
}

ttLib.getTableClass('name').mergeMap = {
	'tableTag': equal,
	'names': first, # FIXME? Does mixing name records make sense?
}

# loca is fully recomputed when the merged glyf table is compiled.
ttLib.getTableClass('loca').mergeMap = {
	'*': recalculate,
	'tableTag': equal,
}

ttLib.getTableClass('glyf').mergeMap = {
	'tableTag': equal,
	'glyphs': sumDicts,
	'glyphOrder': sumLists,
}
+
@_add_method(ttLib.getTableClass('glyf'))
def merge(self, m, tables):
	"""Merge glyf tables: strip hints from all but the first font (their
	fpgm/prep/cvt are discarded), expand composites so their component
	glyph names get remapped, then merge via mergeMap."""
	for i,table in enumerate(tables):
		for g in table.glyphs.values():
			if i:
				# Drop hints for all but first font, since
				# we don't map functions / CVT values.
				g.removeHinting()
			# Expand composite glyphs to load their
			# composite glyph names.
			if g.isComposite():
				g.expand(table)
	return DefaultTable.merge(self, m, tables)
+
# Hinting support tables come wholesale from the first font; hints of
# the other fonts were stripped in the glyf merger above.
ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
+
@_add_method(ttLib.getTableClass('cmap'))
def merge(self, m, tables):
	"""Merge cmap tables into a single Windows Unicode subtable.

	Characters mapped by more than one font keep the first mapping; the
	conflicting (old, new) glyph pairs are recorded per font in
	m.duplicateGlyphsPerFont for the GSUB merger to resolve via a
	synthesized 'locl' feature."""
	# TODO Handle format=14.
	cmapTables = [(t,fontIdx) for fontIdx,table in enumerate(tables) for t in table.tables
		      if t.isUnicode()]
	# TODO Better handle format-4 and format-12 coexisting in same font.
	# TODO Insert both a format-4 and format-12 if needed.
	module = ttLib.getTableModule('cmap')
	assert all(t.format in [4, 12] for t,_ in cmapTables)
	format = max(t.format for t,_ in cmapTables)
	cmapTable = module.cmap_classes[format](format)
	cmapTable.cmap = {}
	cmapTable.platformID = 3
	cmapTable.platEncID = max(t.platEncID for t,_ in cmapTables)
	cmapTable.language = 0
	cmap = cmapTable.cmap
	for table,fontIdx in cmapTables:
		# TODO handle duplicates.
		for uni,gid in table.cmap.items():
			oldgid = cmap.get(uni, None)
			if oldgid is None:
				cmap[uni] = gid
			elif oldgid != gid:
				# Char previously mapped to oldgid, now to gid.
				# Record, to fix up in GSUB 'locl' later.
				assert m.duplicateGlyphsPerFont[fontIdx].get(oldgid, gid) == gid
				m.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
	self.tableVersion = 0
	self.tables = [cmapTable]
	self.numSubTables = len(self.tables)
	return self
+
+
# OpenType layout structures: lists concatenate (counts are summed) and
# their referenced lookups/features were turned into unique ids by
# Merger._preMerge, so concatenation cannot collide.
otTables.ScriptList.mergeMap = {
	'ScriptCount': sum,
	'ScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.ScriptTag),
}

otTables.FeatureList.mergeMap = {
	'FeatureCount': sum,
	'FeatureRecord': sumLists,
}

otTables.LookupList.mergeMap = {
	'LookupCount': sum,
	'Lookup': sumLists,
}

otTables.Coverage.mergeMap = {
	'glyphs': sumLists,
}

otTables.ClassDef.mergeMap = {
	'classDefs': sumDicts,
}

otTables.LigCaretList.mergeMap = {
	'Coverage': mergeObjects,
	'LigGlyphCount': sum,
	'LigGlyph': sumLists,
}

otTables.AttachList.mergeMap = {
	'Coverage': mergeObjects,
	'GlyphCount': sum,
	'AttachPoint': sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
	'MarkSetTableFormat': equal,
	'MarkSetCount': sum,
	'Coverage': sumLists,
}

otTables.GDEF.mergeMap = {
	'*': mergeObjects,
	'Version': max,
}

otTables.GSUB.mergeMap = otTables.GPOS.mergeMap = {
	'*': mergeObjects,
	'Version': max,
}

# The ttLib wrapper tables all merge by recursing into their 'table'
# payload with the otTables mergeMaps above.
ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
	'tableTag': onlyExisting(equal), # XXX clean me up
	'table': mergeObjects,
}
+
@_add_method(ttLib.getTableClass('GSUB'))
def merge(self, m, tables):
	"""Merge GSUB tables, resolving glyphs duplicated by the cmap merger.

	For every font with recorded duplicates, a single-substitution
	lookup (old glyph -> new glyph) is synthesized and prepended to a
	'locl' feature of every non-DFLT script/langsys, so the font still
	renders the duplicated characters with the surviving glyphs.
	Lookup and feature references are ids at this stage (see
	Merger._preMerge)."""

	assert len(tables) == len(m.duplicateGlyphsPerFont)
	for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
		if not dups: continue
		assert (table is not None and table is not NotImplemented), "Have duplicates to resolve for font %d but no GSUB" % (i + 1)
		lookupMap = dict((id(v),v) for v in table.table.LookupList.Lookup)
		featureMap = dict((id(v),v) for v in table.table.FeatureList.FeatureRecord)
		synthFeature = None
		synthLookup = None
		for script in table.table.ScriptList.ScriptRecord:
			if script.ScriptTag == 'DFLT': continue # XXX
			for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
				# Find (or synthesize) the langsys's 'locl' feature.
				feature = [featureMap[v] for v in langsys.FeatureIndex if featureMap[v].FeatureTag == 'locl']
				assert len(feature) <= 1
				if feature:
					feature = feature[0]
				else:
					if not synthFeature:
						synthFeature = otTables.FeatureRecord()
						synthFeature.FeatureTag = 'locl'
						f = synthFeature.Feature = otTables.Feature()
						f.FeatureParams = None
						f.LookupCount = 0
						f.LookupListIndex = []
						langsys.FeatureIndex.append(id(synthFeature))
						featureMap[id(synthFeature)] = synthFeature
						langsys.FeatureIndex.sort(key=lambda v: featureMap[v].FeatureTag)
						table.table.FeatureList.FeatureRecord.append(synthFeature)
						table.table.FeatureList.FeatureCount += 1
					feature = synthFeature

				# One synthesized lookup per font covers all langsys.
				if not synthLookup:
					subtable = otTables.SingleSubst()
					subtable.mapping = dups
					synthLookup = otTables.Lookup()
					synthLookup.LookupFlag = 0
					synthLookup.LookupType = 1
					synthLookup.SubTableCount = 1
					synthLookup.SubTable = [subtable]
					table.table.LookupList.Lookup.append(synthLookup)
					table.table.LookupList.LookupCount += 1

				feature.Feature.LookupListIndex[:0] = [id(synthLookup)]
				feature.Feature.LookupCount += 1


	DefaultTable.merge(self, m, tables)
	return self
+
+
+
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def mapLookups(self, lookupMap):
  # These subtable types reference no other lookups: nothing to remap.
  pass
+
+# Copied and trimmed down from subset.py
+@_add_method(otTables.ContextSubst,
+             otTables.ChainContextSubst,
+             otTables.ContextPos,
+             otTables.ChainContextPos)
+def __classify_context(self):
+
+  class ContextHelper(object):
+    def __init__(self, klass, Format):
+      if klass.__name__.endswith('Subst'):
+        Typ = 'Sub'
+        Type = 'Subst'
+      else:
+        Typ = 'Pos'
+        Type = 'Pos'
+      if klass.__name__.startswith('Chain'):
+        Chain = 'Chain'
+      else:
+        Chain = ''
+      ChainTyp = Chain+Typ
+
+      self.Typ = Typ
+      self.Type = Type
+      self.Chain = Chain
+      self.ChainTyp = ChainTyp
+
+      self.LookupRecord = Type+'LookupRecord'
+
+      if Format == 1:
+        self.Rule = ChainTyp+'Rule'
+        self.RuleSet = ChainTyp+'RuleSet'
+      elif Format == 2:
+        self.Rule = ChainTyp+'ClassRule'
+        self.RuleSet = ChainTyp+'ClassSet'
+
+  if self.Format not in [1, 2, 3]:
+    return None  # Don't shoot the messenger; let it go
+  if not hasattr(self.__class__, "__ContextHelpers"):
+    self.__class__.__ContextHelpers = {}
+  if self.Format not in self.__class__.__ContextHelpers:
+    helper = ContextHelper(self.__class__, self.Format)
+    self.__class__.__ContextHelpers[self.Format] = helper
+  return self.__class__.__ContextHelpers[self.Format]
+
+
@_add_method(otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def mapLookups(self, lookupMap):
  """Rewrite nested lookup references of a contextual subtable through
  *lookupMap* (index->id before merging, id->index afterwards)."""
  c = self.__classify_context()

  if self.Format in [1, 2]:
    # Rules are nested two levels deep in (Class)RuleSets.
    for rs in getattr(self, c.RuleSet):
      if not rs: continue
      for r in getattr(rs, c.Rule):
        if not r: continue
        for ll in getattr(r, c.LookupRecord):
          if not ll: continue
          ll.LookupListIndex = lookupMap[ll.LookupListIndex]
  elif self.Format == 3:
    # Format 3 holds its lookup records directly.
    for ll in getattr(self, c.LookupRecord):
      if not ll: continue
      ll.LookupListIndex = lookupMap[ll.LookupListIndex]
  else:
    assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
	"""Remap lookup references in every subtable of this lookup."""
	for st in self.SubTable:
		if not st: continue
		st.mapLookups(lookupMap)

@_add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
	"""Remap lookup references throughout the lookup list."""
	for l in self.Lookup:
		if not l: continue
		l.mapLookups(lookupMap)

@_add_method(otTables.Feature)
def mapLookups(self, lookupMap):
	"""Remap this feature's lookup references."""
	self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]

@_add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
	"""Remap lookup references of every feature record."""
	for f in self.FeatureRecord:
		if not f or not f.Feature: continue
		f.Feature.mapLookups(lookupMap)

@_add_method(otTables.DefaultLangSys,
             otTables.LangSys)
def mapFeatures(self, featureMap):
	"""Remap this langsys's feature references (65535 = no required
	feature, left untouched)."""
	self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
	if self.ReqFeatureIndex != 65535:
		self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]

@_add_method(otTables.Script)
def mapFeatures(self, featureMap):
	"""Remap feature references of the default and per-language systems."""
	if self.DefaultLangSys:
		self.DefaultLangSys.mapFeatures(featureMap)
	for l in self.LangSysRecord:
		if not l or not l.LangSys: continue
		l.LangSys.mapFeatures(featureMap)

@_add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
	"""Remap feature references of every script record."""
	for s in self.ScriptRecord:
		if not s or not s.Script: continue
		s.Script.mapFeatures(featureMap)
+
+
class Options(object):

  """Very simple command-line option container.

  Option names correspond to attributes; only attributes that already
  exist may be set.  parse_opts() understands --name, --no-name,
  --name=value, and the list operations --name+=v1,v2 / --name-=v1,v2.
  """

  class UnknownOptionError(Exception):
    pass

  def __init__(self, **kwargs):

    self.set(**kwargs)

  def set(self, **kwargs):
    """Set known options; raise UnknownOptionError for anything else."""
    for k,v in kwargs.items():
      if not hasattr(self, k):
        raise self.UnknownOptionError("Unknown option '%s'" % k)
      setattr(self, k, v)

  def parse_opts(self, argv, ignore_unknown=False):
    """Consume '--' options from argv and return the remaining arguments.

    ignore_unknown may be True (pass every unknown option through to the
    returned list), False (raise UnknownOptionError), or a collection of
    option names to pass through selectively.  Values are coerced to the
    type of the option's current value.
    """
    ret = []
    opts = {}
    for a in argv:
      orig_a = a
      if not a.startswith('--'):
        ret.append(a)
        continue
      a = a[2:]
      i = a.find('=')
      op = '='
      if i == -1:
        # Bare flag: --name sets True, --no-name sets False.
        if a.startswith("no-"):
          k = a[3:]
          v = False
        else:
          k = a
          v = True
      else:
        k = a[:i]
        if k[-1] in "-+":
          op = k[-1]+'='  # Op is '-=' or '+=' now.
          k = k[:-1]
        v = a[i+1:]
      k = k.replace('-', '_')
      if not hasattr(self, k):
        # BUGFIX: with the default ignore_unknown=False the old code
        # evaluated "k in False" and raised TypeError instead of the
        # intended UnknownOptionError.
        if ignore_unknown is True or (ignore_unknown and k in ignore_unknown):
          ret.append(orig_a)
          continue
        else:
          raise self.UnknownOptionError("Unknown option '%s'" % a)

      # Coerce to the type of the existing value.
      ov = getattr(self, k)
      if isinstance(ov, bool):
        v = bool(v)
      elif isinstance(ov, int):
        v = int(v)
      elif isinstance(ov, list):
        vv = v.split(',')
        if vv == ['']:
          vv = []
        vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
        if op == '=':
          v = vv
        elif op == '+=':
          v = ov
          v.extend(vv)
        elif op == '-=':
          v = ov
          for x in vv:
            if x in v:
              v.remove(x)
        else:
          assert 0

      opts[k] = v
    self.set(**opts)

    return ret
+
+
class Merger(object):
	"""Merges multiple fonts into a single TTFont.

	Usage:
		merger = Merger(options=..., log=...)
		mega = merger.merge(['a.ttf', 'b.ttf'])
	"""

	def __init__(self, options=None, log=None):
		# Fall back to a quiet Logger / default Options when not given.
		if not log:
			log = Logger()
		if not options:
			options = Options()

		self.options = options
		self.log = log

	def merge(self, fontfiles):
		"""Load *fontfiles*, merge their tables and return the combined
		TTFont object (not yet saved)."""

		mega = ttLib.TTFont()

		#
		# Settle on a mega glyph order.
		#
		fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
		glyphOrders = [font.getGlyphOrder() for font in fonts]
		megaGlyphOrder = self._mergeGlyphOrders(glyphOrders)
		# Reload fonts and set new glyph names on them.
		# TODO Is it necessary to reload font?  I think it is.  At least
		# it's safer, in case tables were loaded to provide glyph names.
		fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
		for font,glyphOrder in zip(fonts, glyphOrders):
			font.setGlyphOrder(glyphOrder)
		mega.setGlyphOrder(megaGlyphOrder)

		for font in fonts:
			self._preMerge(font)

		# Per-font mapping of duplicated glyphs, filled in by the cmap
		# merger and consumed by the GSUB merger.
		self.duplicateGlyphsPerFont = [{} for f in fonts]

		allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
		allTags.discard('GlyphOrder')
		# 'cmap' must be merged before 'GSUB': the cmap merger records
		# duplicate glyphs that the GSUB merger resolves.  BUGFIX: the
		# old code unconditionally remove()d both tags and raised
		# KeyError when merging fonts without a cmap or GSUB table.
		specialTags = [tag for tag in ('cmap', 'GSUB') if tag in allTags]
		for tag in specialTags:
			allTags.remove(tag)
		allTags = specialTags + list(allTags)
		for tag in allTags:

			tables = [font.get(tag, NotImplemented) for font in fonts]

			clazz = ttLib.getTableClass(tag)
			table = clazz(tag).merge(self, tables)
			# XXX Clean this up and use:  table = mergeObjects(tables)

			if table is not NotImplemented and table is not False:
				mega[tag] = table
				self.log("Merged '%s'." % tag)
			else:
				self.log("Dropped '%s'." % tag)
			self.log.lapse("merge '%s'" % tag)

		del self.duplicateGlyphsPerFont

		self._postMerge(mega)

		return mega

	def _mergeGlyphOrders(self, glyphOrders):
		"""Modifies passed-in glyphOrders to reflect new glyph names.
		Returns glyphOrder for the merged font."""
		# Simply append font index to the glyph name for now.
		# TODO Even this simplistic numbering can result in conflicts.
		# But then again, we have to improve this soon anyway.
		mega = []
		for n,glyphOrder in enumerate(glyphOrders):
			for i,glyphName in enumerate(glyphOrder):
				glyphName += "#" + repr(n)
				glyphOrder[i] = glyphName
				mega.append(glyphName)
		return mega

	def mergeObjects(self, returnTable, logic, tables):
		"""Merge *tables* attribute-by-attribute into *returnTable*
		according to the *logic* mergeMap dict ('*' is the fallback
		rule; NotImplemented rules and results are skipped)."""
		# Right now we don't use self at all.  Will use in the future
		# for options and logging.

		allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
		for key in allKeys:
			try:
				mergeLogic = logic[key]
			except KeyError:
				try:
					mergeLogic = logic['*']
				except KeyError:
					raise Exception("Don't know how to merge key %s of class %s" %
							(key, returnTable.__class__.__name__))
			if mergeLogic is NotImplemented:
				continue
			value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
			if value is not NotImplemented:
				setattr(returnTable, key, value)

		return returnTable

	def _preMerge(self, font):
		"""Replace GSUB/GPOS lookup and feature indices with object ids,
		so that concatenated lists can be renumbered consistently in
		_postMerge."""

		# Map indices to references

		GDEF = font.get('GDEF')
		GSUB = font.get('GSUB')
		GPOS = font.get('GPOS')

		for t in [GSUB, GPOS]:
			if not t: continue

			if t.table.LookupList:
				lookupMap = dict((i,id(v)) for i,v in enumerate(t.table.LookupList.Lookup))
				t.table.LookupList.mapLookups(lookupMap)
				if t.table.FeatureList:
					# XXX Handle present FeatureList but absent LookupList
					t.table.FeatureList.mapLookups(lookupMap)

			if t.table.FeatureList and t.table.ScriptList:
				featureMap = dict((i,id(v)) for i,v in enumerate(t.table.FeatureList.FeatureRecord))
				t.table.ScriptList.mapFeatures(featureMap)

		# TODO GDEF/Lookup MarkFilteringSets
		# TODO FeatureParams nameIDs

	def _postMerge(self, font):
		"""Inverse of _preMerge: turn object ids back into list indices
		in the merged font."""

		# Map references back to indices

		GDEF = font.get('GDEF')
		GSUB = font.get('GSUB')
		GPOS = font.get('GPOS')

		for t in [GSUB, GPOS]:
			if not t: continue

			if t.table.LookupList:
				lookupMap = dict((id(v),i) for i,v in enumerate(t.table.LookupList.Lookup))
				t.table.LookupList.mapLookups(lookupMap)
				if t.table.FeatureList:
					# XXX Handle present FeatureList but absent LookupList
					t.table.FeatureList.mapLookups(lookupMap)

			if t.table.FeatureList and t.table.ScriptList:
				# XXX Handle present ScriptList but absent FeatureList
				featureMap = dict((id(v),i) for i,v in enumerate(t.table.FeatureList.FeatureRecord))
				t.table.ScriptList.mapFeatures(featureMap)

		# TODO GDEF/Lookup MarkFilteringSets
		# TODO FeatureParams nameIDs
+
+
class Logger(object):
  """Tiny logging helper: verbose messages, per-step timing, and an
  optional XML dump of the merged font."""

  def __init__(self, verbose=False, xml=False, timing=False):
    self.verbose = verbose
    self.xml = xml
    self.timing = timing
    self.last_time = self.start_time = time.time()

  def parse_opts(self, argv):
    """Strip --verbose/--xml/--timing from a copy of argv, enabling each
    flag found, and return the remaining arguments."""
    remaining = list(argv)
    for flag in ('verbose', 'xml', 'timing'):
      option = '--' + flag
      if option in remaining:
        setattr(self, flag, True)
        remaining.remove(option)
    return remaining

  def __call__(self, *things):
    """Print the arguments, space-separated, when verbose is on."""
    if self.verbose:
      print(' '.join(str(thing) for thing in things))

  def lapse(self, *things):
    """Report (when timing is on) the time elapsed since the last call."""
    if not self.timing:
      return
    now = time.time()
    print("Took %0.3fs to %s" % (now - self.last_time,
                                 ' '.join(str(thing) for thing in things)))
    self.last_time = now

  def font(self, font, file=sys.stdout):
    """Dump *font* as TTX-style XML to *file* when xml is on."""
    if not self.xml:
      return
    from fontTools.misc import xmlWriter
    writer = xmlWriter.XMLWriter(file)
    font.disassembleInstructions = False  # Work around ttLib bug
    for tag in font.keys():
      writer.begintag(tag)
      writer.newline()
      font[tag].toXML(writer, font)
      writer.endtag(tag)
      writer.newline()
+
+
# Public API of this module.
__all__ = [
  'Options',
  'Merger',
  'Logger',
  'main'
]
+
def main(args):
	"""Command-line driver: merge the given font files into merged.ttf."""
	log = Logger()
	args = log.parse_opts(args)

	options = Options()
	args = options.parse_opts(args)

	if not args:
		print("usage: pyftmerge font...", file=sys.stderr)
		sys.exit(1)

	merger = Merger(options=options, log=log)
	merged = merger.merge(args)
	merged.save('merged.ttf')
	log.lapse("compile and save font")

	log.last_time = log.start_time
	log.lapse("make one with everything(TOTAL TIME)")

if __name__ == "__main__":
	main(sys.argv[1:])
diff --git a/Lib/fontTools/misc/__init__.py b/Lib/fontTools/misc/__init__.py
new file mode 100644
index 0000000..e001bb2
--- /dev/null
+++ b/Lib/fontTools/misc/__init__.py
@@ -0,0 +1,3 @@
+"""Empty __init__.py file to signal Python this directory is a package.
+(It can't be completely empty since WinZip seems to skip empty files.)
+"""
diff --git a/Lib/fontTools/misc/arrayTools.py b/Lib/fontTools/misc/arrayTools.py
new file mode 100644
index 0000000..0daabd9
--- /dev/null
+++ b/Lib/fontTools/misc/arrayTools.py
@@ -0,0 +1,184 @@
+#
+# Various array and rectangle tools, but mostly rectangles, hence the
+# name of this module (not).
+#
+
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import math
+
def calcBounds(array):
    """Return the bounding rectangle of a 2D points array as a tuple:
    (xMin, yMin, xMax, yMax).  An empty array yields (0, 0, 0, 0).
    """
    if len(array) == 0:
        return 0, 0, 0, 0
    xs, ys = zip(*array)
    return min(xs), min(ys), max(xs), max(ys)
+
def calcIntBounds(array):
    """Return the integer bounding rectangle of a 2D points array as a
    tuple: (xMin, yMin, xMax, yMax).  Mins are floored, maxes are ceiled,
    so the integer box never shrinks the float one.
    """
    xMin, yMin, xMax, yMax = calcBounds(array)
    return (int(math.floor(xMin)), int(math.floor(yMin)),
            int(math.ceil(xMax)), int(math.ceil(yMax)))
+
+
def updateBounds(bounds, p, min=min, max=max):
    """Return the bounding rectangle covering both bounds and point p."""
    px, py = p
    left, bottom, right, top = bounds
    return min(left, px), min(bottom, py), max(right, px), max(top, py)
+
def pointInRect(p, rect):
    """Return True when point (x, y) is inside rect (borders included)."""
    x, y = p
    xMin, yMin, xMax, yMax = rect
    if x < xMin or x > xMax:
        return False
    return yMin <= y <= yMax
+
def pointsInRect(array, rect):
    """Determine for each point of array whether it lies inside rect.
    Returns a list with one boolean per point.
    """
    if len(array) < 1:
        return []
    xMin, yMin, xMax, yMax = rect
    result = []
    for x, y in array:
        result.append(xMin <= x <= xMax and yMin <= y <= yMax)
    return result
+
def vectorLength(vector):
    """Return the Euclidean length of the given 2D vector."""
    vx, vy = vector
    return math.sqrt(vx**2 + vy**2)
+
def asInt16(array):
    """Round each value half-up (towards +infinity) and cast to int."""
    result = []
    for value in array:
        result.append(int(math.floor(value + 0.5)))
    return result
+    
+
def normRect(rect):
    """Normalize the rectangle so that the following holds:
        xMin <= xMax and yMin <= yMax
    """
    xMin, yMin, xMax, yMax = rect
    if xMin > xMax:
        xMin, xMax = xMax, xMin
    if yMin > yMax:
        yMin, yMax = yMax, yMin
    return xMin, yMin, xMax, yMax
+
def scaleRect(rect, x, y):
    """Scale the rectangle by x horizontally and y vertically."""
    return tuple(v * s for v, s in zip(rect, (x, y, x, y)))
+
def offsetRect(rect, dx, dy):
    """Offset (translate) the rectangle by dx, dy."""
    return tuple(v + d for v, d in zip(rect, (dx, dy, dx, dy)))
+
def insetRect(rect, dx, dy):
    """Inset the rectangle by dx, dy on all sides (negative values grow it)."""
    return tuple(v + d for v, d in zip(rect, (dx, dy, -dx, -dy)))
+
def sectRect(rect1, rect2):
    """Return (bool, rect).  When the input rectangles intersect, the
    boolean is True and rect is their intersection; otherwise it is
    False and rect is (0, 0, 0, 0).
    """
    xMin1, yMin1, xMax1, yMax1 = rect1
    xMin2, yMin2, xMax2, yMax2 = rect2
    xMin = max(xMin1, xMin2)
    yMin = max(yMin1, yMin2)
    xMax = min(xMax1, xMax2)
    yMax = min(yMax1, yMax2)
    if xMin < xMax and yMin < yMax:
        return True, (xMin, yMin, xMax, yMax)
    return False, (0, 0, 0, 0)
+
def unionRect(rect1, rect2):
    """Return the smallest rectangle that fully encloses both input
    rectangles (their total bounding rectangle).
    """
    xMin1, yMin1, xMax1, yMax1 = rect1
    xMin2, yMin2, xMax2, yMax2 = rect2
    xMin = min(xMin1, xMin2)
    yMin = min(yMin1, yMin2)
    xMax = max(xMax1, xMax2)
    yMax = max(yMax1, yMax2)
    return (xMin, yMin, xMax, yMax)
+
def rectCenter(rect0):
    """Return the center of the rectangle as an (x, y) coordinate."""
    xMin, yMin, xMax, yMax = rect0
    cx = (xMin + xMax) / 2
    cy = (yMin + yMax) / 2
    return cx, cy
+
def intRect(rect1):
    """Return the rectangle rounded to integer values, guaranteeing the
    result is never smaller than the original (mins floored, maxes ceiled).
    """
    xMin, yMin, xMax, yMax = rect1
    return (int(math.floor(xMin)), int(math.floor(yMin)),
            int(math.ceil(xMax)), int(math.ceil(yMax)))
+
+
def _test():
    # Doctest suite for this module; executed via doctest.testmod() when
    # the file is run as a script.  The docstring below IS the tests.
    """
    >>> import math
    >>> calcBounds([])
    (0, 0, 0, 0)
    >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)])
    (0, 10, 80, 100)
    >>> updateBounds((0, 0, 0, 0), (100, 100))
    (0, 0, 100, 100)
    >>> pointInRect((50, 50), (0, 0, 100, 100))
    True
    >>> pointInRect((0, 0), (0, 0, 100, 100))
    True
    >>> pointInRect((100, 100), (0, 0, 100, 100))
    True
    >>> not pointInRect((101, 100), (0, 0, 100, 100))
    True
    >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100)))
    [True, True, True, False]
    >>> vectorLength((3, 4))
    5.0
    >>> vectorLength((1, 1)) == math.sqrt(2)
    True
    >>> list(asInt16([0, 0.1, 0.5, 0.9]))
    [0, 0, 1, 1]
    >>> normRect((0, 10, 100, 200))
    (0, 10, 100, 200)
    >>> normRect((100, 200, 0, 10))
    (0, 10, 100, 200)
    >>> scaleRect((10, 20, 50, 150), 1.5, 2)
    (15.0, 40, 75.0, 300)
    >>> offsetRect((10, 20, 30, 40), 5, 6)
    (15, 26, 35, 46)
    >>> insetRect((10, 20, 50, 60), 5, 10)
    (15, 30, 45, 50)
    >>> insetRect((10, 20, 50, 60), -5, -10)
    (5, 10, 55, 70)
    >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50))
    >>> not intersects
    True
    >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50))
    >>> intersects
    1
    >>> rect
    (5, 20, 20, 30)
    >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50))
    (0, 10, 20, 50)
    >>> rectCenter((0, 0, 100, 200))
    (50.0, 100.0)
    >>> rectCenter((0, 0, 100, 199.0))
    (50.0, 99.5)
    >>> intRect((0.9, 2.9, 3.1, 4.1))
    (0, 2, 4, 5)
    """
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
diff --git a/Lib/fontTools/misc/bezierTools.py b/Lib/fontTools/misc/bezierTools.py
new file mode 100644
index 0000000..6d9f8ce
--- /dev/null
+++ b/Lib/fontTools/misc/bezierTools.py
@@ -0,0 +1,406 @@
+"""fontTools.misc.bezierTools.py -- tools for working with bezier path segments.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+__all__ = [
+    "calcQuadraticBounds",
+    "calcCubicBounds",
+    "splitLine",
+    "splitQuadratic",
+    "splitCubic",
+    "splitQuadraticAtT",
+    "splitCubicAtT",
+    "solveQuadratic",
+    "solveCubic",
+]
+
+from fontTools.misc.arrayTools import calcBounds
+
+epsilon = 1e-12
+
+
def calcQuadraticBounds(pt1, pt2, pt3):
    """Return the bounding rectangle for a quadratic bezier segment.
    pt1 and pt3 are the "anchor" points, pt2 is the "handle".

        >>> calcQuadraticBounds((0, 0), (50, 100), (100, 0))
        (0, 0, 100, 50.0)
        >>> calcQuadraticBounds((0, 0), (100, 0), (100, 100))
        (0.0, 0.0, 100, 100)
    """
    (ax, ay), (bx, by), (cx, cy) = calcQuadraticParameters(pt1, pt2, pt3)
    # Per-axis extrema lie where the derivative 2*a*t + b is zero.
    ax2 = ax*2.0
    ay2 = ay*2.0
    roots = []
    if ax2 != 0:
        roots.append(-bx/ax2)
    if ay2 != 0:
        roots.append(-by/ay2)
    # Evaluate the curve at in-range extrema; the two anchors always count.
    points = [(ax*t*t + bx*t + cx, ay*t*t + by*t + cy) for t in roots if 0 <= t < 1] + [pt1, pt3]
    return calcBounds(points)
+
+
def calcCubicBounds(pt1, pt2, pt3, pt4):
    """Return the bounding rectangle for a cubic bezier segment.
    pt1 and pt4 are the "anchor" points, pt2 and pt3 are the "handles".

        >>> calcCubicBounds((0, 0), (25, 100), (75, 100), (100, 0))
        (0, 0, 100, 75.0)
        >>> calcCubicBounds((0, 0), (50, 0), (100, 50), (100, 100))
        (0.0, 0.0, 100, 100)
        >>> print("%f %f %f %f" % calcCubicBounds((50, 0), (0, 100), (100, 100), (50, 0)))
        35.566243 0.000000 64.433757 75.000000
    """
    # Bug fix: the third doctest used a Python 2 print *statement*, which is
    # a SyntaxError with print_function in effect (and under any Python 3
    # doctest run).  It now uses the print() function.
    (ax, ay), (bx, by), (cx, cy), (dx, dy) = calcCubicParameters(pt1, pt2, pt3, pt4)
    # Per-axis extrema lie at the roots of the derivative 3*a*t^2 + 2*b*t + c.
    ax3 = ax * 3.0
    ay3 = ay * 3.0
    bx2 = bx * 2.0
    by2 = by * 2.0
    xRoots = [t for t in solveQuadratic(ax3, bx2, cx) if 0 <= t < 1]
    yRoots = [t for t in solveQuadratic(ay3, by2, cy) if 0 <= t < 1]
    roots = xRoots + yRoots

    # Evaluate the curve at in-range extrema; the two anchors always count.
    points = [(ax*t*t*t + bx*t*t + cx * t + dx, ay*t*t*t + by*t*t + cy * t + dy) for t in roots] + [pt1, pt4]
    return calcBounds(points)
+
+
def splitLine(pt1, pt2, where, isHorizontal):
    """Split the line between pt1 and pt2 at position 'where', which
    is an x coordinate if isHorizontal is False, a y coordinate if
    isHorizontal is True. Return a list of two line segments if the
    line was successfully split, or a list containing the original
    line.

        >>> printSegments(splitLine((0, 0), (100, 100), 50, True))
        ((0, 0), (50.0, 50.0))
        ((50.0, 50.0), (100, 100))
        >>> printSegments(splitLine((0, 0), (100, 100), 100, True))
        ((0, 0), (100, 100))
        >>> printSegments(splitLine((0, 0), (100, 100), 0, True))
        ((0, 0), (0.0, 0.0))
        ((0.0, 0.0), (100, 100))
        >>> printSegments(splitLine((0, 0), (100, 100), 0, False))
        ((0, 0), (0.0, 0.0))
        ((0.0, 0.0), (100, 100))
        >>> printSegments(splitLine((0, 0), (0, 100), 50, True))
        ((0, 0), (0.0, 50.0))
        ((0.0, 50.0), (0, 100))
    """
    pt1x, pt1y = pt1
    pt2x, pt2y = pt2

    ax = (pt2x - pt1x)
    ay = (pt2y - pt1y)

    bx = pt1x
    by = pt1y

    # Bug fix: select the extent of the axis the split refers to.  The old
    # code always tested and divided by ax, so a horizontal split of a
    # vertical line (ax == 0) never happened, and horizontal splits were
    # computed with the wrong denominator whenever ax != ay.
    a = (ax, ay)[isHorizontal]

    if a == 0:
        # Line is parallel to the split axis; nothing to split.
        return [(pt1, pt2)]

    t = (where - (bx, by)[isHorizontal]) / a
    if 0 <= t < 1:
        midPt = ax * t + bx, ay * t + by
        return [(pt1, midPt), (midPt, pt2)]
    else:
        return [(pt1, pt2)]
+
+
def splitQuadratic(pt1, pt2, pt3, where, isHorizontal):
    """Split the quadratic curve between pt1, pt2 and pt3 at position 'where',
    which is an x coordinate if isHorizontal is False, a y coordinate if
    isHorizontal is True. Return a list of curve segments.

        >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 150, False))
        ((0, 0), (50, 100), (100, 0))
        >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, False))
        ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
        ((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
        >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, False))
        ((0.0, 0.0), (12.5, 25.0), (25.0, 37.5))
        ((25.0, 37.5), (62.5, 75.0), (100.0, 0.0))
        >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 25, True))
        ((0.0, 0.0), (7.32233047034, 14.6446609407), (14.6446609407, 25.0))
        ((14.6446609407, 25.0), (50.0, 75.0), (85.3553390593, 25.0))
        ((85.3553390593, 25.0), (92.6776695297, 14.6446609407), (100.0, -7.1054273576e-15))
        >>> # XXX I'm not at all sure if the following behavior is desirable:
        >>> printSegments(splitQuadratic((0, 0), (50, 100), (100, 0), 50, True))
        ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
        ((50.0, 50.0), (50.0, 50.0), (50.0, 50.0))
        ((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
    """
    a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
    # Solve a*t^2 + b*t + (c - where) = 0 along the chosen axis.
    solutions = solveQuadratic(a[isHorizontal], b[isHorizontal],
        c[isHorizontal] - where)
    # Keep only split positions strictly inside the segment: [0, 1).
    solutions = sorted([t for t in solutions if 0 <= t < 1])
    if not solutions:
        return [(pt1, pt2, pt3)]
    return _splitQuadraticAtT(a, b, c, *solutions)
+
+
def splitCubic(pt1, pt2, pt3, pt4, where, isHorizontal):
    """Split the cubic curve between pt1, pt2, pt3 and pt4 at position 'where',
    which is an x coordinate if isHorizontal is False, a y coordinate if
    isHorizontal is True. Return a list of curve segments.

        >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 150, False))
        ((0, 0), (25, 100), (75, 100), (100, 0))
        >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 50, False))
        ((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
        ((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
        >>> printSegments(splitCubic((0, 0), (25, 100), (75, 100), (100, 0), 25, True))
        ((0.0, 0.0), (2.2937927384, 9.17517095361), (4.79804488188, 17.5085042869), (7.47413641001, 25.0))
        ((7.47413641001, 25.0), (31.2886200204, 91.6666666667), (68.7113799796, 91.6666666667), (92.52586359, 25.0))
        ((92.52586359, 25.0), (95.2019551181, 17.5085042869), (97.7062072616, 9.17517095361), (100.0, 1.7763568394e-15))
    """
    a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
    # Solve a*t^3 + b*t^2 + c*t + (d - where) = 0 along the chosen axis.
    solutions = solveCubic(a[isHorizontal], b[isHorizontal], c[isHorizontal],
        d[isHorizontal] - where)
    # Keep only split positions strictly inside the segment: [0, 1).
    solutions = sorted([t for t in solutions if 0 <= t < 1])
    if not solutions:
        return [(pt1, pt2, pt3, pt4)]
    return _splitCubicAtT(a, b, c, d, *solutions)
+
+
def splitQuadraticAtT(pt1, pt2, pt3, *ts):
    """Split the quadratic curve between pt1, pt2 and pt3 at one or more
    values of t. Return a list of curve segments.

        >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5))
        ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
        ((50.0, 50.0), (75.0, 50.0), (100.0, 0.0))
        >>> printSegments(splitQuadraticAtT((0, 0), (50, 100), (100, 0), 0.5, 0.75))
        ((0.0, 0.0), (25.0, 50.0), (50.0, 50.0))
        ((50.0, 50.0), (62.5, 50.0), (75.0, 37.5))
        ((75.0, 37.5), (87.5, 25.0), (100.0, 0.0))
    """
    # Convert the points to polynomial coefficients once, then split.
    a, b, c = calcQuadraticParameters(pt1, pt2, pt3)
    return _splitQuadraticAtT(a, b, c, *ts)
+
+
def splitCubicAtT(pt1, pt2, pt3, pt4, *ts):
    """Split the cubic curve between pt1, pt2, pt3 and pt4 at one or more
    values of t. Return a list of curve segments.

        >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5))
        ((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
        ((50.0, 75.0), (68.75, 75.0), (87.5, 50.0), (100.0, 0.0))
        >>> printSegments(splitCubicAtT((0, 0), (25, 100), (75, 100), (100, 0), 0.5, 0.75))
        ((0.0, 0.0), (12.5, 50.0), (31.25, 75.0), (50.0, 75.0))
        ((50.0, 75.0), (59.375, 75.0), (68.75, 68.75), (77.34375, 56.25))
        ((77.34375, 56.25), (85.9375, 43.75), (93.75, 25.0), (100.0, 0.0))
    """
    # Convert the points to polynomial coefficients once, then split.
    a, b, c, d = calcCubicParameters(pt1, pt2, pt3, pt4)
    return _splitCubicAtT(a, b, c, d, *ts)
+
+
def _splitQuadraticAtT(a, b, c, *ts):
    # Split a quadratic given as polynomial coefficients (a, b, c) at the
    # parameter values ts; returns a list of point-form segments.
    ts = list(ts)
    segments = []
    # Bracket the split positions with the segment's own end points.
    ts.insert(0, 0.0)
    ts.append(1.0)
    ax, ay = a
    bx, by = b
    cx, cy = c
    for i in range(len(ts) - 1):
        t1 = ts[i]
        t2 = ts[i+1]
        delta = (t2 - t1)
        # calc new a, b and c: re-parameterize so t in [0,1] spans [t1,t2]
        a1x = ax * delta**2
        a1y = ay * delta**2
        b1x = (2*ax*t1 + bx) * delta
        b1y = (2*ay*t1 + by) * delta
        c1x = ax*t1**2 + bx*t1 + cx
        c1y = ay*t1**2 + by*t1 + cy

        pt1, pt2, pt3 = calcQuadraticPoints((a1x, a1y), (b1x, b1y), (c1x, c1y))
        segments.append((pt1, pt2, pt3))
    return segments
+
+
def _splitCubicAtT(a, b, c, d, *ts):
    # Split a cubic given as polynomial coefficients (a, b, c, d) at the
    # parameter values ts; returns a list of point-form segments.
    ts = list(ts)
    # Bracket the split positions with the segment's own end points.
    ts.insert(0, 0.0)
    ts.append(1.0)
    segments = []
    ax, ay = a
    bx, by = b
    cx, cy = c
    dx, dy = d
    for i in range(len(ts) - 1):
        t1 = ts[i]
        t2 = ts[i+1]
        delta = (t2 - t1)
        # calc new a, b, c and d: re-parameterize so t in [0,1] spans [t1,t2]
        a1x = ax * delta**3
        a1y = ay * delta**3
        b1x = (3*ax*t1 + bx) * delta**2
        b1y = (3*ay*t1 + by) * delta**2
        c1x = (2*bx*t1 + cx + 3*ax*t1**2) * delta
        c1y = (2*by*t1 + cy + 3*ay*t1**2) * delta
        d1x = ax*t1**3 + bx*t1**2 + cx*t1 + dx
        d1y = ay*t1**3 + by*t1**2 + cy*t1 + dy
        pt1, pt2, pt3, pt4 = calcCubicPoints((a1x, a1y), (b1x, b1y), (c1x, c1y), (d1x, d1y))
        segments.append((pt1, pt2, pt3, pt4))
    return segments
+
+
+#
+# Equation solvers.
+#
+
+from math import sqrt, acos, cos, pi
+
+
def solveQuadratic(a, b, c,
        sqrt=sqrt):
    """Solve a quadratic equation where a, b and c are real.
        a*x*x + b*x + c = 0
    This function returns a list of roots. Note that the returned list
    is neither guaranteed to be sorted nor to contain unique values!
    Returns 0, 1 or 2 roots; complex roots are dropped.
    """
    # 'epsilon' (module level) guards against near-zero leading coefficients.
    if abs(a) < epsilon:
        if abs(b) < epsilon:
            # We have a non-equation; therefore, we have no valid solution
            roots = []
        else:
            # We have a linear equation with 1 root.
            roots = [-c/b]
    else:
        # We have a true quadratic equation.  Apply the quadratic formula to find two roots.
        DD = b*b - 4.0*a*c
        if DD >= 0.0:
            rDD = sqrt(DD)
            roots = [(-b+rDD)/2.0/a, (-b-rDD)/2.0/a]
        else:
            # complex roots, ignore
            roots = []
    return roots
+
+
def solveCubic(a, b, c, d):
    """Solve a cubic equation where a, b, c and d are real.
        a*x*x*x + b*x*x + c*x + d = 0
    This function returns a list of roots. Note that the returned list
    is neither guaranteed to be sorted nor to contain unique values!
    Returns 1 or 3 real roots (or fewer via the quadratic fallback).
    """
    #
    # adapted from:
    #   CUBIC.C - Solve a cubic polynomial
    #   public domain by Ross Cottrell
    # found at: http://www.strangecreations.com/library/snippets/Cubic.C
    #
    if abs(a) < epsilon:
        # don't just test for zero; for very small values of 'a' solveCubic()
        # returns unreliable results, so we fall back to quad.
        return solveQuadratic(b, c, d)
    a = float(a)
    # Normalize to x^3 + a1*x^2 + a2*x + a3 = 0.
    a1 = b/a
    a2 = c/a
    a3 = d/a

    # Discriminant terms of the depressed cubic (Cardano's method).
    Q = (a1*a1 - 3.0*a2)/9.0
    R = (2.0*a1*a1*a1 - 9.0*a1*a2 + 27.0*a3)/54.0
    R2_Q3 = R*R - Q*Q*Q

    if R2_Q3 < 0:
        # Three real roots: trigonometric solution.
        theta = acos(R/sqrt(Q*Q*Q))
        rQ2 = -2.0*sqrt(Q)
        x0 = rQ2*cos(theta/3.0) - a1/3.0
        x1 = rQ2*cos((theta+2.0*pi)/3.0) - a1/3.0
        x2 = rQ2*cos((theta+4.0*pi)/3.0) - a1/3.0
        return [x0, x1, x2]
    else:
        # One real root (the other two are complex or coincident).
        if Q == 0 and R == 0:
            x = 0
        else:
            x = pow(sqrt(R2_Q3)+abs(R), 1/3.0)
            x = x + Q/x
        if R >= 0.0:
            x = -x
        x = x - a1/3.0
        return [x]
+
+
+#
+# Conversion routines for points to parameters and vice versa
+#
+
def calcQuadraticParameters(pt1, pt2, pt3):
    """Convert a quadratic segment's points to polynomial coefficients
    (a, b, c) such that the curve is a*t**2 + b*t + c for t in [0, 1].
    """
    cx, cy = pt1
    x2, y2 = pt2
    x3, y3 = pt3
    bx = (x2 - cx) * 2.0
    by = (y2 - cy) * 2.0
    ax = x3 - cx - bx
    ay = y3 - cy - by
    return (ax, ay), (bx, by), (cx, cy)
+
+
def calcCubicParameters(pt1, pt2, pt3, pt4):
    """Convert a cubic segment's points to polynomial coefficients
    (a, b, c, d) such that the curve is a*t**3 + b*t**2 + c*t + d.
    """
    dx, dy = pt1
    x2, y2 = pt2
    x3, y3 = pt3
    x4, y4 = pt4
    cx = (x2 - dx) * 3.0
    cy = (y2 - dy) * 3.0
    bx = (x3 - x2) * 3.0 - cx
    by = (y3 - y2) * 3.0 - cy
    ax = x4 - dx - cx - bx
    ay = y4 - dy - cy - by
    return (ax, ay), (bx, by), (cx, cy), (dx, dy)
+
+
def calcQuadraticPoints(a, b, c):
    """Inverse of calcQuadraticParameters: rebuild the segment's
    (anchor, handle, anchor) points from polynomial coefficients.
    """
    ax, ay = a
    bx, by = b
    cx, cy = c
    pt1 = (cx, cy)
    pt2 = (bx * 0.5 + cx, by * 0.5 + cy)
    pt3 = (ax + bx + cx, ay + by + cy)
    return pt1, pt2, pt3
+
+
def calcCubicPoints(a, b, c, d):
    """Inverse of calcCubicParameters: rebuild the segment's
    (anchor, handle, handle, anchor) points from polynomial coefficients.
    """
    ax, ay = a
    bx, by = b
    cx, cy = c
    dx, dy = d
    pt1 = (dx, dy)
    pt2 = (cx / 3.0 + dx, cy / 3.0 + dy)
    pt3 = ((bx + cx) / 3.0 + pt2[0], (by + cy) / 3.0 + pt2[1])
    pt4 = (ax + dx + cx + bx, ay + dy + cy + by)
    return pt1, pt2, pt3, pt4
+
+
+def _segmentrepr(obj):
+    """
+        >>> _segmentrepr([1, [2, 3], [], [[2, [3, 4], [0.1, 2.2]]]])
+        '(1, (2, 3), (), ((2, (3, 4), (0.1, 2.2))))'
+    """
+    try:
+        it = iter(obj)
+    except TypeError:
+        return str(obj)
+    else:
+        return "(%s)" % ", ".join([_segmentrepr(x) for x in it])
+
+
def printSegments(segments):
    """Helper for the doctests, displaying each segment in a list of
    segments on a single line as a tuple.
    """
    # One segment per line keeps the doctest expectations readable.
    for segment in segments:
        print(_segmentrepr(segment))
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
diff --git a/Lib/fontTools/misc/eexec.py b/Lib/fontTools/misc/eexec.py
new file mode 100644
index 0000000..b7656d7
--- /dev/null
+++ b/Lib/fontTools/misc/eexec.py
@@ -0,0 +1,55 @@
+"""fontTools.misc.eexec.py -- Module implementing the eexec and 
+charstring encryption algorithm as used by PostScript Type 1 fonts.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
def _decryptChar(cipher, R):
	"""Decrypt one byte; return (plain_byte, new_R).

	R is the 16-bit running key, updated after every byte so streams
	can be processed incrementally.
	"""
	cipher = byteord(cipher)
	plain = ( (cipher ^ (R>>8)) ) & 0xFF
	R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF
	return bytechr(plain), R
+
def _encryptChar(plain, R):
	"""Encrypt one byte; return (cipher_byte, new_R).

	Mirror of _decryptChar: the key update uses the *cipher* byte, which
	is what makes the two directions invert each other.
	"""
	plain = byteord(plain)
	cipher = ( (plain ^ (R>>8)) ) & 0xFF
	R = ( (cipher + R) * 52845 + 22719 ) & 0xFFFF
	return bytechr(cipher), R
+
+
def decrypt(cipherstring, R):
	"""Decrypt cipherstring with the eexec algorithm, starting from key R.

	Returns (plainstring, R) where R is the final key state, so that
	decryption can be resumed on subsequent data.
	"""
	plainList = []
	for cipher in cipherstring:
		plain, R = _decryptChar(cipher, R)
		plainList.append(plain)
	plainstring = strjoin(plainList)
	return plainstring, int(R)
+
def encrypt(plainstring, R):
	"""Encrypt plainstring with the eexec algorithm, starting from key R.

	Returns (cipherstring, R) where R is the final key state, so that
	encryption can be resumed on subsequent data.
	"""
	cipherList = []
	for plain in plainstring:
		cipher, R = _encryptChar(plain, R)
		cipherList.append(cipher)
	cipherstring = strjoin(cipherList)
	return cipherstring, int(R)
+
+
def hexString(s):
	"""Return the hexadecimal representation of the byte string s."""
	import binascii
	return binascii.b2a_hex(s)
+
def deHexString(h):
	"""Convert a hex string back to binary; embedded whitespace is allowed."""
	import binascii
	h = strjoin(h.split())  # strip all whitespace before unhexlifying
	return binascii.unhexlify(h)
+
+
def _test():
	# Smoke test: print encrypt/decrypt results for a fixed key (12321).
	testStr = "\0\0asdadads asds\265"
	print(decrypt, decrypt(testStr, 12321))
	print(encrypt, encrypt(testStr, 12321))
+
+
+if __name__ == "__main__":
+	_test()
diff --git a/Lib/fontTools/misc/fixedTools.py b/Lib/fontTools/misc/fixedTools.py
new file mode 100644
index 0000000..59c55dd
--- /dev/null
+++ b/Lib/fontTools/misc/fixedTools.py
@@ -0,0 +1,65 @@
+"""fontTools.misc.fixedTools.py -- tools for working with fixed numbers.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+__all__ = [
+    "fixedToFloat",
+    "floatToFixed",
+]
+
def fixedToFloat(value, precisionBits):
	"""Converts a fixed-point number to a float, choosing the float
	that has the shortest decimal representation.  Eg. to convert a
	fixed number in a 2.14 format, use precisionBits=14.  This is
	pretty slow compared to a simple division.  Use sporadically.
	
	>>> fixedToFloat(13107, 14)
	0.8
	>>> fixedToFloat(0, 14)
	0.0
	>>> fixedToFloat(0x4000, 14)
	1.0
	"""

	if not value: return 0.0

	scale = 1 << precisionBits
	value /= scale
	eps = .5 / scale
	# Enough decimal digits to distinguish values that differ by eps.
	digits = (precisionBits + 2) // 3
	fmt = "%%.%df" % digits
	lo = fmt % (value - eps)
	hi = fmt % (value + eps)
	out = []
	length = min(len(lo), len(hi))
	# Collect the decimal prefix shared by both ends of the interval ...
	for i in range(length):
		if lo[i] != hi[i]:
			break;
		out.append(lo[i])
	outlen = len(out)
	if outlen < length:
		# ... then one more digit picks the shortest value in range.
		out.append(max(lo[outlen], hi[outlen]))
	return float(strjoin(out))
+
def floatToFixed(value, precisionBits):
	"""Converts a float to a fixed-point number given the number of
	precisionBits.  Ie. int(round(value * (1<<precisionBits))).

	>>> floatToFixed(0.8, 14)
	13107
	>>> floatToFixed(1.0, 14)
	16384
	>>> floatToFixed(1, 14)
	16384
	>>> floatToFixed(0, 14)
	0
	"""
	scale = 1 << precisionBits
	return int(round(value * scale))
+
+
+if __name__ == "__main__":
+    import doctest
+    doctest.testmod()
diff --git a/Lib/fontTools/misc/homeResFile.py b/Lib/fontTools/misc/homeResFile.py
new file mode 100644
index 0000000..a2d1c8c
--- /dev/null
+++ b/Lib/fontTools/misc/homeResFile.py
@@ -0,0 +1,96 @@
+"""Mac-only module to find the home file of a resource."""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+import array
+import calldll
+import macfs, Res
+
+
def HomeResFile(res):
	"""Return a path to the file in which resource 'res' lives."""
	# res must provide HomeResFile() (a Mac Resource Manager resource).
	return GetFileLocation(res.HomeResFile())
+
+
def GetFileLocation(refNum):
	"""Return a path to the open file identified with refNum."""
	# refNum is a Mac file reference number for an already-open file.
	pb = ParamBlock(refNum)
	return pb.getPath()
+
#
# Internal cruft, adapted from MoreFiles
#

# Low-level InterfaceLib entry points resolved via calldll:
# GetVRefNum maps a file refNum to its volume refNum; _getInfo issues a
# synchronous PBGetFCBInfo call on a parameter-block pointer.
_InterfaceLib = calldll.getlibrary("InterfaceLib")
GetVRefNum = calldll.newcall(_InterfaceLib.GetVRefNum, "None", "InShort", "OutShort")
_getInfo = calldll.newcall(_InterfaceLib.PBGetFCBInfoSync, "Short", "InLong")


# sstruct layout of the FCBPB (File Control Block Parameter Block) record.
_FCBPBFormat = """
	qLink:        l
	qType:        h
	ioTrap:       h
	ioCmdAddr:    l
	ioCompletion: l
	ioResult:     h
	ioNamePtr:    l
	ioVRefNum:    h
	ioRefNum:     h
	filler:       h
	ioFCBIndx:    h
	filler1:      h
	ioFCBFINm:    l
	ioFCBFlags:   h
	ioFCBStBlk:   h
	ioFCBEOF:     l
	ioFCBPLen:    l
	ioFCBCrPs:    l
	ioFCBVRefNum: h
	ioFCBClpSiz:  l
	ioFCBParID:   l
"""
+
class ParamBlock(object):
	
	"""Wrapper for the very low level FCBPB record."""
	
	def __init__(self, refNum):
		# 64-byte Pascal-string buffer that the OS fills with the file name.
		self.__fileName = array.array("c", "\0" * 64)
		# Zero-initialize every FCBPB field as an attribute on self.
		sstruct.unpack(_FCBPBFormat, 
				"\0" * sstruct.calcsize(_FCBPBFormat), self)
		self.ioNamePtr = self.__fileName.buffer_info()[0]
		self.ioRefNum = refNum
		self.ioVRefNum = GetVRefNum(refNum)
		self.__haveInfo = 0
	
	def getInfo(self):
		# Issue PBGetFCBInfoSync once and cache the unpacked result.
		if self.__haveInfo:
			return
		data = sstruct.pack(_FCBPBFormat, self)
		buf = array.array("c", data)
		ptr = buf.buffer_info()[0]
		err = _getInfo(ptr)
		if err:
			raise Res.Error("can't get file info", err)
		sstruct.unpack(_FCBPBFormat, buf.tostring(), self)
		self.__haveInfo = 1
	
	def getFileName(self):
		# Decode the Pascal string: one length byte followed by the bytes.
		self.getInfo()
		data = self.__fileName.tostring()
		return data[1:byteord(data[0])+1]
	
	def getFSSpec(self):
		self.getInfo()
		vRefNum = self.ioVRefNum
		parID = self.ioFCBParID
		return macfs.FSSpec((vRefNum, parID, self.getFileName()))
	
	def getPath(self):
		# Full path of the open file, via its FSSpec.
		return self.getFSSpec().as_pathname()
+
+
+if __name__ == "__main__":
+	fond = Res.GetNamedResource("FOND", "Helvetica")
+	print(HomeResFile(fond))
diff --git a/Lib/fontTools/misc/macCreatorType.py b/Lib/fontTools/misc/macCreatorType.py
new file mode 100644
index 0000000..5f2e18a
--- /dev/null
+++ b/Lib/fontTools/misc/macCreatorType.py
@@ -0,0 +1,36 @@
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
import sys
try:
	import MacOS
except ImportError:
	# Not running on (classic) Mac OS; the API below degrades to no-ops.
	MacOS = None
# NOTE(review): the relative import below duplicates the py23 star-import
# above (same module); it is a candidate for removal.
from .py23 import *
+
def _reverseString(s):
	"""Return s with its characters in reverse order."""
	s = list(s)
	s.reverse()
	return strjoin(s)
+
+
def getMacCreatorAndType(path):
	"""Return the (creator, type) codes of the file at path.

	Returns (None, None) on platforms where the MacOS module is missing.
	"""
	if MacOS is not None:
		fileCreator, fileType = MacOS.GetCreatorAndType(path)
		if sys.byteorder == "little":
			# work around bug in MacOS.GetCreatorAndType() on intel:
			# http://bugs.python.org/issue1594
			fileCreator = _reverseString(fileCreator)
			fileType = _reverseString(fileType)
		return fileCreator, fileType
	else:
		return None, None
+
+
def setMacCreatorAndType(path, fileCreator, fileType):
	"""Set the creator and type codes of the file at path.

	Silently does nothing on platforms without the MacOS module.
	"""
	if MacOS is not None:
		if sys.byteorder == "little":
			# work around bug in MacOS.SetCreatorAndType() on intel:
			# http://bugs.python.org/issue1594
			fileCreator = _reverseString(fileCreator)
			fileType = _reverseString(fileType)
		MacOS.SetCreatorAndType(path, fileCreator, fileType)
diff --git a/Lib/fontTools/misc/psCharStrings.py b/Lib/fontTools/misc/psCharStrings.py
new file mode 100644
index 0000000..6ffdb99
--- /dev/null
+++ b/Lib/fontTools/misc/psCharStrings.py
@@ -0,0 +1,1177 @@
+"""psCharStrings.py -- module implementing various kinds of CharStrings: 
+CFF dictionary data and Type1/Type2 CharStrings.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import struct
+
+
DEBUG = 0


# Operand-encoding dispatch tables: for each possible lead byte b0 the table
# holds the name of the ByteCodeBase method that decodes the operand, or
# "do_operator" for operator bytes.

# Type 1 charstring number encoding:
t1OperandEncoding = (
	32 * ["do_operator"]                  # 0-31: operators
	+ (247 - 32) * ["read_byte"]          # 32-246: single-byte ints
	+ (251 - 247) * ["read_smallInt1"]    # 247-250: positive two-byte ints
	+ (255 - 251) * ["read_smallInt2"]    # 251-254: negative two-byte ints
	+ ["read_longInt"]                    # 255: four-byte int
)
assert len(t1OperandEncoding) == 256

# Type 2 redefines lead bytes 28 and 255:
t2OperandEncoding = t1OperandEncoding[:]
t2OperandEncoding[28] = "read_shortInt"
t2OperandEncoding[255] = "read_fixed1616"

# CFF DICTs add long ints (29) and real numbers (30); 255 is reserved:
cffDictOperandEncoding = t2OperandEncoding[:]
cffDictOperandEncoding[29] = "read_longInt"
cffDictOperandEncoding[30] = "read_realNumber"
cffDictOperandEncoding[255] = "reserved"


# Nibble values of the CFF real-number encoding (None = unused nibble 0xd).
realNibbles = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 
		'.', 'E', 'E-', None, '-']
realNibblesDict = dict((nibble, code) for code, nibble in enumerate(realNibbles))
+
+
class ByteCodeBase(object):
	"""Mixin with operand decoders shared by charstrings and CFF DICTs.

	Every read_* method receives (b0, data, index) where *b0* is the lead
	byte (already consumed) and *index* points just past it; each returns
	(value, newIndex).
	"""
	
	def read_byte(self, b0, data, index):
		# single-byte int: lead bytes 32..246 encode -107..107
		return b0 - 139, index
	
	def read_smallInt1(self, b0, data, index):
		# positive two-byte int: 108..1131
		b1 = byteord(data[index])
		return (b0 - 247) * 256 + b1 + 108, index + 1
	
	def read_smallInt2(self, b0, data, index):
		# negative two-byte int: -1131..-108
		b1 = byteord(data[index])
		return -(b0 - 251) * 256 - b1 - 108, index + 1
	
	def read_shortInt(self, b0, data, index):
		# big-endian signed 16-bit int (T2 lead byte 28)
		value, = struct.unpack(">h", data[index:index + 2])
		return value, index + 2
	
	def read_longInt(self, b0, data, index):
		# big-endian signed 32-bit int
		value, = struct.unpack(">l", data[index:index + 4])
		return value, index + 4
	
	def read_fixed1616(self, b0, data, index):
		# 16.16 fixed-point number (T2 lead byte 255)
		value, = struct.unpack(">l", data[index:index + 4])
		return value / 65536, index + 4
	
	def read_realNumber(self, b0, data, index):
		# CFF real number: packed decimal nibbles, terminated by 0xf
		digits = []
		while True:
			b = byteord(data[index])
			index = index + 1
			for nibble in ((b & 0xf0) >> 4, b & 0x0f):
				if nibble == 0xf:
					return float("".join(digits)), index
				digits.append(realNibbles[nibble])
+
+
def buildOperatorDict(operatorList):
	"""Build lookup dicts from a table of (opcode, name[, extra...]) rows.

	Returns (operators, opcodes):
	operators maps opcode -> name (or the tuple of remaining fields when
	the row has more than two entries); opcodes maps name -> opcode as a
	tuple of raw bytes (two-byte escape operators are already tuples).
	"""
	operators = {}
	opcodes = {}
	for entry in operatorList:
		opcode = entry[0]
		rest = entry[1:]
		operators[opcode] = rest[0] if len(entry) == 2 else rest
		opcodes[entry[1]] = opcode if isinstance(opcode, tuple) else (opcode,)
	return operators, opcodes
+
+
# Type 2 charstring operator table: (opcode, name) pairs. Two-byte
# operators are written as a (12, x) escape tuple.
t2Operators = [
#	opcode     name
	(1,        'hstem'),
	(3,        'vstem'),
	(4,        'vmoveto'),
	(5,        'rlineto'),
	(6,        'hlineto'),
	(7,        'vlineto'),
	(8,        'rrcurveto'),
	(10,       'callsubr'),
	(11,       'return'),
	(14,       'endchar'),
	(16,       'blend'),
	(18,       'hstemhm'),
	(19,       'hintmask'),
	(20,       'cntrmask'),
	(21,       'rmoveto'),
	(22,       'hmoveto'),
	(23,       'vstemhm'),
	(24,       'rcurveline'),
	(25,       'rlinecurve'),
	(26,       'vvcurveto'),
	(27,       'hhcurveto'),
#	(28,       'shortint'),  # not really an operator
	(29,       'callgsubr'),
	(30,       'vhcurveto'),
	(31,       'hvcurveto'),
	((12, 0),  'ignore'),  # dotsection. Yes, there a few very early OTF/CFF
	                   # fonts with this deprecated operator. Just ignore it.
	((12, 3),  'and'),
	((12, 4),  'or'),
	((12, 5),  'not'),
	((12, 8),  'store'),
	((12, 9),  'abs'),
	((12, 10), 'add'),
	((12, 11), 'sub'),
	((12, 12), 'div'),
	((12, 13), 'load'),
	((12, 14), 'neg'),
	((12, 15), 'eq'),
	((12, 18), 'drop'),
	((12, 20), 'put'),
	((12, 21), 'get'),
	((12, 22), 'ifelse'),
	((12, 23), 'random'),
	((12, 24), 'mul'),
	((12, 26), 'sqrt'),
	((12, 27), 'dup'),
	((12, 28), 'exch'),
	((12, 29), 'index'),
	((12, 30), 'roll'),
	((12, 34), 'hflex'),
	((12, 35), 'flex'),
	((12, 36), 'hflex1'),
	((12, 37), 'flex1'),
]
+
+
def getIntEncoder(format):
	"""Return a function that encodes an int operand for the given format.

	*format* is one of "cff", "t1" or "t2". The returned function maps an
	int to its most compact charstring/DICT byte encoding.
	"""
	if format == "cff":
		fourByteOp = bytechr(29)
	elif format == "t1":
		fourByteOp = bytechr(255)
	else:
		assert format == "t2"
		fourByteOp = None  # T2 has no dedicated 4-byte int operator
	
	def encodeInt(value, fourByteOp=fourByteOp, bytechr=bytechr,
			pack=struct.pack, unpack=struct.unpack):
		if -107 <= value <= 107:
			# single byte
			return bytechr(value + 139)
		if 108 <= value <= 1131:
			# positive two-byte form
			offset = value - 108
			return bytechr((offset >> 8) + 247) + bytechr(offset & 0xFF)
		if -1131 <= value <= -108:
			# negative two-byte form
			offset = -value - 108
			return bytechr((offset >> 8) + 251) + bytechr(offset & 0xFF)
		if fourByteOp is not None:
			return fourByteOp + pack(">l", value)
		# T2 only supports 2 byte ints
		if -32768 <= value <= 32767:
			return bytechr(28) + pack(">h", value)
		# Backwards compatible hack: due to a previous bug in FontTools,
		# 16.16 fixed numbers were written out as 4-byte ints. When
		# these numbers were small, they were wrongly written back as
		# small ints instead of 4-byte ints, breaking round-tripping.
		# This here workaround doesn't do it any better, since we can't
		# distinguish anymore between small ints that were supposed to
		# be small fixed numbers and small ints that were just small
		# ints. Hence the warning.
		import sys
		sys.stderr.write("Warning: 4-byte T2 number got passed to the "
			"IntType handler. This should happen only when reading in "
			"old XML files.\n")
		return bytechr(255) + pack(">l", value)
	
	return encodeInt
+
+
# Ready-made int encoders, one per charstring/DICT flavor.
encodeIntCFF = getIntEncoder("cff")
encodeIntT1 = getIntEncoder("t1")
encodeIntT2 = getIntEncoder("t2")
+
def encodeFixed(f, pack=struct.pack):
	"""Encode *f* as a T2 16.16 fixed-point operand: 0xff + 4 bytes."""
	# For T2 only
	value = int(round(f * 65536))
	return b"\xff" + pack(">l", value)
+
def encodeFloat(f):
	"""Encode *f* as a CFF real-number operand (lead byte 30 + nibbles)."""
	# For CFF only, used in cffLib
	text = str(f).upper()
	# drop the leading zero of "0.x" / "-0.x" for a shorter encoding
	if text[:2] == "0.":
		text = text[1:]
	elif text[:3] == "-0.":
		text = "-" + text[2:]
	nibbles = []
	while text:
		char = text[0]
		text = text[1:]
		if char == "E" and text[:1] == "-":
			# "E-" is a single nibble in the encoding
			text = text[1:]
			char = "E-"
		nibbles.append(realNibblesDict[char])
	nibbles.append(0xf)  # terminator
	if len(nibbles) % 2:
		nibbles.append(0xf)  # pad to a whole byte
	result = bytechr(30)
	for hi, lo in zip(nibbles[0::2], nibbles[1::2]):
		result = result + bytechr(hi << 4 | lo)
	return result
+
+
class CharStringCompileError(Exception): pass  # raised by T2CharString.compile() on an illegal program
+
+
class T2CharString(ByteCodeBase):
	"""Type 2 (CFF) CharString.

	An instance holds either compiled `bytecode` (a byte string) or a
	decompiled `program` (a list of numbers, operator-name strings and
	raw hint-mask byte strings); the unused one is None.
	"""
	
	operandEncoding = t2OperandEncoding
	operators, opcodes = buildOperatorDict(t2Operators)
	
	def __init__(self, bytecode=None, program=None, private=None, globalSubrs=None):
		if program is None:
			program = []
		self.bytecode = bytecode
		self.program = program
		self.private = private
		self.globalSubrs = globalSubrs if globalSubrs is not None else []
	
	def __repr__(self):
		if self.bytecode is None:
			return "<%s (source) at %x>" % (self.__class__.__name__, id(self))
		else:
			return "<%s (bytecode) at %x>" % (self.__class__.__name__, id(self))
	
	def getIntEncoder(self):
		# int encoder used by compile()
		return encodeIntT2
	
	def getFixedEncoder(self):
		# float (16.16 fixed) encoder used by compile()
		return encodeFixed

	def decompile(self):
		"""Expand self.bytecode into self.program (no-op if already source)."""
		if not self.needsDecompilation():
			return
		subrs = getattr(self.private, "Subrs", [])
		decompiler = SimpleT2Decompiler(subrs, self.globalSubrs)
		decompiler.execute(self)
	
	def draw(self, pen):
		"""Draw the outline onto *pen*; sets self.width as a side effect."""
		subrs = getattr(self.private, "Subrs", [])
		extractor = T2OutlineExtractor(pen, subrs, self.globalSubrs,
				self.private.nominalWidthX, self.private.defaultWidthX)
		extractor.execute(self)
		self.width = extractor.width
	
	def compile(self):
		"""Compile self.program into self.bytecode (no-op if already compiled).

		Raises CharStringCompileError on an unknown operator name.
		"""
		if self.bytecode is not None:
			return
		assert self.program, "illegal CharString: decompiled to empty program"
		assert self.program[-1] in ("endchar", "return", "callsubr", "callgsubr",
				"seac"), "illegal CharString"
		bytecode = []
		opcodes = self.opcodes
		program = self.program
		encodeInt = self.getIntEncoder()
		encodeFixed = self.getFixedEncoder()
		i = 0
		end = len(program)
		while i < end:
			token = program[i]
			i = i + 1
			tp = type(token)
			if issubclass(tp, basestring):
				try:
					bytecode.extend(bytechr(b) for b in opcodes[token])
				except KeyError:
					raise CharStringCompileError("illegal operator: %s" % token)
				if token in ('hintmask', 'cntrmask'):
					# the raw mask bytes follow the operator in the program
					bytecode.append(program[i])  # hint mask
					i = i + 1
			elif tp == int:
				bytecode.append(encodeInt(token))
			elif tp == float:
				bytecode.append(encodeFixed(token))
			else:
				assert 0, "unsupported type: %s" % tp
		try:
			bytecode = bytesjoin(bytecode)
		except TypeError:
			print(bytecode)
			raise
		self.setBytecode(bytecode)
	
	def needsDecompilation(self):
		# True while the charstring is still in compiled (bytecode) form
		return self.bytecode is not None
	
	def setProgram(self, program):
		self.program = program
		self.bytecode = None
	
	def setBytecode(self, bytecode):
		self.bytecode = bytecode
		self.program = None
	
	def getToken(self, index, 
			len=len, byteord=byteord, getattr=getattr, type=type, StringType=str):
		"""Return (token, isOperator, nextIndex) for the token at *index*.

		Works on either bytecode or program form; returns (None, 0, 0)
		at the end of the charstring.
		"""
		if self.bytecode is not None:
			if index >= len(self.bytecode):
				return None, 0, 0
			b0 = byteord(self.bytecode[index])
			index = index + 1
			code = self.operandEncoding[b0]
			handler = getattr(self, code)
			token, index = handler(b0, self.bytecode, index)
		else:
			if index >= len(self.program):
				return None, 0, 0
			token = self.program[index]
			index = index + 1
		isOperator = isinstance(token, StringType)
		return token, isOperator, index
	
	def getBytes(self, index, nBytes):
		"""Return (bytes, nextIndex): *nBytes* raw bytes at *index*.

		Used to fetch hintmask/cntrmask data, which is stored inline.
		"""
		if self.bytecode is not None:
			newIndex = index + nBytes
			bytes = self.bytecode[index:newIndex]
			index = newIndex
		else:
			bytes = self.program[index]
			index = index + 1
		assert len(bytes) == nBytes
		return bytes, index
	
	def do_operator(self, b0, data, index):
		# operand-encoding handler: decode a (possibly two-byte escaped) operator
		if b0 == 12:
			op = (b0, byteord(data[index]))
			index = index+1
		else:
			op = b0
		operator = self.operators[op]
		return operator, index
	
	def toXML(self, xmlWriter):
		"""Write the charstring to *xmlWriter*: a hexdump when compiled,
		one operator per line (with binary-rendered hint masks) when not."""
		from fontTools.misc.textTools import num2binary
		if self.bytecode is not None:
			xmlWriter.dumphex(self.bytecode)
		else:
			index = 0
			args = []
			while True:
				token, isOperator, index = self.getToken(index)
				if token is None:
					break
				if isOperator:
					args = [str(arg) for arg in args]
					if token in ('hintmask', 'cntrmask'):
						hintMask, isOperator, index = self.getToken(index)
						bits = []
						for byte in hintMask:
							bits.append(num2binary(byteord(byte), 8))
						hintMask = strjoin(bits)
						line = ' '.join(args + [token, hintMask])
					else:
						line = ' '.join(args + [token])
					xmlWriter.write(line)
					xmlWriter.newline()
					args = []
				else:
					args.append(token)
	
	def fromXML(self, name, attrs, content):
		"""Rebuild the charstring from TTX XML content.

		A raw="1" attribute means *content* is a hexdump of bytecode;
		otherwise it is whitespace-separated source tokens, where a
		hintmask/cntrmask operator is followed by its mask in binary digits.
		"""
		from fontTools.misc.textTools import binary2num, readHex
		if attrs.get("raw"):
			self.setBytecode(readHex(content))
			return
		content = strjoin(content)
		content = content.split()
		program = []
		end = len(content)
		i = 0
		while i < end:
			token = content[i]
			i = i + 1
			try:
				token = int(token)
			except ValueError:
				try:
					token = float(token)
				except ValueError:
					# not a number: an operator name
					program.append(token)
					if token in ('hintmask', 'cntrmask'):
						# pack the binary-digit mask back into raw bytes
						mask = content[i]
						maskBytes = b""
						for j in range(0, len(mask), 8):
							maskBytes = maskBytes + bytechr(binary2num(mask[j:j+8]))
						program.append(maskBytes)
						i = i + 1
				else:
					program.append(token)
			else:
				program.append(token)
		self.setProgram(program)
+
+
# Type 1 charstring operator table: (opcode, name) pairs; (12, x) tuples
# are two-byte escaped operators.
t1Operators = [
#	opcode     name
	(1,        'hstem'),
	(3,        'vstem'),
	(4,        'vmoveto'),
	(5,        'rlineto'),
	(6,        'hlineto'),
	(7,        'vlineto'),
	(8,        'rrcurveto'),
	(9,        'closepath'),
	(10,       'callsubr'),
	(11,       'return'),
	(13,       'hsbw'),
	(14,       'endchar'),
	(21,       'rmoveto'),
	(22,       'hmoveto'),
	(30,       'vhcurveto'),
	(31,       'hvcurveto'),
	((12, 0),  'dotsection'),
	((12, 1),  'vstem3'),
	((12, 2),  'hstem3'),
	((12, 6),  'seac'),
	((12, 7),  'sbw'),
	((12, 12), 'div'),
	((12, 16), 'callothersubr'),
	((12, 17), 'pop'),
	((12, 33), 'setcurrentpoint'),
]
+
class T1CharString(T2CharString):
	"""Type 1 CharString.

	Reuses the T2CharString tokenizer/compiler machinery with the Type 1
	operand encoding and operator set.
	"""
	
	operandEncoding = t1OperandEncoding
	operators, opcodes = buildOperatorDict(t1Operators)
	
	def __init__(self, bytecode=None, program=None, subrs=None):
		if program is None:
			program = []
		self.bytecode = bytecode
		self.program = program
		self.subrs = subrs

	def getIntEncoder(self):
		# int encoder used by compile()
		return encodeIntT1

	def getFixedEncoder(self):
		def encodeFixed(value):
			raise TypeError("Type 1 charstrings don't support floating point operands")
		# Bug fix: the encoder must be returned. Previously this method
		# fell off the end and returned None, so compiling a program
		# containing a float crashed with "'NoneType' object is not
		# callable" instead of raising the intended TypeError above.
		return encodeFixed

	def decompile(self):
		"""Expand self.bytecode into self.program (no-op if already source)."""
		if self.bytecode is None:
			return
		program = []
		index = 0
		while True:
			token, isOperator, index = self.getToken(index)
			if token is None:
				break
			program.append(token)
		self.setProgram(program)

	def draw(self, pen):
		"""Draw the outline onto *pen*; sets self.width as a side effect."""
		extractor = T1OutlineExtractor(pen, self.subrs)
		extractor.execute(self)
		self.width = extractor.width
+
+
class SimpleT2Decompiler(object):
	"""Minimal T2 charstring interpreter.

	Executes a charstring just far enough to follow subroutine calls and
	track hint counts (needed to size hintmask data); as a side effect it
	records the token stream of any charstring that still needs
	decompilation. Arithmetic and storage operators are not supported.
	"""
	
	def __init__(self, localSubrs, globalSubrs):
		self.localSubrs = localSubrs
		self.localBias = calcSubrBias(localSubrs)
		self.globalSubrs = globalSubrs
		self.globalBias = calcSubrBias(globalSubrs)
		self.reset()
	
	def reset(self):
		# callingStack: charstrings currently being executed (for getBytes)
		# hintMaskBytes: 0 until the first hintmask/cntrmask fixes the size
		self.callingStack = []
		self.operandStack = []
		self.hintCount = 0
		self.hintMaskBytes = 0
	
	def execute(self, charString):
		"""Interpret *charString*, recursing into local/global subrs."""
		self.callingStack.append(charString)
		needsDecompilation = charString.needsDecompilation()
		if needsDecompilation:
			program = []
			pushToProgram = program.append
		else:
			pushToProgram = lambda x: None
		pushToStack = self.operandStack.append
		index = 0
		while True:
			token, isOperator, index = charString.getToken(index)
			if token is None:
				break  # we're done!
			pushToProgram(token)
			if isOperator:
				handlerName = "op_" + token
				if hasattr(self, handlerName):
					handler = getattr(self, handlerName)
					rv = handler(index)
					if rv:
						# hintmask/cntrmask handlers return the raw mask
						# bytes and the index just past them
						hintMaskBytes, index = rv
						pushToProgram(hintMaskBytes)
				else:
					# unhandled operator: just consume its operands
					self.popall()
			else:
				pushToStack(token)
		if needsDecompilation:
			assert program, "illegal CharString: decompiled to empty program"
			assert program[-1] in ("endchar", "return", "callsubr", "callgsubr",
					"seac"), "illegal CharString"
			charString.setProgram(program)
		del self.callingStack[-1]
	
	def pop(self):
		# pop and return the top operand
		value = self.operandStack[-1]
		del self.operandStack[-1]
		return value
	
	def popall(self):
		# return all operands and clear the stack
		stack = self.operandStack[:]
		self.operandStack[:] = []
		return stack
	
	def push(self, value):
		self.operandStack.append(value)
	
	def op_return(self, index):
		if self.operandStack:
			pass
	
	def op_endchar(self, index):
		pass

	def op_ignore(self, index):
		pass

	def op_callsubr(self, index):
		# subr numbers are stored biased; see calcSubrBias()
		subrIndex = self.pop()
		subr = self.localSubrs[subrIndex+self.localBias]
		self.execute(subr)
	
	def op_callgsubr(self, index):
		subrIndex = self.pop()
		subr = self.globalSubrs[subrIndex+self.globalBias]
		self.execute(subr)
	
	def op_hstem(self, index):
		self.countHints()
	def op_vstem(self, index):
		self.countHints()
	def op_hstemhm(self, index):
		self.countHints()
	def op_vstemhm(self, index):
		self.countHints()
	
	def op_hintmask(self, index):
		# the mask data immediately follows the operator; its size is one
		# bit per hint, rounded up to whole bytes
		if not self.hintMaskBytes:
			self.countHints()
			self.hintMaskBytes = (self.hintCount + 7) // 8
		hintMaskBytes, index = self.callingStack[-1].getBytes(index, self.hintMaskBytes)
		return hintMaskBytes, index
	
	op_cntrmask = op_hintmask
	
	def countHints(self):
		# each hint is a (position, width) pair of operands
		args = self.popall()
		self.hintCount = self.hintCount + len(args) // 2

	# misc
	def op_and(self, index):
		raise NotImplementedError
	def op_or(self, index):
		raise NotImplementedError
	def op_not(self, index):
		raise NotImplementedError
	def op_store(self, index):
		raise NotImplementedError
	def op_abs(self, index):
		raise NotImplementedError
	def op_add(self, index):
		raise NotImplementedError
	def op_sub(self, index):
		raise NotImplementedError
	def op_div(self, index):
		raise NotImplementedError
	def op_load(self, index):
		raise NotImplementedError
	def op_neg(self, index):
		raise NotImplementedError
	def op_eq(self, index):
		raise NotImplementedError
	def op_drop(self, index):
		raise NotImplementedError
	def op_put(self, index):
		raise NotImplementedError
	def op_get(self, index):
		raise NotImplementedError
	def op_ifelse(self, index):
		raise NotImplementedError
	def op_random(self, index):
		raise NotImplementedError
	def op_mul(self, index):
		raise NotImplementedError
	def op_sqrt(self, index):
		raise NotImplementedError
	def op_dup(self, index):
		raise NotImplementedError
	def op_exch(self, index):
		raise NotImplementedError
	def op_index(self, index):
		raise NotImplementedError
	def op_roll(self, index):
		raise NotImplementedError
+
class T2OutlineExtractor(SimpleT2Decompiler):
	"""T2 charstring interpreter that draws the glyph outline to a pen.

	Extends SimpleT2Decompiler with actual path construction; also
	extracts the advance width (self.width) from the first width-carrying
	operator.
	"""
	
	def __init__(self, pen, localSubrs, globalSubrs, nominalWidthX, defaultWidthX):
		SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs)
		self.pen = pen
		self.nominalWidthX = nominalWidthX
		self.defaultWidthX = defaultWidthX
	
	def reset(self):
		SimpleT2Decompiler.reset(self)
		self.hints = []
		self.gotWidth = 0
		self.width = 0
		# all T2 coordinates are relative; track the absolute current point
		self.currentPoint = (0, 0)
		self.sawMoveTo = 0
	
	def _nextPoint(self, point):
		# convert a relative (dx, dy) to an absolute point and advance
		x, y = self.currentPoint
		point = x + point[0], y + point[1]
		self.currentPoint = point
		return point
	
	def rMoveTo(self, point):
		self.pen.moveTo(self._nextPoint(point))
		self.sawMoveTo = 1

	def rLineTo(self, point):
		if not self.sawMoveTo:
			# implicit moveto for charstrings that start drawing right away
			self.rMoveTo((0, 0))
		self.pen.lineTo(self._nextPoint(point))

	def rCurveTo(self, pt1, pt2, pt3):
		if not self.sawMoveTo:
			self.rMoveTo((0, 0))
		nextPoint = self._nextPoint
		self.pen.curveTo(nextPoint(pt1), nextPoint(pt2), nextPoint(pt3))
	
	def closePath(self):
		if self.sawMoveTo:
			self.pen.closePath()
		self.sawMoveTo = 0
	
	def endPath(self):
		# In T2 there are no open paths, so always do a closePath when
		# finishing a sub path.
		self.closePath()

	def popallWidth(self, evenOdd=0):
		"""popall(), additionally stripping the optional leading width
		operand the first time a width-carrying operator is seen."""
		args = self.popall()
		if not self.gotWidth:
			if evenOdd ^ (len(args) % 2):
				# odd operand count (relative to the operator's expected
				# parity): the first operand is the width delta
				self.width = self.nominalWidthX + args[0]
				args = args[1:]
			else:
				self.width = self.defaultWidthX
			self.gotWidth = 1
		return args
	
	def countHints(self):
		args = self.popallWidth()
		self.hintCount = self.hintCount + len(args) // 2
	
	#
	# hint operators
	#
	#def op_hstem(self, index):
	#	self.countHints()
	#def op_vstem(self, index):
	#	self.countHints()
	#def op_hstemhm(self, index):
	#	self.countHints()
	#def op_vstemhm(self, index):
	#	self.countHints()
	#def op_hintmask(self, index):
	#	self.countHints()
	#def op_cntrmask(self, index):
	#	self.countHints()
	
	#
	# path constructors, moveto
	#
	def op_rmoveto(self, index):
		self.endPath()
		self.rMoveTo(self.popallWidth())
	def op_hmoveto(self, index):
		self.endPath()
		self.rMoveTo((self.popallWidth(1)[0], 0))
	def op_vmoveto(self, index):
		self.endPath()
		self.rMoveTo((0, self.popallWidth(1)[0]))
	def op_endchar(self, index):
		self.endPath()
		args = self.popallWidth()
		if args:
			from fontTools.encodings.StandardEncoding import StandardEncoding
			# endchar can do seac accent bulding; The T2 spec says it's deprecated,
			# but recent software that shall remain nameless does output it.
			adx, ady, bchar, achar = args
			baseGlyph = StandardEncoding[bchar]
			self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
			accentGlyph = StandardEncoding[achar]
			self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
	
	#
	# path constructors, lines
	#
	def op_rlineto(self, index):
		args = self.popall()
		for i in range(0, len(args), 2):
			point = args[i:i+2]
			self.rLineTo(point)
	
	def op_hlineto(self, index):
		self.alternatingLineto(1)
	def op_vlineto(self, index):
		self.alternatingLineto(0)
	
	#
	# path constructors, curves
	#
	def op_rrcurveto(self, index):
		"""{dxa dya dxb dyb dxc dyc}+ rrcurveto"""
		args = self.popall()
		for i in range(0, len(args), 6):
			dxa, dya, dxb, dyb, dxc, dyc, = args[i:i+6]
			self.rCurveTo((dxa, dya), (dxb, dyb), (dxc, dyc))
	
	def op_rcurveline(self, index):
		"""{dxa dya dxb dyb dxc dyc}+ dxd dyd rcurveline"""
		args = self.popall()
		for i in range(0, len(args)-2, 6):
			dxb, dyb, dxc, dyc, dxd, dyd = args[i:i+6]
			self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
		self.rLineTo(args[-2:])
	
	def op_rlinecurve(self, index):
		"""{dxa dya}+ dxb dyb dxc dyc dxd dyd rlinecurve"""
		args = self.popall()
		lineArgs = args[:-6]
		for i in range(0, len(lineArgs), 2):
			self.rLineTo(lineArgs[i:i+2])
		dxb, dyb, dxc, dyc, dxd, dyd = args[-6:]
		self.rCurveTo((dxb, dyb), (dxc, dyc), (dxd, dyd))
	
	def op_vvcurveto(self, index):
		"dx1? {dya dxb dyb dyc}+ vvcurveto"
		args = self.popall()
		if len(args) % 2:
			# odd count: first operand is the initial dx
			dx1 = args[0]
			args = args[1:]
		else:
			dx1 = 0
		for i in range(0, len(args), 4):
			dya, dxb, dyb, dyc = args[i:i+4]
			self.rCurveTo((dx1, dya), (dxb, dyb), (0, dyc))
			dx1 = 0
	
	def op_hhcurveto(self, index):
		"""dy1? {dxa dxb dyb dxc}+ hhcurveto"""
		args = self.popall()
		if len(args) % 2:
			dy1 = args[0]
			args = args[1:]
		else:
			dy1 = 0
		for i in range(0, len(args), 4):
			dxa, dxb, dyb, dxc = args[i:i+4]
			self.rCurveTo((dxa, dy1), (dxb, dyb), (dxc, 0))
			dy1 = 0
	
	def op_vhcurveto(self, index):
		"""dy1 dx2 dy2 dx3 {dxa dxb dyb dyc dyd dxe dye dxf}* dyf? vhcurveto (30)
		{dya dxb dyb dxc dxd dxe dye dyf}+ dxf? vhcurveto
		"""
		args = self.popall()
		while args:
			args = self.vcurveto(args)
			if args:
				args = self.hcurveto(args)
	
	def op_hvcurveto(self, index):
		"""dx1 dx2 dy2 dy3 {dya dxb dyb dxc dxd dxe dye dyf}* dxf?
		{dxa dxb dyb dyc dyd dxe dye dxf}+ dyf?
		"""
		args = self.popall()
		while args:
			args = self.hcurveto(args)
			if args:
				args = self.vcurveto(args)
	
	#
	# path constructors, flex
	#
	def op_hflex(self, index):
		dx1, dx2, dy2, dx3, dx4, dx5, dx6 = self.popall()
		dy1 = dy3 = dy4 = dy6 = 0
		dy5 = -dy2
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	def op_flex(self, index):
		# fd (flex depth) is ignored: we always render the curves
		dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, dx6, dy6, fd = self.popall()
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	def op_hflex1(self, index):
		dx1, dy1, dx2, dy2, dx3, dx4, dx5, dy5, dx6 = self.popall()
		dy3 = dy4 = 0
		# final dy brings the curve back to the starting y
		dy6 = -(dy1 + dy2 + dy3 + dy4 + dy5)

		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	def op_flex1(self, index):
		dx1, dy1, dx2, dy2, dx3, dy3, dx4, dy4, dx5, dy5, d6 = self.popall()
		dx = dx1 + dx2 + dx3 + dx4 + dx5
		dy = dy1 + dy2 + dy3 + dy4 + dy5
		# d6 applies to the dominant direction; the other delta closes
		# the flex back to the start coordinate
		if abs(dx) > abs(dy):
			dx6 = d6
			dy6 = -dy
		else:
			dx6 = -dx
			dy6 = d6
		self.rCurveTo((dx1, dy1), (dx2, dy2), (dx3, dy3))
		self.rCurveTo((dx4, dy4), (dx5, dy5), (dx6, dy6))
	
	#
	# MultipleMaster. Well...
	#
	def op_blend(self, index):
		self.popall()
	
	# misc
	def op_and(self, index):
		raise NotImplementedError
	def op_or(self, index):
		raise NotImplementedError
	def op_not(self, index):
		raise NotImplementedError
	def op_store(self, index):
		raise NotImplementedError
	def op_abs(self, index):
		raise NotImplementedError
	def op_add(self, index):
		raise NotImplementedError
	def op_sub(self, index):
		raise NotImplementedError
	def op_div(self, index):
		# push an int result when the division is exact, a float otherwise
		num2 = self.pop()
		num1 = self.pop()
		d1 = num1//num2
		d2 = num1/num2
		if d1 == d2:
			self.push(d1)
		else:
			self.push(d2)
	def op_load(self, index):
		raise NotImplementedError
	def op_neg(self, index):
		raise NotImplementedError
	def op_eq(self, index):
		raise NotImplementedError
	def op_drop(self, index):
		raise NotImplementedError
	def op_put(self, index):
		raise NotImplementedError
	def op_get(self, index):
		raise NotImplementedError
	def op_ifelse(self, index):
		raise NotImplementedError
	def op_random(self, index):
		raise NotImplementedError
	def op_mul(self, index):
		raise NotImplementedError
	def op_sqrt(self, index):
		raise NotImplementedError
	def op_dup(self, index):
		raise NotImplementedError
	def op_exch(self, index):
		raise NotImplementedError
	def op_index(self, index):
		raise NotImplementedError
	def op_roll(self, index):
		raise NotImplementedError
	
	#
	# miscellaneous helpers
	#
	def alternatingLineto(self, isHorizontal):
		# hlineto/vlineto: operands alternate between dx and dy
		args = self.popall()
		for arg in args:
			if isHorizontal:
				point = (arg, 0)
			else:
				point = (0, arg)
			self.rLineTo(point)
			isHorizontal = not isHorizontal
	
	def vcurveto(self, args):
		# one vertical-start curve segment; a single trailing operand is
		# the optional final delta in the other direction
		dya, dxb, dyb, dxc = args[:4]
		args = args[4:]
		if len(args) == 1:
			dyc = args[0]
			args = []
		else:
			dyc = 0
		self.rCurveTo((0, dya), (dxb, dyb), (dxc, dyc))
		return args
	
	def hcurveto(self, args):
		# one horizontal-start curve segment (see vcurveto)
		dxa, dxb, dyb, dyc = args[:4]
		args = args[4:]
		if len(args) == 1:
			dxc = args[0]
			args = []
		else:
			dxc = 0
		self.rCurveTo((dxa, 0), (dxb, dyb), (dxc, dyc))
		return args
+
+
class T1OutlineExtractor(T2OutlineExtractor):
	"""Type 1 charstring interpreter that draws the outline to a pen.

	Differs from T2 mainly in: explicit closepath (T1 paths may stay
	open), hsbw/sbw width handling, unbiased subrs, and the OtherSubrs
	flex protocol.
	"""
	
	def __init__(self, pen, subrs):
		self.pen = pen
		self.subrs = subrs
		self.reset()
	
	def reset(self):
		# flexing: nonzero while inside the OtherSubrs 1/0 flex sequence
		self.flexing = 0
		self.width = 0
		self.sbx = 0
		T2OutlineExtractor.reset(self)
	
	def endPath(self):
		# unlike T2, a T1 sub path may legitimately end open
		if self.sawMoveTo:
			self.pen.endPath()
		self.sawMoveTo = 0

	def popallWidth(self, evenOdd=0):
		# T1 widths come from hsbw/sbw, not from leading operands
		return self.popall()
	
	def exch(self):
		# swap the two topmost operands
		stack = self.operandStack
		stack[-1], stack[-2] = stack[-2], stack[-1]
	
	#
	# path constructors
	#
	def op_rmoveto(self, index):
		if self.flexing:
			# during flex, movetos carry flex control points: keep them
			# on the stack for doFlex()
			return
		self.endPath()
		self.rMoveTo(self.popall())
	def op_hmoveto(self, index):
		if self.flexing:
			# We must add a parameter to the stack if we are flexing
			self.push(0)
			return
		self.endPath()
		self.rMoveTo((self.popall()[0], 0))
	def op_vmoveto(self, index):
		if self.flexing:
			# We must add a parameter to the stack if we are flexing
			self.push(0)
			self.exch()
			return
		self.endPath()
		self.rMoveTo((0, self.popall()[0]))
	def op_closepath(self, index):
		self.closePath()
	def op_setcurrentpoint(self, index):
		args = self.popall()
		x, y = args
		self.currentPoint = x, y
	
	def op_endchar(self, index):
		self.endPath()
	
	def op_hsbw(self, index):
		# left sidebearing + advance width; sets the starting x
		sbx, wx = self.popall()
		self.width = wx
		self.sbx = sbx
		self.currentPoint = sbx, self.currentPoint[1]
	def op_sbw(self, index):
		self.popall()  # XXX
	
	#
	def op_callsubr(self, index):
		# T1 subr numbers are not biased
		subrIndex = self.pop()
		subr = self.subrs[subrIndex]
		self.execute(subr)
	def op_callothersubr(self, index):
		# only the flex OtherSubrs (1 = start, 0 = finish) are honored
		subrIndex = self.pop()
		nArgs = self.pop()
		#print nArgs, subrIndex, "callothersubr"
		if subrIndex == 0 and nArgs == 3:
			self.doFlex()
			self.flexing = 0
		elif subrIndex == 1 and nArgs == 0:
			self.flexing = 1
		# ignore...
	def op_pop(self, index):
		pass  # ignore...
	
	def doFlex(self):
		"""Replay the 14 accumulated flex operands as two rrcurvetos."""
		finaly = self.pop()
		finalx = self.pop()
		self.pop()	# flex height is unused
		
		p3y = self.pop()
		p3x = self.pop()
		bcp4y = self.pop()
		bcp4x = self.pop()
		bcp3y = self.pop()
		bcp3x = self.pop()
		p2y = self.pop()
		p2x = self.pop()
		bcp2y = self.pop()
		bcp2x = self.pop()
		bcp1y = self.pop()
		bcp1x = self.pop()
		rpy = self.pop()
		rpx = self.pop()
		
		# call rrcurveto
		self.push(bcp1x+rpx)
		self.push(bcp1y+rpy)
		self.push(bcp2x)
		self.push(bcp2y)
		self.push(p2x)
		self.push(p2y)
		self.op_rrcurveto(None)
		
		# call rrcurveto
		self.push(bcp3x)
		self.push(bcp3y)
		self.push(bcp4x)
		self.push(bcp4y)
		self.push(p3x)
		self.push(p3y)
		self.op_rrcurveto(None)
		
		# Push back final coords so subr 0 can find them
		self.push(finalx)
		self.push(finaly)
	
	def op_dotsection(self, index):
		self.popall()  # XXX
	def op_hstem3(self, index):
		self.popall()  # XXX
	def op_seac(self, index):
		"asb adx ady bchar achar seac"
		from fontTools.encodings.StandardEncoding import StandardEncoding
		asb, adx, ady, bchar, achar = self.popall()
		baseGlyph = StandardEncoding[bchar]
		self.pen.addComponent(baseGlyph, (1, 0, 0, 1, 0, 0))
		accentGlyph = StandardEncoding[achar]
		adx = adx + self.sbx - asb  # seac weirdness
		self.pen.addComponent(accentGlyph, (1, 0, 0, 1, adx, ady))
	def op_vstem3(self, index):
		self.popall()  # XXX
+
+
class DictDecompiler(ByteCodeBase):
	"""Decompile CFF DICT data into a Python dict.

	Subclasses (in cffLib) are expected to provide an `operators` mapping
	from opcode to (name, argType); argType selects the arg_* conversion
	applied to the operand stack.
	"""
	
	operandEncoding = cffDictOperandEncoding
	
	def __init__(self, strings):
		self.stack = []
		self.strings = strings  # CFF string INDEX, for SID operands
		self.dict = {}
	
	def getDict(self):
		"""Return the accumulated dict; all operands must be consumed."""
		assert not self.stack, "non-empty stack"
		return self.dict
	
	def decompile(self, data):
		"""Decode *data*, dispatching each lead byte through operandEncoding."""
		index = 0
		end = len(data)
		while index < end:
			b0 = byteord(data[index])
			index = index + 1
			handler = getattr(self, self.operandEncoding[b0])
			value, index = handler(b0, data, index)
			if value is not None:
				self.stack.append(value)
	
	def pop(self):
		# pop and return the top operand
		return self.stack.pop()
	
	def popall(self):
		# return all operands and clear the stack
		args = self.stack[:]
		del self.stack[:]
		return args
	
	def do_operator(self, b0, data, index):
		# decode a (possibly two-byte escaped) operator and store its value
		if b0 == 12:
			op = (b0, byteord(data[index]))
			index = index + 1
		else:
			op = b0
		operator, argType = self.operators[op]
		self.handle_operator(operator, argType)
		return None, index
	
	def handle_operator(self, operator, argType):
		"""Convert the operand stack per *argType* and record the result."""
		if isinstance(argType, tuple):
			# multiple typed arguments: convert right-to-left off the stack
			value = ()
			for argName in reversed(argType):
				converter = getattr(self, "arg_" + argName)
				value = (converter(operator),) + value
		else:
			value = getattr(self, "arg_" + argType)(operator)
		self.dict[operator] = value
	
	def arg_number(self, name):
		return self.pop()
	def arg_SID(self, name):
		# string ID: resolve through the string INDEX
		return self.strings[self.pop()]
	def arg_array(self, name):
		return self.popall()
	def arg_delta(self, name):
		# operands are stored as successive differences; integrate them
		out = []
		total = 0
		for delta in self.popall():
			total = total + delta
			out.append(total)
		return out
+
+
def calcSubrBias(subrs):
	"""Return the subroutine-number bias for a subr INDEX.

	Per the CFF spec, subr call operands are biased by an amount that
	depends on the number of subroutines in the INDEX.
	"""
	count = len(subrs)
	if count < 1240:
		return 107
	if count < 33900:
		return 1131
	return 32768
diff --git a/Lib/fontTools/misc/psLib.py b/Lib/fontTools/misc/psLib.py
new file mode 100644
index 0000000..90faa90
--- /dev/null
+++ b/Lib/fontTools/misc/psLib.py
@@ -0,0 +1,351 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import eexec
+from .psOperators import *
+import re
+import collections
+from string import whitespace
+
+
+ps_special = '()<>[]{}%'	# / is one too, but we take care of that one differently
+
+# regular expressions for the token classes recognized by PSTokenizer
+skipwhiteRE = re.compile("[%s]*" % whitespace)
+endofthingPat = "[^][(){}<>/%%%s]*" % whitespace
+endofthingRE = re.compile(endofthingPat)
+commentRE = re.compile("%[^\n\r]*")
+
+# XXX This not entirely correct as it doesn't allow *nested* embedded parens:
+stringPat = r"""
+	\(
+		(
+			(
+				[^()]*   \   [()]
+			)
+			|
+			(
+				[^()]*  \(   [^()]*  \)
+			)
+		)*
+		[^()]*
+	\)
+"""
+# collapse the verbose layout above into a compact pattern
+stringPat = "".join(stringPat.split())
+stringRE = re.compile(stringPat)
+
+hexstringRE = re.compile("<[%s0-9A-Fa-f]*>" % whitespace)
+
+class PSTokenError(Exception): pass	# malformed token at the tokenizer level
+class PSError(Exception): pass	# generic error during interpretation
+
+
+class PSTokenizer(StringIO):
+	"""Tokenizer splitting PostScript source into (tokentype, token) pairs.
+
+	NOTE(review): this relies on StringIO internals ('buf', 'pos', 'len',
+	'buflist') that exist only in the Python 2 StringIO module; under
+	Python 3 'StringIO' is io.BytesIO (see py23) which lacks these
+	attributes -- confirm the intended Python version.
+	"""
+	
+	def getnexttoken(self,
+			# localize some stuff, for performance
+			len=len,
+			ps_special=ps_special,
+			stringmatch=stringRE.match,
+			hexstringmatch=hexstringRE.match,
+			commentmatch=commentRE.match,
+			endmatch=endofthingRE.match, 
+			whitematch=skipwhiteRE.match):
+		"""Return the next (tokentype, token) pair, or (None, None) at EOF.
+
+		'tokentype' is the name of the interpreter handler method
+		('do_comment', 'do_string', ...) or '' for plain tokens.
+		"""
+		
+		# skip leading whitespace
+		_, nextpos = whitematch(self.buf, self.pos).span()
+		self.pos = nextpos
+		if self.pos >= self.len:
+			return None, None
+		pos = self.pos
+		buf = self.buf
+		char = buf[pos]
+		if char in ps_special:
+			if char in '{}[]':
+				tokentype = 'do_special'
+				token = char
+			elif char == '%':
+				tokentype = 'do_comment'
+				_, nextpos = commentmatch(buf, pos).span()
+				token = buf[pos:nextpos]
+			elif char == '(':
+				tokentype = 'do_string'
+				m = stringmatch(buf, pos)
+				if m is None:
+					raise PSTokenError('bad string at character %d' % pos)
+				_, nextpos = m.span()
+				token = buf[pos:nextpos]
+			elif char == '<':
+				tokentype = 'do_hexstring'
+				m = hexstringmatch(buf, pos)
+				if m is None:
+					raise PSTokenError('bad hexstring at character %d' % pos)
+				_, nextpos = m.span()
+				token = buf[pos:nextpos]
+			else:
+				raise PSTokenError('bad token at character %d' % pos)
+		else:
+			if char == '/':
+				# literal name: the token keeps its leading slash
+				tokentype = 'do_literal'
+				m = endmatch(buf, pos+1)
+			else:
+				# plain executable token; converted later by do_token
+				tokentype = ''
+				m = endmatch(buf, pos)
+			if m is None:
+				raise PSTokenError('bad token at character %d' % pos)
+			_, nextpos = m.span()
+			token = buf[pos:nextpos]
+		self.pos = pos + len(token)
+		return tokentype, token
+	
+	def skipwhite(self, whitematch=skipwhiteRE.match):
+		# advance self.pos past any whitespace
+		_, nextpos = whitematch(self.buf, self.pos).span()
+		self.pos = nextpos
+	
+	def starteexec(self):
+		# Switch to the eexec-encrypted portion: decrypt the remainder of
+		# the buffer in place (key 55665) and start reading at offset 4,
+		# skipping the leading random bytes of the encrypted block.
+		self.pos = self.pos + 1
+		#self.skipwhite()
+		self.dirtybuf = self.buf[self.pos:]
+		self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
+		self.len = len(self.buf)
+		self.pos = 4
+	
+	def stopeexec(self):
+		# restore the undecrypted buffer saved by starteexec()
+		if not hasattr(self, 'dirtybuf'):
+			return
+		self.buf = self.dirtybuf
+		del self.dirtybuf
+	
+	def flush(self):
+		# fold pending writes from buflist into buf
+		if self.buflist:
+			self.buf = self.buf + "".join(self.buflist)
+			self.buflist = []
+
+
+class PSInterpreter(PSOperators):
+	"""Minimal PostScript interpreter, just capable enough to execute
+	Type 1 font programs (see suckfont()).  Operators are inherited from
+	PSOperators and registered into systemdict by name.
+	"""
+	
+	def __init__(self):
+		systemdict = {}
+		userdict = {}
+		self.dictstack = [systemdict, userdict]
+		self.stack = []
+		self.proclevel = 0
+		self.procmark = ps_procmark()
+		self.fillsystemdict()
+	
+	def fillsystemdict(self):
+		# populate systemdict with built-in objects and every ps_* operator
+		systemdict = self.dictstack[0]
+		systemdict['['] = systemdict['mark'] = self.mark = ps_mark()
+		systemdict[']'] = ps_operator(']', self.do_makearray)
+		systemdict['true'] = ps_boolean(1)
+		systemdict['false'] = ps_boolean(0)
+		systemdict['StandardEncoding'] = ps_array(ps_StandardEncoding)
+		systemdict['FontDirectory'] = ps_dict({})
+		self.suckoperators(systemdict, self.__class__)
+	
+	def suckoperators(self, systemdict, klass):
+		# Register every callable method named 'ps_*' (including those of
+		# base classes) as an operator, minus the 'ps_' prefix.
+		# NOTE(review): collections.Callable was removed in Python 3.10;
+		# modern code needs collections.abc.Callable -- confirm target version.
+		for name in dir(klass):
+			attr = getattr(self, name)
+			if isinstance(attr, collections.Callable) and name[:3] == 'ps_':
+				name = name[3:]
+				systemdict[name] = ps_operator(name, attr)
+		for baseclass in klass.__bases__:
+			self.suckoperators(systemdict, baseclass)
+	
+	def interpret(self, data, getattr = getattr):
+		"""Tokenize and execute the PostScript program in 'data'."""
+		tokenizer = self.tokenizer = PSTokenizer(data)
+		getnexttoken = tokenizer.getnexttoken
+		do_token = self.do_token
+		handle_object = self.handle_object
+		try:
+			while 1:
+				tokentype, token = getnexttoken()
+				#print token
+				if not token:
+					break
+				if tokentype:
+					handler = getattr(self, tokentype)
+					object = handler(token)
+				else:
+					object = do_token(token)
+				if object is not None:
+					handle_object(object)
+			tokenizer.close()
+			self.tokenizer = None
+		finally:
+			# self.tokenizer is still set only if an error interrupted the
+			# loop; the disabled block below dumps context around the error
+			if self.tokenizer is not None:
+				if 0:
+					print('ps error:\n- - - - - - -')
+					print(self.tokenizer.buf[self.tokenizer.pos-50:self.tokenizer.pos])
+					print('>>>')
+					print(self.tokenizer.buf[self.tokenizer.pos:self.tokenizer.pos+50])
+					print('- - - - - - -')
+	
+	def handle_object(self, object):
+		# Execute the object unless we are inside a procedure definition,
+		# or it is a literal or a procedure -- those are just pushed.
+		if not (self.proclevel or object.literal or object.type == 'proceduretype'):
+			if object.type != 'operatortype':
+				object = self.resolve_name(object.value)
+			if object.literal:
+				self.push(object)
+			else:
+				if object.type == 'proceduretype':
+					self.call_procedure(object)
+				else:
+					object.function()
+		else:
+			self.push(object)
+	
+	def call_procedure(self, proc):
+		# execute each element of the procedure in sequence
+		handle_object = self.handle_object
+		for item in proc.value:
+			handle_object(item)
+	
+	def resolve_name(self, name):
+		# look the name up in the dict stack, innermost dict first
+		dictstack = self.dictstack
+		for i in range(len(dictstack)-1, -1, -1):
+			if name in dictstack[i]:
+				return dictstack[i][name]
+		raise PSError('name error: ' + str(name))
+	
+	def do_token(self, token,
+				int=int, 
+				float=float,
+				ps_name=ps_name,
+				ps_integer=ps_integer,
+				ps_real=ps_real):
+		"""Convert a plain token into an integer, real, radix number
+		(base#digits) or name object, tried in that order."""
+		try:
+			num = int(token)
+		except (ValueError, OverflowError):
+			try:
+				num = float(token)
+			except (ValueError, OverflowError):
+				if '#' in token:
+					hashpos = token.find('#')
+					try:
+						base = int(token[:hashpos])
+						num = int(token[hashpos+1:], base)
+					except (ValueError, OverflowError):
+						return ps_name(token)
+					else:
+						return ps_integer(num)
+				else:
+					return ps_name(token)
+			else:
+				return ps_real(num)
+		else:
+			return ps_integer(num)
+	
+	def do_comment(self, token):
+		# comments are simply discarded
+		pass
+	
+	def do_literal(self, token):
+		# strip the leading slash
+		return ps_literal(token[1:])
+	
+	def do_string(self, token):
+		# strip the surrounding parentheses
+		return ps_string(token[1:-1])
+	
+	def do_hexstring(self, token):
+		# decode hex digits (embedded whitespace allowed); an odd digit
+		# count is padded with a trailing zero nibble
+		hexStr = "".join(token[1:-1].split())
+		if len(hexStr) % 2:
+			hexStr = hexStr + '0'
+		cleanstr = []
+		for i in range(0, len(hexStr), 2):
+			cleanstr.append(chr(int(hexStr[i:i+2], 16)))
+		cleanstr = "".join(cleanstr)
+		return ps_string(cleanstr)
+	
+	def do_special(self, token):
+		# handle the {, }, [ and ] tokens
+		if token == '{':
+			self.proclevel = self.proclevel + 1
+			return self.procmark
+		elif token == '}':
+			# collect everything back to the matching procmark
+			proc = []
+			while 1:
+				topobject = self.pop()
+				if topobject == self.procmark:
+					break
+				proc.append(topobject)
+			self.proclevel = self.proclevel - 1
+			proc.reverse()
+			return ps_procedure(proc)
+		elif token == '[':
+			return self.mark
+		elif token == ']':
+			return ps_name(']')
+		else:
+			raise PSTokenError('huh?')
+	
+	def push(self, object):
+		self.stack.append(object)
+	
+	def pop(self, *types):
+		"""Pop and return the top object; if 'types' is given, the
+		object's type must be one of them (PSError otherwise)."""
+		stack = self.stack
+		if not stack:
+			raise PSError('stack underflow')
+		object = stack[-1]
+		if types:
+			if object.type not in types:
+				raise PSError('typecheck, expected %s, found %s' % (repr(types), object.type))
+		del stack[-1]
+		return object
+	
+	def do_makearray(self):
+		# implements ']': collect elements back to the mark into an array
+		array = []
+		while 1:
+			topobject = self.pop()
+			if topobject == self.mark:
+				break
+			array.append(topobject)
+		array.reverse()
+		self.push(ps_array(array))
+	
+	def close(self):
+		"""Remove circular references."""
+		del self.stack
+		del self.dictstack
+
+
+def unpack_item(item):
+	tp = type(item.value)
+	if tp == dict:
+		newitem = {}
+		for key, value in item.value.items():
+			newitem[key] = unpack_item(value)
+	elif tp == list:
+		newitem = [None] * len(item.value)
+		for i in range(len(item.value)):
+			newitem[i] = unpack_item(item.value[i])
+		if item.type == 'proceduretype':
+			newitem = tuple(newitem)
+	else:
+		newitem = item.value
+	return newitem
+
+def suckfont(data):
+	import re
+	m = re.search(br"/FontName\s+/([^ \t\n\r]+)\s+def", data)
+	if m:
+		fontName = m.group(1)
+	else:
+		fontName = None
+	interpreter = PSInterpreter()
+	interpreter.interpret(b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop")
+	interpreter.interpret(data)
+	fontdir = interpreter.dictstack[0]['FontDirectory'].value
+	if fontName in fontdir:
+		rawfont = fontdir[fontName]
+	else:
+		# fall back, in case fontName wasn't found
+		fontNames = list(fontdir.keys())
+		if len(fontNames) > 1:
+			fontNames.remove("Helvetica")
+		fontNames.sort()
+		rawfont = fontdir[fontNames[0]]
+	interpreter.close()
+	return unpack_item(rawfont)
+
+
+if __name__ == "__main__":
+	# Interactive smoke test: prompt for a Type 1 font file and parse it.
+	# NOTE(review): EasyDialogs is a classic-MacOS module -- confirm this
+	# entry point is still wanted.
+	import EasyDialogs
+	path = EasyDialogs.AskFileForOpen()
+	if path:
+		from fontTools import t1Lib
+		data, kind = t1Lib.read(path)
+		font = suckfont(data)
diff --git a/Lib/fontTools/misc/psOperators.py b/Lib/fontTools/misc/psOperators.py
new file mode 100644
index 0000000..57cfbe8
--- /dev/null
+++ b/Lib/fontTools/misc/psOperators.py
@@ -0,0 +1,541 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+# maps ps_object.access levels to the PostScript access-operator names
+_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"}
+
+
+class ps_object:
+	"""Base class for all PostScript object wrappers.
+
+	'literal' distinguishes literal from executable objects, 'access'
+	holds the access level (see _accessstrings) and 'value' the payload.
+	"""
+	
+	literal = 1
+	access = 0
+	value = None
+	
+	def __init__(self, value):
+		self.value = value
+		# e.g. ps_integer -> "integertype", matching PostScript type names
+		self.type = self.__class__.__name__[3:] + "type"
+	
+	def __repr__(self):
+		return "<%s %s>" % (self.__class__.__name__[3:], repr(self.value))
+
+
+class ps_operator(ps_object):
+	"""An executable operator: 'function' is the bound Python method
+	implementing it (see PSInterpreter.suckoperators)."""
+	
+	literal = 0
+	
+	def __init__(self, name, function):
+		self.name = name
+		self.function = function
+		self.type = self.__class__.__name__[3:] + "type"
+	def __repr__(self):
+		return "<operator %s>" % self.name
+
+class ps_procedure(ps_object):
+	literal = 0
+	def __repr__(self):
+		return "<procedure>"
+	def __str__(self):
+		psstring = '{'
+		for i in range(len(self.value)):
+			if i:
+				psstring = psstring + ' ' + str(self.value[i])
+			else:
+				psstring = psstring + str(self.value[i])
+		return psstring + '}'
+
+class ps_name(ps_object):
+	# executable by default; an instance may be marked literal, in which
+	# case it prints with the leading slash
+	literal = 0
+	def __str__(self):
+		if self.literal:
+			return '/' + self.value
+		else:
+			return self.value
+
+class ps_literal(ps_object):
+	# a literal name; always rendered with the leading slash
+	def __str__(self):
+		return '/' + self.value
+
+class ps_array(ps_object):
+	def __str__(self):
+		psstring = '['
+		for i in range(len(self.value)):
+			item = self.value[i]
+			access = _accessstrings[item.access]
+			if access:
+				access = ' ' + access
+			if i:
+				psstring = psstring + ' ' + str(item) + access
+			else:
+				psstring = psstring + str(item) + access
+		return psstring + ']'
+	def __repr__(self):
+		return "<array>"
+
+# canonical key order for the cleartext (pre-eexec) part of a Type 1 font
+_type1_pre_eexec_order = [
+		"FontInfo",
+		"FontName",
+		"Encoding",
+		"PaintType",
+		"FontType",
+		"FontMatrix",
+		"FontBBox",
+		"UniqueID",
+		"Metrics",
+		"StrokeWidth"
+	]
+
+# canonical key order within the FontInfo dict
+_type1_fontinfo_order = [
+		"version",
+		"Notice",
+		"FullName",
+		"FamilyName",
+		"Weight",
+		"ItalicAngle",
+		"isFixedPitch",
+		"UnderlinePosition",
+		"UnderlineThickness"
+	]
+
+# canonical key order for the eexec-encrypted part
+_type1_post_eexec_order = [
+		"Private",
+		"CharStrings",
+		"FID"
+	]
+
+def _type1_item_repr(key, value):
+	# Serialize one font-dict entry as "/key value [access ]def\n";
+	# CharStrings and Encoding get specialized formatting.
+	psstring = ""
+	access = _accessstrings[value.access]
+	if access:
+		access = access + ' '
+	if key == 'CharStrings':
+		psstring = psstring + "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
+	elif key == 'Encoding':
+		psstring = psstring + _type1_Encoding_repr(value, access)
+	else:
+		psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
+	return psstring
+
+def _type1_Encoding_repr(encoding, access):
+	# Compact form: fill all 256 slots with /.notdef first, then only
+	# write the entries that differ.
+	encoding = encoding.value
+	psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
+	for i in range(256):
+		name = encoding[i].value
+		if name != '.notdef':
+			psstring = psstring + "dup %d /%s put\n" % (i, name)
+	return psstring + access + "def\n"
+
+def _type1_CharString_repr(charstrings):
+	items = sorted(charstrings.items())
+	# TODO(review): unfinished -- 'items' is unused and a placeholder
+	# string is returned; real charstring serialization is not implemented.
+	return 'xxx'
+
+class ps_font(ps_object):
+	"""A Type 1 font dict; __str__ serializes it as a PostScript program
+	with the standard cleartext / eexec layout."""
+	def __str__(self):
+		psstring = "%d dict dup begin\n" % len(self.value)
+		# cleartext part, in canonical key order
+		for key in _type1_pre_eexec_order:
+			try:
+				value = self.value[key]
+			except KeyError:
+				pass
+			else:
+				psstring = psstring + _type1_item_repr(key, value)
+		items = sorted(self.value.items())
+		# then any keys not covered by the canonical orders
+		for key, value in items:
+			if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
+				psstring = psstring + _type1_item_repr(key, value)
+		psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
+		# encrypted part
+		for key in _type1_post_eexec_order:
+			try:
+				value = self.value[key]
+			except KeyError:
+				pass
+			else:
+				psstring = psstring + _type1_item_repr(key, value)
+		# trailer: 8 lines of 64 zeros, then cleartomark
+		return psstring + 'dup/FontName get exch definefont pop\nmark currentfile closefile\n' + \
+				8 * (64 * '0' + '\n') + 'cleartomark' + '\n'
+	def __repr__(self):
+		return '<font>'
+
+class ps_file(ps_object):
+	"""Wrapper for a file-like value (e.g. the tokenizer itself)."""
+	pass
+
+class ps_dict(ps_object):
+	def __str__(self):
+		psstring = "%d dict dup begin\n" % len(self.value)
+		items = sorted(self.value.items())
+		for key, value in items:
+			access = _accessstrings[value.access]
+			if access:
+				access = access + ' '
+			psstring = psstring + "/%s %s %sdef\n" % (str(key), str(value), access)
+		return psstring + 'end '
+	def __repr__(self):
+		return "<dict>"
+
+class ps_mark(ps_object):
+	# stack mark pushed by '[' / 'mark'; consumed by ']' and cleartomark
+	def __init__(self): 
+		self.value = 'mark'
+		self.type = self.__class__.__name__[3:] + "type"
+
+class ps_procmark(ps_object):
+	# stack mark pushed by '{'; consumed by '}' (see do_special)
+	def __init__(self):
+		self.value = 'procmark'
+		self.type = self.__class__.__name__[3:] + "type"
+
+class ps_null(ps_object):
+	# null carries no payload; only the type tag is set
+	def __init__(self):
+		self.type = self.__class__.__name__[3:] + "type"
+
+class ps_boolean(ps_object):
+	def __str__(self):
+		if self.value:
+			return 'true'
+		else:
+			return 'false'
+
+class ps_string(ps_object):
+	def __str__(self):
+		# repr() escapes non-printables; strip repr's surrounding quotes
+		return "(%s)" % repr(self.value)[1:-1]
+
+class ps_integer(ps_object):
+	# prints as a plain decimal integer
+	def __str__(self):
+		return repr(self.value)
+
+class ps_real(ps_object):
+	# prints using Python's float repr
+	def __str__(self):
+		return repr(self.value)
+
+
+class PSOperators:
+	"""Mixin implementing the PostScript operators needed to run Type 1
+	font programs.  Every 'ps_*' method is registered as the operator of
+	the same name (minus the prefix) by PSInterpreter.suckoperators().
+	The methods rely on push/pop/call_procedure/resolve_name and the
+	dictstack provided by PSInterpreter.
+	"""
+	
+	def ps_def(self):
+		# define key/value in the current (top) dictionary
+		obj = self.pop()
+		name = self.pop()
+		self.dictstack[-1][name.value] = obj
+	
+	def ps_bind(self):
+		proc = self.pop('proceduretype')
+		self.proc_bind(proc)
+		self.push(proc)
+	
+	def proc_bind(self, proc):
+		# recursively replace executable names that resolve to operators
+		# by the operator objects themselves
+		for i in range(len(proc.value)):
+			item = proc.value[i]
+			if item.type == 'proceduretype':
+				self.proc_bind(item)
+			else:
+				if not item.literal:
+					try:
+						obj = self.resolve_name(item.value)
+					# NOTE(review): bare except -- presumably meant to catch
+					# PSError from resolve_name only; confirm.
+					except:
+						pass
+					else:
+						if obj.type == 'operatortype':
+							proc.value[i] = obj
+	
+	def ps_exch(self):
+		# swap the two topmost stack objects
+		if len(self.stack) < 2:
+			raise RuntimeError('stack underflow')
+		obj1 = self.pop()
+		obj2 = self.pop()
+		self.push(obj1)
+		self.push(obj2)
+	
+	def ps_dup(self):
+		# duplicate the top object (same object, not a copy)
+		if not self.stack:
+			raise RuntimeError('stack underflow')
+		self.push(self.stack[-1])
+	
+	def ps_exec(self):
+		obj = self.pop()
+		if obj.type == 'proceduretype':
+			self.call_procedure(obj)
+		else:
+			self.handle_object(obj)
+	
+	def ps_count(self):
+		self.push(ps_integer(len(self.stack)))
+	
+	def ps_eq(self):
+		any1 = self.pop()
+		any2 = self.pop()
+		self.push(ps_boolean(any1.value == any2.value))
+	
+	def ps_ne(self):
+		any1 = self.pop()
+		any2 = self.pop()
+		self.push(ps_boolean(any1.value != any2.value))
+	
+	def ps_cvx(self):
+		# mark the top object as executable
+		obj = self.pop()
+		obj.literal = 0
+		self.push(obj)
+	
+	def ps_matrix(self):
+		# push the identity transformation matrix
+		matrix = [ps_real(1.0), ps_integer(0), ps_integer(0), ps_real(1.0), ps_integer(0), ps_integer(0)]
+		self.push(ps_array(matrix))
+	
+	def ps_string(self):
+		# note: 'ps_string(...)' below resolves to the module-level
+		# ps_string class, not this method (class bodies are not a scope
+		# for method lookups)
+		num = self.pop('integertype').value
+		self.push(ps_string('\0' * num))
+	
+	def ps_type(self):
+		obj = self.pop()
+		self.push(ps_string(obj.type))
+	
+	def ps_store(self):
+		value = self.pop()
+		key = self.pop()
+		name = key.value
+		for i in range(len(self.dictstack)-1, -1, -1):
+			if name in self.dictstack[i]:
+				self.dictstack[i][name] = value
+				break
+		# NOTE(review): the value is also written into the top dict even
+		# when an existing entry was updated above; PostScript's 'store'
+		# defines in the topmost dict only when the key is not found --
+		# confirm whether this double write is intended.
+		self.dictstack[-1][name] = value
+	
+	def ps_where(self):
+		name = self.pop()
+		# XXX
+		self.push(ps_boolean(0))
+	
+	def ps_systemdict(self):
+		self.push(ps_dict(self.dictstack[0]))
+	
+	def ps_userdict(self):
+		self.push(ps_dict(self.dictstack[1]))
+	
+	def ps_currentdict(self):
+		self.push(ps_dict(self.dictstack[-1]))
+	
+	def ps_currentfile(self):
+		# the "current file" is the tokenizer feeding the interpreter
+		self.push(ps_file(self.tokenizer))
+	
+	def ps_eexec(self):
+		# switch the tokenizer to eexec-decryption mode
+		f = self.pop('filetype').value
+		f.starteexec()
+	
+	def ps_closefile(self):
+		f = self.pop('filetype').value
+		f.skipwhite()
+		f.stopeexec()
+	
+	def ps_cleartomark(self):
+		# discard stack objects down to and including the mark
+		obj = self.pop()
+		while obj != self.mark:
+			obj = self.pop()
+	
+	def ps_readstring(self,
+				ps_boolean = ps_boolean,
+				len = len):
+		s = self.pop('stringtype')
+		oldstr = s.value
+		f = self.pop('filetype')
+		#pad = file.value.read(1)
+		# for StringIO, this is faster
+		f.value.pos = f.value.pos + 1
+		newstr = f.value.read(len(oldstr))
+		s.value = newstr
+		self.push(s)
+		# success flag: true iff a full-length string could be read
+		self.push(ps_boolean(len(oldstr) == len(newstr)))
+	
+	def ps_known(self):
+		key = self.pop()
+		d = self.pop('dicttype', 'fonttype')
+		self.push(ps_boolean(key.value in d.value))
+	
+	def ps_if(self):
+		proc = self.pop('proceduretype')
+		if self.pop('booleantype').value:
+			self.call_procedure(proc)
+	
+	def ps_ifelse(self):
+		proc2 = self.pop('proceduretype')
+		proc1 = self.pop('proceduretype')
+		if self.pop('booleantype').value:
+			self.call_procedure(proc1)
+		else:
+			self.call_procedure(proc2)
+	
+	def ps_readonly(self):
+		# access levels only ever increase
+		obj = self.pop()
+		if obj.access < 1:
+			obj.access = 1
+		self.push(obj)
+	
+	def ps_executeonly(self):
+		obj = self.pop()
+		if obj.access < 2:
+			obj.access = 2
+		self.push(obj)
+	
+	def ps_noaccess(self):
+		obj = self.pop()
+		if obj.access < 3:
+			obj.access = 3
+		self.push(obj)
+	
+	def ps_not(self):
+		# logical not for booleans, bitwise complement for integers
+		obj = self.pop('booleantype', 'integertype')
+		if obj.type == 'booleantype':
+			self.push(ps_boolean(not obj.value))
+		else:
+			self.push(ps_integer(~obj.value))
+	
+	def ps_print(self):
+		str = self.pop('stringtype')
+		print('PS output --->', str.value)
+	
+	def ps_anchorsearch(self):
+		# if the string starts with 'seek', push remainder, match and true;
+		# otherwise push the original string and false
+		seek = self.pop('stringtype')
+		s = self.pop('stringtype')
+		seeklen = len(seek.value)
+		if s.value[:seeklen] == seek.value:
+			self.push(ps_string(s.value[seeklen:]))
+			self.push(seek)
+			self.push(ps_boolean(1))
+		else:
+			self.push(s)
+			self.push(ps_boolean(0))
+	
+	def ps_array(self):
+		num = self.pop('integertype')
+		array = ps_array([None] * num.value)
+		self.push(array)
+	
+	def ps_astore(self):
+		# fill the array from the stack, last-popped element first
+		array = self.pop('arraytype')
+		for i in range(len(array.value)-1, -1, -1):
+			array.value[i] = self.pop()
+		self.push(array)
+	
+	def ps_load(self):
+		name = self.pop()
+		self.push(self.resolve_name(name.value))
+	
+	def ps_put(self):
+		obj1 = self.pop()
+		obj2 = self.pop()
+		obj3 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype')
+		tp = obj3.type
+		if tp == 'arraytype' or tp == 'proceduretype':
+			obj3.value[obj2.value] = obj1
+		elif tp == 'dicttype':
+			obj3.value[obj2.value] = obj1
+		elif tp == 'stringtype':
+			# strings are immutable; rebuild with the byte replaced
+			index = obj2.value
+			obj3.value = obj3.value[:index] + chr(obj1.value) + obj3.value[index+1:]
+	
+	def ps_get(self):
+		obj1 = self.pop()
+		if obj1.value == "Encoding":
+			pass
+		obj2 = self.pop('arraytype', 'dicttype', 'stringtype', 'proceduretype', 'fonttype')
+		tp = obj2.type
+		if tp in ('arraytype', 'proceduretype'):
+			self.push(obj2.value[obj1.value])
+		elif tp in ('dicttype', 'fonttype'):
+			self.push(obj2.value[obj1.value])
+		elif tp == 'stringtype':
+			self.push(ps_integer(ord(obj2.value[obj1.value])))
+		else:
+			assert False, "shouldn't get here"
+	
+	def ps_getinterval(self):
+		# (container index count) -> sub-array / sub-string
+		obj1 = self.pop('integertype')
+		obj2 = self.pop('integertype')
+		obj3 = self.pop('arraytype', 'stringtype')
+		tp = obj3.type
+		if tp == 'arraytype':
+			self.push(ps_array(obj3.value[obj2.value:obj2.value + obj1.value]))
+		elif tp == 'stringtype':
+			self.push(ps_string(obj3.value[obj2.value:obj2.value + obj1.value]))
+	
+	def ps_putinterval(self):
+		# write obj1's contents into obj3 starting at index obj2
+		obj1 = self.pop('arraytype', 'stringtype')
+		obj2 = self.pop('integertype')
+		obj3 = self.pop('arraytype', 'stringtype')
+		tp = obj3.type
+		if tp == 'arraytype':
+			obj3.value[obj2.value:obj2.value + len(obj1.value)] = obj1.value
+		elif tp == 'stringtype':
+			newstr = obj3.value[:obj2.value]
+			newstr = newstr + obj1.value
+			newstr = newstr + obj3.value[obj2.value + len(obj1.value):]
+			obj3.value = newstr
+	
+	def ps_cvn(self):
+		self.push(ps_name(self.pop('stringtype').value))
+	
+	def ps_index(self):
+		n = self.pop('integertype').value
+		if n < 0:
+			raise RuntimeError('index may not be negative')
+		self.push(self.stack[-1-n])
+	
+	def ps_for(self):
+		proc = self.pop('proceduretype')
+		limit = self.pop('integertype', 'realtype').value
+		increment = self.pop('integertype', 'realtype').value
+		i = self.pop('integertype', 'realtype').value
+		while 1:
+			# loop direction depends on the sign of the increment
+			if increment > 0:
+				if i > limit:
+					break
+			else:
+				if i < limit:
+					break
+			if type(i) == type(0.0):
+				self.push(ps_real(i))
+			else:
+				self.push(ps_integer(i))
+			self.call_procedure(proc)
+			i = i + increment
+	
+	def ps_forall(self):
+		proc = self.pop('proceduretype')
+		obj = self.pop('arraytype', 'stringtype', 'dicttype')
+		tp = obj.type
+		if tp == 'arraytype':
+			for item in obj.value:
+				self.push(item)
+				self.call_procedure(proc)
+		elif tp == 'stringtype':
+			# string elements are pushed as integer character codes
+			for item in obj.value:
+				self.push(ps_integer(ord(item)))
+				self.call_procedure(proc)
+		elif tp == 'dicttype':
+			for key, value in obj.value.items():
+				self.push(ps_name(key))
+				self.push(value)
+				self.call_procedure(proc)		
+	
+	def ps_definefont(self):
+		# wrap the dict in ps_font and register it in FontDirectory
+		font = self.pop('dicttype')
+		name = self.pop()
+		font = ps_font(font.value)
+		self.dictstack[0]['FontDirectory'].value[name.value] = font
+		self.push(font)
+	
+	def ps_findfont(self):
+		name = self.pop()
+		font = self.dictstack[0]['FontDirectory'].value[name.value]
+		self.push(font)
+	
+	def ps_pop(self):
+		self.pop()
+	
+	def ps_dict(self):
+		# the requested capacity is irrelevant for Python dicts
+		self.pop('integertype')
+		self.push(ps_dict({}))
+	
+	def ps_begin(self):
+		self.dictstack.append(self.pop('dicttype').value)
+	
+	def ps_end(self):
+		# systemdict and userdict must always remain on the stack
+		if len(self.dictstack) > 2:
+			del self.dictstack[-1]
+		else:
+			raise RuntimeError('dictstack underflow')
+	
+notdef = '.notdef'
+# NOTE(review): import placed at the bottom of the module, presumably to
+# avoid an import cycle -- confirm before moving it to the top.
+from fontTools.encodings.StandardEncoding import StandardEncoding
+# StandardEncoding wrapped as 256 ps_name objects, ready for ps_array
+ps_StandardEncoding = list(map(ps_name, StandardEncoding))
+
diff --git a/Lib/fontTools/misc/py23.py b/Lib/fontTools/misc/py23.py
new file mode 100644
index 0000000..90217a3
--- /dev/null
+++ b/Lib/fontTools/misc/py23.py
@@ -0,0 +1,83 @@
+"""Python 2/3 compat layer."""
+
+from __future__ import print_function, division, absolute_import
+
+try:
+	basestring
+except NameError:
+	basestring = str
+
+try:
+	unicode
+except NameError:
+	unicode = str
+
+try:
+	unichr
+	bytechr = chr
+	byteord = ord
+except:
+	unichr = chr
+	def bytechr(n):
+		return bytes([n])
+	def byteord(c):
+		return c if isinstance(c, int) else ord(c)
+
+try:
+	from StringIO import StringIO
+except ImportError:
+	from io import BytesIO as StringIO
+
+def strjoin(iterable):
+	"""Concatenate an iterable of native strings."""
+	return ''.join(iterable)
+# On Python 2 (str is bytes) Tag/tostr/tobytes are trivial; on Python 3
+# they transcode between str and bytes via latin-1/ascii.
+if str == bytes:
+	class Tag(str):
+		def tobytes(self):
+			if isinstance(self, bytes):
+				return self
+			else:
+				return self.encode('latin1')
+
+	def tostr(s, encoding='ascii'):
+		if not isinstance(s, str):
+			return s.encode(encoding)
+		else:
+			return s
+	tobytes = tostr
+
+	bytesjoin = strjoin
+else:
+	class Tag(str):
+
+		@staticmethod
+		def transcode(blob):
+			# accept bytes or str; tags are latin-1 (4 raw bytes)
+			if not isinstance(blob, str):
+				blob = blob.decode('latin-1')
+			return blob
+
+		def __new__(self, content):
+			return str.__new__(self, self.transcode(content))
+		def __ne__(self, other):
+			return not self.__eq__(other)
+		def __eq__(self, other):
+			return str.__eq__(self, self.transcode(other))
+
+		def __hash__(self):
+			return str.__hash__(self)
+
+		def tobytes(self):
+			return self.encode('latin-1')
+
+	def tostr(s, encoding='ascii'):
+		if not isinstance(s, str):
+			return s.decode(encoding)
+		else:
+			return s
+	def tobytes(s, encoding='ascii'):
+		if not isinstance(s, bytes):
+			return s.encode(encoding)
+		else:
+			return s
+
+	def bytesjoin(iterable):
+		return b''.join(tobytes(item) for item in iterable)
diff --git a/Lib/fontTools/misc/sstruct.py b/Lib/fontTools/misc/sstruct.py
new file mode 100644
index 0000000..8a2b073
--- /dev/null
+++ b/Lib/fontTools/misc/sstruct.py
@@ -0,0 +1,211 @@
+"""sstruct.py -- SuperStruct
+
+Higher level layer on top of the struct module, enabling to 
+bind names to struct elements. The interface is similar to 
+struct, except the objects passed and returned are not tuples 
+(or argument lists), but dictionaries or instances. 
+
+Just like struct, we use fmt strings to describe a data 
+structure, except we use one line per element. Lines are 
+separated by newlines or semi-colons. Each line contains 
+either one of the special struct characters ('@', '=', '<', 
+'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f'). 
+Repetitions, like the struct module offers them are not useful 
+in this context, except for fixed length strings  (eg. 'myInt:5h' 
+is not allowed but 'myString:5s' is). The 'x' fmt character 
+(pad byte) is treated as 'special', since it is by definition 
+anonymous. Extra whitespace is allowed everywhere.
+
+The sstruct module offers one feature that the "normal" struct
+module doesn't: support for fixed point numbers. These are spelled
+as "n.mF", where n is the number of bits before the point, and m
+the number of bits after the point. Fixed point numbers get 
+converted to floats.
+
+pack(fmt, object):
+	'object' is either a dictionary or an instance (or actually
+	anything that has a __dict__ attribute). If it is a dictionary, 
+	its keys are used for names. If it is an instance, it's 
+	attributes are used to grab struct elements from. Returns
+	a string containing the data.
+
+unpack(fmt, data, object=None)
+	If 'object' is omitted (or None), a new dictionary will be 
+	returned. If 'object' is a dictionary, it will be used to add 
+	struct elements to. If it is an instance (or in fact anything
+	that has a __dict__ attribute), an attribute will be added for 
+	each struct element. In the latter two cases, 'object' itself 
+	is returned.
+
+unpack2(fmt, data, object=None)
+	Convenience function. Same as unpack, except data may be longer 
+	than needed. The returned value is a tuple: (object, leftoverdata).
+
+calcsize(fmt)
+	like struct.calcsize(), but uses our own fmt strings:
+	it returns the size of the data in bytes.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
+import struct
+import re
+
+__version__ = "1.2"
+__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
+
+
+class Error(Exception):
+	"""Raised for malformed sstruct fmt strings."""
+	pass
+
+def pack(fmt, obj):
+	formatstring, names, fixes = getformat(fmt)
+	elements = []
+	if not isinstance(obj, dict):
+		obj = obj.__dict__
+	for name in names:
+		value = obj[name]
+		if name in fixes:
+			# fixed point conversion
+			value = fl2fi(value, fixes[name])
+		elif isinstance(value, basestring):
+			value = tobytes(value)
+		elements.append(value)
+	data = struct.pack(*(formatstring,) + tuple(elements))
+	return data
+
+def unpack(fmt, data, obj=None):
+	if obj is None:
+		obj = {}
+	data = tobytes(data)
+	formatstring, names, fixes = getformat(fmt)
+	if isinstance(obj, dict):
+		d = obj
+	else:
+		d = obj.__dict__
+	elements = struct.unpack(formatstring, data)
+	for i in range(len(names)):
+		name = names[i]
+		value = elements[i]
+		if name in fixes:
+			# fixed point conversion
+			value = fi2fl(value, fixes[name])
+		elif isinstance(value, bytes):
+			try:
+				value = tostr(value)
+			except UnicodeDecodeError:
+				pass
+		d[name] = value
+	return obj
+
+def unpack2(fmt, data, obj=None):
+	length = calcsize(fmt)
+	return unpack(fmt, data[:length], obj), data[length:]
+
+def calcsize(fmt):
+	formatstring, names, fixes = getformat(fmt)
+	return struct.calcsize(formatstring)
+
+
+# matches "name:formatchar" (whitespace is allowed)
+# NOTE(review): these patterns use '\s' inside non-raw strings; consider
+# raw strings to avoid invalid-escape warnings on newer Pythons.
+_elementRE = re.compile(
+		"\s*"							# whitespace
+		"([A-Za-z_][A-Za-z_0-9]*)"		# name (python identifier)
+		"\s*:\s*"						# whitespace : whitespace
+		"([cbBhHiIlLqQfd]|[0-9]+[ps]|"	# formatchar...
+			"([0-9]+)\.([0-9]+)(F))"	# ...formatchar
+		"\s*"							# whitespace
+		"(#.*)?$"						# [comment] + end of string
+	)
+
+# matches the special struct fmt chars and 'x' (pad byte)
+_extraRE = re.compile("\s*([x@=<>!])\s*(#.*)?$")
+
+# matches an "empty" string, possibly containing whitespace and/or a comment
+_emptyRE = re.compile("\s*(#.*)?$")
+
+# struct format char for a fixed-point number of the given total bit width
+_fixedpointmappings = {
+		8: "b",
+		16: "h",
+		32: "l"}
+
+# memoization cache for getformat(), keyed by the raw fmt string
+_formatcache = {}
+
+def getformat(fmt):
+	"""Parse an sstruct fmt string into (formatstring, names, fixes).
+
+	'formatstring' is a plain struct format string, 'names' lists the
+	field names in order, and 'fixes' maps fixed-point field names to
+	their number of fraction bits.  Results are cached in _formatcache.
+	"""
+	try:
+		formatstring, names, fixes = _formatcache[fmt]
+	except KeyError:
+		lines = re.split("[\n;]", fmt)
+		formatstring = ""
+		names = []
+		fixes = {}
+		for line in lines:
+			if _emptyRE.match(line):
+				continue
+			m = _extraRE.match(line)
+			if m:
+				formatchar = m.group(1)
+				# byte-order/alignment chars must come first; 'x' may not
+				if formatchar != 'x' and formatstring:
+					raise Error("a special fmt char must be first")
+			else:
+				m = _elementRE.match(line)
+				if not m:
+					raise Error("syntax error in fmt: '%s'" % line)
+				name = m.group(1)
+				names.append(name)
+				formatchar = m.group(2)
+				if m.group(3):
+					# fixed point
+					before = int(m.group(3))
+					after = int(m.group(4))
+					bits = before + after
+					if bits not in [8, 16, 32]:
+						raise Error("fixed point must be 8, 16 or 32 bits long")
+					formatchar = _fixedpointmappings[bits]
+					assert m.group(5) == "F"
+					fixes[name] = after
+			formatstring = formatstring + formatchar
+		_formatcache[fmt] = formatstring, names, fixes
+	return formatstring, names, fixes
+
+def _test():
+	"""Smoke test: round-trip a sample struct through pack()/unpack()."""
+	fmt = """
+		# comments are allowed
+		>  # big endian (see documentation for struct)
+		# empty lines are allowed:
+		
+		ashort: h
+		along: l
+		abyte: b	# a byte
+		achar: c
+		astr: 5s
+		afloat: f; adouble: d	# multiple "statements" are allowed
+		afixed: 16.16F
+	"""
+	
+	print('size:', calcsize(fmt))
+	
+	class foo(object):
+		pass
+	
+	i = foo()
+	
+	i.ashort = 0x7fff
+	i.along = 0x7fffffff
+	i.abyte = 0x7f
+	i.achar = "a"
+	i.astr = "12345"
+	i.afloat = 0.5
+	i.adouble = 0.5
+	i.afixed = 1.5
+	
+	data = pack(fmt, i)
+	print('data:', repr(data))
+	print(unpack(fmt, data))
+	# unpacking into an instance sets attributes instead of dict keys
+	i2 = foo()
+	unpack(fmt, data, i2)
+	print(vars(i2))
+
+if __name__ == "__main__":
+	_test()
diff --git a/Lib/fontTools/misc/textTools.py b/Lib/fontTools/misc/textTools.py
new file mode 100644
index 0000000..d6e6a2f
--- /dev/null
+++ b/Lib/fontTools/misc/textTools.py
@@ -0,0 +1,75 @@
+"""fontTools.misc.textTools.py -- miscellaneous routines."""
+
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import ast
+import string
+
+
+safeEval = ast.literal_eval
+
def readHex(content):
	"""Convert a list of hex strings to binary data."""
	# non-string chunks (e.g. parsed XML elements) are skipped
	chunks = [chunk for chunk in content if isinstance(chunk, basestring)]
	return deHexStr(strjoin(chunks))
+
def deHexStr(hexdata):
	"""Convert a hex string to binary data."""
	# all whitespace in the input is ignored
	hexdata = strjoin(hexdata.split())
	if len(hexdata) % 2:
		# odd number of digits: pad with a zero nibble at the end
		hexdata = hexdata + "0"
	pieces = [bytechr(int(hexdata[pos:pos + 2], 16))
			  for pos in range(0, len(hexdata), 2)]
	return bytesjoin(pieces)
+
+
def hexStr(data):
	"""Convert binary data to a hex string."""
	digits = string.hexdigits
	pieces = []
	for c in data:
		i = byteord(c)
		# high nibble then low nibble, lowercase
		pieces.append(digits[(i >> 4) & 0xF])
		pieces.append(digits[i & 0xF])
	return ''.join(pieces)
+
+
def num2binary(l, bits=32):
	"""Render integer 'l' as a string of 'bits' binary digits, grouped
	into space-separated bytes, most significant byte first.  Asserts
	that the value actually fits in 'bits' bits."""
	groups = []
	chunk = ""
	for pos in range(bits):
		# peel off the least significant bit, prepending it to the
		# current byte group
		chunk = ("1" if l & 0x1 else "0") + chunk
		l = l >> 1
		if (pos + 1) % 8 == 0:
			groups.append(chunk)
			chunk = ""
	if chunk:
		groups.append(chunk)
	groups.reverse()
	assert l in (0, -1), "number doesn't fit in number of bits"
	return ' '.join(groups)
+
+
def binary2num(bin):
	"""Parse a binary string into an integer.  Whitespace is ignored;
	any character other than "0" counts as a one bit."""
	value = 0
	for digit in "".join(bin.split()):
		value = (value << 1) | (0 if digit == "0" else 1)
	return value
+
+
def caselessSort(alist):
	"""Return a sorted copy of a list. If there are only strings
	in the list, it will not consider case.
	"""
	try:
		# Sort case-insensitively, using the original string as a
		# tie-breaker so items equal ignoring case order stably.
		return sorted(alist, key=lambda a: (a.lower(), a))
	except (TypeError, AttributeError):
		# Non-string items either have no .lower() (AttributeError) or
		# their keys aren't mutually orderable (TypeError); fall back
		# to the default ordering.  The original only caught TypeError,
		# so lists of e.g. ints crashed with AttributeError.
		return sorted(alist)
+
diff --git a/Lib/fontTools/misc/transform.py b/Lib/fontTools/misc/transform.py
new file mode 100644
index 0000000..be7d21a
--- /dev/null
+++ b/Lib/fontTools/misc/transform.py
@@ -0,0 +1,356 @@
+"""Affine 2D transformation matrix class.
+
+The Transform class implements various transformation matrix operations,
+both on the matrix itself, as well as on 2D coordinates.
+
+Transform instances are effectively immutable: all methods that operate on the
+transformation itself always return a new instance. This has as the
+interesting side effect that Transform instances are hashable, ie. they can be
+used as dictionary keys.
+
+This module exports the following symbols:
+
+	Transform -- this is the main class
+	Identity  -- Transform instance set to the identity transformation
+	Offset    -- Convenience function that returns a translating transformation
+	Scale     -- Convenience function that returns a scaling transformation
+
+Examples:
+
+	>>> t = Transform(2, 0, 0, 3, 0, 0)
+	>>> t.transformPoint((100, 100))
+	(200, 300)
+	>>> t = Scale(2, 3)
+	>>> t.transformPoint((100, 100))
+	(200, 300)
+	>>> t.transformPoint((0, 0))
+	(0, 0)
+	>>> t = Offset(2, 3)
+	>>> t.transformPoint((100, 100))
+	(102, 103)
+	>>> t.transformPoint((0, 0))
+	(2, 3)
+	>>> t2 = t.scale(0.5)
+	>>> t2.transformPoint((100, 100))
+	(52.0, 53.0)
+	>>> import math
+	>>> t3 = t2.rotate(math.pi / 2)
+	>>> t3.transformPoint((0, 0))
+	(2.0, 3.0)
+	>>> t3.transformPoint((100, 100))
+	(-48.0, 53.0)
+	>>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2)
+	>>> t.transformPoints([(0, 0), (1, 1), (100, 100)])
+	[(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)]
+	>>>
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+__all__ = ["Transform", "Identity", "Offset", "Scale"]
+
+
+_EPSILON = 1e-15
+_ONE_EPSILON = 1 - _EPSILON
+_MINUS_ONE_EPSILON = -1 + _EPSILON
+
+
+def _normSinCos(v):
+	if abs(v) < _EPSILON:
+		v = 0
+	elif v > _ONE_EPSILON:
+		v = 1
+	elif v < _MINUS_ONE_EPSILON:
+		v = -1
+	return v
+
+
class Transform(object):

	"""2x2 transformation matrix plus offset, a.k.a. Affine transform.
	Transform instances are immutable: all transforming methods, eg.
	rotate(), return a new Transform instance.

	Examples:
		>>> t = Transform()
		>>> t
		<Transform [1 0 0 1 0 0]>
		>>> t.scale(2)
		<Transform [2 0 0 2 0 0]>
		>>> t.scale(2.5, 5.5)
		<Transform [2.5 0.0 0.0 5.5 0 0]>
		>>>
		>>> t.scale(2, 3).transformPoint((100, 100))
		(200, 300)
	"""

	def __init__(self, xx=1, xy=0, yx=0, yy=1, dx=0, dy=0):
		"""Transform's constructor takes six arguments, all of which are
		optional, and can be used as keyword arguments:
			>>> Transform(12)
			<Transform [12 0 0 1 0 0]>
			>>> Transform(dx=12)
			<Transform [1 0 0 1 12 0]>
			>>> Transform(yx=12)
			<Transform [1 0 12 1 0 0]>
			>>>
		"""
		self.__affine = xx, xy, yx, yy, dx, dy

	def transformPoint(self, p):
		"""Transform a point.

		Example:
			>>> t = Transform()
			>>> t = t.scale(2.5, 5.5)
			>>> t.transformPoint((100, 100))
			(250.0, 550.0)
		"""
		(x, y) = p
		xx, xy, yx, yy, dx, dy = self.__affine
		return (xx*x + yx*y + dx, xy*x + yy*y + dy)

	def transformPoints(self, points):
		"""Transform a list of points.

		Example:
			>>> t = Scale(2, 3)
			>>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
			[(0, 0), (0, 300), (200, 300), (200, 0)]
			>>>
		"""
		xx, xy, yx, yy, dx, dy = self.__affine
		return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points]

	def translate(self, x=0, y=0):
		"""Return a new transformation, translated (offset) by x, y.

		Example:
			>>> t = Transform()
			>>> t.translate(20, 30)
			<Transform [1 0 0 1 20 30]>
			>>>
		"""
		return self.transform((1, 0, 0, 1, x, y))

	def scale(self, x=1, y=None):
		"""Return a new transformation, scaled by x, y. The 'y' argument
		may be None, which implies to use the x value for y as well.

		Example:
			>>> t = Transform()
			>>> t.scale(5)
			<Transform [5 0 0 5 0 0]>
			>>> t.scale(5, 6)
			<Transform [5 0 0 6 0 0]>
			>>>
		"""
		if y is None:
			y = x
		return self.transform((x, 0, 0, y, 0, 0))

	def rotate(self, angle):
		"""Return a new transformation, rotated by 'angle' (radians).

		Example:
			>>> import math
			>>> t = Transform()
			>>> t.rotate(math.pi / 2)
			<Transform [0 1 -1 0 0 0]>
			>>>
		"""
		import math
		# snap near-exact sin/cos values so right-angle rotations
		# produce clean integer matrices
		c = _normSinCos(math.cos(angle))
		s = _normSinCos(math.sin(angle))
		return self.transform((c, s, -s, c, 0, 0))

	def skew(self, x=0, y=0):
		"""Return a new transformation, skewed by x and y (radians).

		Example:
			>>> import math
			>>> t = Transform()
			>>> t.skew(math.pi / 4)
			<Transform [1.0 0.0 1.0 1.0 0 0]>
			>>>
		"""
		import math
		return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))

	def transform(self, other):
		"""Return a new transformation, transformed by another
		transformation.

		Example:
			>>> t = Transform(2, 0, 0, 3, 1, 6)
			>>> t.transform((4, 3, 2, 1, 5, 6))
			<Transform [8 9 4 3 11 24]>
			>>>
		"""
		xx1, xy1, yx1, yy1, dx1, dy1 = other
		xx2, xy2, yx2, yy2, dx2, dy2 = self.__affine
		return self.__class__(
				xx1*xx2 + xy1*yx2,
				xx1*xy2 + xy1*yy2,
				yx1*xx2 + yy1*yx2,
				yx1*xy2 + yy1*yy2,
				xx2*dx1 + yx2*dy1 + dx2,
				xy2*dx1 + yy2*dy1 + dy2)

	def reverseTransform(self, other):
		"""Return a new transformation, which is the other transformation
		transformed by self. self.reverseTransform(other) is equivalent to
		other.transform(self).

		Example:
			>>> t = Transform(2, 0, 0, 3, 1, 6)
			>>> t.reverseTransform((4, 3, 2, 1, 5, 6))
			<Transform [8 6 6 3 21 15]>
			>>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
			<Transform [8 6 6 3 21 15]>
			>>>
		"""
		xx1, xy1, yx1, yy1, dx1, dy1 = self.__affine
		xx2, xy2, yx2, yy2, dx2, dy2 = other
		return self.__class__(
				xx1*xx2 + xy1*yx2,
				xx1*xy2 + xy1*yy2,
				yx1*xx2 + yy1*yx2,
				yx1*xy2 + yy1*yy2,
				xx2*dx1 + yx2*dy1 + dx2,
				xy2*dx1 + yy2*dy1 + dy2)

	def inverse(self):
		"""Return the inverse transformation.

		Example:
			>>> t = Identity.translate(2, 3).scale(4, 5)
			>>> t.transformPoint((10, 20))
			(42, 103)
			>>> it = t.inverse()
			>>> it.transformPoint((42, 103))
			(10.0, 20.0)
			>>>
		"""
		if self.__affine == (1, 0, 0, 1, 0, 0):
			return self
		xx, xy, yx, yy, dx, dy = self.__affine
		det = xx*yy - yx*xy
		xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det
		dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy
		return self.__class__(xx, xy, yx, yy, dx, dy)

	def toPS(self):
		"""Return a PostScript representation:
			>>> t = Identity.scale(2, 3).translate(4, 5)
			>>> t.toPS()
			'[2 0 0 3 8 15]'
			>>>
		"""
		return "[%s %s %s %s %s %s]" % self.__affine

	def __len__(self):
		"""Transform instances also behave like sequences of length 6:
			>>> len(Identity)
			6
			>>>
		"""
		return 6

	def __getitem__(self, index):
		"""Transform instances also behave like sequences of length 6:
			>>> list(Identity)
			[1, 0, 0, 1, 0, 0]
			>>> tuple(Identity)
			(1, 0, 0, 1, 0, 0)
			>>>
		"""
		return self.__affine[index]

	def __ne__(self, other):
		result = self.__eq__(other)
		if result is NotImplemented:
			return result
		return not result

	def __eq__(self, other):
		"""Transform instances are comparable:
			>>> t1 = Identity.scale(2, 3).translate(4, 6)
			>>> t2 = Identity.translate(8, 18).scale(2, 3)
			>>> t1 == t2
			True
			>>>

		But beware of floating point rounding errors:
			>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
			>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
			>>> t1
			<Transform [0.2 0.0 0.0 0.3 0.08 0.18]>
			>>> t2
			<Transform [0.2 0.0 0.0 0.3 0.08 0.18]>
			>>> t1 == t2
			False
			>>>
		"""
		try:
			# any sequence of six numbers compares equal to a Transform
			# holding the same values
			xx2, xy2, yx2, yy2, dx2, dy2 = other
		except (TypeError, ValueError):
			# not a 6-element sequence: let the other operand decide
			# instead of raising (the original raised here, so e.g.
			# 't == None' was an error rather than False)
			return NotImplemented
		return self.__affine == (xx2, xy2, yx2, yy2, dx2, dy2)

	def __hash__(self):
		"""Transform instances are hashable, meaning you can use them as
		keys in dictionaries:
			>>> d = {Scale(12, 13): None}
			>>> d
			{<Transform [12 0 0 13 0 0]>: None}
			>>>

		But again, beware of floating point rounding errors:
			>>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
			>>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
			>>> t1
			<Transform [0.2 0.0 0.0 0.3 0.08 0.18]>
			>>> t2
			<Transform [0.2 0.0 0.0 0.3 0.08 0.18]>
			>>> d = {t1: None}
			>>> d
			{<Transform [0.2 0.0 0.0 0.3 0.08 0.18]>: None}
			>>> d[t2]
			Traceback (most recent call last):
			  File "<stdin>", line 1, in ?
			KeyError: <Transform [0.2 0.0 0.0 0.3 0.08 0.18]>
			>>>
		"""
		return hash(self.__affine)

	def __repr__(self):
		return "<%s [%s %s %s %s %s %s]>" % ((self.__class__.__name__,) \
				 + tuple(map(str, self.__affine)))
+
+
Identity = Transform()  # the do-nothing transformation

def Offset(x=0, y=0):
	"""Return the identity transformation offset by x, y.

	Example:
		>>> Offset(2, 3)
		<Transform [1 0 0 1 2 3]>
		>>>
	"""
	return Transform(dx=x, dy=y)
+
def Scale(x, y=None):
	"""Return the identity transformation scaled by x, y. The 'y' argument
	may be None, which implies to use the x value for y as well.

	Example:
		>>> Scale(2, 3)
		<Transform [2 0 0 3 0 0]>
		>>>
	"""
	return Transform(xx=x, yy=x if y is None else y)


if __name__ == "__main__":
	import doctest
	doctest.testmod()
diff --git a/Lib/fontTools/misc/xmlReader.py b/Lib/fontTools/misc/xmlReader.py
new file mode 100644
index 0000000..85dd441
--- /dev/null
+++ b/Lib/fontTools/misc/xmlReader.py
@@ -0,0 +1,132 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools import ttLib
+from fontTools.misc.textTools import safeEval
+from fontTools.ttLib.tables.DefaultTable import DefaultTable
+import os
+
+
+class TTXParseError(Exception): pass
+
+BUFSIZE = 0x4000
+
+
class XMLReader(object):
	
	"""Incrementally parses a TTX (XML) file and fills the given TTFont
	with the tables it describes."""
	
	def __init__(self, fileName, ttFont, progress=None, quiet=False):
		self.ttFont = ttFont
		self.fileName = fileName
		self.progress = progress
		self.quiet = quiet
		self.root = None
		# mirrors the XML element nesting; each entry collects the
		# child content of one open element
		self.contentStack = []
		self.stackSize = 0
	
	def read(self):
		"""Parse self.fileName and populate self.ttFont."""
		if self.progress:
			import stat
			# progress is reported in units of 1/100th of the file size
			self.progress.set(0, os.stat(self.fileName)[stat.ST_SIZE] // 100 or 1)
		# with-statement closes the file even if parsing raises
		# (the original leaked the handle on a parse error)
		with open(self.fileName) as xmlFile:
			self._parseFile(xmlFile)
	
	def _parseFile(self, xmlFile):
		# Feed the file to expat in chunks so progress can be reported.
		from xml.parsers.expat import ParserCreate
		parser = ParserCreate()
		parser.StartElementHandler = self._startElementHandler
		parser.EndElementHandler = self._endElementHandler
		parser.CharacterDataHandler = self._characterDataHandler
		
		pos = 0
		while True:
			chunk = xmlFile.read(BUFSIZE)
			if not chunk:
				# tell expat this was the final chunk
				parser.Parse(chunk, 1)
				break
			pos = pos + len(chunk)
			if self.progress:
				self.progress.set(pos // 100)
			parser.Parse(chunk, 0)
	
	def _startElementHandler(self, name, attrs):
		stackSize = self.stackSize
		self.stackSize = stackSize + 1
		if not stackSize:
			# document root: must be <ttFont>
			if name != "ttFont":
				raise TTXParseError("illegal root tag: %s" % name)
			sfntVersion = attrs.get("sfntVersion")
			if sfntVersion is not None:
				if len(sfntVersion) != 4:
					sfntVersion = safeEval('"' + sfntVersion + '"')
				self.ttFont.sfntVersion = sfntVersion
			self.contentStack.append([])
		elif stackSize == 1:
			# table level; a 'src' attribute pulls in another TTX file
			subFile = attrs.get("src")
			if subFile is not None:
				subFile = os.path.join(os.path.dirname(self.fileName), subFile)
				subReader = XMLReader(subFile, self.ttFont, self.progress, self.quiet)
				subReader.read()
				self.contentStack.append([])
				return
			tag = ttLib.xmlToTag(name)
			msg = "Parsing '%s' table..." % tag
			if self.progress:
				# fixed: was 'setlabel', but progress objects (see
				# ProgressPrinter below) spell this method 'setLabel'
				self.progress.setLabel(msg)
			elif self.ttFont.verbose:
				ttLib.debugmsg(msg)
			else:
				if not self.quiet:
					print(msg)
			if tag == "GlyphOrder":
				tableClass = ttLib.GlyphOrder
			elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
				tableClass = DefaultTable
			else:
				tableClass = ttLib.getTableClass(tag)
				if tableClass is None:
					tableClass = DefaultTable
			if tag == 'loca' and tag in self.ttFont:
				# Special-case the 'loca' table as we need the
				#    original if the 'glyf' table isn't recompiled.
				self.currentTable = self.ttFont[tag]
			else:
				self.currentTable = tableClass(tag)
				self.ttFont[tag] = self.currentTable
			self.contentStack.append([])
		elif stackSize == 2:
			# first element inside a table: remember it as the root of
			# the content tree handed to the table's fromXML()
			self.contentStack.append([])
			self.root = (name, attrs, self.contentStack[-1])
		else:
			# deeper elements: append to the enclosing element's content
			l = []
			self.contentStack[-1].append((name, attrs, l))
			self.contentStack.append(l)
	
	def _characterDataHandler(self, data):
		if self.stackSize > 1:
			self.contentStack[-1].append(data)
	
	def _endElementHandler(self, name):
		self.stackSize = self.stackSize - 1
		del self.contentStack[-1]
		if self.stackSize == 1:
			self.root = None
		elif self.stackSize == 2:
			# a complete table sub-tree has been collected: hand it to
			# the table object for parsing
			name, attrs, content = self.root
			self.currentTable.fromXML(name, attrs, content, self.ttFont)
			self.root = None
+
+
class ProgressPrinter(object):
	
	"""Minimal progress reporter: prints the title and labels to stdout,
	and ignores numeric progress updates."""
	
	def __init__(self, title, maxval=100):
		print(title)
	
	def set(self, val, maxval=None):
		"""Ignore absolute progress updates."""
		pass
	
	def increment(self, val=1):
		"""Ignore relative progress updates."""
		pass
	
	def setLabel(self, text):
		"""Print the new progress label."""
		print(text)
+
diff --git a/Lib/fontTools/misc/xmlWriter.py b/Lib/fontTools/misc/xmlWriter.py
new file mode 100644
index 0000000..b067c2d
--- /dev/null
+++ b/Lib/fontTools/misc/xmlWriter.py
@@ -0,0 +1,185 @@
+"""xmlWriter.py -- Simple XML authoring class"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import sys
+import string
+
+INDENT = "  "
+
+
class XMLWriter(object):
	
	"""Writes an XML file, taking care of indentation, escaping and
	begin/end tag bookkeeping.  Output is UTF-8 encoded."""
	
	def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None):
		# 'fileOrPath' may be a path name or any object with a write() method
		if not hasattr(fileOrPath, "write"):
			try:
				# Python3 has encoding support.
				self.file = open(fileOrPath, "w", encoding="utf-8")
			except TypeError:
				self.file = open(fileOrPath, "w")
		else:
			# assume writable file object
			self.file = fileOrPath
		self.indentwhite = indentwhite
		self.indentlevel = 0
		self.stack = []  # names of currently open tags, for endtag() matching
		self.needindent = 1  # true right after a newline
		self.idlefunc = idlefunc  # called once every 100 newlines, if given
		self.idlecounter = 0
		self._writeraw('<?xml version="1.0" encoding="utf-8"?>')
		self.newline()
	
	def close(self):
		"""Close the underlying file."""
		self.file.close()
	
	def write(self, string, indent=True):
		"""Writes text."""
		self._writeraw(escape(string), indent=indent)

	def writecdata(self, string):
		"""Writes text in a CDATA section."""
		self._writeraw("<![CDATA[" + string + "]]>")

	def write8bit(self, data, strip=False):
		"""Writes a bytes() sequence into the XML, escaping
		non-ASCII bytes.  When this is read in xmlReader,
		the original bytes can be recovered by encoding to
		'latin-1'."""
		self._writeraw(escape8bit(data), strip=strip)

	def write16bit(self, data, strip=False):
		"""Writes big-endian 16-bit data, escaped per 16-bit unit."""
		self._writeraw(escape16bit(data), strip=strip)
	
	def write_noindent(self, string):
		"""Writes text without indentation."""
		self._writeraw(escape(string), indent=False)
	
	def _writeraw(self, data, indent=True, strip=False):
		"""Writes data unescaped (str, or bytes decoded as UTF-8),
		indenting first if a newline was just written."""
		if indent and self.needindent:
			self.file.write(self.indentlevel * self.indentwhite)
			self.needindent = 0
		s = tostr(data, encoding="utf-8")
		if (strip):
			s = s.strip()
		self.file.write(s)
	
	def newline(self):
		"""Ends the current line; also drives the optional idlefunc
		callback (invoked once every 100 lines)."""
		self.file.write("\n")
		self.needindent = 1
		idlecounter = self.idlecounter
		if not idlecounter % 100 and self.idlefunc is not None:
			self.idlefunc()
		self.idlecounter = idlecounter + 1
	
	def comment(self, data):
		"""Writes an XML comment; embedded newlines continue the
		comment on fresh, aligned lines."""
		data = escape(data)
		lines = data.split("\n")
		self._writeraw("<!-- " + lines[0])
		for line in lines[1:]:
			self.newline()
			self._writeraw("     " + line)
		self._writeraw(" -->")
	
	def simpletag(self, _TAG_, *args, **kwargs):
		"""Writes a self-closing tag with the given attributes."""
		attrdata = self.stringifyattrs(*args, **kwargs)
		data = "<%s%s/>" % (_TAG_, attrdata)
		self._writeraw(data)
	
	def begintag(self, _TAG_, *args, **kwargs):
		"""Opens a tag and increases the indentation level; must be
		balanced by a later endtag() with the same name."""
		attrdata = self.stringifyattrs(*args, **kwargs)
		data = "<%s%s>" % (_TAG_, attrdata)
		self._writeraw(data)
		self.stack.append(_TAG_)
		self.indent()
	
	def endtag(self, _TAG_):
		"""Closes the most recently opened tag, which must be _TAG_."""
		assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
		del self.stack[-1]
		self.dedent()
		data = "</%s>" % _TAG_
		self._writeraw(data)
	
	def dumphex(self, data):
		"""Writes binary data as lines of 16 bytes of lowercase hex,
		grouped into 8-character (4-byte) chunks."""
		linelength = 16
		hexlinelength = linelength * 2
		chunksize = 8
		for i in range(0, len(data), linelength):
			hexline = hexStr(data[i:i+linelength])
			line = ""
			white = ""
			for j in range(0, hexlinelength, chunksize):
				line = line + white + hexline[j:j+chunksize]
				white = " "
			self._writeraw(line)
			self.newline()
	
	def indent(self):
		"""Increases the indentation level by one."""
		self.indentlevel = self.indentlevel + 1
	
	def dedent(self):
		"""Decreases the indentation level by one."""
		assert self.indentlevel > 0
		self.indentlevel = self.indentlevel - 1
	
	def stringifyattrs(self, *args, **kwargs):
		"""Formats attributes as a string: either keyword arguments
		(sorted by name) or a single sequence of (name, value) pairs
		(kept in order).  Values are stringified and escaped."""
		if kwargs:
			assert not args
			attributes = sorted(kwargs.items())
		elif args:
			assert len(args) == 1
			attributes = args[0]
		else:
			return ""
		data = ""
		for attr, value in attributes:
			data = data + ' %s="%s"' % (attr, escapeattr(str(value)))
		return data
+	
+
def escape(data):
	"""Escape '&', '<' and '>' for use in XML text content."""
	data = tostr(data, 'utf-8')
	# '&' must be replaced first, so the entities added for '<' and
	# '>' are not themselves re-escaped
	for raw, entity in (("&", "&amp;"), ("<", "&lt;"), (">", "&gt;")):
		data = data.replace(raw, entity)
	return data
+
def escapeattr(data):
	"""Escape a string for use inside a double-quoted XML attribute."""
	return escape(data).replace('"', "&quot;")
+
def escape8bit(data):
	"""Escape a bytes() sequence for XML text: printable ASCII
	characters other than '<', '&', '>' pass through unchanged, every
	other byte becomes a numeric character reference, so the original
	bytes can be recovered by encoding the parsed text to latin-1.
	(The original docstring claimed the input was a Unicode string,
	but the code decodes it from latin-1, i.e. it takes bytes.)"""
	def escapechar(c):
		n = ord(c)
		if 32 <= n <= 127 and c not in "<&>":
			return c
		else:
			return "&#" + repr(n) + ";"
	return "".join(map(escapechar, data.decode('latin-1')))
+
def escape16bit(data):
	"""Escape byte data for XML, treating it as big-endian 16-bit
	units: '&' and '<' become entities, other printable ASCII values
	pass through, everything else becomes a numeric character
	reference."""
	import array
	a = array.array("H")
	# array.fromstring() was removed in Python 3.9; use frombytes()
	# and fall back for Python 2.
	try:
		a.frombytes(data)
	except AttributeError:
		a.fromstring(data)
	if sys.byteorder != "big":
		a.byteswap()
	def escapenum(n, amp=ord("&"), lt=ord("<")):
		if n == amp:
			return "&amp;"
		elif n == lt:
			return "&lt;"
		elif 32 <= n <= 127:
			return chr(n)
		else:
			return "&#" + repr(n) + ";"
	return "".join(map(escapenum, a))
+
+
def hexStr(s):
	"""Return the lowercase hex representation of a byte string."""
	digits = string.hexdigits
	out = []
	for c in s:
		i = byteord(c)
		# high nibble, then low nibble
		out.append(digits[(i >> 4) & 0xF])
		out.append(digits[i & 0xF])
	return ''.join(out)
diff --git a/Lib/fontTools/pens/__init__.py b/Lib/fontTools/pens/__init__.py
new file mode 100644
index 0000000..e001bb2
--- /dev/null
+++ b/Lib/fontTools/pens/__init__.py
@@ -0,0 +1,3 @@
+"""Empty __init__.py file to signal Python this directory is a package.
+(It can't be completely empty since WinZip seems to skip empty files.)
+"""
diff --git a/Lib/fontTools/pens/basePen.py b/Lib/fontTools/pens/basePen.py
new file mode 100644
index 0000000..eee4269
--- /dev/null
+++ b/Lib/fontTools/pens/basePen.py
@@ -0,0 +1,363 @@
+"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects.
+
+The Pen Protocol
+
+A Pen is a kind of object that standardizes the way to "draw" outlines:
+it is a middle man between an outline and a drawing. In other words:
+it is an abstraction for drawing outlines, making sure that outline objects
+don't need to know the details about how and where they're being drawn, and
+that drawings don't need to know the details of how outlines are stored.
+
+The most basic pattern is this:
+
+    outline.draw(pen)  # 'outline' draws itself onto 'pen'
+
+Pens can be used to render outlines to the screen, but also to construct
+new outlines. Eg. an outline object can be both a drawable object (it has a
+draw() method) as well as a pen itself: you *build* an outline using pen
+methods.
+
+The AbstractPen class defines the Pen protocol. It implements almost
+nothing (only no-op closePath() and endPath() methods), but is useful
+for documentation purposes. Subclassing it basically tells the reader:
+"this class implements the Pen protocol.". An example of an AbstractPen
+subclass is fontTools.pens.transformPen.TransformPen.
+
+The BasePen class is a base implementation useful for pens that actually
+draw (for example a pen renders outlines using a native graphics engine).
+BasePen contains a lot of base functionality, making it very easy to build
+a pen that fully conforms to the pen protocol. Note that if you subclass
+BasePen, you _don't_ override moveTo(), lineTo(), etc., but _moveTo(),
+_lineTo(), etc. See the BasePen doc string for details. Examples of
+BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and
+fontTools.pens.cocoaPen.CocoaPen.
+
+Coordinates are usually expressed as (x, y) tuples, but generally any
+sequence of length 2 will do.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+__all__ = ["AbstractPen", "NullPen", "BasePen",
+           "decomposeSuperBezierSegment", "decomposeQuadraticSegment"]
+
+
class AbstractPen(object):

	"""Defines the Pen protocol.  All drawing methods raise
	NotImplementedError except closePath()/endPath(), which are no-ops;
	subclassing this mainly documents "implements the Pen protocol"."""

	def moveTo(self, pt):
		"""Start a new sub path at 'pt'.  Every sub path must be
		finished with a call to either closePath() or endPath().
		"""
		raise NotImplementedError

	def lineTo(self, pt):
		"""Append a straight line segment from the current point to 'pt'."""
		raise NotImplementedError

	def curveTo(self, *points):
		"""Append a cubic bezier segment with an arbitrary number of
		control points; the final point is on-curve, all others are
		off-curve (control) points.

		With exactly two control points, a plain cubic bezier is drawn.
		With one, this degrades to a quadratic segment, and with none
		to a straight line.  With more than two (say n), n-1
		PostScript-style cubic segments are drawn as though they were
		one curve; see decomposeSuperBezierSegment() for the splitting
		rule, which is inspired by NURB splines and is conceptually
		equivalent to the TrueType "implied points" principle (see also
		decomposeQuadraticSegment()).
		"""
		raise NotImplementedError

	def qCurveTo(self, *points):
		"""Append a whole run of quadratic curve segments; the final
		point is on-curve, all others are off-curve.

		Curves are TrueType-style: between each two consecutive
		off-curve points there is an implied on-curve point exactly at
		their midpoint; see decomposeQuadraticSegment().

		The last argument (normally the on-curve point) may be None, to
		support contours that have NO on-curve points (a rarely seen
		TrueType feature).
		"""
		raise NotImplementedError

	def closePath(self):
		"""Finish the current sub path and close it."""
		pass

	def endPath(self):
		"""Finish the current sub path without closing it."""
		pass

	def addComponent(self, glyphName, transformation):
		"""Add a sub glyph, positioned by 'transformation': any
		sequence of 6 numbers describing an affine transform, such as a
		fontTools.misc.transform.Transform instance.
		"""
		raise NotImplementedError
+
+
class NullPen(object):

	"""A pen that silently discards everything drawn into it."""

	def moveTo(self, pt):
		pass

	def lineTo(self, pt):
		pass

	def curveTo(self, *points):
		pass

	def qCurveTo(self, *points):
		pass

	def closePath(self):
		pass

	def endPath(self):
		pass

	def addComponent(self, glyphName, transformation):
		pass
+
+
class BasePen(AbstractPen):

	"""Base class for drawing pens. You must override _moveTo, _lineTo and
	_curveToOne. You may additionally override _closePath, _endPath,
	addComponent and/or _qCurveToOne. You should not override any other
	methods.
	"""

	def __init__(self, glyphSet):
		# glyphSet: mapping of glyph names to drawable glyph objects,
		# used by addComponent() to resolve base glyphs
		self.glyphSet = glyphSet
		# the last on-curve position, or None when no sub path is open
		self.__currentPoint = None

	# must override

	def _moveTo(self, pt):
		raise NotImplementedError

	def _lineTo(self, pt):
		raise NotImplementedError

	def _curveToOne(self, pt1, pt2, pt3):
		raise NotImplementedError

	# may override

	def _closePath(self):
		pass

	def _endPath(self):
		pass

	def _qCurveToOne(self, pt1, pt2):
		"""This method implements the basic quadratic curve type. The
		default implementation delegates the work to the cubic curve
		function. Optionally override with a native implementation.
		"""
		# A quadratic bezier is exactly representable as a cubic whose
		# control points lie 2/3 of the way from each endpoint to the
		# quadratic control point.
		pt0x, pt0y = self.__currentPoint
		pt1x, pt1y = pt1
		pt2x, pt2y = pt2
		mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
		mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
		mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
		mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
		self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)

	def addComponent(self, glyphName, transformation):
		"""This default implementation simply transforms the points
		of the base glyph and draws it onto self.
		"""
		from fontTools.pens.transformPen import TransformPen
		try:
			glyph = self.glyphSet[glyphName]
		except KeyError:
			# missing base glyphs are silently skipped
			pass
		else:
			tPen = TransformPen(self, transformation)
			glyph.draw(tPen)

	# don't override

	def _getCurrentPoint(self):
		"""Return the current point. This is not part of the public
		interface, yet is useful for subclasses.
		"""
		return self.__currentPoint

	def closePath(self):
		self._closePath()
		self.__currentPoint = None

	def endPath(self):
		self._endPath()
		self.__currentPoint = None

	def moveTo(self, pt):
		self._moveTo(pt)
		self.__currentPoint = pt

	def lineTo(self, pt):
		self._lineTo(pt)
		self.__currentPoint = pt

	def curveTo(self, *points):
		n = len(points) - 1  # 'n' is the number of control points
		assert n >= 0
		if n == 2:
			# The common case, we have exactly two BCP's, so this is a standard
			# cubic bezier. Even though decomposeSuperBezierSegment() handles
			# this case just fine, we special-case it anyway since it's so
			# common.
			self._curveToOne(*points)
			self.__currentPoint = points[-1]
		elif n > 2:
			# n is the number of control points; split curve into n-1 cubic
			# bezier segments. The algorithm used here is inspired by NURB
			# splines and the TrueType "implied point" principle, and ensures
			# the smoothest possible connection between two curve segments,
			# with no disruption in the curvature. It is practical since it
			# allows one to construct multiple bezier segments with a much
			# smaller amount of points.
			_curveToOne = self._curveToOne
			for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
				_curveToOne(pt1, pt2, pt3)
				self.__currentPoint = pt3
		elif n == 1:
			# one control point: a quadratic segment
			self.qCurveTo(*points)
		elif n == 0:
			# no control points: a straight line
			self.lineTo(points[0])
		else:
			raise AssertionError("can't get there from here")

	def qCurveTo(self, *points):
		n = len(points) - 1  # 'n' is the number of control points
		assert n >= 0
		if points[-1] is None:
			# Special case for TrueType quadratics: it is possible to
			# define a contour with NO on-curve points. BasePen supports
			# this by allowing the final argument (the expected on-curve
			# point) to be None. We simulate the feature by making the implied
			# on-curve point between the last and the first off-curve points
			# explicit.
			x, y = points[-2]  # last off-curve point
			nx, ny = points[0] # first off-curve point
			impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
			self.__currentPoint = impliedStartPoint
			self._moveTo(impliedStartPoint)
			points = points[:-1] + (impliedStartPoint,)
		if n > 0:
			# Split the string of points into discrete quadratic curve
			# segments. Between any two consecutive off-curve points
			# there's an implied on-curve point exactly in the middle.
			# This is where the segment splits.
			_qCurveToOne = self._qCurveToOne
			for pt1, pt2 in decomposeQuadraticSegment(points):
				_qCurveToOne(pt1, pt2)
				self.__currentPoint = pt2
		else:
			self.lineTo(points[0])
+
+
def decomposeSuperBezierSegment(points):
	"""Split the SuperBezier described by 'points' into a list of plain
	curveto-style cubic bezier segments, returned as (pt1, pt2, pt3)
	tuples.

	'points' must be a sequence of 3 or more (x, y) pairs: the final
	point is the destination on-curve point, the rest are off-curve
	points.  The start point is not supplied.
	"""
	n = len(points) - 1
	assert n > 1
	segments = []
	pt1 = points[0]
	pt2 = pt3 = None
	for i in range(2, n + 1):
		# interpolate between successive control points; the number of
		# divisions follows the NURB-inspired splitting rule
		nDivisions = min(i, 3, n - i + 2)
		for j in range(1, nDivisions):
			factor = j / nDivisions
			prev = points[i - 2]
			cur = points[i - 1]
			temp = (prev[0] + factor * (cur[0] - prev[0]),
					prev[1] + factor * (cur[1] - prev[1]))
			if pt2 is None:
				pt2 = temp
			else:
				# on-curve point implied at the midpoint of two
				# interpolated control points: close this segment
				pt3 = (0.5 * (pt2[0] + temp[0]),
					   0.5 * (pt2[1] + temp[1]))
				segments.append((pt1, pt2, pt3))
				pt1, pt2, pt3 = temp, None, None
	segments.append((pt1, points[-2], points[-1]))
	return segments
+
+
def decomposeQuadraticSegment(points):
	"""Split the quadratic curve segment described by 'points' into a
	list of "atomic" quadratic segments, returned as (pt1, pt2) tuples.

	'points' must be a sequence of 2 or more (x, y) pairs: the final
	point is the destination on-curve point, the rest are off-curve
	points.  The start point is not supplied.  Between each pair of
	consecutive off-curve points an on-curve point is implied at their
	midpoint.
	"""
	n = len(points) - 1
	assert n > 0
	segments = [(points[i],
				 (0.5 * (points[i][0] + points[i + 1][0]),
				  0.5 * (points[i][1] + points[i + 1][1])))
				for i in range(n - 1)]
	segments.append((points[-2], points[-1]))
	return segments
+
+
class _TestPen(BasePen):
	"""Test pen that writes PostScript drawing operators to stdout."""
	def _moveTo(self, pt):
		print("%s %s moveto" % (pt[0], pt[1]))
	def _lineTo(self, pt):
		print("%s %s lineto" % (pt[0], pt[1]))
	def _curveToOne(self, bcp1, bcp2, pt):
		args = (bcp1[0], bcp1[1], bcp2[0], bcp2[1], pt[0], pt[1])
		print("%s %s %s %s %s %s curveto" % args)
	def _closePath(self):
		print("closepath")
+
+
if __name__ == "__main__":
	# Demo: draw a simple path (line plus SuperBezier) and a
	# TrueType-style contour; both print PostScript to stdout.
	pen = _TestPen(None)
	pen.moveTo((0, 0))
	pen.lineTo((0, 100))
	pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
	pen.closePath()

	pen = _TestPen(None)
	# testing the "no on-curve point" scenario
	pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
	pen.closePath()
diff --git a/Lib/fontTools/pens/boundsPen.py b/Lib/fontTools/pens/boundsPen.py
new file mode 100644
index 0000000..4d14a0a
--- /dev/null
+++ b/Lib/fontTools/pens/boundsPen.py
@@ -0,0 +1,95 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect
+from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds
+from fontTools.pens.basePen import BasePen
+
+
+__all__ = ["BoundsPen", "ControlBoundsPen"]
+
+
class ControlBoundsPen(BasePen):

	"""Pen to calculate the "control bounds" of a shape: the bounding
	box of all control points. This may be larger than the true
	bounding box when curves don't place points on their extremes.

	Once the shape has been drawn, the result is available as the
	'bounds' attribute of the pen object, a 4-tuple:
		(xMin, yMin, xMax, yMax)
	It remains None when nothing has been drawn.
	"""

	def __init__(self, glyphSet):
		BasePen.__init__(self, glyphSet)
		self.bounds = None

	def _moveTo(self, pt):
		if self.bounds:
			self.bounds = updateBounds(self.bounds, pt)
		else:
			# First point seen: start with a degenerate box.
			x, y = pt
			self.bounds = (x, y, x, y)

	def _lineTo(self, pt):
		self.bounds = updateBounds(self.bounds, pt)

	def _curveToOne(self, bcp1, bcp2, pt):
		newBounds = self.bounds
		for p in (bcp1, bcp2, pt):
			newBounds = updateBounds(newBounds, p)
		self.bounds = newBounds

	def _qCurveToOne(self, bcp, pt):
		newBounds = self.bounds
		for p in (bcp, pt):
			newBounds = updateBounds(newBounds, p)
		self.bounds = newBounds
+
+
class BoundsPen(ControlBoundsPen):

	"""Pen to calculate the exact bounds of a shape, correct even when
	curves have no points on their extremes. This is somewhat slower
	to compute than the "control bounds" of ControlBoundsPen.

	Once the shape has been drawn, the result is available as the
	'bounds' attribute of the pen object, a 4-tuple:
		(xMin, yMin, xMax, yMax)
	"""

	def _curveToOne(self, bcp1, bcp2, pt):
		newBounds = updateBounds(self.bounds, pt)
		if not (pointInRect(bcp1, newBounds) and pointInRect(bcp2, newBounds)):
			# A control point escapes the current box, so the curve may
			# have an extremum outside it: compute the real curve bounds.
			curveBounds = calcCubicBounds(
					self._getCurrentPoint(), bcp1, bcp2, pt)
			newBounds = unionRect(newBounds, curveBounds)
		self.bounds = newBounds

	def _qCurveToOne(self, bcp, pt):
		newBounds = updateBounds(self.bounds, pt)
		if not pointInRect(bcp, newBounds):
			curveBounds = calcQuadraticBounds(
					self._getCurrentPoint(), bcp, pt)
			newBounds = unionRect(newBounds, curveBounds)
		self.bounds = newBounds
+
+
if __name__ == "__main__":
	# Draw the same outline with both pens to compare the loose
	# control-point bounds against the exact curve bounds.
	def draw(pen):
		pen.moveTo((0, 0))
		pen.lineTo((0, 100))
		pen.qCurveTo((50, 75), (60, 50), (50, 25), (0, 0))
		pen.curveTo((-50, 25), (-60, 50), (-50, 75), (0, 100))
		pen.closePath()

	pen = ControlBoundsPen(None)
	draw(pen)
	print(pen.bounds)

	pen = BoundsPen(None)
	draw(pen)
	print(pen.bounds)
diff --git a/Lib/fontTools/pens/cocoaPen.py b/Lib/fontTools/pens/cocoaPen.py
new file mode 100644
index 0000000..9920ab0
--- /dev/null
+++ b/Lib/fontTools/pens/cocoaPen.py
@@ -0,0 +1,28 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.pens.basePen import BasePen
+
+
+__all__ = ["CocoaPen"]
+
+
class CocoaPen(BasePen):

	"""Pen that draws into a Cocoa (AppKit) NSBezierPath object.

	If no 'path' is supplied, a fresh NSBezierPath is created; the
	resulting path is available as the 'path' attribute.
	"""

	def __init__(self, glyphSet, path=None):
		BasePen.__init__(self, glyphSet)
		if path is None:
			# Imported lazily so this module can be imported on
			# platforms without AppKit installed.
			from AppKit import NSBezierPath
			path = NSBezierPath.bezierPath()
		self.path = path

	def _moveTo(self, p):
		self.path.moveToPoint_(p)

	def _lineTo(self, p):
		self.path.lineToPoint_(p)

	def _curveToOne(self, p1, p2, p3):
		# NSBezierPath takes the destination first, then the two
		# control points.
		self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)

	def _closePath(self):
		self.path.closePath()
diff --git a/Lib/fontTools/pens/pointInsidePen.py b/Lib/fontTools/pens/pointInsidePen.py
new file mode 100644
index 0000000..0b3373f
--- /dev/null
+++ b/Lib/fontTools/pens/pointInsidePen.py
@@ -0,0 +1,191 @@
+"""fontTools.pens.pointInsidePen -- Pen implementing "point inside" testing
+for shapes.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.pens.basePen import BasePen
+from fontTools.misc.bezierTools import solveQuadratic, solveCubic
+
+
+__all__ = ["PointInsidePen"]
+
+
+# working around floating point errors
+EPSILON = 1e-10
+ONE_PLUS_EPSILON = 1 + EPSILON
+ZERO_MINUS_EPSILON = 0 - EPSILON
+
+
class PointInsidePen(BasePen):

	"""This pen implements "point inside" testing: to test whether
	a given point lies inside the shape (black) or outside (white).
	Instances of this class can be recycled, as long as the
	setTestPoint() method is used to set the new point to test.

	Typical usage:

		pen = PointInsidePen(glyphSet, (100, 200))
		outline.draw(pen)
		isInside = pen.getResult()

	Both the even-odd algorithm and the non-zero-winding-rule
	algorithm are implemented. The latter is the default, specify
	True for the evenOdd argument of __init__ or setTestPoint
	to use the even-odd algorithm.
	"""

	# This class implements the classical "shoot a ray from the test point
	# to infinity and count how many times it intersects the outline" (as well
	# as the non-zero variant, where the counter is incremented if the outline
	# intersects the ray in one direction and decremented if it intersects in
	# the other direction).
	# I found an amazingly clear explanation of the subtleties involved in
	# implementing this correctly for polygons here:
	#   http://graphics.cs.ucdavis.edu/~okreylos/TAship/Spring2000/PointInPolygon.html
	# I extended the principles outlined on that page to curves.

	def __init__(self, glyphSet, testPoint, evenOdd=0):
		BasePen.__init__(self, glyphSet)
		self.setTestPoint(testPoint, evenOdd)

	def setTestPoint(self, testPoint, evenOdd=0):
		"""Set the point to test. Call this _before_ the outline gets drawn."""
		self.testPoint = testPoint
		self.evenOdd = evenOdd
		self.firstPoint = None
		# Signed (non-zero rule) or plain (even-odd rule) crossing count.
		self.intersectionCount = 0

	def getResult(self):
		"""After the shape has been drawn, getResult() returns True if the test
		point lies within the (black) shape, and False if it doesn't.
		"""
		if self.firstPoint is not None:
			# always make sure the sub paths are closed; the algorithm only works
			# for closed paths.
			self.closePath()
		if self.evenOdd:
			result = self.intersectionCount % 2
		else:
			result = self.intersectionCount
		return not not result

	def _addIntersection(self, goingUp):
		# Even-odd only cares about parity; non-zero winding counts
		# upward crossings as +1 and downward crossings as -1.
		if self.evenOdd or goingUp:
			self.intersectionCount += 1
		else:
			self.intersectionCount -= 1

	def _moveTo(self, point):
		if self.firstPoint is not None:
			# always make sure the sub paths are closed; the algorithm only works
			# for closed paths.
			self.closePath()
		self.firstPoint = point

	def _lineTo(self, point):
		# Count the intersection (if any) of segment (current -> point)
		# with the horizontal ray from testPoint toward +x infinity.
		x, y = self.testPoint
		x1, y1 = self._getCurrentPoint()
		x2, y2 = point

		# Segment entirely left of the test point: any crossing would be
		# left of it too, so it can't hit the rightward ray.
		if x1 < x and x2 < x:
			return
		# Half-open rule: a crossing requires exactly one endpoint strictly
		# below y (the other at or above), so a vertex exactly on the ray
		# is counted only once.
		if y1 < y and y2 < y:
			return
		if y1 >= y and y2 >= y:
			return

		# dy != 0 is guaranteed here: y1 == y2 would have returned above.
		dx = x2 - x1
		dy = y2 - y1
		t = (y - y1) / dy
		ix = dx * t + x1
		if ix < x:
			# Intersection lies left of the test point.
			return
		self._addIntersection(y2 > y1)

	def _curveToOne(self, bcp1, bcp2, point):
		x, y = self.testPoint
		x1, y1 = self._getCurrentPoint()
		x2, y2 = bcp1
		x3, y3 = bcp2
		x4, y4 = point

		# Cheap rejection: the curve's convex hull can't reach the ray.
		if x1 < x and x2 < x and x3 < x and x4 < x:
			return
		if y1 < y and y2 < y and y3 < y and y4 < y:
			return
		if y1 >= y and y2 >= y and y3 >= y and y4 >= y:
			return

		# Power-basis coefficients of the cubic Bezier's y(t):
		# y(t) = ay*t^3 + by*t^2 + cy*t + dy
		dy = y1
		cy = (y2 - dy) * 3.0
		by = (y3 - y2) * 3.0 - cy
		ay = y4 - dy - cy - by
		solutions = sorted(solveCubic(ay, by, cy, dy - y))
		# Keep only roots within [0, 1], padded by EPSILON for float noise.
		solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON]
		if not solutions:
			return

		# Same power-basis coefficients for x(t).
		dx = x1
		cx = (x2 - dx) * 3.0
		bx = (x3 - x2) * 3.0 - cx
		ax = x4 - dx - cx - bx

		above = y1 >= y
		lastT = None
		for t in solutions:
			if t == lastT:
				# Skip duplicate roots.
				continue
			lastT = t
			t2 = t * t
			t3 = t2 * t

			# Sign of y'(t) tells whether the curve crosses upward or
			# downward; fall back to higher derivatives at stationary points.
			direction = 3*ay*t2 + 2*by*t + cy
			if direction == 0.0:
				direction = 6*ay*t + 2*by
				if direction == 0.0:
					direction = ay
			goingUp = direction > 0.0

			xt = ax*t3 + bx*t2 + cx*t + dx
			if xt < x:
				# Crossing left of the test point: no intersection with the
				# ray, but it still updates which side we're on.
				above = goingUp
				continue

			if t == 0.0:
				if not goingUp:
					self._addIntersection(goingUp)
			elif t == 1.0:
				if not above:
					self._addIntersection(goingUp)
			else:
				if above != goingUp:
					self._addIntersection(goingUp)
				#else:
				#   we're not really intersecting, merely touching the 'top'
			above = goingUp

	def _qCurveToOne_unfinished(self, bcp, point):
		# XXX need to finish this, for now doing it through a cubic
		# (BasePen implements _qCurveTo in terms of a cubic) will
		# have to do.
		x, y = self.testPoint
		x1, y1 = self._getCurrentPoint()
		x2, y2 = bcp
		x3, y3 = point
		c = y1
		b = (y2 - c) * 2.0
		a = y3 - c - b
		solutions = sorted(solveQuadratic(a, b, c - y))
		solutions = [t for t in solutions if ZERO_MINUS_EPSILON <= t <= ONE_PLUS_EPSILON]
		if not solutions:
			return
		# Deliberate placeholder: raises NameError if this unfinished
		# method is ever wired in.
		XXX

	def _closePath(self):
		if self._getCurrentPoint() != self.firstPoint:
			self.lineTo(self.firstPoint)
		self.firstPoint = None

	# An open sub path is treated exactly like a closed one for the
	# purposes of this test.
	_endPath = _closePath
diff --git a/Lib/fontTools/pens/reportLabPen.py b/Lib/fontTools/pens/reportLabPen.py
new file mode 100644
index 0000000..60792f7
--- /dev/null
+++ b/Lib/fontTools/pens/reportLabPen.py
@@ -0,0 +1,72 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.pens.basePen import BasePen
+from reportlab.graphics.shapes import Path
+
+
class ReportLabPen(BasePen):

	"""A pen for drawing onto a reportlab.graphics.shapes.Path object."""

	def __init__(self, glyphSet, path=None):
		BasePen.__init__(self, glyphSet)
		# Create a fresh Path unless the caller supplied one.
		self.path = Path() if path is None else path

	def _moveTo(self, p):
		x, y = p
		self.path.moveTo(x, y)

	def _lineTo(self, p):
		x, y = p
		self.path.lineTo(x, y)

	def _curveToOne(self, p1, p2, p3):
		x1, y1 = p1
		x2, y2 = p2
		x3, y3 = p3
		self.path.curveTo(x1, y1, x2, y2, x3, y3)

	def _closePath(self):
		self.path.closePath()
+
+
if __name__=="__main__":
	# Command-line demo: render a single glyph from a font to a PNG
	# file using reportlab.
	import sys
	if len(sys.argv) < 3:
		print("Usage: reportLabPen.py <OTF/TTF font> <glyphname> [<image file to create>]")
		print("  If no image file name is created, by default <glyphname>.png is created.")
		print("  example: reportLabPen.py Arial.TTF R test.png")
		print("  (The file format will be PNG, regardless of the image file name supplied)")
		sys.exit(0)

	from fontTools.ttLib import TTFont
	from reportlab.lib import colors

	path = sys.argv[1]
	glyphName = sys.argv[2]
	if (len(sys.argv) > 3):
		imageFile = sys.argv[3]
	else:
		imageFile = "%s.png" % glyphName

	font = TTFont(path)  # it would work just as well with fontTools.t1Lib.T1Font
	gs = font.getGlyphSet()
	pen = ReportLabPen(gs, Path(fillColor=colors.red, strokeWidth=5))
	g = gs[glyphName]
	g.draw(pen)

	w, h = g.width, 1000
	from reportlab.graphics import renderPM
	from reportlab.graphics.shapes import Group, Drawing, scale

	# Everything is wrapped in a group to allow transformations.
	g = Group(pen.path)
	g.translate(0, 200)
	g.scale(0.3, 0.3)

	d = Drawing(w, h)
	d.add(g)

	renderPM.drawToFile(d, imageFile, fmt="PNG")
diff --git a/Lib/fontTools/pens/transformPen.py b/Lib/fontTools/pens/transformPen.py
new file mode 100644
index 0000000..9fca009
--- /dev/null
+++ b/Lib/fontTools/pens/transformPen.py
@@ -0,0 +1,65 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.pens.basePen import AbstractPen
+
+
+__all__ = ["TransformPen"]
+
+
class TransformPen(AbstractPen):

	"""Pen that applies an affine transformation to every coordinate
	and forwards the result to another pen.
	"""

	def __init__(self, outPen, transformation):
		"""The 'outPen' argument is another pen object. It will receive the
		transformed coordinates. The 'transformation' argument can either
		be a six-tuple, or a fontTools.misc.transform.Transform object.
		"""
		if not hasattr(transformation, "transformPoint"):
			# Promote a plain six-tuple to a Transform object.
			from fontTools.misc.transform import Transform
			transformation = Transform(*transformation)
		self._transformation = transformation
		self._transformPoint = transformation.transformPoint
		self._outPen = outPen
		self._stack = []

	def moveTo(self, pt):
		self._outPen.moveTo(self._transformPoint(pt))

	def lineTo(self, pt):
		self._outPen.lineTo(self._transformPoint(pt))

	def curveTo(self, *points):
		self._outPen.curveTo(*self._transformPoints(points))

	def qCurveTo(self, *points):
		if points[-1] is None:
			# Preserve the trailing None marking a closed TrueType
			# contour with no on-curve points.
			points = self._transformPoints(points[:-1]) + [None]
		else:
			points = self._transformPoints(points)
		self._outPen.qCurveTo(*points)

	def _transformPoints(self, points):
		transformPoint = self._transformPoint
		return [transformPoint(pt) for pt in points]

	def closePath(self):
		self._outPen.closePath()

	def addComponent(self, glyphName, transformation):
		# Compose our transform with the component's own.
		self._outPen.addComponent(
			glyphName, self._transformation.transform(transformation))
+
+
if __name__ == "__main__":
	# Smoke test: transformed coordinates are printed by _TestPen.
	from fontTools.pens.basePen import _TestPen
	pen = TransformPen(_TestPen(None), (2, 0, 0.5, 2, -10, 0))
	pen.moveTo((0, 0))
	pen.lineTo((0, 100))
	pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
	pen.closePath()
diff --git a/Lib/fontTools/subset.py b/Lib/fontTools/subset.py
new file mode 100644
index 0000000..45bd457
--- /dev/null
+++ b/Lib/fontTools/subset.py
@@ -0,0 +1,2251 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod
+
+"""Python OpenType Layout Subsetter.
+
+Later grown into full OpenType subsetter, supporting all standard tables.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools import ttLib
+from fontTools.ttLib.tables import otTables
+from fontTools.misc import psCharStrings
+from fontTools.pens import basePen
+import sys
+import struct
+import time
+import array
+
+
+def _add_method(*clazzes):
+  """Returns a decorator function that adds a new method to one or
+  more classes."""
+  def wrapper(method):
+    for clazz in clazzes:
+      assert clazz.__name__ != 'DefaultTable', 'Oops, table class not found.'
+      assert not hasattr(clazz, method.__name__), \
+          "Oops, class '%s' has method '%s'." % (clazz.__name__,
+                                                 method.__name__)
+      setattr(clazz, method.__name__, method)
+    return None
+  return wrapper
+
+def _uniq_sort(l):
+  return sorted(set(l))
+
+def _set_update(s, *others):
+  # Jython's set.update only takes one other argument.
+  # Emulate real set.update...
+  for other in others:
+    s.update(other)
+
+
@_add_method(otTables.Coverage)
def intersect(self, glyphs):
  "Returns ascending list of matching coverage values."
  # Coverage indices are positions in self.glyphs, so enumerate order
  # is already ascending.
  return [i for i,g in enumerate(self.glyphs) if g in glyphs]

@_add_method(otTables.Coverage)
def intersect_glyphs(self, glyphs):
  "Returns set of intersecting glyphs."
  return set(g for g in self.glyphs if g in glyphs)

@_add_method(otTables.Coverage)
def subset(self, glyphs):
  "Returns ascending list of remaining coverage values."
  # Record which coverage indices survive before mutating the glyph
  # list in place.
  indices = self.intersect(glyphs)
  self.glyphs = [g for g in self.glyphs if g in glyphs]
  return indices

@_add_method(otTables.Coverage)
def remap(self, coverage_map):
  "Remaps coverage."
  # Keep only the indices listed in coverage_map, in that order.
  self.glyphs = [self.glyphs[i] for i in coverage_map]
+
@_add_method(otTables.ClassDef)
def intersect(self, glyphs):
  "Returns ascending list of matching class values."
  # Class 0 is implicit: any glyph absent from classDefs belongs to it.
  return _uniq_sort(
     ([0] if any(g not in self.classDefs for g in glyphs) else []) +
      [v for g,v in self.classDefs.items() if g in glyphs])

@_add_method(otTables.ClassDef)
def intersect_class(self, glyphs, klass):
  "Returns set of glyphs matching class."
  if klass == 0:
    # Class 0 matches every glyph not explicitly assigned a class.
    return set(g for g in glyphs if g not in self.classDefs)
  return set(g for g,v in self.classDefs.items()
              if v == klass and g in glyphs)

@_add_method(otTables.ClassDef)
def subset(self, glyphs, remap=False):
  "Returns ascending list of remaining classes."
  self.classDefs = dict((g,v) for g,v in self.classDefs.items() if g in glyphs)
  # Note: while class 0 has the special meaning of "not matched",
  # if no glyph will ever /not match/, we can optimize class 0 out too.
  indices = _uniq_sort(
     ([0] if any(g not in self.classDefs for g in glyphs) else []) +
      list(self.classDefs.values()))
  if remap:
    self.remap(indices)
  return indices

@_add_method(otTables.ClassDef)
def remap(self, class_map):
  "Remaps classes."
  # Each old class value is replaced by its position in class_map.
  self.classDefs = dict((g,class_map.index(v))
                         for g,v in self.classDefs.items())
+
@_add_method(otTables.SingleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add to s.glyphs every output glyph reachable from the current set.
  if cur_glyphs is None: cur_glyphs = s.glyphs
  s.glyphs.update(v for g,v in self.mapping.items() if g in cur_glyphs)

@_add_method(otTables.SingleSubst)
def subset_glyphs(self, s):
  # Keep only mappings whose input and output both survive the subset;
  # returns False when the lookup became empty and can be dropped.
  self.mapping = dict((g,v) for g,v in self.mapping.items()
                      if g in s.glyphs and v in s.glyphs)
  return bool(self.mapping)
+
@_add_method(otTables.MultipleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add all substitute sequences whose input glyph is reachable.
  if cur_glyphs is None: cur_glyphs = s.glyphs
  indices = self.Coverage.intersect(cur_glyphs)
  _set_update(s.glyphs, *(self.Sequence[i].Substitute for i in indices))

@_add_method(otTables.MultipleSubst)
def subset_glyphs(self, s):
  # First drop rules whose input glyph is gone...
  indices = self.Coverage.subset(s.glyphs)
  self.Sequence = [self.Sequence[i] for i in indices]
  # Now drop rules generating glyphs we don't want
  indices = [i for i,seq in enumerate(self.Sequence)
       if all(sub in s.glyphs for sub in seq.Substitute)]
  self.Sequence = [self.Sequence[i] for i in indices]
  # Coverage must stay parallel to the Sequence list.
  self.Coverage.remap(indices)
  self.SequenceCount = len(self.Sequence)
  return bool(self.SequenceCount)
+
@_add_method(otTables.AlternateSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # Add every alternate list whose input glyph is reachable.
  if cur_glyphs is None: cur_glyphs = s.glyphs
  _set_update(s.glyphs, *(vlist for g,vlist in self.alternates.items()
                          if g in cur_glyphs))

@_add_method(otTables.AlternateSubst)
def subset_glyphs(self, s):
  # A rule is kept only if its input glyph and *all* of its alternates
  # survive the subset.
  self.alternates = dict((g,vlist)
                         for g,vlist in self.alternates.items()
                         if g in s.glyphs and
                            all(v in s.glyphs for v in vlist))
  return bool(self.alternates)
+
@_add_method(otTables.LigatureSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  # A ligature glyph is reachable only if its first component is in
  # cur_glyphs and all remaining components are already in s.glyphs.
  if cur_glyphs is None: cur_glyphs = s.glyphs
  _set_update(s.glyphs, *([seq.LigGlyph for seq in seqs
                           if all(c in s.glyphs for c in seq.Component)]
                          for g,seqs in self.ligatures.items()
                          if g in cur_glyphs))

@_add_method(otTables.LigatureSubst)
def subset_glyphs(self, s):
  # Three passes: drop rules whose first glyph is gone, then drop
  # sequences whose ligature or components are gone, then drop
  # first-glyph entries left with no sequences.
  self.ligatures = dict((g,v) for g,v in self.ligatures.items()
                        if g in s.glyphs)
  self.ligatures = dict((g,[seq for seq in seqs
                            if seq.LigGlyph in s.glyphs and
                               all(c in s.glyphs for c in seq.Component)])
                         for g,seqs in self.ligatures.items())
  self.ligatures = dict((g,v) for g,v in self.ligatures.items() if v)
  return bool(self.ligatures)
+
@_add_method(otTables.ReverseChainSingleSubst)
def closure_glyphs(self, s, cur_glyphs=None):
  if cur_glyphs is None: cur_glyphs = s.glyphs
  if self.Format == 1:
    indices = self.Coverage.intersect(cur_glyphs)
    # The substitution can only apply if every context coverage also
    # intersects the closed glyph set.
    if(not indices or
        not all(c.intersect(s.glyphs)
                 for c in self.LookAheadCoverage + self.BacktrackCoverage)):
      return
    s.glyphs.update(self.Substitute[i] for i in indices)
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.ReverseChainSingleSubst)
def subset_glyphs(self, s):
  if self.Format == 1:
    indices = self.Coverage.subset(s.glyphs)
    self.Substitute = [self.Substitute[i] for i in indices]
    # Now drop rules generating glyphs we don't want
    indices = [i for i,sub in enumerate(self.Substitute)
         if sub in s.glyphs]
    self.Substitute = [self.Substitute[i] for i in indices]
    self.Coverage.remap(indices)
    self.GlyphCount = len(self.Substitute)
    # Empty context coverages (subset() returning []) also kill the rule.
    return bool(self.GlyphCount and
                 all(c.subset(s.glyphs)
                      for c in self.LookAheadCoverage+self.BacktrackCoverage))
  else:
    assert 0, "unknown format: %s" % self.Format
+
@_add_method(otTables.SinglePos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # One shared value record: survives if any covered glyph survives.
    return len(self.Coverage.subset(s.glyphs))
  elif self.Format == 2:
    # Per-glyph value records: keep them parallel to the coverage.
    indices = self.Coverage.subset(s.glyphs)
    self.Value = [self.Value[i] for i in indices]
    self.ValueCount = len(self.Value)
    return bool(self.ValueCount)
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.SinglePos)
def prune_post_subset(self, options):
  if not options.hinting:
    # Drop device tables
    self.ValueFormat &= ~0x00F0
  return True
+
@_add_method(otTables.PairPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Glyph-pair kerning: prune by first glyph, then by second glyph.
    indices = self.Coverage.subset(s.glyphs)
    self.PairSet = [self.PairSet[i] for i in indices]
    for p in self.PairSet:
      p.PairValueRecord = [r for r in p.PairValueRecord
                           if r.SecondGlyph in s.glyphs]
      p.PairValueCount = len(p.PairValueRecord)
    # Remove empty pairsets
    indices = [i for i,p in enumerate(self.PairSet) if p.PairValueCount]
    self.Coverage.remap(indices)
    self.PairSet = [self.PairSet[i] for i in indices]
    self.PairSetCount = len(self.PairSet)
    return bool(self.PairSetCount)
  elif self.Format == 2:
    # Class-pair kerning: subset both class defs and slice the matrix
    # rows/columns accordingly.
    class1_map = self.ClassDef1.subset(s.glyphs, remap=True)
    class2_map = self.ClassDef2.subset(s.glyphs, remap=True)
    self.Class1Record = [self.Class1Record[i] for i in class1_map]
    for c in self.Class1Record:
      c.Class2Record = [c.Class2Record[i] for i in class2_map]
    self.Class1Count = len(class1_map)
    self.Class2Count = len(class2_map)
    return bool(self.Class1Count and
                 self.Class2Count and
                 self.Coverage.subset(s.glyphs))
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.PairPos)
def prune_post_subset(self, options):
  if not options.hinting:
    # Drop device tables
    self.ValueFormat1 &= ~0x00F0
    self.ValueFormat2 &= ~0x00F0
  return True
+
@_add_method(otTables.CursivePos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Keep entry/exit records parallel to the surviving coverage.
    indices = self.Coverage.subset(s.glyphs)
    self.EntryExitRecord = [self.EntryExitRecord[i] for i in indices]
    self.EntryExitCount = len(self.EntryExitRecord)
    return bool(self.EntryExitCount)
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.Anchor)
def prune_hints(self):
  # Drop device tables / contour anchor point
  self.ensureDecompiled()
  # Format 1 is the plain x/y anchor without hinting extras.
  self.Format = 1

@_add_method(otTables.CursivePos)
def prune_post_subset(self, options):
  if not options.hinting:
    for rec in self.EntryExitRecord:
      # Either anchor may be absent (None) on a record.
      if rec.EntryAnchor: rec.EntryAnchor.prune_hints()
      if rec.ExitAnchor: rec.ExitAnchor.prune_hints()
  return True
+
@_add_method(otTables.MarkBasePos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Subset marks and bases independently, keeping the arrays
    # parallel to their coverages.
    mark_indices = self.MarkCoverage.subset(s.glyphs)
    self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i]
                                 for i in mark_indices]
    self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
    base_indices = self.BaseCoverage.subset(s.glyphs)
    self.BaseArray.BaseRecord = [self.BaseArray.BaseRecord[i]
                                 for i in base_indices]
    self.BaseArray.BaseCount = len(self.BaseArray.BaseRecord)
    # Prune empty classes
    class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
    self.ClassCount = len(class_indices)
    for m in self.MarkArray.MarkRecord:
      # Renumber classes to be dense again.
      m.Class = class_indices.index(m.Class)
    for b in self.BaseArray.BaseRecord:
      b.BaseAnchor = [b.BaseAnchor[i] for i in class_indices]
    return bool(self.ClassCount and
                 self.MarkArray.MarkCount and
                 self.BaseArray.BaseCount)
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.MarkBasePos)
def prune_post_subset(self, options):
    if not options.hinting:
      # Strip hinting extras from every mark and base anchor.
      for m in self.MarkArray.MarkRecord:
        if m.MarkAnchor:
          m.MarkAnchor.prune_hints()
      for b in self.BaseArray.BaseRecord:
        for a in b.BaseAnchor:
          if a:
            a.prune_hints()
    return True
+
@_add_method(otTables.MarkLigPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Subset marks and ligatures independently, keeping the arrays
    # parallel to their coverages.
    mark_indices = self.MarkCoverage.subset(s.glyphs)
    self.MarkArray.MarkRecord = [self.MarkArray.MarkRecord[i]
                                 for i in mark_indices]
    self.MarkArray.MarkCount = len(self.MarkArray.MarkRecord)
    ligature_indices = self.LigatureCoverage.subset(s.glyphs)
    self.LigatureArray.LigatureAttach = [self.LigatureArray.LigatureAttach[i]
                                         for i in ligature_indices]
    self.LigatureArray.LigatureCount = len(self.LigatureArray.LigatureAttach)
    # Prune empty classes
    class_indices = _uniq_sort(v.Class for v in self.MarkArray.MarkRecord)
    self.ClassCount = len(class_indices)
    for m in self.MarkArray.MarkRecord:
      # Renumber classes to be dense again.
      m.Class = class_indices.index(m.Class)
    for l in self.LigatureArray.LigatureAttach:
      for c in l.ComponentRecord:
        c.LigatureAnchor = [c.LigatureAnchor[i] for i in class_indices]
    return bool(self.ClassCount and
                 self.MarkArray.MarkCount and
                 self.LigatureArray.LigatureCount)
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.MarkLigPos)
def prune_post_subset(self, options):
    if not options.hinting:
      # Strip hinting extras from every mark and component anchor.
      for m in self.MarkArray.MarkRecord:
        if m.MarkAnchor:
          m.MarkAnchor.prune_hints()
      for l in self.LigatureArray.LigatureAttach:
        for c in l.ComponentRecord:
          for a in c.LigatureAnchor:
            if a:
              a.prune_hints()
    return True
+
@_add_method(otTables.MarkMarkPos)
def subset_glyphs(self, s):
  if self.Format == 1:
    # Subset attaching marks and base marks independently, keeping the
    # arrays parallel to their coverages.
    mark1_indices = self.Mark1Coverage.subset(s.glyphs)
    self.Mark1Array.MarkRecord = [self.Mark1Array.MarkRecord[i]
                                  for i in mark1_indices]
    self.Mark1Array.MarkCount = len(self.Mark1Array.MarkRecord)
    mark2_indices = self.Mark2Coverage.subset(s.glyphs)
    self.Mark2Array.Mark2Record = [self.Mark2Array.Mark2Record[i]
                                   for i in mark2_indices]
    self.Mark2Array.MarkCount = len(self.Mark2Array.Mark2Record)
    # Prune empty classes
    class_indices = _uniq_sort(v.Class for v in self.Mark1Array.MarkRecord)
    self.ClassCount = len(class_indices)
    for m in self.Mark1Array.MarkRecord:
      # Renumber classes to be dense again.
      m.Class = class_indices.index(m.Class)
    for b in self.Mark2Array.Mark2Record:
      b.Mark2Anchor = [b.Mark2Anchor[i] for i in class_indices]
    return bool(self.ClassCount and
                 self.Mark1Array.MarkCount and
                 self.Mark2Array.MarkCount)
  else:
    assert 0, "unknown format: %s" % self.Format

@_add_method(otTables.MarkMarkPos)
def prune_post_subset(self, options):
    if not options.hinting:
      # Drop device tables or contour anchor point
      for m in self.Mark1Array.MarkRecord:
        if m.MarkAnchor:
          m.MarkAnchor.prune_hints()
      for b in self.Mark2Array.Mark2Record:
        for m in b.Mark2Anchor:
          if m:
            m.prune_hints()
    return True
+
# Default no-op implementations for all non-contextual lookup subtables:
# they never reference other lookups, so there is nothing to remap.
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def subset_lookups(self, lookup_indices):
  pass

# ...and for the same reason they have no nested lookups to report.
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos)
def collect_lookups(self):
  return []

# Default pre-subset pruning: keep the subtable unconditionally.
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ReverseChainSingleSubst,
             otTables.SinglePos,
             otTables.PairPos,
             otTables.CursivePos,
             otTables.MarkBasePos,
             otTables.MarkLigPos,
             otTables.MarkMarkPos,
             otTables.ContextPos,
             otTables.ChainContextPos)
def prune_pre_subset(self, options):
  return True

# Default post-subset pruning for subtables with nothing to prune.
@_add_method(otTables.SingleSubst,
             otTables.MultipleSubst,
             otTables.AlternateSubst,
             otTables.LigatureSubst,
             otTables.ReverseChainSingleSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst,
             otTables.ContextPos,
             otTables.ChainContextPos)
def prune_post_subset(self, options):
  return True

# Substitutions that always map one glyph to exactly one glyph.
@_add_method(otTables.SingleSubst,
             otTables.AlternateSubst,
             otTables.ReverseChainSingleSubst)
def may_have_non_1to1(self):
  return False

# Substitutions that may change the glyph count (or apply in context).
@_add_method(otTables.MultipleSubst,
             otTables.LigatureSubst,
             otTables.ContextSubst,
             otTables.ChainContextSubst)
def may_have_non_1to1(self):
  return True
+
+@_add_method(otTables.ContextSubst,
+             otTables.ChainContextSubst,
+             otTables.ContextPos,
+             otTables.ChainContextPos)
+def __classify_context(self):
+  """Return a ContextHelper abstracting over this subtable's flavor.
+
+  The helper hides the GSUB/GPOS ('Sub'/'Pos'), Chain/non-Chain, and
+  Format 1/2/3 naming and structural differences so the contextual
+  subsetting methods below can share one implementation.  Helpers are
+  cached per (class, Format); returns None for unknown formats.
+  """
+
+  class ContextHelper(object):
+    def __init__(self, klass, Format):
+      # Derive attribute-name prefixes from the subtable class name.
+      if klass.__name__.endswith('Subst'):
+        Typ = 'Sub'
+        Type = 'Subst'
+      else:
+        Typ = 'Pos'
+        Type = 'Pos'
+      if klass.__name__.startswith('Chain'):
+        Chain = 'Chain'
+      else:
+        Chain = ''
+      ChainTyp = Chain+Typ
+
+      self.Typ = Typ
+      self.Type = Type
+      self.Chain = Chain
+      self.ChainTyp = ChainTyp
+
+      self.LookupRecord = Type+'LookupRecord'
+
+      if Format == 1:
+        # Format 1: glyph-based rules.
+        Coverage = lambda r: r.Coverage
+        ChainCoverage = lambda r: r.Coverage
+        ContextData = lambda r:(None,)
+        ChainContextData = lambda r:(None, None, None)
+        RuleData = lambda r:(r.Input,)
+        ChainRuleData = lambda r:(r.Backtrack, r.Input, r.LookAhead)
+        SetRuleData = None
+        ChainSetRuleData = None
+      elif Format == 2:
+        # Format 2: class-based rules.  ContextData and RuleData tuples
+        # are kept in the same (reversed for Chain) order so that
+        # zip(ContextData, RuleData) pairs each ClassDef with its rule
+        # member in the callers below.
+        Coverage = lambda r: r.Coverage
+        ChainCoverage = lambda r: r.Coverage
+        ContextData = lambda r:(r.ClassDef,)
+        ChainContextData = lambda r:(r.LookAheadClassDef,
+                                      r.InputClassDef,
+                                      r.BacktrackClassDef)
+        RuleData = lambda r:(r.Class,)
+        ChainRuleData = lambda r:(r.LookAhead, r.Input, r.Backtrack)
+        def SetRuleData(r, d):(r.Class,) = d
+        def ChainSetRuleData(r, d):(r.LookAhead, r.Input, r.Backtrack) = d
+      elif Format == 3:
+        # Format 3: coverage-based; the subtable itself is the only rule.
+        Coverage = lambda r: r.Coverage[0]
+        ChainCoverage = lambda r: r.InputCoverage[0]
+        ContextData = None
+        ChainContextData = None
+        RuleData = lambda r: r.Coverage
+        ChainRuleData = lambda r:(r.LookAheadCoverage +
+                                   r.InputCoverage +
+                                   r.BacktrackCoverage)
+        SetRuleData = None
+        ChainSetRuleData = None
+      else:
+        assert 0, "unknown format: %s" % Format
+
+      # Select the Chain or plain accessors for this class.
+      if Chain:
+        self.Coverage = ChainCoverage
+        self.ContextData = ChainContextData
+        self.RuleData = ChainRuleData
+        self.SetRuleData = ChainSetRuleData
+      else:
+        self.Coverage = Coverage
+        self.ContextData = ContextData
+        self.RuleData = RuleData
+        self.SetRuleData = SetRuleData
+
+      if Format == 1:
+        self.Rule = ChainTyp+'Rule'
+        self.RuleCount = ChainTyp+'RuleCount'
+        self.RuleSet = ChainTyp+'RuleSet'
+        self.RuleSetCount = ChainTyp+'RuleSetCount'
+        self.Intersect = lambda glyphs, c, r: [r] if r in glyphs else []
+      elif Format == 2:
+        self.Rule = ChainTyp+'ClassRule'
+        self.RuleCount = ChainTyp+'ClassRuleCount'
+        self.RuleSet = ChainTyp+'ClassSet'
+        self.RuleSetCount = ChainTyp+'ClassSetCount'
+        self.Intersect = lambda glyphs, c, r: c.intersect_class(glyphs, r)
+
+        self.ClassDef = 'InputClassDef' if Chain else 'ClassDef'
+        self.ClassDefIndex = 1 if Chain else 0
+        self.Input = 'Input' if Chain else 'Class'
+
+  if self.Format not in [1, 2, 3]:
+    return None  # Don't shoot the messenger; let it go
+  # Cache one helper per format on the subtable class itself.
+  if not hasattr(self.__class__, "__ContextHelpers"):
+    self.__class__.__ContextHelpers = {}
+  if self.Format not in self.__class__.__ContextHelpers:
+    helper = ContextHelper(self.__class__, self.Format)
+    self.__class__.__ContextHelpers[self.Format] = helper
+  return self.__class__.__ContextHelpers[self.Format]
+
+@_add_method(otTables.ContextSubst,
+             otTables.ChainContextSubst)
+def closure_glyphs(self, s, cur_glyphs=None):
+  """Add to s.glyphs every glyph reachable through the lookups this
+  (chain-)contextual substitution can trigger for the current glyph set."""
+  if cur_glyphs is None: cur_glyphs = s.glyphs
+  c = self.__classify_context()
+
+  indices = c.Coverage(self).intersect(s.glyphs)
+  if not indices:
+    return []
+  # NOTE(review): the incoming cur_glyphs is overwritten here with the
+  # coverage intersection against s.glyphs, so the caller-supplied value
+  # is only used transitively — confirm this is intended.
+  cur_glyphs = c.Coverage(self).intersect_glyphs(s.glyphs);
+
+  if self.Format == 1:
+    ContextData = c.ContextData(self)
+    rss = getattr(self, c.RuleSet)
+    rssCount = getattr(self, c.RuleSetCount)
+    for i in indices:
+      if i >= rssCount or not rss[i]: continue
+      for r in getattr(rss[i], c.Rule):
+        if not r: continue
+        # Only recurse into rules that can match with the retained glyphs.
+        if all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
+          for cd,klist in zip(ContextData, c.RuleData(r))):
+          # 'chaos': once a non-1:1 lookup has applied we can no longer
+          # know which glyph sits at each sequence position, so recurse
+          # with the full glyph set from then on.
+          chaos = False
+          for ll in getattr(r, c.LookupRecord):
+            if not ll: continue
+            seqi = ll.SequenceIndex
+            if chaos:
+              pos_glyphs = s.glyphs
+            else:
+              if seqi == 0:
+                pos_glyphs = set([c.Coverage(self).glyphs[i]])
+              else:
+                pos_glyphs = set([r.Input[seqi - 1]])
+            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
+            chaos = chaos or lookup.may_have_non_1to1()
+            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
+  elif self.Format == 2:
+    ClassDef = getattr(self, c.ClassDef)
+    indices = ClassDef.intersect(cur_glyphs)
+    ContextData = c.ContextData(self)
+    rss = getattr(self, c.RuleSet)
+    rssCount = getattr(self, c.RuleSetCount)
+    for i in indices:
+      if i >= rssCount or not rss[i]: continue
+      for r in getattr(rss[i], c.Rule):
+        if not r: continue
+        if all(all(c.Intersect(s.glyphs, cd, k) for k in klist)
+          for cd,klist in zip(ContextData, c.RuleData(r))):
+          chaos = False
+          for ll in getattr(r, c.LookupRecord):
+            if not ll: continue
+            seqi = ll.SequenceIndex
+            if chaos:
+              pos_glyphs = s.glyphs
+            else:
+              if seqi == 0:
+                pos_glyphs = ClassDef.intersect_class(cur_glyphs, i)
+              else:
+                pos_glyphs = ClassDef.intersect_class(s.glyphs,
+                                                      getattr(r, c.Input)[seqi - 1])
+            lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
+            chaos = chaos or lookup.may_have_non_1to1()
+            lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
+  elif self.Format == 3:
+    # Format 3 has a single rule: the subtable itself.
+    if not all(x.intersect(s.glyphs) for x in c.RuleData(self)):
+      return []
+    r = self
+    chaos = False
+    for ll in getattr(r, c.LookupRecord):
+      if not ll: continue
+      seqi = ll.SequenceIndex
+      if chaos:
+        pos_glyphs = s.glyphs
+      else:
+        if seqi == 0:
+          pos_glyphs = cur_glyphs
+        else:
+          pos_glyphs = r.InputCoverage[seqi].intersect_glyphs(s.glyphs)
+      lookup = s.table.LookupList.Lookup[ll.LookupListIndex]
+      chaos = chaos or lookup.may_have_non_1to1()
+      lookup.closure_glyphs(s, cur_glyphs=pos_glyphs)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ContextSubst,
+             otTables.ContextPos,
+             otTables.ChainContextSubst,
+             otTables.ChainContextPos)
+def subset_glyphs(self, s):
+  """Drop rules that reference glyphs/classes outside s.glyphs.
+  Returns True if anything useful remains in this subtable."""
+  c = self.__classify_context()
+
+  if self.Format == 1:
+    indices = self.Coverage.subset(s.glyphs)
+    rss = getattr(self, c.RuleSet)
+    rss = [rss[i] for i in indices]
+    for rs in rss:
+      if not rs: continue
+      ss = getattr(rs, c.Rule)
+      # Keep only rules whose every referenced glyph survived.
+      ss = [r for r in ss
+            if r and all(all(g in s.glyphs for g in glist)
+              for glist in c.RuleData(r))]
+      setattr(rs, c.Rule, ss)
+      setattr(rs, c.RuleCount, len(ss))
+    # Prune empty subrulesets
+    rss = [rs for rs in rss if rs and getattr(rs, c.Rule)]
+    setattr(self, c.RuleSet, rss)
+    setattr(self, c.RuleSetCount, len(rss))
+    return bool(rss)
+  elif self.Format == 2:
+    if not self.Coverage.subset(s.glyphs):
+      return False
+    ContextData = c.ContextData(self)
+    klass_maps = [x.subset(s.glyphs, remap=True) for x in ContextData]
+
+    # Keep rulesets for class numbers that survived.
+    indices = klass_maps[c.ClassDefIndex]
+    rss = getattr(self, c.RuleSet)
+    rssCount = getattr(self, c.RuleSetCount)
+    rss = [rss[i] for i in indices if i < rssCount]
+    del rssCount
+    # Delete, but not renumber, unreachable rulesets.
+    # (Note: the loop variable deliberately shadows the outer 'rss' list.)
+    rss = [rss if i in indices else None for i,rss in enumerate(rss)]
+    while rss and rss[-1] is None:
+      del rss[-1]
+
+    for rs in rss:
+      if not rs: continue
+      ss = getattr(rs, c.Rule)
+      # Keep only rules whose every class number survived the remap.
+      ss = [r for r in ss
+            if r and all(all(k in klass_map for k in klist)
+              for klass_map,klist in zip(klass_maps, c.RuleData(r)))]
+      setattr(rs, c.Rule, ss)
+      setattr(rs, c.RuleCount, len(ss))
+
+      # Remap rule classes
+      for r in ss:
+        c.SetRuleData(r, [[klass_map.index(k) for k in klist]
+               for klass_map,klist in zip(klass_maps, c.RuleData(r))])
+    return bool(rss)
+  elif self.Format == 3:
+    return all(x.subset(s.glyphs) for x in c.RuleData(self))
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ContextSubst,
+             otTables.ChainContextSubst,
+             otTables.ContextPos,
+             otTables.ChainContextPos)
+def subset_lookups(self, lookup_indices):
+  """Drop lookup records whose target lookup was removed, and remap the
+  surviving LookupListIndex values into the new lookup numbering."""
+  c = self.__classify_context()
+
+  if self.Format in [1, 2]:
+    for rs in getattr(self, c.RuleSet):
+      if not rs: continue
+      for r in getattr(rs, c.Rule):
+        if not r: continue
+        setattr(r, c.LookupRecord,
+                 [ll for ll in getattr(r, c.LookupRecord)
+                  if ll and ll.LookupListIndex in lookup_indices])
+        for ll in getattr(r, c.LookupRecord):
+          if not ll: continue
+          ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
+  elif self.Format == 3:
+    # Format 3 keeps its lookup records directly on the subtable.
+    setattr(self, c.LookupRecord,
+             [ll for ll in getattr(self, c.LookupRecord)
+              if ll and ll.LookupListIndex in lookup_indices])
+    for ll in getattr(self, c.LookupRecord):
+      if not ll: continue
+      ll.LookupListIndex = lookup_indices.index(ll.LookupListIndex)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ContextSubst,
+             otTables.ChainContextSubst,
+             otTables.ContextPos,
+             otTables.ChainContextPos)
+def collect_lookups(self):
+  """Return the LookupListIndex of every lookup this subtable references
+  (duplicates possible; callers uniq-sort)."""
+  c = self.__classify_context()
+
+  if self.Format in [1, 2]:
+    return [ll.LookupListIndex
+      for rs in getattr(self, c.RuleSet) if rs
+      for r in getattr(rs, c.Rule) if r
+      for ll in getattr(r, c.LookupRecord) if ll]
+  elif self.Format == 3:
+    return [ll.LookupListIndex
+      for ll in getattr(self, c.LookupRecord) if ll]
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+# Extension subtables (Format 1) are thin wrappers: every subsetting
+# operation simply delegates to the wrapped ExtSubTable.
+
+@_add_method(otTables.ExtensionSubst)
+def closure_glyphs(self, s, cur_glyphs=None):
+  if self.Format == 1:
+    self.ExtSubTable.closure_glyphs(s, cur_glyphs)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ExtensionSubst)
+def may_have_non_1to1(self):
+  if self.Format == 1:
+    return self.ExtSubTable.may_have_non_1to1()
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ExtensionSubst,
+             otTables.ExtensionPos)
+def prune_pre_subset(self, options):
+  if self.Format == 1:
+    return self.ExtSubTable.prune_pre_subset(options)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ExtensionSubst,
+             otTables.ExtensionPos)
+def subset_glyphs(self, s):
+  if self.Format == 1:
+    return self.ExtSubTable.subset_glyphs(s)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ExtensionSubst,
+             otTables.ExtensionPos)
+def prune_post_subset(self, options):
+  if self.Format == 1:
+    return self.ExtSubTable.prune_post_subset(options)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ExtensionSubst,
+             otTables.ExtensionPos)
+def subset_lookups(self, lookup_indices):
+  if self.Format == 1:
+    return self.ExtSubTable.subset_lookups(lookup_indices)
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+@_add_method(otTables.ExtensionSubst,
+             otTables.ExtensionPos)
+def collect_lookups(self):
+  if self.Format == 1:
+    return self.ExtSubTable.collect_lookups()
+  else:
+    assert 0, "unknown format: %s" % self.Format
+
+# Lookup-level operations: fan each call out over the lookup's subtables.
+
+@_add_method(otTables.Lookup)
+def closure_glyphs(self, s, cur_glyphs=None):
+  for st in self.SubTable:
+    if not st: continue
+    st.closure_glyphs(s, cur_glyphs)
+
+@_add_method(otTables.Lookup)
+def prune_pre_subset(self, options):
+  # True if any subtable was modified.
+  ret = False
+  for st in self.SubTable:
+    if not st: continue
+    if st.prune_pre_subset(options): ret = True
+  return ret
+
+@_add_method(otTables.Lookup)
+def subset_glyphs(self, s):
+  # Keep only subtables that still do something after glyph subsetting.
+  self.SubTable = [st for st in self.SubTable if st and st.subset_glyphs(s)]
+  self.SubTableCount = len(self.SubTable)
+  return bool(self.SubTableCount)
+
+@_add_method(otTables.Lookup)
+def prune_post_subset(self, options):
+  ret = False
+  for st in self.SubTable:
+    if not st: continue
+    if st.prune_post_subset(options): ret = True
+  return ret
+
+@_add_method(otTables.Lookup)
+def subset_lookups(self, lookup_indices):
+  # NOTE(review): unlike the siblings above, this does not skip None
+  # subtables — verify subtables can no longer be None at this point.
+  for s in self.SubTable:
+    s.subset_lookups(lookup_indices)
+
+@_add_method(otTables.Lookup)
+def collect_lookups(self):
+  return _uniq_sort(sum((st.collect_lookups() for st in self.SubTable
+                         if st), []))
+
+@_add_method(otTables.Lookup)
+def may_have_non_1to1(self):
+  return any(st.may_have_non_1to1() for st in self.SubTable if st)
+
+# LookupList-level operations.
+
+@_add_method(otTables.LookupList)
+def prune_pre_subset(self, options):
+  ret = False
+  for l in self.Lookup:
+    if not l: continue
+    if l.prune_pre_subset(options): ret = True
+  return ret
+
+@_add_method(otTables.LookupList)
+def subset_glyphs(self, s):
+  "Returns the indices of nonempty lookups."
+  return [i for i,l in enumerate(self.Lookup) if l and l.subset_glyphs(s)]
+
+@_add_method(otTables.LookupList)
+def prune_post_subset(self, options):
+  ret = False
+  for l in self.Lookup:
+    if not l: continue
+    if l.prune_post_subset(options): ret = True
+  return ret
+
+@_add_method(otTables.LookupList)
+def subset_lookups(self, lookup_indices):
+  # Keep the requested lookups (in the given order) and renumber all
+  # cross-lookup references accordingly.
+  self.ensureDecompiled()
+  self.Lookup = [self.Lookup[i] for i in lookup_indices
+                 if i < self.LookupCount]
+  self.LookupCount = len(self.Lookup)
+  for l in self.Lookup:
+    l.subset_lookups(lookup_indices)
+
+@_add_method(otTables.LookupList)
+def closure_lookups(self, lookup_indices):
+  """Return the transitive closure of lookup_indices: lookups reachable
+  from them through contextual lookup records, uniq-sorted."""
+  lookup_indices = _uniq_sort(lookup_indices)
+  recurse = lookup_indices
+  while True:
+    recurse_lookups = sum((self.Lookup[i].collect_lookups()
+                            for i in recurse if i < self.LookupCount), [])
+    recurse_lookups = [l for l in recurse_lookups
+                       if l not in lookup_indices and l < self.LookupCount]
+    if not recurse_lookups:
+      return _uniq_sort(lookup_indices)
+    recurse_lookups = _uniq_sort(recurse_lookups)
+    lookup_indices.extend(recurse_lookups)
+    recurse = recurse_lookups
+
+@_add_method(otTables.Feature)
+def subset_lookups(self, lookup_indices):
+  # Drop references to removed lookups, then renumber the survivors.
+  self.LookupListIndex = [l for l in self.LookupListIndex
+                          if l in lookup_indices]
+  # Now map them.
+  self.LookupListIndex = [lookup_indices.index(l)
+                          for l in self.LookupListIndex]
+  self.LookupCount = len(self.LookupListIndex)
+  # A feature with FeatureParams (e.g. stylistic-set names) is kept
+  # even if it ends up with no lookups.
+  return self.LookupCount or self.FeatureParams
+
+@_add_method(otTables.Feature)
+def collect_lookups(self):
+  # Return a copy so callers can't mutate our list.
+  return self.LookupListIndex[:]
+
+@_add_method(otTables.FeatureList)
+def subset_lookups(self, lookup_indices):
+  "Returns the indices of nonempty features."
+  # Note: Never ever drop feature 'pref', even if it's empty.
+  # HarfBuzz chooses shaper for Khmer based on presence of this
+  # feature.  See thread at:
+  # http://lists.freedesktop.org/archives/harfbuzz/2012-November/002660.html
+  feature_indices = [i for i,f in enumerate(self.FeatureRecord)
+                     if (f.Feature.subset_lookups(lookup_indices) or
+                         f.FeatureTag == 'pref')]
+  self.subset_features(feature_indices)
+  return feature_indices
+
+@_add_method(otTables.FeatureList)
+def collect_lookups(self, feature_indices):
+  return _uniq_sort(sum((self.FeatureRecord[i].Feature.collect_lookups()
+                         for i in feature_indices
+                          if i < self.FeatureCount), []))
+
+@_add_method(otTables.FeatureList)
+def subset_features(self, feature_indices):
+  self.ensureDecompiled()
+  self.FeatureRecord = [self.FeatureRecord[i] for i in feature_indices]
+  self.FeatureCount = len(self.FeatureRecord)
+  return bool(self.FeatureCount)
+
+@_add_method(otTables.DefaultLangSys,
+             otTables.LangSys)
+def subset_features(self, feature_indices):
+  # 65535 (0xFFFF) is the OpenType sentinel for "no required feature".
+  if self.ReqFeatureIndex in feature_indices:
+    self.ReqFeatureIndex = feature_indices.index(self.ReqFeatureIndex)
+  else:
+    self.ReqFeatureIndex = 65535
+  self.FeatureIndex = [f for f in self.FeatureIndex if f in feature_indices]
+  # Now map them.
+  # (The 'if f in feature_indices' here is redundant after the filter
+  # above, but harmless.)
+  self.FeatureIndex = [feature_indices.index(f) for f in self.FeatureIndex
+                       if f in feature_indices]
+  self.FeatureCount = len(self.FeatureIndex)
+  return bool(self.FeatureCount or self.ReqFeatureIndex != 65535)
+
+@_add_method(otTables.DefaultLangSys,
+             otTables.LangSys)
+def collect_features(self):
+  feature_indices = self.FeatureIndex[:]
+  if self.ReqFeatureIndex != 65535:
+    feature_indices.append(self.ReqFeatureIndex)
+  return _uniq_sort(feature_indices)
+
+@_add_method(otTables.Script)
+def subset_features(self, feature_indices):
+  if(self.DefaultLangSys and
+      not self.DefaultLangSys.subset_features(feature_indices)):
+    self.DefaultLangSys = None
+  self.LangSysRecord = [l for l in self.LangSysRecord
+                        if l.LangSys.subset_features(feature_indices)]
+  self.LangSysCount = len(self.LangSysRecord)
+  return bool(self.LangSysCount or self.DefaultLangSys)
+
+@_add_method(otTables.Script)
+def collect_features(self):
+  feature_indices = [l.LangSys.collect_features() for l in self.LangSysRecord]
+  if self.DefaultLangSys:
+    feature_indices.append(self.DefaultLangSys.collect_features())
+  return _uniq_sort(sum(feature_indices, []))
+
+@_add_method(otTables.ScriptList)
+def subset_features(self, feature_indices):
+  self.ScriptRecord = [s for s in self.ScriptRecord
+                       if s.Script.subset_features(feature_indices)]
+  self.ScriptCount = len(self.ScriptRecord)
+  return bool(self.ScriptCount)
+
+@_add_method(otTables.ScriptList)
+def collect_features(self):
+  return _uniq_sort(sum((s.Script.collect_features()
+                         for s in self.ScriptRecord), []))
+
+@_add_method(ttLib.getTableClass('GSUB'))
+def closure_glyphs(self, s):
+  """Grow s.glyphs with every glyph reachable through GSUB, iterating
+  until a fixed point is reached."""
+  s.table = self.table
+  if self.table.ScriptList:
+    feature_indices = self.table.ScriptList.collect_features()
+  else:
+    feature_indices = []
+  if self.table.FeatureList:
+    lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
+  else:
+    lookup_indices = []
+  if self.table.LookupList:
+    # Re-run until no new glyphs are added (substitution chains).
+    while True:
+      orig_glyphs = s.glyphs.copy()
+      for i in lookup_indices:
+        if i >= self.table.LookupList.LookupCount: continue
+        if not self.table.LookupList.Lookup[i]: continue
+        self.table.LookupList.Lookup[i].closure_glyphs(s)
+      if orig_glyphs == s.glyphs:
+        break
+  del s.table
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def subset_glyphs(self, s):
+  # Subset against the post-GSUB-closure glyph set.
+  s.glyphs = s.glyphs_gsubed
+  if self.table.LookupList:
+    lookup_indices = self.table.LookupList.subset_glyphs(s)
+  else:
+    lookup_indices = []
+  self.subset_lookups(lookup_indices)
+  self.prune_lookups()
+  return True
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def subset_lookups(self, lookup_indices):
+  """Retains specified lookups, then removes empty features, language
+     systems, and scripts."""
+  if self.table.LookupList:
+    self.table.LookupList.subset_lookups(lookup_indices)
+  if self.table.FeatureList:
+    feature_indices = self.table.FeatureList.subset_lookups(lookup_indices)
+  else:
+    feature_indices = []
+  if self.table.ScriptList:
+    self.table.ScriptList.subset_features(feature_indices)
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def prune_lookups(self):
+  "Remove unreferenced lookups"
+  if self.table.ScriptList:
+    feature_indices = self.table.ScriptList.collect_features()
+  else:
+    feature_indices = []
+  if self.table.FeatureList:
+    lookup_indices = self.table.FeatureList.collect_lookups(feature_indices)
+  else:
+    lookup_indices = []
+  if self.table.LookupList:
+    # Include lookups reachable only through contextual lookup records.
+    lookup_indices = self.table.LookupList.closure_lookups(lookup_indices)
+  else:
+    lookup_indices = []
+  self.subset_lookups(lookup_indices)
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def subset_feature_tags(self, feature_tags):
+  # Keep only features whose tag is in feature_tags.
+  if self.table.FeatureList:
+    feature_indices = [i for i,f in
+                       enumerate(self.table.FeatureList.FeatureRecord)
+                       if f.FeatureTag in feature_tags]
+    self.table.FeatureList.subset_features(feature_indices)
+  else:
+    feature_indices = []
+  if self.table.ScriptList:
+    self.table.ScriptList.subset_features(feature_indices)
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def prune_features(self):
+  "Remove unreferenced features"
+  if self.table.ScriptList:
+    feature_indices = self.table.ScriptList.collect_features()
+  else:
+    feature_indices = []
+  if self.table.FeatureList:
+    self.table.FeatureList.subset_features(feature_indices)
+  if self.table.ScriptList:
+    self.table.ScriptList.subset_features(feature_indices)
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def prune_pre_subset(self, options):
+  # Drop undesired features
+  if '*' not in options.layout_features:
+    self.subset_feature_tags(options.layout_features)
+  # Drop unreferenced lookups
+  self.prune_lookups()
+  # Prune lookups themselves
+  if self.table.LookupList:
+    self.table.LookupList.prune_pre_subset(options);
+  return True
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def remove_redundant_langsys(self):
+  """Delete any LangSys record that is feature-for-feature identical to
+  its script's DefaultLangSys, since it adds no information."""
+  table = self.table
+  if not table.ScriptList or not table.FeatureList:
+    return
+
+  features = table.FeatureList.FeatureRecord
+
+  for s in table.ScriptList.ScriptRecord:
+    d = s.Script.DefaultLangSys
+    if not d:
+      continue
+    # Iterate over a copy since we may remove entries while looping.
+    for lr in s.Script.LangSysRecord[:]:
+      l = lr.LangSys
+      # Compare d and l
+      if len(d.FeatureIndex) != len(l.FeatureIndex):
+        continue
+      if (d.ReqFeatureIndex == 65535) != (l.ReqFeatureIndex == 65535):
+        continue
+
+      if d.ReqFeatureIndex != 65535:
+        if features[d.ReqFeatureIndex] != features[l.ReqFeatureIndex]:
+          continue
+
+      for i in range(len(d.FeatureIndex)):
+        if features[d.FeatureIndex[i]] != features[l.FeatureIndex[i]]:
+          break
+      else:
+        # LangSys and default are equal; delete LangSys
+        s.Script.LangSysRecord.remove(lr)
+
+@_add_method(ttLib.getTableClass('GSUB'),
+             ttLib.getTableClass('GPOS'))
+def prune_post_subset(self, options):
+  table = self.table
+
+  # LookupList looks good.  Just prune lookups themselves
+  if table.LookupList:
+    table.LookupList.prune_post_subset(options);
+    # XXX Next two lines disabled because OTS is stupid and
+    # doesn't like NULL offsets here.
+    #if not table.LookupList.Lookup:
+    #  table.LookupList = None
+
+  if not table.LookupList:
+    table.FeatureList = None
+
+  if table.FeatureList:
+    self.remove_redundant_langsys()
+    # Remove unreferenced features
+    self.prune_features()
+
+  # XXX Next two lines disabled because OTS is stupid and
+  # doesn't like NULL offsets here.
+  #if table.FeatureList and not table.FeatureList.FeatureRecord:
+  #  table.FeatureList = None
+
+  # Never drop scripts themselves as them just being available
+  # holds semantic significance.
+  # XXX Next two lines disabled because OTS is stupid and
+  # doesn't like NULL offsets here.
+  #if table.ScriptList and not table.ScriptList.ScriptRecord:
+  #  table.ScriptList = None
+
+  return True
+
+@_add_method(ttLib.getTableClass('GDEF'))
+def subset_glyphs(self, s):
+  """Subset GDEF's per-glyph data (lig carets, attach points, class
+  definitions, mark glyph sets) to the retained glyph set."""
+  glyphs = s.glyphs_gsubed
+  table = self.table
+  if table.LigCaretList:
+    indices = table.LigCaretList.Coverage.subset(glyphs)
+    table.LigCaretList.LigGlyph = [table.LigCaretList.LigGlyph[i]
+                                   for i in indices]
+    table.LigCaretList.LigGlyphCount = len(table.LigCaretList.LigGlyph)
+  if table.MarkAttachClassDef:
+    table.MarkAttachClassDef.classDefs = dict((g,v) for g,v in
+                                              table.MarkAttachClassDef.
+                                                classDefs.items()
+                                              if g in glyphs)
+  if table.GlyphClassDef:
+    table.GlyphClassDef.classDefs = dict((g,v) for g,v in
+                                         table.GlyphClassDef.
+                                           classDefs.items()
+                                         if g in glyphs)
+  if table.AttachList:
+    indices = table.AttachList.Coverage.subset(glyphs)
+    GlyphCount = table.AttachList.GlyphCount
+    table.AttachList.AttachPoint = [table.AttachList.AttachPoint[i]
+                                    for i in indices
+                                    if i < GlyphCount]
+    table.AttachList.GlyphCount = len(table.AttachList.AttachPoint)
+  # MarkGlyphSetsDef only exists from GDEF version 1.2 on.
+  if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef:
+    for coverage in table.MarkGlyphSetsDef.Coverage:
+      coverage.subset(glyphs)
+    # TODO: The following is disabled.  If enabling, we need to go fixup all
+    # lookups that use MarkFilteringSet and map their set.
+    #indices = table.MarkGlyphSetsDef.Coverage = [c for c in table.MarkGlyphSetsDef.Coverage if c.glyphs]
+  return True
+
+@_add_method(ttLib.getTableClass('GDEF'))
+def prune_post_subset(self, options):
+  # Drop sub-structures that became empty; keep the table only if
+  # something remains.
+  table = self.table
+  # XXX check these against OTS
+  if table.LigCaretList and not table.LigCaretList.LigGlyphCount:
+    table.LigCaretList = None
+  if table.MarkAttachClassDef and not table.MarkAttachClassDef.classDefs:
+    table.MarkAttachClassDef = None
+  if table.GlyphClassDef and not table.GlyphClassDef.classDefs:
+    table.GlyphClassDef = None
+  if table.AttachList and not table.AttachList.GlyphCount:
+    table.AttachList = None
+  if hasattr(table, "MarkGlyphSetsDef") and table.MarkGlyphSetsDef and not table.MarkGlyphSetsDef.Coverage:
+    table.MarkGlyphSetsDef = None
+    # Downgrade version 1.2 to 1.0 when mark glyph sets are gone.
+    # NOTE(review): 0x00010002/0x10000 requires true division to equal
+    # 1.00003...; under integer division it is 1 — verify a
+    # 'from __future__ import division' (or py3) is in effect.
+    if table.Version == 0x00010002/0x10000:
+      table.Version = 1.0
+  return bool(table.LigCaretList or
+              table.MarkAttachClassDef or
+              table.GlyphClassDef or
+              table.AttachList or
+              (table.Version >= 0x00010002/0x10000 and table.MarkGlyphSetsDef))
+
+@_add_method(ttLib.getTableClass('kern'))
+def prune_pre_subset(self, options):
+  # Prune unknown kern table types (subtables without a parsed kernTable)
+  self.kernTables = [t for t in self.kernTables if hasattr(t, 'kernTable')]
+  return bool(self.kernTables)
+
+@_add_method(ttLib.getTableClass('kern'))
+def subset_glyphs(self, s):
+  # Keep only kerning pairs where both glyphs survive.
+  glyphs = s.glyphs_gsubed
+  for t in self.kernTables:
+    t.kernTable = dict(((a,b),v) for (a,b),v in t.kernTable.items()
+                       if a in glyphs and b in glyphs)
+  self.kernTables = [t for t in self.kernTables if t.kernTable]
+  return bool(self.kernTables)
+
+@_add_method(ttLib.getTableClass('vmtx'))
+def subset_glyphs(self, s):
+  self.metrics = dict((g,v) for g,v in self.metrics.items() if g in s.glyphs)
+  return bool(self.metrics)
+
+@_add_method(ttLib.getTableClass('hmtx'))
+def subset_glyphs(self, s):
+  self.metrics = dict((g,v) for g,v in self.metrics.items() if g in s.glyphs)
+  return True # Required table
+
+@_add_method(ttLib.getTableClass('hdmx'))
+def subset_glyphs(self, s):
+  # hdmx maps ppem size -> {glyph: advance}; filter each size's dict.
+  self.hdmx = dict((sz,dict((g,v) for g,v in l.items() if g in s.glyphs))
+                   for sz,l in self.hdmx.items())
+  return bool(self.hdmx)
+
+@_add_method(ttLib.getTableClass('VORG'))
+def subset_glyphs(self, s):
+  self.VOriginRecords = dict((g,v) for g,v in self.VOriginRecords.items()
+                             if g in s.glyphs)
+  self.numVertOriginYMetrics = len(self.VOriginRecords)
+  return True  # Never drop; has default metrics
+
+@_add_method(ttLib.getTableClass('post'))
+def prune_pre_subset(self, options):
+  # post format 3.0 carries no glyph names at all.
+  if not options.glyph_names:
+    self.formatType = 3.0
+  return True # Required table
+
+@_add_method(ttLib.getTableClass('post'))
+def subset_glyphs(self, s):
+  self.extraNames = []  # This seems to do it
+  return True # Required table
+
+@_add_method(ttLib.getTableModule('glyf').Glyph)
+def remapComponentsFast(self, indices):
+  """Rewrite component glyph IDs inside the raw composite-glyph binary
+  (self.data) to the new glyph order given by `indices` (a list of old
+  glyph IDs in new order), without fully parsing the glyph."""
+  # numberOfContours >= 0 means a simple glyph; nothing to remap.
+  if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
+    return  # Not composite
+  data = array.array("B", self.data)
+  i = 10  # skip numberOfContours + bounding box (2 + 4*2 bytes)
+  more = 1
+  while more:
+    flags =(data[i] << 8) | data[i+1]
+    glyphID =(data[i+2] << 8) | data[i+3]
+    # Remap old glyph ID to its position in the new order.
+    glyphID = indices.index(glyphID)
+    data[i+2] = glyphID >> 8
+    data[i+3] = glyphID & 0xFF
+    i += 4
+    flags = int(flags)
+
+    # Skip this component's argument/transform payload.
+    if flags & 0x0001: i += 4  # ARG_1_AND_2_ARE_WORDS
+    else: i += 2
+    if flags & 0x0008: i += 2  # WE_HAVE_A_SCALE
+    elif flags & 0x0040: i += 4  # WE_HAVE_AN_X_AND_Y_SCALE
+    elif flags & 0x0080: i += 8  # WE_HAVE_A_TWO_BY_TWO
+    more = flags & 0x0020  # MORE_COMPONENTS
+
+  self.data = data.tostring()
+
+@_add_method(ttLib.getTableClass('glyf'))
+def closure_glyphs(self, s):
+  # Pull in every glyph referenced as a component of a kept composite
+  # glyph, iterating until no new components are found.
+  decompose = s.glyphs
+  while True:
+    components = set()
+    for g in decompose:
+      if g not in self.glyphs:
+        continue
+      gl = self.glyphs[g]
+      for c in gl.getComponentNames(self):
+        if c not in s.glyphs:
+          components.add(c)
+    components = set(c for c in components if c not in s.glyphs)
+    if not components:
+      break
+    decompose = components
+    s.glyphs.update(components)
+
+@_add_method(ttLib.getTableClass('glyf'))
+def prune_pre_subset(self, options):
+  # Empty out .notdef's outline if requested (first glyph in glyph order).
+  if options.notdef_glyph and not options.notdef_outline:
+    g = self[self.glyphOrder[0]]
+    # Yay, easy!
+    g.__dict__.clear()
+    g.data = ""
+  return True
+
+@_add_method(ttLib.getTableClass('glyf'))
+def subset_glyphs(self, s):
+  self.glyphs = dict((g,v) for g,v in self.glyphs.items() if g in s.glyphs)
+  indices = [i for i,g in enumerate(self.glyphOrder) if g in s.glyphs]
+  for v in self.glyphs.values():
+    # Glyphs still in binary form get their component IDs patched;
+    # already-parsed glyphs are remapped at compile time.
+    if hasattr(v, "data"):
+      v.remapComponentsFast(indices)
+    else:
+      pass  # No need
+  self.glyphOrder = [g for g in self.glyphOrder if g in s.glyphs]
+  # Don't drop empty 'glyf' tables, otherwise 'loca' doesn't get subset.
+  return True
+
+@_add_method(ttLib.getTableClass('glyf'))
+def prune_post_subset(self, options):
+  if not options.hinting:
+    for v in self.glyphs.values():
+      v.removeHinting()
+  return True
+
+@_add_method(ttLib.getTableClass('CFF '))
+def prune_pre_subset(self, options):
+  cff = self.cff
+  # CFF table must have one font only
+  cff.fontNames = cff.fontNames[:1]
+
+  if options.notdef_glyph and not options.notdef_outline:
+    # Replace .notdef's charstring with a bare 'endchar'.
+    for fontname in cff.keys():
+      font = cff[fontname]
+      c,_ = font.CharStrings.getItemAndSelector('.notdef')
+      # XXX we should preserve the glyph width
+      c.bytecode = '\x0e' # endchar
+      c.program = None
+
+  return True # bool(cff.fontNames)
+
+@_add_method(ttLib.getTableClass('CFF '))
+def subset_glyphs(self, s):
+  """Drop charstrings (and FDSelect entries) of removed glyphs and
+  renumber the survivors."""
+  cff = self.cff
+  for fontname in cff.keys():
+    font = cff[fontname]
+    cs = font.CharStrings
+
+    # Load all glyphs
+    # (forces lazy charstrings to be fetched before we rewrite the index)
+    for g in font.charset:
+      if g not in s.glyphs: continue
+      c,sel = cs.getItemAndSelector(g)
+
+    if cs.charStringsAreIndexed:
+      indices = [i for i,g in enumerate(font.charset) if g in s.glyphs]
+      csi = cs.charStringsIndex
+      csi.items = [csi.items[i] for i in indices]
+      csi.count = len(csi.items)
+      del csi.file, csi.offsets
+      if hasattr(font, "FDSelect"):
+        sel = font.FDSelect
+        sel.format = None
+        sel.gidArray = [sel.gidArray[i] for i in indices]
+      # Remap glyph-name -> charstring-index mapping to the new indices.
+      cs.charStrings = dict((g,indices.index(v))
+                            for g,v in cs.charStrings.items()
+                            if g in s.glyphs)
+    else:
+      cs.charStrings = dict((g,v)
+                            for g,v in cs.charStrings.items()
+                            if g in s.glyphs)
+    font.charset = [g for g in font.charset if g in s.glyphs]
+    font.numGlyphs = len(font.charset)
+
+  return True # any(cff[fontname].numGlyphs for fontname in cff.keys())
+
@_add_method(psCharStrings.T2CharString)
def subset_subroutines(self, subrs, gsubrs):
  """Rewrite callsubr/callgsubr operands after subroutine renumbering.

  Each subroutine index carries '_used' (sorted old absolute indices of
  retained subrs) plus '_old_bias'/'_new_bias'; the biased operand in
  the program is translated from the old numbering to the new one.
  """
  program = self.program
  assert len(program)
  # Map each call operator to the subroutine index it refers into.
  renumber = {'callsubr': subrs, 'callgsubr': gsubrs}
  # The operand precedes its operator, hence the look-behind at pos-1.
  for pos in range(1, len(program)):
    index = renumber.get(program[pos])
    if index is None:
      continue
    operand = program[pos-1]
    assert isinstance(operand, int)
    # biased operand -> absolute -> position among kept subrs -> new biased.
    program[pos-1] = index._used.index(operand + index._old_bias) - index._new_bias
+
@_add_method(psCharStrings.T2CharString)
def drop_hints(self):
  """Remove hinting from this charstring's program.

  Relies on the analysis stored in self._hints by
  _DehintingT2Decompiler.execute(); deletes that attribute when done.
  """
  hints = self._hints

  if hints.has_hint:
    # Chop off everything up to (and including) the last hint-related op.
    self.program = self.program[hints.last_hint:]
    if hasattr(self, 'width'):
      # Insert width back if needed
      if self.width != self.private.defaultWidthX:
        self.program.insert(0, self.width - self.private.nominalWidthX)

  if hints.has_hintmask:
    # Strip hintmask/cntrmask operators together with their mask argument.
    i = 0
    p = self.program
    while i < len(p):
      if p[i] in ['hintmask', 'cntrmask']:
        assert i + 1 <= len(p)
        del p[i:i+2]
        continue
      i += 1

  # TODO: we currently don't drop calls to "empty" subroutines.

  # A charstring must never end up empty.
  assert len(self.program)

  del self._hints
+
class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler):

  """T2 decompiler that records which local/global subroutines are
  called, accumulating their absolute indices in each subr index's
  '_used' set (created here if missing)."""

  def __init__(self, localSubrs, globalSubrs):
    psCharStrings.SimpleT2Decompiler.__init__(self,
                                              localSubrs,
                                              globalSubrs)
    for subrs in [localSubrs, globalSubrs]:
      if subrs and not hasattr(subrs, "_used"):
        subrs._used = set()

  def op_callsubr(self, index):
    # Operand on the stack is biased; store the absolute subr index.
    self.localSubrs._used.add(self.operandStack[-1]+self.localBias)
    psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)

  def op_callgsubr(self, index):
    self.globalSubrs._used.add(self.operandStack[-1]+self.globalBias)
    psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
+
class _DehintingT2Decompiler(psCharStrings.SimpleT2Decompiler):

  """T2 decompiler that analyzes where hints occur in each charstring
  (including subroutines), storing the result in charString._hints and
  collecting every hint-bearing charstring into the 'css' set so
  drop_hints() can later strip them."""

  class Hints(object):
    def __init__(self):
      # Whether calling this charstring produces any hint stems
      self.has_hint = False
      # Index to start at to drop all hints
      self.last_hint = 0
      # Index up to which we know more hints are possible.  Only
      # relevant if status is 0 or 1.
      self.last_checked = 0
      # The status means:
      # 0: after dropping hints, this charstring is empty
      # 1: after dropping hints, there may be more hints continuing after this
      # 2: no more hints possible after this charstring
      self.status = 0
      # Has hintmask instructions; not recursive
      self.has_hintmask = False
    pass

  def __init__(self, css, localSubrs, globalSubrs):
    # css: set collecting charstrings that need hint dropping.
    self._css = css
    psCharStrings.SimpleT2Decompiler.__init__(self,
                                              localSubrs,
                                              globalSubrs)

  def execute(self, charString):
    """Run the charstring, attaching a fresh Hints analysis to it."""
    # A subroutine may be executed more than once; remember the previous
    # analysis so we can assert it is reproducible.
    old_hints = charString._hints if hasattr(charString, '_hints') else None
    charString._hints = self.Hints()

    psCharStrings.SimpleT2Decompiler.execute(self, charString)

    hints = charString._hints

    if hints.has_hint or hints.has_hintmask:
      self._css.add(charString)

    if hints.status != 2:
      # Check from last_check, make sure we didn't have any operators.
      for i in range(hints.last_checked, len(charString.program) - 1):
        if isinstance(charString.program[i], str):
          hints.status = 2
          break;
        else:
          hints.status = 1 # There's *something* here
      hints.last_checked = len(charString.program)

    if old_hints:
      assert hints.__dict__ == old_hints.__dict__

  def op_callsubr(self, index):
    subr = self.localSubrs[self.operandStack[-1]+self.localBias]
    psCharStrings.SimpleT2Decompiler.op_callsubr(self, index)
    self.processSubr(index, subr)

  def op_callgsubr(self, index):
    subr = self.globalSubrs[self.operandStack[-1]+self.globalBias]
    psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index)
    self.processSubr(index, subr)

  def op_hstem(self, index):
    psCharStrings.SimpleT2Decompiler.op_hstem(self, index)
    self.processHint(index)
  def op_vstem(self, index):
    psCharStrings.SimpleT2Decompiler.op_vstem(self, index)
    self.processHint(index)
  def op_hstemhm(self, index):
    psCharStrings.SimpleT2Decompiler.op_hstemhm(self, index)
    self.processHint(index)
  def op_vstemhm(self, index):
    psCharStrings.SimpleT2Decompiler.op_vstemhm(self, index)
    self.processHint(index)
  def op_hintmask(self, index):
    psCharStrings.SimpleT2Decompiler.op_hintmask(self, index)
    self.processHintmask(index)
  def op_cntrmask(self, index):
    psCharStrings.SimpleT2Decompiler.op_cntrmask(self, index)
    self.processHintmask(index)

  def processHintmask(self, index):
    """Record a hintmask/cntrmask at program position 'index'."""
    cs = self.callingStack[-1]
    hints = cs._hints
    hints.has_hintmask = True
    if hints.status != 2 and hints.has_hint:
      # Check from last_check, see if we may be an implicit vstem
      for i in range(hints.last_checked, index - 1):
        if isinstance(cs.program[i], str):
          hints.status = 2
          break;
      if hints.status != 2:
        # We are an implicit vstem
        hints.last_hint = index + 1
        hints.status = 0
    hints.last_checked = index + 1

  def processHint(self, index):
    """Record an explicit stem-hint operator at position 'index'."""
    cs = self.callingStack[-1]
    hints = cs._hints
    hints.has_hint = True
    hints.last_hint = index
    hints.last_checked = index

  def processSubr(self, index, subr):
    """Fold a called subroutine's hint analysis into the caller's."""
    cs = self.callingStack[-1]
    hints = cs._hints
    subr_hints = subr._hints

    if subr_hints.has_hint:
      if hints.status != 2:
        hints.has_hint = True
        hints.last_checked = index
        hints.status = subr_hints.status
        # Decide where to chop off from
        if subr_hints.status == 0:
          hints.last_hint = index
        else:
          hints.last_hint = index - 2 # Leave the subr call in
      else:
        # In my understanding, this is a font bug.  Ie. it has hint stems
        # *after* path construction.  I've seen this in widespread fonts.
        # Best to ignore the hints I suppose...
        pass
        #assert 0
    else:
      hints.status = max(hints.status, subr_hints.status)
      if hints.status != 2:
        # Check from last_check, make sure we didn't have
        # any operators.
        for i in range(hints.last_checked, index - 1):
          if isinstance(cs.program[i], str):
            hints.status = 2
            break;
        hints.last_checked = index
      if hints.status != 2:
        # Decide where to chop off from
        if subr_hints.status == 0:
          hints.last_hint = index
        else:
          hints.last_hint = index - 2 # Leave the subr call in
+
@_add_method(ttLib.getTableClass('CFF '))
def prune_post_subset(self, options):
  """Post-subsetting cleanup of the CFF table.

  For each font in the FontSet: drops unused FontDicts (CID-keyed
  fonts), optionally strips all hinting, and renumbers local/global
  subroutines so unused ones are removed.  Returns True so the table
  is always kept.
  """
  cff = self.cff
  for fontname in cff.keys():
    font = cff[fontname]
    cs = font.CharStrings


    #
    # Drop unused FontDictionaries
    #
    if hasattr(font, "FDSelect"):
      sel = font.FDSelect
      indices = _uniq_sort(sel.gidArray)
      sel.gidArray = [indices.index (ss) for ss in sel.gidArray]
      arr = font.FDArray
      arr.items = [arr[i] for i in indices]
      arr.count = len(arr.items)
      del arr.file, arr.offsets


    #
    # Drop hints if not needed
    #
    if not options.hinting:

      #
      # This can be tricky, but doesn't have to.  What we do is:
      #
      # - Run all used glyph charstrings and recurse into subroutines,
      # - For each charstring (including subroutines), if it has any
      #   of the hint stem operators, we mark it as such.  Upon returning,
      #   for each charstring we note all the subroutine calls it makes
      #   that (recursively) contain a stem,
      # - Dropping hinting then consists of the following two ops:
      #   * Drop the piece of the program in each charstring before the
      #     last call to a stem op or a stem-calling subroutine,
      #   * Drop all hintmask operations.
      # - It's trickier... A hintmask right after hints and a few numbers
      #   will act as an implicit vstemhm.  As such, we track whether
      #   we have seen any non-hint operators so far and do the right
      #   thing, recursively...  Good luck understanding that :(
      #
      css = set()
      for g in font.charset:
        c,sel = cs.getItemAndSelector(g)
        # Make sure it's decompiled.  We want our "decompiler" to walk
        # the program, not the bytecode.
        c.draw(basePen.NullPen())
        subrs = getattr(c.private, "Subrs", [])
        decompiler = _DehintingT2Decompiler(css, subrs, c.globalSubrs)
        decompiler.execute(c)
      for charstring in css:
        charstring.drop_hints()

      # Drop font-wide hinting values
      all_privs = []
      if hasattr(font, 'FDSelect'):
        all_privs.extend(fd.Private for fd in font.FDArray)
      else:
        all_privs.append(font.Private)
      for priv in all_privs:
        for k in ['BlueValues', 'OtherBlues', 'FamilyBlues', 'FamilyOtherBlues',
                  'BlueScale', 'BlueShift', 'BlueFuzz',
                  'StemSnapH', 'StemSnapV', 'StdHW', 'StdVW']:
          if hasattr(priv, k):
            setattr(priv, k, None)


    #
    # Renumber subroutines to remove unused ones
    #

    # Mark all used subroutines
    for g in font.charset:
      c,sel = cs.getItemAndSelector(g)
      subrs = getattr(c.private, "Subrs", [])
      decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs)
      decompiler.execute(c)

    all_subrs = [font.GlobalSubrs]
    if hasattr(font, 'FDSelect'):
      all_subrs.extend(fd.Private.Subrs for fd in font.FDArray if hasattr(fd.Private, 'Subrs') and fd.Private.Subrs)
    elif hasattr(font.Private, 'Subrs') and font.Private.Subrs:
      all_subrs.append(font.Private.Subrs)

    # BUGFIX: deduplicate all_subrs by object identity, preserving order.
    # (Previously this read `subrs = set(subrs)` on a stale loop variable
    # and was immediately overwritten; the intent is that a Subrs index
    # shared by several FontDicts must be renumbered only once, or its
    # items would be remapped twice and corrupted.)
    seen_ids = set()
    unique_subrs = []
    for subrs in all_subrs:
      if id(subrs) not in seen_ids:
        seen_ids.add(id(subrs))
        unique_subrs.append(subrs)
    all_subrs = unique_subrs

    # Prepare
    for subrs in all_subrs:
      if not hasattr(subrs, '_used'):
        subrs._used = set()
      subrs._used = _uniq_sort(subrs._used)
      subrs._old_bias = psCharStrings.calcSubrBias(subrs)
      subrs._new_bias = psCharStrings.calcSubrBias(subrs._used)

    # Renumber glyph charstrings
    for g in font.charset:
      c,sel = cs.getItemAndSelector(g)
      subrs = getattr(c.private, "Subrs", [])
      c.subset_subroutines (subrs, font.GlobalSubrs)

    # Renumber subroutines themselves
    for subrs in all_subrs:

      if subrs == font.GlobalSubrs:
        if not hasattr(font, 'FDSelect') and hasattr(font.Private, 'Subrs'):
          local_subrs = font.Private.Subrs
        else:
          local_subrs = []
      else:
        local_subrs = subrs

      subrs.items = [subrs.items[i] for i in subrs._used]
      subrs.count = len(subrs.items)
      del subrs.file
      if hasattr(subrs, 'offsets'):
        del subrs.offsets

      for i in range (subrs.count):
        subrs[i].subset_subroutines (local_subrs, font.GlobalSubrs)

    # Cleanup
    for subrs in all_subrs:
      del subrs._used, subrs._old_bias, subrs._new_bias

  return True
+
@_add_method(ttLib.getTableClass('cmap'))
def closure_glyphs(self, s):
  """Add to s.glyphs every glyph reachable from s.unicodes_requested
  through the Unicode cmap subtables, including format-14 variation
  sequences.  Logs a message for code points with no default glyph."""
  tables = [t for t in self.tables if t.isUnicode()]
  for u in s.unicodes_requested:
    found = False
    for table in tables:
      if table.format == 14:
        for l in table.uvsDict.values():
          # TODO(behdad) Speed this up!
          gids = [g for uc,g in l if u == uc and g is not None]
          s.glyphs.update(gids)
          # Intentionally not setting found=True here.
      else:
        if u in table.cmap:
          s.glyphs.add(table.cmap[u])
          found = True
    if not found:
      s.log("No default glyph for Unicode %04X found." % u)
+
@_add_method(ttLib.getTableClass('cmap'))
def prune_pre_subset(self, options):
  """Filter cmap subtables before subsetting, per the options.

  Drops non-Unicode/non-Symbol subtables unless legacy cmaps are kept,
  drops Symbol subtables unless requested, and always drops format-0
  subtables.  Returns True (required table).
  """
  tables = self.tables
  if not options.legacy_cmap:
    # Drop non-Unicode / non-Symbol cmaps
    tables = [t for t in tables if t.isUnicode() or t.isSymbol()]
  if not options.symbol_cmap:
    tables = [t for t in tables if not t.isSymbol()]
  # TODO(behdad) Only keep one subtable?
  # For now, drop format=0 which can't be subset_glyphs easily?
  tables = [t for t in tables if t.format != 0]
  self.tables = tables
  self.numSubTables = len(tables)
  return True # Required table
+
@_add_method(ttLib.getTableClass('cmap'))
def subset_glyphs(self, s):
  """Restrict all cmap subtables to retained glyphs/unicodes; drop any
  subtable that becomes empty.  Returns True (required table)."""
  s.glyphs = s.glyphs_cmaped
  for t in self.tables:
    # For reasons I don't understand I need this here
    # to force decompilation of the cmap format 14.
    try:
      getattr(t, "asdf")
    except AttributeError:
      pass
    if t.format == 14:
      # TODO(behdad) We drop all the default-UVS mappings for glyphs_requested.
      # I don't think we care about that...
      t.uvsDict = dict((v,[(u,g) for u,g in l
                           if g in s.glyphs or u in s.unicodes_requested])
                       for v,l in t.uvsDict.items())
      # Drop variation selectors that no longer map anything.
      t.uvsDict = dict((v,l) for v,l in t.uvsDict.items() if l)
    elif t.isUnicode():
      t.cmap = dict((u,g) for u,g in t.cmap.items()
                    if g in s.glyphs_requested or u in s.unicodes_requested)
    else:
      t.cmap = dict((u,g) for u,g in t.cmap.items()
                    if g in s.glyphs_requested)
  self.tables = [t for t in self.tables
                 if (t.cmap if t.format != 14 else t.uvsDict)]
  self.numSubTables = len(self.tables)
  # TODO(behdad) Convert formats when needed.
  # In particular, if we have a format=12 without non-BMP
  # characters, either drop format=12 one or convert it
  # to format=4 if there's not one.
  return True # Required table
+
@_add_method(ttLib.getTableClass('name'))
def prune_pre_subset(self, options):
  """Filter 'name' table records per the options.

  Keeps only the requested name IDs, drops non-Unicode records unless
  legacy names are kept, and keeps only the requested languages.
  Returns True (required table).
  """
  names = self.names
  if '*' not in options.name_IDs:
    names = [n for n in names if n.nameID in options.name_IDs]
  if not options.name_legacy:
    names = [n for n in names if n.isUnicode()]
  # TODO(behdad) Option to keep only one platform's
  if '*' not in options.name_languages:
    # TODO(behdad) This is Windows-platform specific!
    names = [n for n in names if n.langID in options.name_languages]
  self.names = names
  return True  # Required table
+
+
+# TODO(behdad) OS/2 ulUnicodeRange / ulCodePageRange?
+# TODO(behdad) Drop AAT tables.
+# TODO(behdad) Drop unneeded GSUB/GPOS Script/LangSys entries.
+# TODO(behdad) Drop empty GSUB/GPOS, and GDEF if no GSUB/GPOS left
+# TODO(behdad) Drop GDEF subitems if unused by lookups
+# TODO(behdad) Avoid recursing too much (in GSUB/GPOS and in CFF)
+# TODO(behdad) Text direction considerations.
+# TODO(behdad) Text script / language considerations.
+# TODO(behdad) Optionally drop 'kern' table if GPOS available
+# TODO(behdad) Implement --unicode='*' to choose all cmap'ed
+# TODO(behdad) Drop old-spec Indic scripts
+
+
class Options(object):

  """Subsetting options.

  Class attributes hold the defaults; individual instances may override
  any of them via the constructor, set(), or parse_opts().
  """

  class UnknownOptionError(Exception):
    pass

  _drop_tables_default = ['BASE', 'JSTF', 'DSIG', 'EBDT', 'EBLC', 'EBSC', 'SVG ',
                          'PCLT', 'LTSH']
  _drop_tables_default += ['Feat', 'Glat', 'Gloc', 'Silf', 'Sill']  # Graphite
  _drop_tables_default += ['CBLC', 'CBDT', 'sbix', 'COLR', 'CPAL']  # Color
  _no_subset_tables_default = ['gasp', 'head', 'hhea', 'maxp', 'vhea', 'OS/2',
                               'loca', 'name', 'cvt ', 'fpgm', 'prep']
  _hinting_tables_default = ['cvt ', 'fpgm', 'prep', 'hdmx', 'VDMX']

  # Based on HarfBuzz shapers
  _layout_features_groups = {
    # Default shaper
    'common': ['ccmp', 'liga', 'locl', 'mark', 'mkmk', 'rlig'],
    'horizontal': ['calt', 'clig', 'curs', 'kern', 'rclt'],
    'vertical':  ['valt', 'vert', 'vkrn', 'vpal', 'vrt2'],
    'ltr': ['ltra', 'ltrm'],
    'rtl': ['rtla', 'rtlm'],
    # Complex shapers
    'arabic': ['init', 'medi', 'fina', 'isol', 'med2', 'fin2', 'fin3',
               'cswh', 'mset'],
    'hangul': ['ljmo', 'vjmo', 'tjmo'],
    'tibetan': ['abvs', 'blws', 'abvm', 'blwm'],
    'indic': ['nukt', 'akhn', 'rphf', 'rkrf', 'pref', 'blwf', 'half',
              'abvf', 'pstf', 'cfar', 'vatu', 'cjct', 'init', 'pres',
              'abvs', 'blws', 'psts', 'haln', 'dist', 'abvm', 'blwm'],
  }
  _layout_features_default = _uniq_sort(sum(
      iter(_layout_features_groups.values()), []))

  drop_tables = _drop_tables_default
  no_subset_tables = _no_subset_tables_default
  hinting_tables = _hinting_tables_default
  layout_features = _layout_features_default
  hinting = True
  glyph_names = False
  legacy_cmap = False
  symbol_cmap = False
  name_IDs = [1, 2]  # Family and Style
  name_legacy = False
  name_languages = [0x0409]  # English
  notdef_glyph = True # gid0 for TrueType / .notdef for CFF
  notdef_outline = False # No need for notdef to have an outline really
  recommended_glyphs = False  # gid1, gid2, gid3 for TrueType
  recalc_bounds = False # Recalculate font bounding boxes
  recalc_timestamp = False # Recalculate font modified timestamp
  canonical_order = False # Order tables as recommended
  flavor = None # May be 'woff'

  def __init__(self, **kwargs):

    self.set(**kwargs)

  def set(self, **kwargs):
    """Set options from keyword arguments; unknown names raise
    UnknownOptionError."""
    for k,v in kwargs.items():
      if not hasattr(self, k):
        raise self.UnknownOptionError("Unknown option '%s'" % k)
      setattr(self, k, v)

  def parse_opts(self, argv, ignore_unknown=False):
    """Parse '--name[=value]' style arguments out of argv.

    Supports '--no-name' for booleans and '--name+=v'/'--name-=v' for
    list options.  Returns the arguments that were not consumed.
    Unknown options raise unless listed in (or allowed by)
    ignore_unknown, in which case they are passed through.
    """
    ret = []
    opts = {}
    for a in argv:
      orig_a = a
      if not a.startswith('--'):
        ret.append(a)
        continue
      a = a[2:]
      i = a.find('=')
      op = '='
      if i == -1:
        if a.startswith("no-"):
          k = a[3:]
          v = False
        else:
          k = a
          v = True
      else:
        k = a[:i]
        if k[-1] in "-+":
          op = k[-1]+'='  # Op is '-=' or '+=' now.
          k = k[:-1]
        v = a[i+1:]
      k = k.replace('-', '_')
      if not hasattr(self, k):
        if ignore_unknown is True or k in ignore_unknown:
          ret.append(orig_a)
          continue
        else:
          raise self.UnknownOptionError("Unknown option '%s'" % a)

      ov = getattr(self, k)
      if isinstance(ov, bool):
        # BUGFIX: bool() on a non-empty string is always True, so
        # '--opt=false' and '--opt=0' used to enable the option.
        # Interpret common textual false values explicitly.
        if isinstance(v, str):
          v = v.lower() not in ['', '0', 'no', 'false']
        else:
          v = bool(v)
      elif isinstance(ov, int):
        v = int(v)
      elif isinstance(ov, list):
        vv = v.split(',')
        if vv == ['']:
          vv = []
        vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
        if op == '=':
          v = vv
        elif op == '+=':
          # BUGFIX: copy before modifying, so the shared class-level
          # default list is never mutated in place (which would leak
          # state into every other Options instance).
          v = list(ov)
          v.extend(vv)
        elif op == '-=':
          v = list(ov)
          for x in vv:
            if x in v:
              v.remove(x)
        else:
          assert False

      opts[k] = v
    self.set(**opts)

    return ret
+
+
class Subsetter(object):

  """Drives the subsetting process over a ttLib.TTFont.

  Typical use: construct with Options and Logger, call populate() with
  the requested glyphs/unicodes/text, then subset(font).
  """

  def __init__(self, options=None, log=None):

    if not log:
      log = Logger()
    if not options:
      options = Options()

    self.options = options
    self.log = log
    # Code points the user asked for.
    self.unicodes_requested = set()
    # Glyph names the user asked for directly.
    self.glyphs_requested = set()
    # Working glyph set; rebuilt during closure.
    self.glyphs = set()

  def populate(self, glyphs=[], unicodes=[], text=""):
    """Add glyph names, Unicode code points, and the characters of
    'text' to the subset request.  May be called multiple times."""
    self.unicodes_requested.update(unicodes)
    if isinstance(text, bytes):
      text = text.decode("utf8")
    for u in text:
      self.unicodes_requested.add(ord(u))
    self.glyphs_requested.update(glyphs)
    self.glyphs.update(glyphs)

  def _prune_pre_subset(self, font):
    # Drop tables configured away, then let each remaining table prune
    # itself before glyph subsetting.

    for tag in font.keys():
      if tag == 'GlyphOrder': continue

      if(tag in self.options.drop_tables or
         (tag in self.options.hinting_tables and not self.options.hinting)):
        self.log(tag, "dropped")
        del font[tag]
        continue

      clazz = ttLib.getTableClass(tag)

      if hasattr(clazz, 'prune_pre_subset'):
        table = font[tag]
        self.log.lapse("load '%s'" % tag)
        retain = table.prune_pre_subset(self.options)
        self.log.lapse("prune  '%s'" % tag)
        if not retain:
          self.log(tag, "pruned to empty; dropped")
          del font[tag]
          continue
        else:
          self.log(tag, "pruned")

  def _closure_glyphs(self, font):
    # Compute the closed glyph set: cmap-reachable glyphs plus notdef/
    # recommended glyphs, closed over GSUB substitutions and glyf
    # composite components.  Intermediate snapshots are kept in
    # glyphs_cmaped / glyphs_gsubed / glyphs_glyfed / glyphs_all.

    realGlyphs = set(font.getGlyphOrder())

    self.glyphs = self.glyphs_requested.copy()

    if 'cmap' in font:
      font['cmap'].closure_glyphs(self)
      self.glyphs.intersection_update(realGlyphs)
    self.glyphs_cmaped = self.glyphs

    if self.options.notdef_glyph:
      if 'glyf' in font:
        self.glyphs.add(font.getGlyphName(0))
        self.log("Added gid0 to subset")
      else:
        self.glyphs.add('.notdef')
        self.log("Added .notdef to subset")
    if self.options.recommended_glyphs:
      if 'glyf' in font:
        for i in range(min(4, len(font.getGlyphOrder()))):
          self.glyphs.add(font.getGlyphName(i))
        self.log("Added first four glyphs to subset")

    if 'GSUB' in font:
      self.log("Closing glyph list over 'GSUB': %d glyphs before" %
                len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      font['GSUB'].closure_glyphs(self)
      self.glyphs.intersection_update(realGlyphs)
      self.log("Closed  glyph list over 'GSUB': %d glyphs after" %
                len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      self.log.lapse("close glyph list over 'GSUB'")
    self.glyphs_gsubed = self.glyphs.copy()

    if 'glyf' in font:
      self.log("Closing glyph list over 'glyf': %d glyphs before" %
                len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      font['glyf'].closure_glyphs(self)
      self.glyphs.intersection_update(realGlyphs)
      self.log("Closed  glyph list over 'glyf': %d glyphs after" %
                len(self.glyphs))
      self.log.glyphs(self.glyphs, font=font)
      self.log.lapse("close glyph list over 'glyf'")
    self.glyphs_glyfed = self.glyphs.copy()

    self.glyphs_all = self.glyphs.copy()

    self.log("Retaining %d glyphs: " % len(self.glyphs_all))

    del self.glyphs


  def _subset_glyphs(self, font):
    # Ask each table to drop the glyphs not in glyphs_all; tables that
    # don't know how to subset (and aren't whitelisted) are dropped.
    for tag in font.keys():
      if tag == 'GlyphOrder': continue
      clazz = ttLib.getTableClass(tag)

      if tag in self.options.no_subset_tables:
        self.log(tag, "subsetting not needed")
      elif hasattr(clazz, 'subset_glyphs'):
        table = font[tag]
        self.glyphs = self.glyphs_all
        retain = table.subset_glyphs(self)
        del self.glyphs
        self.log.lapse("subset '%s'" % tag)
        if not retain:
          self.log(tag, "subsetted to empty; dropped")
          del font[tag]
        else:
          self.log(tag, "subsetted")
      else:
        self.log(tag, "NOT subset; don't know how to subset; dropped")
        del font[tag]

    glyphOrder = font.getGlyphOrder()
    glyphOrder = [g for g in glyphOrder if g in self.glyphs_all]
    font.setGlyphOrder(glyphOrder)
    font._buildReverseGlyphOrderDict()
    self.log.lapse("subset GlyphOrder")

  def _prune_post_subset(self, font):
    # Final per-table cleanup pass after glyph subsetting.
    for tag in font.keys():
      if tag == 'GlyphOrder': continue
      clazz = ttLib.getTableClass(tag)
      if hasattr(clazz, 'prune_post_subset'):
        table = font[tag]
        retain = table.prune_post_subset(self.options)
        self.log.lapse("prune  '%s'" % tag)
        if not retain:
          self.log(tag, "pruned to empty; dropped")
          del font[tag]
        else:
          self.log(tag, "pruned")

  def subset(self, font):
    """Subset 'font' in place according to the populated request."""

    self._prune_pre_subset(font)
    self._closure_glyphs(font)
    self._subset_glyphs(font)
    self._prune_post_subset(font)
+
+
class Logger(object):

  """Console logger used by the subsetter.

  Messages print only when 'verbose' is set, timing information only
  when 'timing' is set, and full XML font dumps only when 'xml' is set.
  """

  def __init__(self, verbose=False, xml=False, timing=False):
    self.verbose = verbose
    self.xml = xml
    self.timing = timing
    self.last_time = self.start_time = time.time()

  def parse_opts(self, argv):
    """Consume --verbose/--xml/--timing flags; return remaining argv."""
    remaining = list(argv)
    for flag in ('verbose', 'xml', 'timing'):
      option = '--' + flag
      if option in remaining:
        setattr(self, flag, True)
        remaining.remove(option)
    return remaining

  def __call__(self, *things):
    if self.verbose:
      print(' '.join(str(x) for x in things))

  def lapse(self, *things):
    """Report (and reset) the time elapsed since the last lapse()."""
    if not self.timing:
      return
    now = time.time()
    print("Took %0.3fs to %s" %(now - self.last_time,
                                 ' '.join(str(x) for x in things)))
    self.last_time = now

  def glyphs(self, glyphs, font=None):
    """Log glyph names, plus gids when a font is supplied."""
    if not self.verbose:
      return
    self("Names: ", sorted(glyphs))
    if font:
      gid_of = font.getReverseGlyphMap()
      self("Gids : ", sorted(gid_of[g] for g in glyphs))

  def font(self, font, file=sys.stdout):
    """Dump the whole font as TTX-style XML (when 'xml' is enabled)."""
    if not self.xml:
      return
    from fontTools.misc import xmlWriter
    writer = xmlWriter.XMLWriter(file)
    for tag in font.keys():
      writer.begintag(tag)
      writer.newline()
      font[tag].toXML(writer, font)
      writer.endtag(tag)
      writer.newline()
+
+
def load_font(fontFile,
              options,
              allowVID=False,
              checkChecksums=False,
              dontLoadGlyphNames=False,
              lazy=True):
  """Open fontFile as a ttLib.TTFont, honoring the relevant Options
  fields (recalc_bounds, recalc_timestamp).

  When dontLoadGlyphNames is set, the 'post' table is temporarily
  tricked into parsing as format 3.0 so glyph names are never loaded.
  """

  font = ttLib.TTFont(fontFile,
                      allowVID=allowVID,
                      checkChecksums=checkChecksums,
                      recalcBBoxes=options.recalc_bounds,
                      recalcTimestamp=options.recalc_timestamp,
                      lazy=lazy)

  # Hack:
  #
  # If we don't need glyph names, change 'post' class to not try to
  # load them.  It avoid lots of headache with broken fonts as well
  # as loading time.
  #
  # Ideally ttLib should provide a way to ask it to skip loading
  # glyph names.  But it currently doesn't provide such a thing.
  #
  if dontLoadGlyphNames:
    post = ttLib.getTableClass('post')
    # Swap in the format-3.0 decoder while the table is being loaded,
    # then restore the original class method.
    saved = post.decode_format_2_0
    post.decode_format_2_0 = post.decode_format_3_0
    f = font['post']
    if f.formatType == 2.0:
      f.formatType = 3.0
    post.decode_format_2_0 = saved

  return font
+
def save_font(font, outfile, options):
  """Write 'font' to 'outfile'.

  Applies options.flavor (e.g. 'woff') before saving and reorders
  tables per options.canonical_order.  Raises Exception when a flavor
  is requested but this fontTools build has no flavor support.
  """
  wants_flavor = bool(options.flavor)
  if wants_flavor and not hasattr(font, 'flavor'):
    raise Exception("fonttools version does not support flavors.")
  font.flavor = options.flavor
  font.save(outfile, reorderTables=options.canonical_order)
+
def main(args):
  """Command-line entry point: pyftsubset font-file glyph-specs options.

  Glyph specs may be glyph names, '*', 'uniXXXX'/'U+XXXX' code points,
  'gidN'/'glyphN' indices, or '--text=...'.  Writes the subset font to
  '<font-file>.subset'.
  """

  # Peel logger flags (--verbose/--xml/--timing) off first, then the
  # subsetter options; '--text' is handled below, not by Options.
  log = Logger()
  args = log.parse_opts(args)

  options = Options()
  args = options.parse_opts(args, ignore_unknown=['text'])

  if len(args) < 2:
    print("usage: pyftsubset font-file glyph... [--text=ABC]... [--option=value]...", file=sys.stderr)
    sys.exit(1)

  fontfile = args[0]
  args = args[1:]

  # Glyph names can be skipped entirely when every spec is numeric
  # (gid/glyph/uni/U+), which speeds up loading broken 'post' tables.
  dontLoadGlyphNames =(not options.glyph_names and
         all(any(g.startswith(p)
             for p in ['gid', 'glyph', 'uni', 'U+'])
              for g in args))

  font = load_font(fontfile, options, dontLoadGlyphNames=dontLoadGlyphNames)
  log.lapse("load font")
  subsetter = Subsetter(options=options, log=log)

  names = font.getGlyphNames()
  log.lapse("loading glyph names")

  # Translate each argument into glyph names, code points, or text.
  glyphs = []
  unicodes = []
  text = ""
  for g in args:
    if g == '*':
      glyphs.extend(font.getGlyphOrder())
      continue
    if g in names:
      glyphs.append(g)
      continue
    if g.startswith('--text='):
      text += g[7:]
      continue
    if g.startswith('uni') or g.startswith('U+'):
      if g.startswith('uni') and len(g) > 3:
        g = g[3:]
      elif g.startswith('U+') and len(g) > 2:
        g = g[2:]
      u = int(g, 16)
      unicodes.append(u)
      continue
    if g.startswith('gid') or g.startswith('glyph'):
      if g.startswith('gid') and len(g) > 3:
        g = g[3:]
      elif g.startswith('glyph') and len(g) > 5:
        g = g[5:]
      try:
        glyphs.append(font.getGlyphName(int(g), requireReal=True))
      except ValueError:
        raise Exception("Invalid glyph identifier: %s" % g)
      continue
    raise Exception("Invalid glyph identifier: %s" % g)
  log.lapse("compile glyph list")
  log("Unicodes:", unicodes)
  log("Glyphs:", glyphs)

  subsetter.populate(glyphs=glyphs, unicodes=unicodes, text=text)
  subsetter.subset(font)

  outfile = fontfile + '.subset'

  save_font (font, outfile, options)
  log.lapse("compile and save font")

  # Report total wall-clock time by resetting the lapse reference.
  log.last_time = log.start_time
  log.lapse("make one with everything(TOTAL TIME)")

  if log.verbose:
    import os
    log("Input  font: %d bytes" % os.path.getsize(fontfile))
    log("Subset font: %d bytes" % os.path.getsize(outfile))

  log.font(font)

  font.close()
+
+
# Public API of this module.
__all__ = [
  'Options',
  'Subsetter',
  'Logger',
  'load_font',
  'save_font',
  'main'
]

if __name__ == '__main__':
  main(sys.argv[1:])
diff --git a/Lib/fontTools/t1Lib.py b/Lib/fontTools/t1Lib.py
new file mode 100644
index 0000000..14cc904
--- /dev/null
+++ b/Lib/fontTools/t1Lib.py
@@ -0,0 +1,372 @@
+"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts
+
+Functions for reading and writing raw Type 1 data:
+
+read(path)
+	reads any Type 1 font file, returns the raw data and a type indicator: 
+	'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed 
+	to by 'path'. 
+	Raises an error when the file does not contain valid Type 1 data.
+
+write(path, data, kind='OTHER', dohex=False)
+	writes raw Type 1 data to the file pointed to by 'path'. 
+	'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'.
+	'dohex' is a flag which determines whether the eexec encrypted
+	part should be written as hexadecimal or binary, but only if kind
+	is 'LWFN' or 'PFB'.
+"""
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import eexec
+from fontTools.misc.macCreatorType import getMacCreatorAndType
+import os
+import re
+
+__author__ = "jvr"
+__version__ = "1.0b2"
+DEBUG = 0
+
+
+try:
+	try:
+		from Carbon import Res
+	except ImportError:
+		import Res  # MacPython < 2.2
+except ImportError:
+	haveMacSupport = 0
+else:
+	haveMacSupport = 1
+	import MacOS
+	
+
+class T1Error(Exception): pass
+
+
+class T1Font(object):
+	
+	"""Type 1 font class.
+	
+	Uses a minimal interpreter that supports just about enough PS to parse
+	Type 1 fonts.
+	"""
+	
+	def __init__(self, path=None):
+		if path is not None:
+			self.data, type = read(path)
+		else:
+			pass # XXX
+	
+	def saveAs(self, path, type):
+		write(path, self.getData(), type)
+	
+	def getData(self):
+		# XXX Todo: if the data has been converted to Python object,
+		# recreate the PS stream
+		return self.data
+	
+	def getGlyphSet(self):
+		"""Return a generic GlyphSet, which is a dict-like object
+		mapping glyph names to glyph objects. The returned glyph objects
+		have a .draw() method that supports the Pen protocol, and will
+		have an attribute named 'width', but only *after* the .draw() method
+		has been called.
+		
+		In the case of Type 1, the GlyphSet is simply the CharStrings dict.
+		"""
+		return self["CharStrings"]
+	
+	def __getitem__(self, key):
+		if not hasattr(self, "font"):
+			self.parse()
+		return self.font[key]
+	
+	def parse(self):
+		from fontTools.misc import psLib
+		from fontTools.misc import psCharStrings
+		self.font = psLib.suckfont(self.data)
+		charStrings = self.font["CharStrings"]
+		lenIV = self.font["Private"].get("lenIV", 4)
+		assert lenIV >= 0
+		subrs = self.font["Private"]["Subrs"]
+		for glyphName, charString in charStrings.items():
+			charString, R = eexec.decrypt(charString, 4330)
+			charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:],
+					subrs=subrs)
+		for i in range(len(subrs)):
+			charString, R = eexec.decrypt(subrs[i], 4330)
+			subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
+		del self.data
+
+
+# low level T1 data read and write functions
+
+def read(path, onlyHeader=False):
+	"""reads any Type 1 font file, returns raw data"""
+	normpath = path.lower()
+	creator, typ = getMacCreatorAndType(path)
+	if typ == 'LWFN':
+		return readLWFN(path, onlyHeader), 'LWFN'
+	if normpath[-4:] == '.pfb':
+		return readPFB(path, onlyHeader), 'PFB'
+	else:
+		return readOther(path), 'OTHER'
+
+def write(path, data, kind='OTHER', dohex=False):
+	assertType1(data)
+	kind = kind.upper()
+	try:
+		os.remove(path)
+	except os.error:
+		pass
+	err = 1
+	try:
+		if kind == 'LWFN':
+			writeLWFN(path, data)
+		elif kind == 'PFB':
+			writePFB(path, data)
+		else:
+			writeOther(path, data, dohex)
+		err = 0
+	finally:
+		if err and not DEBUG:
+			try:
+				os.remove(path)
+			except os.error:
+				pass
+
+
+# -- internal -- 
+
+LWFNCHUNKSIZE = 2000
+HEXLINELENGTH = 80
+
+
+def readLWFN(path, onlyHeader=False):
+	"""reads an LWFN font file, returns raw data"""
+	resRef = Res.FSOpenResFile(path, 1)  # read-only
+	try:
+		Res.UseResFile(resRef)
+		n = Res.Count1Resources('POST')
+		data = []
+		for i in range(501, 501 + n):
+			res = Res.Get1Resource('POST', i)
+			code = byteord(res.data[0])
+			if byteord(res.data[1]) != 0:
+				raise T1Error('corrupt LWFN file')
+			if code in [1, 2]:
+				if onlyHeader and code == 2:
+					break
+				data.append(res.data[2:])
+			elif code in [3, 5]:
+				break
+			elif code == 4:
+				f = open(path, "rb")
+				data.append(f.read())
+				f.close()
+			elif code == 0:
+				pass # comment, ignore
+			else:
+				raise T1Error('bad chunk code: ' + repr(code))
+	finally:
+		Res.CloseResFile(resRef)
+	data = bytesjoin(data)
+	assertType1(data)
+	return data
+
+def readPFB(path, onlyHeader=False):
+	"""reads a PFB font file, returns raw data"""
+	f = open(path, "rb")
+	data = []
+	while True:
+		if f.read(1) != bytechr(128):
+			raise T1Error('corrupt PFB file')
+		code = byteord(f.read(1))
+		if code in [1, 2]:
+			chunklen = stringToLong(f.read(4))
+			chunk = f.read(chunklen)
+			assert len(chunk) == chunklen
+			data.append(chunk)
+		elif code == 3:
+			break
+		else:
+			raise T1Error('bad chunk code: ' + repr(code))
+		if onlyHeader:
+			break
+	f.close()
+	data = bytesjoin(data)
+	assertType1(data)
+	return data
+
+def readOther(path):
+	"""reads any (font) file, returns raw data"""
+	f = open(path, "rb")
+	data = f.read()
+	f.close()
+	assertType1(data)
+	
+	chunks = findEncryptedChunks(data)
+	data = []
+	for isEncrypted, chunk in chunks:
+		if isEncrypted and isHex(chunk[:4]):
+			data.append(deHexString(chunk))
+		else:
+			data.append(chunk)
+	return bytesjoin(data)
+
+# file writing tools
+
+def writeLWFN(path, data):
+	Res.FSpCreateResFile(path, "just", "LWFN", 0)
+	resRef = Res.FSOpenResFile(path, 2)  # write-only
+	try:
+		Res.UseResFile(resRef)
+		resID = 501
+		chunks = findEncryptedChunks(data)
+		for isEncrypted, chunk in chunks:
+			if isEncrypted:
+				code = 2
+			else:
+				code = 1
+			while chunk:
+				res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
+				res.AddResource('POST', resID, '')
+				chunk = chunk[LWFNCHUNKSIZE - 2:]
+				resID = resID + 1
+		res = Res.Resource(bytechr(5) + '\0')
+		res.AddResource('POST', resID, '')
+	finally:
+		Res.CloseResFile(resRef)
+
+def writePFB(path, data):
+	chunks = findEncryptedChunks(data)
+	f = open(path, "wb")
+	try:
+		for isEncrypted, chunk in chunks:
+			if isEncrypted:
+				code = 2
+			else:
+				code = 1
+			f.write(bytechr(128) + bytechr(code))
+			f.write(longToString(len(chunk)))
+			f.write(chunk)
+		f.write(bytechr(128) + bytechr(3))
+	finally:
+		f.close()
+
+def writeOther(path, data, dohex=False):
+	chunks = findEncryptedChunks(data)
+	f = open(path, "wb")
+	try:
+		hexlinelen = HEXLINELENGTH // 2
+		for isEncrypted, chunk in chunks:
+			if isEncrypted:
+				code = 2
+			else:
+				code = 1
+			if code == 2 and dohex:
+				while chunk:
+					f.write(eexec.hexString(chunk[:hexlinelen]))
+					f.write('\r')
+					chunk = chunk[hexlinelen:]
+			else:
+				f.write(chunk)
+	finally:
+		f.close()
+
+
+# decryption tools
+
+EEXECBEGIN = "currentfile eexec"
+EEXECEND = '0' * 64
+EEXECINTERNALEND = "currentfile closefile"
+EEXECBEGINMARKER = "%-- eexec start\r"
+EEXECENDMARKER = "%-- eexec end\r"
+
+_ishexRE = re.compile('[0-9A-Fa-f]*$')
+
+def isHex(text):
+	return _ishexRE.match(text) is not None
+
+
+def decryptType1(data):
+	chunks = findEncryptedChunks(data)
+	data = []
+	for isEncrypted, chunk in chunks:
+		if isEncrypted:
+			if isHex(chunk[:4]):
+				chunk = deHexString(chunk)
+			decrypted, R = eexec.decrypt(chunk, 55665)
+			decrypted = decrypted[4:]
+			if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
+					and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
+				raise T1Error("invalid end of eexec part")
+			decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + '\r'
+			data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
+		else:
+			if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN:
+				data.append(chunk[:-len(EEXECBEGIN)-1])
+			else:
+				data.append(chunk)
+	return bytesjoin(data)
+
+def findEncryptedChunks(data):
+	chunks = []
+	while True:
+		eBegin = data.find(EEXECBEGIN)
+		if eBegin < 0:
+			break
+		eBegin = eBegin + len(EEXECBEGIN) + 1
+		eEnd = data.find(EEXECEND, eBegin)
+		if eEnd < 0:
+			raise T1Error("can't find end of eexec part")
+		cypherText = data[eBegin:eEnd + 2]
+		if isHex(cypherText[:4]):
+			cypherText = deHexString(cypherText)
+		plainText, R = eexec.decrypt(cypherText, 55665)
+		eEndLocal = plainText.find(EEXECINTERNALEND)
+		if eEndLocal < 0:
+			raise T1Error("can't find end of eexec part")
+		chunks.append((0, data[:eBegin]))
+		chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1]))
+		data = data[eEnd:]
+	chunks.append((0, data))
+	return chunks
+
+def deHexString(hexstring):
+	return eexec.deHexString(strjoin(hexstring.split()))
+
+
+# Type 1 assertion
+
+_fontType1RE = re.compile(br"/FontType\s+1\s+def")
+
+def assertType1(data):
+	for head in [b'%!PS-AdobeFont', b'%!FontType1']:
+		if data[:len(head)] == head:
+			break
+	else:
+		raise T1Error("not a PostScript font")
+	if not _fontType1RE.search(data):
+		raise T1Error("not a Type 1 font")
+	if data.find(b"currentfile eexec") < 0:
+		raise T1Error("not an encrypted Type 1 font")
+	# XXX what else?
+	return data
+
+
+# pfb helpers
+
+def longToString(long):
+	s = ""
+	for i in range(4):
+		s += bytechr((long & (0xff << (i * 8))) >> i * 8)
+	return s
+
+def stringToLong(s):
+	if len(s) != 4:
+		raise ValueError('string must be 4 bytes long')
+	l = 0
+	for i in range(4):
+		l += byteord(s[i]) << (i * 8)
+	return l
+
diff --git a/Lib/fontTools/ttLib/__init__.py b/Lib/fontTools/ttLib/__init__.py
new file mode 100644
index 0000000..9879a4e
--- /dev/null
+++ b/Lib/fontTools/ttLib/__init__.py
@@ -0,0 +1,972 @@
+"""fontTools.ttLib -- a package for dealing with TrueType fonts.
+
+This package offers translators to convert TrueType fonts to Python 
+objects and vice versa, and additionally from Python to TTX (an XML-based
+text format) and vice versa.
+
+Example interactive session:
+
+Python 1.5.2c1 (#43, Mar  9 1999, 13:06:43)  [CW PPC w/GUSI w/MSL]
+Copyright 1991-1995 Stichting Mathematisch Centrum, Amsterdam
+>>> from fontTools import ttLib
+>>> tt = ttLib.TTFont("afont.ttf")
+>>> tt['maxp'].numGlyphs
+242
+>>> tt['OS/2'].achVendID
+'B&H\000'
+>>> tt['head'].unitsPerEm
+2048
+>>> tt.saveXML("afont.ttx")
+Dumping 'LTSH' table...
+Dumping 'OS/2' table...
+Dumping 'VDMX' table...
+Dumping 'cmap' table...
+Dumping 'cvt ' table...
+Dumping 'fpgm' table...
+Dumping 'glyf' table...
+Dumping 'hdmx' table...
+Dumping 'head' table...
+Dumping 'hhea' table...
+Dumping 'hmtx' table...
+Dumping 'loca' table...
+Dumping 'maxp' table...
+Dumping 'name' table...
+Dumping 'post' table...
+Dumping 'prep' table...
+>>> tt2 = ttLib.TTFont()
+>>> tt2.importXML("afont.ttx")
+>>> tt2['maxp'].numGlyphs
+242
+>>> 
+
+"""
+
+#
+# $Id: __init__.py,v 1.51 2009-02-22 08:55:00 pabs3 Exp $
+#
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import os
+import sys
+
+haveMacSupport = 0
+if sys.platform == "mac":
+	haveMacSupport = 1
+elif sys.platform == "darwin" and sys.version_info[:3] != (2, 2, 0):
+	# Python 2.2's Mac support is broken, so don't enable it there.
+	haveMacSupport = 1
+
+
+class TTLibError(Exception): pass
+
+
+class TTFont(object):
+	
+	"""The main font object. It manages file input and output, and offers
+	a convenient way of accessing tables. 
+	Tables will be only decompiled when necessary, ie. when they're actually
+	accessed. This means that simple operations can be extremely fast.
+	"""
+	
+	def __init__(self, file=None, res_name_or_index=None,
+			sfntVersion="\000\001\000\000", flavor=None, checkChecksums=False,
+			verbose=False, recalcBBoxes=True, allowVID=False, ignoreDecompileErrors=False,
+			recalcTimestamp=True, fontNumber=-1, lazy=False, quiet=False):
+		
+		"""The constructor can be called with a few different arguments.
+		When reading a font from disk, 'file' should be either a pathname
+		pointing to a file, or a readable file object. 
+		
+		If we're running on a Macintosh, 'res_name_or_index' may be an sfnt 
+		resource name or an sfnt resource index number or zero. The latter 
+		case will cause TTLib to autodetect whether the file is a flat file 
+		or a suitcase. (If it's a suitcase, only the first 'sfnt' resource
+		will be read!)
+		
+		The 'checkChecksums' argument is used to specify how sfnt
+		checksums are treated upon reading a file from disk:
+			0: don't check (default)
+			1: check, print warnings if a wrong checksum is found
+			2: check, raise an exception if a wrong checksum is found.
+		
+		The TTFont constructor can also be called without a 'file' 
+		argument: this is the way to create a new empty font. 
+		In this case you can optionally supply the 'sfntVersion' argument,
+		and a 'flavor' which can be None, or 'woff'.
+		
+		If the recalcBBoxes argument is false, a number of things will *not*
+		be recalculated upon save/compile:
+			1) glyph bounding boxes
+			2) maxp font bounding box
+			3) hhea min/max values
+		(1) is needed for certain kinds of CJK fonts (ask Werner Lemberg ;-).
+		Additionally, upon importing a TTX file, this option causes glyphs
+		to be compiled right away. This should reduce memory consumption 
+		greatly, and therefore should have some impact on the time needed 
+		to parse/compile large fonts.
+
+		If the recalcTimestamp argument is false, the modified timestamp in the
+		'head' table will *not* be recalculated upon save/compile.
+
+		If the allowVID argument is set to true, then virtual GID's are
+		supported. Asking for a glyph ID with a glyph name or GID that is not in
+		the font will return a virtual GID.   This is valid for GSUB and cmap
+		tables. For SING glyphlets, the cmap table is used to specify Unicode
+		values for virtual GI's used in GSUB/GPOS rules. If the gid N is requested
+		and does not exist in the font, or the glyphname has the form glyphN
+		and does not exist in the font, then N is used as the virtual GID.
+		Else, the first virtual GID is assigned as 0x1000 -1; for subsequent new
+		virtual GIDs, the next is one less than the previous.
+
+		If ignoreDecompileErrors is set to True, exceptions raised in
+		individual tables during decompilation will be ignored, falling
+		back to the DefaultTable implementation, which simply keeps the
+		binary data.
+
+		If lazy is set to True, many data structures are loaded lazily, upon
+		access only.
+		"""
+		
+		from fontTools.ttLib import sfnt
+		self.verbose = verbose
+		self.quiet = quiet
+		self.lazy = lazy
+		self.recalcBBoxes = recalcBBoxes
+		self.recalcTimestamp = recalcTimestamp
+		self.tables = {}
+		self.reader = None
+
+		# Permit the user to reference glyphs that are not in the font.
+		self.last_vid = 0xFFFE # Can't make it be 0xFFFF, as the world is full of unsigned short integer counters that get incremented after the last seen GID value.
+		self.reverseVIDDict = {}
+		self.VIDDict = {}
+		self.allowVID = allowVID
+		self.ignoreDecompileErrors = ignoreDecompileErrors
+
+		if not file:
+			self.sfntVersion = sfntVersion
+			self.flavor = flavor
+			self.flavorData = None
+			return
+		if not hasattr(file, "read"):
+			# assume file is a string
+			if haveMacSupport and res_name_or_index is not None:
+				# on the mac, we deal with sfnt resources as well as flat files
+				from . import macUtils
+				if res_name_or_index == 0:
+					if macUtils.getSFNTResIndices(file):
+						# get the first available sfnt font.
+						file = macUtils.SFNTResourceReader(file, 1)
+					else:
+						file = open(file, "rb")
+				else:
+					file = macUtils.SFNTResourceReader(file, res_name_or_index)
+			else:
+				file = open(file, "rb")
+		else:
+			pass # assume "file" is a readable file object
+		self.reader = sfnt.SFNTReader(file, checkChecksums, fontNumber=fontNumber)
+		self.sfntVersion = self.reader.sfntVersion
+		self.flavor = self.reader.flavor
+		self.flavorData = self.reader.flavorData
+	
+	def close(self):
+		"""If we still have a reader object, close it."""
+		if self.reader is not None:
+			self.reader.close()
+	
+	def save(self, file, makeSuitcase=False, reorderTables=True):
+		"""Save the font to disk. Similarly to the constructor, 
+		the 'file' argument can be either a pathname or a writable
+		file object.
+		
+		On the Mac, if makeSuitcase is true, a suitcase (resource fork)
+		file will be made instead of a flat .ttf file. 
+		"""
+		from fontTools.ttLib import sfnt
+		if not hasattr(file, "write"):
+			closeStream = 1
+			if os.name == "mac" and makeSuitcase:
+				from . import macUtils
+				file = macUtils.SFNTResourceWriter(file, self)
+			else:
+				file = open(file, "wb")
+				if os.name == "mac":
+					from fontTools.misc.macCreator import setMacCreatorAndType
+					setMacCreatorAndType(file.name, 'mdos', 'BINA')
+		else:
+			# assume "file" is a writable file object
+			closeStream = 0
+		
+		tags = list(self.keys())
+		if "GlyphOrder" in tags:
+			tags.remove("GlyphOrder")
+		numTables = len(tags)
+		if reorderTables:
+			import tempfile
+			tmp = tempfile.TemporaryFile(prefix="ttx-fonttools")
+		else:
+			tmp = file
+		writer = sfnt.SFNTWriter(tmp, numTables, self.sfntVersion, self.flavor, self.flavorData)
+		
+		done = []
+		for tag in tags:
+			self._writeTable(tag, writer, done)
+		
+		writer.close()
+
+		if reorderTables:
+			tmp.flush()
+			tmp.seek(0)
+			reorderFontTables(tmp, file)
+			tmp.close()
+
+		if closeStream:
+			file.close()
+	
+	def saveXML(self, fileOrPath, progress=None, quiet=False,
+			tables=None, skipTables=None, splitTables=False, disassembleInstructions=True,
+			bitmapGlyphDataFormat='raw'):
+		"""Export the font as TTX (an XML-based text file), or as a series of text
+		files when splitTables is true. In the latter case, the 'fileOrPath'
+		argument should be a path to a directory.
+		The 'tables' argument must either be false (dump all tables) or a
+		list of tables to dump. The 'skipTables' argument may be a list of tables
+		to skip, but only when the 'tables' argument is false.
+		"""
+		from fontTools import version
+		from fontTools.misc import xmlWriter
+		
+		self.disassembleInstructions = disassembleInstructions
+		self.bitmapGlyphDataFormat = bitmapGlyphDataFormat
+		if not tables:
+			tables = list(self.keys())
+			if "GlyphOrder" not in tables:
+				tables = ["GlyphOrder"] + tables
+			if skipTables:
+				for tag in skipTables:
+					if tag in tables:
+						tables.remove(tag)
+		numTables = len(tables)
+		if progress:
+			progress.set(0, numTables)
+			idlefunc = getattr(progress, "idle", None)
+		else:
+			idlefunc = None
+		
+		writer = xmlWriter.XMLWriter(fileOrPath, idlefunc=idlefunc)
+		writer.begintag("ttFont", sfntVersion=repr(self.sfntVersion)[1:-1], 
+				ttLibVersion=version)
+		writer.newline()
+		
+		if not splitTables:
+			writer.newline()
+		else:
+			# 'fileOrPath' must now be a path
+			path, ext = os.path.splitext(fileOrPath)
+			fileNameTemplate = path + ".%s" + ext
+		
+		for i in range(numTables):
+			if progress:
+				progress.set(i)
+			tag = tables[i]
+			if splitTables:
+				tablePath = fileNameTemplate % tagToIdentifier(tag)
+				tableWriter = xmlWriter.XMLWriter(tablePath, idlefunc=idlefunc)
+				tableWriter.begintag("ttFont", ttLibVersion=version)
+				tableWriter.newline()
+				tableWriter.newline()
+				writer.simpletag(tagToXML(tag), src=os.path.basename(tablePath))
+				writer.newline()
+			else:
+				tableWriter = writer
+			self._tableToXML(tableWriter, tag, progress, quiet)
+			if splitTables:
+				tableWriter.endtag("ttFont")
+				tableWriter.newline()
+				tableWriter.close()
+		if progress:
+			progress.set((i + 1))
+		writer.endtag("ttFont")
+		writer.newline()
+		writer.close()
+		if self.verbose:
+			debugmsg("Done dumping TTX")
+	
+	def _tableToXML(self, writer, tag, progress, quiet):
+		if tag in self:
+			table = self[tag]
+			report = "Dumping '%s' table..." % tag
+		else:
+			report = "No '%s' table found." % tag
+		if progress:
+			progress.setLabel(report)
+		elif self.verbose:
+			debugmsg(report)
+		else:
+			if not quiet:
+				print(report)
+		if tag not in self:
+			return
+		xmlTag = tagToXML(tag)
+		if hasattr(table, "ERROR"):
+			writer.begintag(xmlTag, ERROR="decompilation error")
+		else:
+			writer.begintag(xmlTag)
+		writer.newline()
+		if tag in ("glyf", "CFF "):
+			table.toXML(writer, self, progress)
+		else:
+			table.toXML(writer, self)
+		writer.endtag(xmlTag)
+		writer.newline()
+		writer.newline()
+	
+	def importXML(self, file, progress=None, quiet=False):
+		"""Import a TTX file (an XML-based text format), so as to recreate
+		a font object.
+		"""
+		if "maxp" in self and "post" in self:
+			# Make sure the glyph order is loaded, as it otherwise gets
+			# lost if the XML doesn't contain the glyph order, yet does
+			# contain the table which was originally used to extract the
+			# glyph names from (ie. 'post', 'cmap' or 'CFF ').
+			self.getGlyphOrder()
+
+		from fontTools.misc import xmlReader
+
+		reader = xmlReader.XMLReader(file, self, progress, quiet)
+		reader.read()
+	
+	def isLoaded(self, tag):
+		"""Return true if the table identified by 'tag' has been 
+		decompiled and loaded into memory."""
+		return tag in self.tables
+	
+	def has_key(self, tag):
+		if self.isLoaded(tag):
+			return True
+		elif self.reader and tag in self.reader:
+			return True
+		elif tag == "GlyphOrder":
+			return True
+		else:
+			return False
+	
+	__contains__ = has_key
+	
+	def keys(self):
+		keys = list(self.tables.keys())
+		if self.reader:
+			for key in list(self.reader.keys()):
+				if key not in keys:
+					keys.append(key)
+
+		if "GlyphOrder" in keys:
+			keys.remove("GlyphOrder")
+		keys = sortedTagList(keys)
+		return ["GlyphOrder"] + keys
+	
+	def __len__(self):
+		return len(list(self.keys()))
+	
+	def __getitem__(self, tag):
+		tag = Tag(tag)
+		try:
+			return self.tables[tag]
+		except KeyError:
+			if tag == "GlyphOrder":
+				table = GlyphOrder(tag)
+				self.tables[tag] = table
+				return table
+			if self.reader is not None:
+				import traceback
+				if self.verbose:
+					debugmsg("Reading '%s' table from disk" % tag)
+				data = self.reader[tag]
+				tableClass = getTableClass(tag)
+				table = tableClass(tag)
+				self.tables[tag] = table
+				if self.verbose:
+					debugmsg("Decompiling '%s' table" % tag)
+				try:
+					table.decompile(data, self)
+				except:
+					if not self.ignoreDecompileErrors:
+						raise
+					# fall back to DefaultTable, retaining the binary table data
+					print("An exception occurred during the decompilation of the '%s' table" % tag)
+					from .tables.DefaultTable import DefaultTable
+					file = StringIO()
+					traceback.print_exc(file=file)
+					table = DefaultTable(tag)
+					table.ERROR = file.getvalue()
+					self.tables[tag] = table
+					table.decompile(data, self)
+				return table
+			else:
+				raise KeyError("'%s' table not found" % tag)
+	
+	def __setitem__(self, tag, table):
+		self.tables[Tag(tag)] = table
+	
+	def __delitem__(self, tag):
+		if tag not in self:
+			raise KeyError("'%s' table not found" % tag)
+		if tag in self.tables:
+			del self.tables[tag]
+		if self.reader and tag in self.reader:
+			del self.reader[tag]
+
+	def get(self, tag, default=None):
+		try:
+			return self[tag]
+		except KeyError:
+			return default
+	
+	def setGlyphOrder(self, glyphOrder):
+		self.glyphOrder = glyphOrder
+	
+	def getGlyphOrder(self):
+		try:
+			return self.glyphOrder
+		except AttributeError:
+			pass
+		if 'CFF ' in self:
+			cff = self['CFF ']
+			self.glyphOrder = cff.getGlyphOrder()
+		elif 'post' in self:
+			# TrueType font
+			glyphOrder = self['post'].getGlyphOrder()
+			if glyphOrder is None:
+				#
+				# No names found in the 'post' table.
+				# Try to create glyph names from the unicode cmap (if available) 
+				# in combination with the Adobe Glyph List (AGL).
+				#
+				self._getGlyphNamesFromCmap()
+			else:
+				self.glyphOrder = glyphOrder
+		else:
+			self._getGlyphNamesFromCmap()
+		return self.glyphOrder
+	
+	def _getGlyphNamesFromCmap(self):
+		#
+		# This is rather convoluted, but then again, it's an interesting problem:
+		# - we need to use the unicode values found in the cmap table to
+		#   build glyph names (eg. because there is only a minimal post table,
+		#   or none at all).
+		# - but the cmap parser also needs glyph names to work with...
+		# So here's what we do:
+		# - make up glyph names based on glyphID
+		# - load a temporary cmap table based on those names
+		# - extract the unicode values, build the "real" glyph names
+		# - unload the temporary cmap table
+		#
+		if self.isLoaded("cmap"):
+			# Bootstrapping: we're getting called by the cmap parser
+			# itself. This means self.tables['cmap'] contains a partially
+			# loaded cmap, making it impossible to get at a unicode
+			# subtable here. We remove the partially loaded cmap and
+			# restore it later.
+			# This only happens if the cmap table is loaded before any
+			# other table that does f.getGlyphOrder()  or f.getGlyphName().
+			cmapLoading = self.tables['cmap']
+			del self.tables['cmap']
+		else:
+			cmapLoading = None
+		# Make up glyph names based on glyphID, which will be used by the
+		# temporary cmap and by the real cmap in case we don't find a unicode
+		# cmap.
+		numGlyphs = int(self['maxp'].numGlyphs)
+		glyphOrder = [None] * numGlyphs
+		glyphOrder[0] = ".notdef"
+		for i in range(1, numGlyphs):
+			glyphOrder[i] = "glyph%.5d" % i
+		# Set the glyph order, so the cmap parser has something
+		# to work with (so we don't get called recursively).
+		self.glyphOrder = glyphOrder
+		# Get a (new) temporary cmap (based on the just invented names)
+		tempcmap = self['cmap'].getcmap(3, 1)
+		if tempcmap is not None:
+			# we have a unicode cmap
+			from fontTools import agl
+			cmap = tempcmap.cmap
+			# create a reverse cmap dict
+			reversecmap = {}
+			for unicode, name in list(cmap.items()):
+				reversecmap[name] = unicode
+			allNames = {}
+			for i in range(numGlyphs):
+				tempName = glyphOrder[i]
+				if tempName in reversecmap:
+					unicode = reversecmap[tempName]
+					if unicode in agl.UV2AGL:
+						# get name from the Adobe Glyph List
+						glyphName = agl.UV2AGL[unicode]
+					else:
+						# create uni<CODE> name
+						glyphName = "uni%04X" % unicode
+					tempName = glyphName
+					n = 1
+					while tempName in allNames:
+						tempName = glyphName + "#" + repr(n)
+						n = n + 1
+					glyphOrder[i] = tempName
+					allNames[tempName] = 1
+			# Delete the temporary cmap table from the cache, so it can
+			# be parsed again with the right names.
+			del self.tables['cmap']
+		else:
+			pass # no unicode cmap available, stick with the invented names
+		self.glyphOrder = glyphOrder
+		if cmapLoading:
+			# restore partially loaded cmap, so it can continue loading
+			# using the proper names.
+			self.tables['cmap'] = cmapLoading
+	
+	def getGlyphNames(self):
+		"""Get a list of glyph names, sorted alphabetically."""
+		glyphNames = sorted(self.getGlyphOrder()[:])
+		return glyphNames
+	
+	def getGlyphNames2(self):
+		"""Get a list of glyph names, sorted alphabetically, 
+		but not case sensitive.
+		"""
+		from fontTools.misc import textTools
+		return textTools.caselessSort(self.getGlyphOrder())
+	
+	def getGlyphName(self, glyphID, requireReal=False):
+		try:
+			return self.getGlyphOrder()[glyphID]
+		except IndexError:
+			if requireReal or not self.allowVID:
+				# XXX The ??.W8.otf font that ships with OSX uses higher glyphIDs in
+				# the cmap table than there are glyphs. I don't think it's legal...
+				return "glyph%.5d" % glyphID
+			else:
+				# user intends virtual GID support 	
+				try:
+					glyphName = self.VIDDict[glyphID]
+				except KeyError:
+					glyphName  ="glyph%.5d" % glyphID
+					self.last_vid = min(glyphID, self.last_vid )
+					self.reverseVIDDict[glyphName] = glyphID
+					self.VIDDict[glyphID] = glyphName
+				return glyphName
+
+	def getGlyphID(self, glyphName, requireReal=False):
+		if not hasattr(self, "_reverseGlyphOrderDict"):
+			self._buildReverseGlyphOrderDict()
+		glyphOrder = self.getGlyphOrder()
+		d = self._reverseGlyphOrderDict
+		if glyphName not in d:
+			if glyphName in glyphOrder:
+				self._buildReverseGlyphOrderDict()
+				return self.getGlyphID(glyphName)
+			else:
+				if requireReal:
+					raise KeyError(glyphName)
+				elif not self.allowVID:
+					# Handle glyphXXX only
+					if glyphName[:5] == "glyph":
+						try:
+							return int(glyphName[5:])
+						except (NameError, ValueError):
+							raise KeyError(glyphName)
+				else:
+					# user intends virtual GID support 	
+					try:
+						glyphID = self.reverseVIDDict[glyphName]
+					except KeyError:
+						# if name is in glyphXXX format, use the specified name.
+						if glyphName[:5] == "glyph":
+							try:
+								glyphID = int(glyphName[5:])
+							except (NameError, ValueError):
+								glyphID = None
+						if glyphID is None:
+							glyphID = self.last_vid -1
+							self.last_vid = glyphID
+						self.reverseVIDDict[glyphName] = glyphID
+						self.VIDDict[glyphID] = glyphName
+					return glyphID
+
+		glyphID = d[glyphName]
+		if glyphName != glyphOrder[glyphID]:
+			self._buildReverseGlyphOrderDict()
+			return self.getGlyphID(glyphName)
+		return glyphID
+
+	def getReverseGlyphMap(self, rebuild=False):
+		if rebuild or not hasattr(self, "_reverseGlyphOrderDict"):
+			self._buildReverseGlyphOrderDict()
+		return self._reverseGlyphOrderDict
+
+	def _buildReverseGlyphOrderDict(self):
+		self._reverseGlyphOrderDict = d = {}
+		glyphOrder = self.getGlyphOrder()
+		for glyphID in range(len(glyphOrder)):
+			d[glyphOrder[glyphID]] = glyphID
+	
+	def _writeTable(self, tag, writer, done):
+		"""Internal helper function for self.save(). Keeps track of 
+		inter-table dependencies.
+		"""
+		if tag in done:
+			return
+		tableClass = getTableClass(tag)
+		for masterTable in tableClass.dependencies:
+			if masterTable not in done:
+				if masterTable in self:
+					self._writeTable(masterTable, writer, done)
+				else:
+					done.append(masterTable)
+		tabledata = self.getTableData(tag)
+		if self.verbose:
+			debugmsg("writing '%s' table to disk" % tag)
+		writer[tag] = tabledata
+		done.append(tag)
+	
+	def getTableData(self, tag):
+		"""Returns raw table data, whether compiled or directly read from disk.
+		"""
+		tag = Tag(tag)
+		if self.isLoaded(tag):
+			if self.verbose:
+				debugmsg("compiling '%s' table" % tag)
+			return self.tables[tag].compile(self)
+		elif self.reader and tag in self.reader:
+			if self.verbose:
+				debugmsg("Reading '%s' table from disk" % tag)
+			return self.reader[tag]
+		else:
+			raise KeyError(tag)
+	
+	def getGlyphSet(self, preferCFF=True):
+		"""Return a generic GlyphSet, which is a dict-like object
+		mapping glyph names to glyph objects. The returned glyph objects
+		have a .draw() method that supports the Pen protocol, and will
+		have an attribute named 'width', but only *after* the .draw() method
+		has been called.
+		
+		If the font is CFF-based, the outlines will be taken from the 'CFF '
+		table. Otherwise the outlines will be taken from the 'glyf' table.
+		If the font contains both a 'CFF ' and a 'glyf' table, you can use
+		the 'preferCFF' argument to specify which one should be taken.
+		"""
+		if preferCFF and "CFF " in self:
+			return list(self["CFF "].cff.values())[0].CharStrings
+		if "glyf" in self:
+			return _TTGlyphSet(self)
+		if "CFF " in self:
+			return list(self["CFF "].cff.values())[0].CharStrings
+		raise TTLibError("Font contains no outlines")
+
+
+class _TTGlyphSet(object):
+	
+	"""Generic dict-like GlyphSet class, meant as a TrueType counterpart
+	to CFF's CharString dict. See TTFont.getGlyphSet().
+	"""
+	
+	# This class is distinct from the 'glyf' table itself because we need
+	# access to the 'hmtx' table, which could cause a dependency problem
+	# there when reading from XML.
+	
+	def __init__(self, ttFont):
+		self._ttFont = ttFont
+	
+	def keys(self):
+		return list(self._ttFont["glyf"].keys())
+	
+	def has_key(self, glyphName):
+		return glyphName in self._ttFont["glyf"]
+	
+	__contains__ = has_key
+
+	def __getitem__(self, glyphName):
+		return _TTGlyph(glyphName, self._ttFont)
+
+	def get(self, glyphName, default=None):
+		try:
+			return self[glyphName]
+		except KeyError:
+			return default
+
+
class _TTGlyph(object):
	
	"""Wrapper for a TrueType glyph that supports the Pen protocol, meaning
	that it has a .draw() method that takes a pen object as its only
	argument. Additionally there is a 'width' attribute.
	"""
	
	def __init__(self, glyphName, ttFont):
		self._glyphName = glyphName
		self._ttFont = ttFont
		# Metrics come from the 'hmtx' table, not from 'glyf'.
		self.width, self.lsb = self._ttFont['hmtx'][self._glyphName]
	
	def draw(self, pen):
		"""Draw the glyph onto Pen. See fontTools.pens.basePen for details
		how that works.
		"""
		glyfTable = self._ttFont['glyf']
		glyph = glyfTable[self._glyphName]
		if hasattr(glyph, "xMin"):
			# Shift the outline so the hmtx left side bearing is honored.
			offset = self.lsb - glyph.xMin
		else:
			# Glyph has no bounding box (e.g. no outline data).
			offset = 0
		if glyph.isComposite():
			# NOTE(review): the offset computed above is not applied to
			# component placements — confirm this is intended.
			for component in glyph:
				glyphName, transform = component.getComponentInfo()
				pen.addComponent(glyphName, transform)
		else:
			# 'coordinates' behaves like a numpy array here: it supports
			# element-wise '+' with a 2-tuple and .tolist() on slices —
			# TODO confirm the exact type getCoordinates() returns.
			coordinates, endPts, flags = glyph.getCoordinates(glyfTable)
			if offset:
				coordinates = coordinates + (offset, 0)
			start = 0
			for end in endPts:
				end = end + 1
				contour = coordinates[start:end].tolist()
				cFlags = flags[start:end].tolist()
				start = end
				if 1 not in cFlags:
					# There is not a single on-curve point on the curve,
					# use pen.qCurveTo's special case by specifying None
					# as the on-curve point.
					contour.append(None)
					pen.qCurveTo(*contour)
				else:
					# Shuffle the points so that the contour is guaranteed
					# to *end* in an on-curve point, which we'll use for
					# the moveTo.
					firstOnCurve = cFlags.index(1) + 1
					contour = contour[firstOnCurve:] + contour[:firstOnCurve]
					cFlags = cFlags[firstOnCurve:] + cFlags[:firstOnCurve]
					pen.moveTo(contour[-1])
					while contour:
						nextOnCurve = cFlags.index(1) + 1
						if nextOnCurve == 1:
							# Next point is on-curve: a straight segment.
							pen.lineTo(contour[0])
						else:
							# Run of off-curve points ending on-curve.
							pen.qCurveTo(*contour[:nextOnCurve])
						contour = contour[nextOnCurve:]
						cFlags = cFlags[nextOnCurve:]
				pen.closePath()
+
+
class GlyphOrder(object):

	"""A pseudo table. The glyph order isn't in the font as a separate
	table, but it's nice to present it as such in the TTX format.
	"""

	def __init__(self, tag=None):
		# 'tag' is accepted only for signature compatibility with real
		# table constructors; it is ignored.
		pass

	def toXML(self, writer, ttFont):
		"""Write one <GlyphID> element per glyph, in font glyph order."""
		writer.comment("The 'id' attribute is only for humans; "
				"it is ignored when parsed.")
		writer.newline()
		# enumerate() instead of the range(len(...)) anti-idiom.
		for i, glyphName in enumerate(ttFont.getGlyphOrder()):
			writer.simpletag("GlyphID", id=i, name=glyphName)
			writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		"""Collect glyph names from <GlyphID> elements; the (shared,
		growing) list is registered with the font on first use."""
		if not hasattr(self, "glyphOrder"):
			self.glyphOrder = []
			ttFont.setGlyphOrder(self.glyphOrder)
		if name == "GlyphID":
			self.glyphOrder.append(attrs["name"])
+
+
def getTableModule(tag):
	"""Fetch the packer/unpacker module for a table.
	Return None when no module is found.
	"""
	from . import tables
	pyTag = tagToIdentifier(tag)
	try:
		__import__("fontTools.ttLib.tables." + pyTag)
	except ImportError as err:
		# If pyTag occurs in the ImportError message, the table is simply
		# not implemented and we return None. Otherwise some *other*
		# module failed to import; don't mask that error.
		if pyTag in str(err):
			return None
		# Bare re-raise preserves the original traceback.
		raise
	else:
		return getattr(tables, pyTag)
+
+
def getTableClass(tag):
	"""Fetch the packer/unpacker class for a table.

	Return the generic DefaultTable class when no table-specific module
	is found (the previous docstring claimed None was returned, which
	was never the case).
	"""
	module = getTableModule(tag)
	if module is None:
		from .tables.DefaultTable import DefaultTable
		return DefaultTable
	pyTag = tagToIdentifier(tag)
	return getattr(module, "table_" + pyTag)
+
+
def getClassTag(klass):
	"""Fetch the table tag for a class object."""
	name = klass.__name__
	# Table classes are named 'table_<mangled tag>'.
	assert name.startswith('table_')
	return identifierToTag(name[6:])
+
+
+
def newTable(tag):
	"""Return a new, empty instance of the table class for 'tag'."""
	return getTableClass(tag)(tag)
+
+
+def _escapechar(c):
+	"""Helper function for tagToIdentifier()"""
+	import re
+	if re.match("[a-z0-9]", c):
+		return "_" + c
+	elif re.match("[A-Z]", c):
+		return c + "_"
+	else:
+		return hex(byteord(c))[2:]
+
+
def tagToIdentifier(tag):
	"""Convert a table tag to a valid (but UGLY) python identifier,
	which doubles as a filename that is unique even on caseless file
	systems. Each character maps to two: lowercase letters get a
	leading underscore, uppercase letters a trailing one; trailing
	spaces are trimmed; other characters become two hex digits. An
	extra underscore is prepended when the result would start with a
	digit. Examples:
		'glyf' -> '_g_l_y_f'
		'cvt ' -> '_c_v_t'
		'OS/2' -> 'O_S_2f_2'
	"""
	import re
	tag = Tag(tag)
	if tag == "GlyphOrder":
		return tag
	assert len(tag) == 4, "tag should be 4 characters long"
	# Trim trailing spaces, but always keep at least one character.
	while len(tag) > 1 and tag[-1] == ' ':
		tag = tag[:-1]
	ident = "".join(_escapechar(c) for c in tag)
	if re.match("[0-9]", ident):
		ident = "_" + ident
	return ident
+
+
def identifierToTag(ident):
	"""The inverse of tagToIdentifier()."""
	if ident == "GlyphOrder":
		return ident
	# Drop the extra underscore prepended for digit-leading identifiers.
	if len(ident) % 2 and ident[0] == "_":
		ident = ident[1:]
	assert not (len(ident) % 2)
	chars = []
	for i in range(0, len(ident), 2):
		first, second = ident[i], ident[i + 1]
		if first == "_":
			chars.append(second)
		elif second == "_":
			chars.append(first)
		else:
			# Neither position is an underscore: a hex escape.
			chars.append(chr(int(ident[i:i + 2], 16)))
	tag = "".join(chars)
	# Pad with trailing spaces back up to four characters.
	return Tag(tag + (4 - len(tag)) * ' ')
+
+
def tagToXML(tag):
	"""Convert a TT tag to a valid XML element name. XML element names
	are case sensitive, so unlike tagToIdentifier() this can usually
	stay readable; only tags that aren't valid names get mangled.
	"""
	import re
	tag = Tag(tag)
	if tag == "OS/2":
		return "OS_2"
	if tag == "GlyphOrder":
		return tag
	if re.match("[A-Za-z_][A-Za-z_0-9]* *$", tag):
		return tag.strip()
	return tagToIdentifier(tag)
+
+
def xmlToTag(tag):
	"""The inverse of tagToXML()."""
	if tag == "OS_2":
		return Tag("OS/2")
	if len(tag) == 8:
		# Eight characters means the mangled two-chars-per-byte form.
		return identifierToTag(tag)
	return Tag(tag + " " * (4 - len(tag)))
+
+
def debugmsg(msg):
	"""Print 'msg' with a local-time (HH:MM:SS) stamp appended."""
	import time
	stamp = time.strftime("  (%H:%M:%S)", time.localtime(time.time()))
	print(msg + stamp)
+
+
# Table order as recommended in the OpenType specification 1.4
TTFTableOrder = ["head", "hhea", "maxp", "OS/2", "hmtx", "LTSH", "VDMX",
                  "hdmx", "cmap", "fpgm", "prep", "cvt ", "loca", "glyf",
                  "kern", "name", "post", "gasp", "PCLT"]

OTFTableOrder = ["head", "hhea", "maxp", "OS/2", "name", "cmap", "post",
                  "CFF "]

def sortedTagList(tagList, tableOrder=None):
	"""Return a sorted copy of tagList, sorted according to the OpenType
	specification, or according to a custom tableOrder. If given and not
	None, tableOrder needs to be a list of tag names.
	"""
	remaining = sorted(tagList)
	if tableOrder is None:
		if "DSIG" in remaining:
			# DSIG should be last (XXX spec reference?)
			remaining.remove("DSIG")
			remaining.append("DSIG")
		tableOrder = OTFTableOrder if "CFF " in remaining else TTFTableOrder
	# Known tags first, in the prescribed order…
	ordered = [tag for tag in tableOrder if tag in remaining]
	for tag in ordered:
		remaining.remove(tag)
	# …then everything else, keeping the sorted (DSIG-last) order.
	return ordered + remaining
+
+
def reorderFontTables(inFile, outFile, tableOrder=None, checkChecksums=False):
	"""Rewrite the font in 'inFile' to 'outFile' with its tables ordered
	as recommended by the OpenType specification 1.4 (or per the custom
	'tableOrder' list of tags, if given).
	"""
	from fontTools.ttLib.sfnt import SFNTReader, SFNTWriter
	reader = SFNTReader(inFile, checkChecksums=checkChecksums)
	writer = SFNTWriter(outFile, len(reader.tables), reader.sfntVersion,
			reader.flavor, reader.flavorData)
	for tag in sortedTagList(list(reader.keys()), tableOrder):
		writer[tag] = reader[tag]
	writer.close()
diff --git a/Lib/fontTools/ttLib/macUtils.py b/Lib/fontTools/ttLib/macUtils.py
new file mode 100644
index 0000000..d565528
--- /dev/null
+++ b/Lib/fontTools/ttLib/macUtils.py
@@ -0,0 +1,202 @@
+"""ttLib.macUtils.py -- Various Mac-specific stuff."""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+import sys
+import os
+if sys.platform not in ("mac", "darwin"):
+	raise ImportError("This module is Mac-only!")
+try:
+	from Carbon import Res
+except ImportError:
+	import Res
+
+
+
def MyOpenResFile(path):
	"""Open the resource map of 'path' read-only and return the
	resource file reference, trying the resource fork first and
	falling back to the data fork."""
	mode = 1  # read only
	try:
		resref = Res.FSOpenResFile(path, mode)
	except Res.Error:
		# try data fork
		resref = Res.FSOpenResourceFile(path, unicode(), mode)
	return resref
+
+
def getSFNTResIndices(path):
	"""Determine whether a file has a resource fork or not."""
	# Returns the list of 1-based 'sfnt' resource indices found in the
	# file's resource map; an empty list means there is no (readable)
	# resource map, i.e. a flat TTF/OTF file.
	try:
		resref = MyOpenResFile(path)
	except Res.Error:
		return []
	Res.UseResFile(resref)
	numSFNTs = Res.Count1Resources('sfnt')
	Res.CloseResFile(resref)
	return list(range(1, numSFNTs + 1))
+
+
def openTTFonts(path):
	"""Given a pathname, return a list of TTFont objects. A flat TTF/OTF
	file yields a single-font list; a Mac font suitcase yields one font
	object per 'sfnt' resource in the file.
	"""
	from fontTools import ttLib
	sfnts = getSFNTResIndices(path)
	if not sfnts:
		return [ttLib.TTFont(path)]
	fonts = [ttLib.TTFont(path, index) for index in sfnts]
	if not fonts:
		raise ttLib.TTLibError("no fonts found in file '%s'" % path)
	return fonts
+
+
class SFNTResourceReader(object):
	
	"""Simple (Mac-only) read-only file wrapper for 'sfnt' resources."""
	
	def __init__(self, path, res_name_or_index):
		# Load the 'sfnt' resource (by name if a string was passed, by
		# index otherwise) fully into an in-memory file, then close the
		# resource file again.
		resref = MyOpenResFile(path)
		Res.UseResFile(resref)
		if isinstance(res_name_or_index, basestring):
			res = Res.Get1NamedResource('sfnt', res_name_or_index)
		else:
			res = Res.Get1IndResource('sfnt', res_name_or_index)
		self.file = StringIO(res.data)
		Res.CloseResFile(resref)
		self.name = path
	
	def __getattr__(self, attr):
		# cheap inheritance: delegate everything else to the in-memory file
		return getattr(self.file, attr)
+
+
class SFNTResourceWriter(object):
	
	"""Simple (Mac-only) file wrapper for 'sfnt' resources."""
	
	def __init__(self, path, ttFont, res_id=None):
		"""Buffer sfnt data for 'ttFont' in memory; the resource file at
		'path' is (re)created and the data written out on close().
		"""
		self.file = StringIO()
		self.name = path
		self.closed = 0
		fullname = ttFont['name'].getName(4, 1, 0) # Full name, mac, default encoding
		familyname = ttFont['name'].getName(1, 1, 0) # Fam. name, mac, default encoding
		psname = ttFont['name'].getName(6, 1, 0) # PostScript name, etc.
		# Fixed: the original tested 'fullname' twice and never checked
		# 'familyname', so a missing family name slipped through here
		# and blew up later.
		if fullname is None or familyname is None or psname is None:
			from fontTools import ttLib
			raise ttLib.TTLibError("can't make 'sfnt' resource, no Macintosh 'name' table found")
		self.fullname = fullname.string
		self.familyname = familyname.string
		self.psname = psname.string
		if self.familyname != self.psname[:len(self.familyname)]:
			# ugh. force fam name to be the same as first part of ps name,
			# fondLib otherwise barfs.
			for i in range(min(len(self.psname), len(self.familyname))):
				if self.familyname[i] != self.psname[i]:
					break
			self.familyname = self.psname[:i]
		
		self.ttFont = ttFont
		self.res_id = res_id
		if os.path.exists(self.name):
			os.remove(self.name)
		# XXX datafork support
		Res.FSpCreateResFile(self.name, 'DMOV', 'FFIL', 0)
		self.resref = Res.FSOpenResFile(self.name, 3)  # exclusive read/write permission
	
	def close(self):
		"""Write the buffered 'sfnt' resource (plus a matching FOND) and
		close the resource file. Safe to call more than once."""
		if self.closed:
			return
		Res.UseResFile(self.resref)
		try:
			res = Res.Get1NamedResource('sfnt', self.fullname)
		except Res.Error:
			pass
		else:
			# replace any existing resource of the same name
			res.RemoveResource()
		res = Res.Resource(self.file.getvalue())
		if self.res_id is None:
			self.res_id = Res.Unique1ID('sfnt')
		res.AddResource('sfnt', self.res_id, self.fullname)
		res.ChangedResource()
		
		self.createFond()
		del self.ttFont
		Res.CloseResFile(self.resref)
		self.file.close()
		self.closed = 1
	
	def createFond(self):
		"""Create a FOND resource matching the 'sfnt', via fondLib."""
		fond_res = Res.Resource("")
		fond_res.AddResource('FOND', self.res_id, self.fullname)
		
		from fontTools import fondLib
		fond = fondLib.FontFamily(fond_res, "w")
		
		fond.ffFirstChar = 0
		fond.ffLastChar = 255
		fond.fondClass = 0
		fond.fontAssoc = [(0, 0, self.res_id)]
		fond.ffFlags = 20480	# XXX ???
		fond.ffIntl = (0, 0)
		fond.ffLeading = 0
		fond.ffProperty = (0, 0, 0, 0, 0, 0, 0, 0, 0)
		fond.ffVersion = 0
		fond.glyphEncoding = {}
		if self.familyname == self.psname:
			fond.styleIndices = (1,) * 48  # uh-oh, fondLib is too dumb.
		else:
			fond.styleIndices = (2,) * 48
		fond.styleStrings = []
		fond.boundingBoxes = None
		fond.ffFamID = self.res_id
		fond.changed = 1
		fond.glyphTableOffset = 0
		fond.styleMappingReserved = 0
		
		# calc:
		# NOTE(review): with 'division' imported from __future__ this is
		# true (float) division, so the derived metrics below are floats
		# — confirm fondLib accepts non-integer values here.
		scale = 4096 / self.ttFont['head'].unitsPerEm
		fond.ffAscent = scale * self.ttFont['hhea'].ascent
		fond.ffDescent = scale * self.ttFont['hhea'].descent
		fond.ffWidMax = scale * self.ttFont['hhea'].advanceWidthMax
		
		fond.ffFamilyName = self.familyname
		fond.psNames = {0: self.psname}
		
		fond.widthTables = {}
		fond.kernTables = {}
		cmap = self.ttFont['cmap'].getcmap(1, 0)
		if cmap:
			# invert the cmap: glyph name -> character code
			names = {}
			for code, name in cmap.cmap.items():
				names[name] = code
			if 'kern' in self.ttFont:
				kern = self.ttFont['kern'].getkern(0)
				if kern:
					fondkerning = []
					for (left, right), value in kern.kernTable.items():
						if left in names and right in names:
							fondkerning.append((names[left], names[right], scale * value))
					fondkerning.sort()
					fond.kernTables = {0: fondkerning}
			if 'hmtx' in self.ttFont:
				hmtx = self.ttFont['hmtx']
				fondwidths = [2048] * 256 + [0, 0]  # default width, + plus two zeros.
				for name, (width, lsb) in hmtx.metrics.items():
					if name in names:
						fondwidths[names[name]] = scale * width
				fond.widthTables = {0: fondwidths}
		fond.save()
	
	def __del__(self):
		if not self.closed:
			self.close()
	
	def __getattr__(self, attr):
		# cheap inheritance
		return getattr(self.file, attr)
+	
+
diff --git a/Lib/fontTools/ttLib/sfnt.py b/Lib/fontTools/ttLib/sfnt.py
new file mode 100644
index 0000000..c6bc93a
--- /dev/null
+++ b/Lib/fontTools/ttLib/sfnt.py
@@ -0,0 +1,492 @@
+"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format.
+
+Defines two public classes:
+	SFNTReader
+	SFNTWriter
+
+(Normally you don't have to use these classes explicitly; they are 
+used automatically by ttLib.TTFont.)
+
The reading and writing of sfnt files is separated in two distinct 
classes, since whenever the number of tables changes or whenever
a table's length changes you need to rewrite the whole file anyway.
+"""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+import struct
+
+
class SFNTReader(object):
	
	"""Low-level reader for sfnt-housed font data (TTF/OTF, TTC members
	and WOFF). The table directory is parsed on construction; raw table
	data is fetched lazily via __getitem__.
	"""
	
	def __init__(self, file, checkChecksums=1, fontNumber=-1):
		self.file = file
		self.checkChecksums = checkChecksums

		self.flavor = None
		self.flavorData = None
		self.DirectoryEntry = SFNTDirectoryEntry
		self.sfntVersion = self.file.read(4)
		self.file.seek(0)
		if self.sfntVersion == b"ttcf":
			# TrueType Collection: locate the requested member font.
			sstruct.unpack(ttcHeaderFormat, self.file.read(ttcHeaderSize), self)
			assert self.Version == 0x00010000 or self.Version == 0x00020000, "unrecognized TTC version 0x%08x" % self.Version
			if not 0 <= fontNumber < self.numFonts:
				from fontTools import ttLib
				raise ttLib.TTLibError("specify a font number between 0 and %d (inclusive)" % (self.numFonts - 1))
			offsetTable = struct.unpack(">%dL" % self.numFonts, self.file.read(self.numFonts * 4))
			if self.Version == 0x00020000:
				pass # ignoring version 2.0 signatures
			self.file.seek(offsetTable[fontNumber])
			sstruct.unpack(sfntDirectoryFormat, self.file.read(sfntDirectorySize), self)
		elif self.sfntVersion == b"wOFF":
			self.flavor = "woff"
			self.DirectoryEntry = WOFFDirectoryEntry
			sstruct.unpack(woffDirectoryFormat, self.file.read(woffDirectorySize), self)
		else:
			sstruct.unpack(sfntDirectoryFormat, self.file.read(sfntDirectorySize), self)
		self.sfntVersion = Tag(self.sfntVersion)

		if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"):
			from fontTools import ttLib
			raise ttLib.TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
		self.tables = {}
		for i in range(self.numTables):
			entry = self.DirectoryEntry()
			entry.fromFile(self.file)
			self.tables[Tag(entry.tag)] = entry

		# Load flavor data if any
		if self.flavor == "woff":
			self.flavorData = WOFFFlavorData(self)

	def has_key(self, tag):
		return tag in self.tables

	__contains__ = has_key

	def keys(self):
		return self.tables.keys()

	def __getitem__(self, tag):
		"""Fetch the raw table data."""
		entry = self.tables[Tag(tag)]
		data = entry.loadData (self.file)
		if self.checkChecksums:
			if tag == 'head':
				# Beh: we have to special-case the 'head' table.
				checksum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
			else:
				checksum = calcChecksum(data)
			if self.checkChecksums > 1:
				# Be obnoxious, and barf when it's wrong.
				# Fixed: this read 'entry.checksum', but the directory
				# field is 'checkSum', so the strict path raised
				# AttributeError instead of actually checking.
				assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag
			elif checksum != entry.checkSum:
				# Be friendly, and just print a warning.
				print("bad checksum for '%s' table" % tag)
		return data

	def __delitem__(self, tag):
		del self.tables[Tag(tag)]

	def close(self):
		self.file.close()
+
+
class SFNTWriter(object):
	
	"""Low-level writer for sfnt-housed font data (TTF/OTF and WOFF).

	Tables are written through __setitem__; close() writes the table
	directory and, for WOFF, the metadata/private blocks.
	"""
	
	def __init__(self, file, numTables, sfntVersion="\000\001\000\000",
		     flavor=None, flavorData=None):
		self.file = file
		self.numTables = numTables
		self.sfntVersion = Tag(sfntVersion)
		self.flavor = flavor
		self.flavorData = flavorData

		if self.flavor == "woff":
			self.directoryFormat = woffDirectoryFormat
			self.directorySize = woffDirectorySize
			self.DirectoryEntry = WOFFDirectoryEntry

			# Fixed: use bytes, consistent with the reassignment in
			# close(); the signature is packed into binary data.
			self.signature = b"wOFF"
		else:
			assert not self.flavor,  "Unknown flavor '%s'" % self.flavor
			self.directoryFormat = sfntDirectoryFormat
			self.directorySize = sfntDirectorySize
			self.DirectoryEntry = SFNTDirectoryEntry

			self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(numTables)

		self.nextTableOffset = self.directorySize + numTables * self.DirectoryEntry.formatSize
		# clear out directory area
		self.file.seek(self.nextTableOffset)
		# make sure we're actually where we want to be. (old cStringIO bug)
		self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
		self.tables = {}
	
	def __setitem__(self, tag, data):
		"""Write raw table data to disk."""
		reuse = False
		if tag in self.tables:
			# We've written this table to file before. If the length
			# of the data is still the same, we allow overwriting it.
			entry = self.tables[tag]
			# NOTE(review): DirectoryEntry defines a no-op encodeData,
			# so hasattr() on any subclass is always True and this
			# assert fires whenever a table is rewritten — confirm
			# whether the check should target the subclass only.
			assert not hasattr(entry.__class__, 'encodeData')
			if len(data) != entry.length:
				from fontTools import ttLib
				raise ttLib.TTLibError("cannot rewrite '%s' table: length does not match directory entry" % tag)
			reuse = True
		else:
			entry = self.DirectoryEntry()
			entry.tag = tag

		if tag == 'head':
			# The head checksum is computed with checkSumAdjustment
			# (bytes 8-11) zeroed; keep the data for writeMasterChecksum.
			entry.checkSum = calcChecksum(data[:8] + b'\0\0\0\0' + data[12:])
			self.headTable = data
			entry.uncompressed = True
		else:
			entry.checkSum = calcChecksum(data)

		entry.offset = self.nextTableOffset
		entry.saveData (self.file, data)

		if not reuse:
			self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3)

		# Add NUL bytes to pad the table data to a 4-byte boundary.
		# Don't depend on f.seek() as we need to add the padding even if no
		# subsequent write follows (seek is lazy), ie. after the final table
		# in the font.
		self.file.write(b'\0' * (self.nextTableOffset - self.file.tell()))
		assert self.nextTableOffset == self.file.tell()
		
		self.tables[tag] = entry
	
	def close(self):
		"""All tables must have been written to disk. Now write the
		directory.
		"""
		tables = sorted(self.tables.items())
		if len(tables) != self.numTables:
			from fontTools import ttLib
			raise ttLib.TTLibError("wrong number of tables; expected %d, found %d" % (self.numTables, len(tables)))

		if self.flavor == "woff":
			self.signature = b"wOFF"
			self.reserved = 0

			# uncompressed sfnt size: offset table + directory + tables
			self.totalSfntSize = 12
			self.totalSfntSize += 16 * len(tables)
			for tag, entry in tables:
				self.totalSfntSize += (entry.origLength + 3) & ~3

			data = self.flavorData if self.flavorData else WOFFFlavorData()
			if data.majorVersion is not None and data.minorVersion is not None:
				self.majorVersion = data.majorVersion
				self.minorVersion = data.minorVersion
			else:
				# fall back to the font revision from the head table
				if hasattr(self, 'headTable'):
					self.majorVersion, self.minorVersion = struct.unpack(">HH", self.headTable[4:8])
				else:
					self.majorVersion = self.minorVersion = 0
			if data.metaData:
				self.metaOrigLength = len(data.metaData)
				self.file.seek(0,2)
				self.metaOffset = self.file.tell()
				import zlib
				compressedMetaData = zlib.compress(data.metaData)
				self.metaLength = len(compressedMetaData)
				self.file.write(compressedMetaData)
			else:
				self.metaOffset = self.metaLength = self.metaOrigLength = 0
			if data.privData:
				self.file.seek(0,2)
				off = self.file.tell()
				paddedOff = (off + 3) & ~3
				# Fixed: pad with bytes, not str; writing str to a
				# binary stream raises TypeError on Python 3.
				self.file.write(b'\0' * (paddedOff - off))
				self.privOffset = self.file.tell()
				self.privLength = len(data.privData)
				self.file.write(data.privData)
			else:
				self.privOffset = self.privLength = 0

			self.file.seek(0,2)
			self.length = self.file.tell()

		else:
			assert not self.flavor,  "Unknown flavor '%s'" % self.flavor
			pass
		
		directory = sstruct.pack(self.directoryFormat, self)
		
		self.file.seek(self.directorySize)
		seenHead = 0
		for tag, entry in tables:
			if tag == "head":
				seenHead = 1
			directory = directory + entry.toString()
		if seenHead:
			self.writeMasterChecksum(directory)
		self.file.seek(0)
		self.file.write(directory)

	def _calcMasterChecksum(self, directory):
		# calculate checkSumAdjustment
		tags = list(self.tables.keys())
		checksums = []
		for i in range(len(tags)):
			checksums.append(self.tables[tags[i]].checkSum)

		# TODO(behdad) I'm fairly sure the checksum for woff is not working correctly.
		# Haven't debugged.
		if self.DirectoryEntry != SFNTDirectoryEntry:
			# Create a SFNT directory for checksum calculation purposes
			self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(self.numTables)
			directory = sstruct.pack(sfntDirectoryFormat, self)
			tables = sorted(self.tables.items())
			for tag, entry in tables:
				sfntEntry = SFNTDirectoryEntry()
				for item in ['tag', 'checkSum', 'offset', 'length']:
					setattr(sfntEntry, item, getattr(entry, item))
				directory = directory + sfntEntry.toString()

		directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
		assert directory_end == len(directory)

		checksums.append(calcChecksum(directory))
		checksum = sum(checksums) & 0xffffffff
		# BiboAfba!
		checksumadjustment = (0xB1B0AFBA - checksum) & 0xffffffff
		return checksumadjustment

	def writeMasterChecksum(self, directory):
		checksumadjustment = self._calcMasterChecksum(directory)
		# write the checksum to the file
		self.file.seek(self.tables['head'].offset + 8)
		self.file.write(struct.pack(">L", checksumadjustment))
+
+
+# -- sfnt directory helpers and cruft
+
+ttcHeaderFormat = """
+		> # big endian
+		TTCTag:                  4s # "ttcf"
+		Version:                 L  # 0x00010000 or 0x00020000
+		numFonts:                L  # number of fonts
+		# OffsetTable[numFonts]: L  # array with offsets from beginning of file
+		# ulDsigTag:             L  # version 2.0 only
+		# ulDsigLength:          L  # version 2.0 only
+		# ulDsigOffset:          L  # version 2.0 only
+"""
+
+ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat)
+
+sfntDirectoryFormat = """
+		> # big endian
+		sfntVersion:    4s
+		numTables:      H    # number of tables
+		searchRange:    H    # (max2 <= numTables)*16
+		entrySelector:  H    # log2(max2 <= numTables)
+		rangeShift:     H    # numTables*16-searchRange
+"""
+
+sfntDirectorySize = sstruct.calcsize(sfntDirectoryFormat)
+
+sfntDirectoryEntryFormat = """
+		> # big endian
+		tag:            4s
+		checkSum:       L
+		offset:         L
+		length:         L
+"""
+
+sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat)
+
+woffDirectoryFormat = """
+		> # big endian
+		signature:      4s   # "wOFF"
+		sfntVersion:    4s
+		length:         L    # total woff file size
+		numTables:      H    # number of tables
+		reserved:       H    # set to 0
+		totalSfntSize:  L    # uncompressed size
+		majorVersion:   H    # major version of WOFF file
+		minorVersion:   H    # minor version of WOFF file
+		metaOffset:     L    # offset to metadata block
+		metaLength:     L    # length of compressed metadata
+		metaOrigLength: L    # length of uncompressed metadata
+		privOffset:     L    # offset to private data block
+		privLength:     L    # length of private data block
+"""
+
+woffDirectorySize = sstruct.calcsize(woffDirectoryFormat)
+
+woffDirectoryEntryFormat = """
+		> # big endian
+		tag:            4s
+		offset:         L
+		length:         L    # compressed length
+		origLength:     L    # original length
+		checkSum:       L    # original checksum
+"""
+
+woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
+
+
class DirectoryEntry(object):
	
	"""Base class for a single table-directory entry (tag, offset,
	length, checksum). Subclasses supply 'format'/'formatSize' and may
	override encodeData()/decodeData() for on-disk transformations.
	"""
	
	def __init__(self):
		self.uncompressed = False # if True, always embed entry raw
	
	def fromFile(self, file):
		# Parse this entry from the current position in 'file'.
		sstruct.unpack(self.format, file.read(self.formatSize), self)
	
	def fromString(self, str):
		# Parse this entry from a binary string.
		sstruct.unpack(self.format, str, self)
	
	def toString(self):
		# Serialize this entry back to its binary directory form.
		return sstruct.pack(self.format, self)
	
	def __repr__(self):
		if hasattr(self, "tag"):
			return "<%s '%s' at %x>" % (self.__class__.__name__, self.tag, id(self))
		else:
			return "<%s at %x>" % (self.__class__.__name__, id(self))

	def loadData(self, file):
		# Read this table's raw bytes from 'file'. NOTE: since the base
		# class defines a no-op decodeData below, the hasattr check is
		# always true and decodeData is effectively always called.
		file.seek(self.offset)
		data = file.read(self.length)
		assert len(data) == self.length
		if hasattr(self.__class__, 'decodeData'):
			data = self.decodeData(data)
		return data

	def saveData(self, file, data):
		# Encode and write 'data' at self.offset, updating self.length.
		# Same caveat as loadData: the hasattr check is always true.
		if hasattr(self.__class__, 'encodeData'):
			data = self.encodeData(data)
		self.length = len(data)
		file.seek(self.offset)
		file.write(data)

	def decodeData(self, rawData):
		# Default: table data is stored as-is.
		return rawData

	def encodeData(self, data):
		# Default: table data is stored as-is.
		return data
+
class SFNTDirectoryEntry(DirectoryEntry):

	# Directory entry for plain sfnt (TTF/OTF) files; table data is
	# stored uncompressed, so the base-class no-op codecs apply.
	format = sfntDirectoryEntryFormat
	formatSize = sfntDirectoryEntrySize
+
class WOFFDirectoryEntry(DirectoryEntry):

	# Directory entry for WOFF files: table data may be zlib-compressed,
	# with 'length' the stored size and 'origLength' the original size.
	format = woffDirectoryEntryFormat
	formatSize = woffDirectoryEntrySize
	zlibCompressionLevel = 6

	def decodeData(self, rawData):
		import zlib
		if self.length == self.origLength:
			# equal lengths mean the table was stored uncompressed
			return rawData
		assert self.length < self.origLength
		data = zlib.decompress(rawData)
		assert len(data) == self.origLength
		return data

	def encodeData(self, data):
		import zlib
		self.origLength = len(data)
		rawData = data
		if not self.uncompressed:
			compressed = zlib.compress(data, self.zlibCompressionLevel)
			# only keep the compressed form when it actually saves space
			if len(compressed) < self.origLength:
				rawData = compressed
		self.length = len(rawData)
		return rawData
+
class WOFFFlavorData(object):
	# Derive from object for consistency with the rest of this module;
	# the original old-style 'class WOFFFlavorData():' made this the
	# only classic class under Python 2.

	"""Holds the WOFF-specific data that has no place in the sfnt
	structure: the WOFF version numbers and the optional metadata and
	private blocks. Populated from an SFNTReader when one is given.
	"""

	Flavor = 'woff'

	def __init__(self, reader=None):
		self.majorVersion = None
		self.minorVersion = None
		self.metaData = None
		self.privData = None
		if reader:
			self.majorVersion = reader.majorVersion
			self.minorVersion = reader.minorVersion
			if reader.metaLength:
				reader.file.seek(reader.metaOffset)
				rawData = reader.file.read(reader.metaLength)
				assert len(rawData) == reader.metaLength
				import zlib
				# metadata is stored zlib-compressed; keep it decompressed
				data = zlib.decompress(rawData)
				assert len(data) == reader.metaOrigLength
				self.metaData = data
			if reader.privLength:
				reader.file.seek(reader.privOffset)
				data = reader.file.read(reader.privLength)
				assert len(data) == reader.privLength
				self.privData = data
+
+
def calcChecksum(data):
	"""Calculate the sfnt checksum for an arbitrary block of data:
	the 32-bit sum of the data interpreted as big-endian unsigned longs.

	If the data length is not a multiple of four, it is padded with
	null bytes before summing. (The docstring previously advertised a
	'start' argument that the function never had; and the old doctests
	used Python 2 print statements, which this module's own
	print_function import makes invalid.)

		>>> calcChecksum(b"abcd")
		1633837924
	"""
	remainder = len(data) % 4
	if remainder:
		data += b"\0" * (4 - remainder)
	value = 0
	blockSize = 4096
	assert blockSize % 4 == 0
	for i in range(0, len(data), blockSize):
		block = data[i:i+blockSize]
		longs = struct.unpack(">%dL" % (len(block) // 4), block)
		# keep the running sum within 32 bits
		value = (value + sum(longs)) & 0xffffffff
	return value
+
+
def maxPowerOfTwo(x):
	"""Return the highest exponent of two, so that
	(2 ** exponent) <= x.  Returns 0 for x <= 1.
	"""
	# int.bit_length() gives the same result as the original
	# shift-and-count loop, without Python-level iteration — and unlike
	# the loop it also terminates for negative input.
	return max(x.bit_length() - 1, 0)


def getSearchRange(n):
	"""Calculate searchRange, entrySelector, rangeShift for the
	sfnt directory. 'n' is the number of tables.
	"""
	# These values are redundant (derivable from numTables) but the
	# sfnt header format requires them to be stored.
	exponent = maxPowerOfTwo(n)
	searchRange = (2 ** exponent) * 16
	entrySelector = exponent
	rangeShift = n * 16 - searchRange
	return searchRange, entrySelector, rangeShift
+
+
if __name__ == "__main__":
    # Run this module's doctests (e.g. the calcChecksum examples).
    import doctest
    doctest.testmod()
diff --git a/Lib/fontTools/ttLib/standardGlyphOrder.py b/Lib/fontTools/ttLib/standardGlyphOrder.py
new file mode 100644
index 0000000..fdb666a
--- /dev/null
+++ b/Lib/fontTools/ttLib/standardGlyphOrder.py
@@ -0,0 +1,271 @@
+#
+# 'post' table formats 1.0 and 2.0 rely on this list of "standard"
+# glyphs.
+#
+# My list is correct according to the Apple documentation for the 'post'
+# table: http://developer.apple.com/fonts/TTRefMan/RM06/Chap6post.html
+# (However, it seems that TTFdump (from MS) and FontLab disagree, at
+# least with respect to the last glyph, which they list as 'dslash'
+# instead of 'dcroat'.)
+#
+
+standardGlyphOrder = [
+	".notdef",              # 0 
+	".null",                # 1 
+	"nonmarkingreturn",     # 2 
+	"space",                # 3 
+	"exclam",               # 4 
+	"quotedbl",             # 5 
+	"numbersign",           # 6 
+	"dollar",               # 7 
+	"percent",              # 8 
+	"ampersand",            # 9 
+	"quotesingle",          # 10 
+	"parenleft",            # 11 
+	"parenright",           # 12 
+	"asterisk",             # 13 
+	"plus",                 # 14 
+	"comma",                # 15 
+	"hyphen",               # 16 
+	"period",               # 17 
+	"slash",                # 18 
+	"zero",                 # 19 
+	"one",                  # 20 
+	"two",                  # 21 
+	"three",                # 22 
+	"four",                 # 23 
+	"five",                 # 24 
+	"six",                  # 25 
+	"seven",                # 26 
+	"eight",                # 27 
+	"nine",                 # 28 
+	"colon",                # 29 
+	"semicolon",            # 30 
+	"less",                 # 31 
+	"equal",                # 32 
+	"greater",              # 33 
+	"question",             # 34 
+	"at",                   # 35 
+	"A",                    # 36 
+	"B",                    # 37 
+	"C",                    # 38 
+	"D",                    # 39 
+	"E",                    # 40 
+	"F",                    # 41 
+	"G",                    # 42 
+	"H",                    # 43 
+	"I",                    # 44 
+	"J",                    # 45 
+	"K",                    # 46 
+	"L",                    # 47 
+	"M",                    # 48 
+	"N",                    # 49 
+	"O",                    # 50 
+	"P",                    # 51 
+	"Q",                    # 52 
+	"R",                    # 53 
+	"S",                    # 54 
+	"T",                    # 55 
+	"U",                    # 56 
+	"V",                    # 57 
+	"W",                    # 58 
+	"X",                    # 59 
+	"Y",                    # 60 
+	"Z",                    # 61 
+	"bracketleft",          # 62 
+	"backslash",            # 63 
+	"bracketright",         # 64 
+	"asciicircum",          # 65 
+	"underscore",           # 66 
+	"grave",                # 67 
+	"a",                    # 68 
+	"b",                    # 69 
+	"c",                    # 70 
+	"d",                    # 71 
+	"e",                    # 72 
+	"f",                    # 73 
+	"g",                    # 74 
+	"h",                    # 75 
+	"i",                    # 76 
+	"j",                    # 77 
+	"k",                    # 78 
+	"l",                    # 79 
+	"m",                    # 80 
+	"n",                    # 81 
+	"o",                    # 82 
+	"p",                    # 83 
+	"q",                    # 84 
+	"r",                    # 85 
+	"s",                    # 86 
+	"t",                    # 87 
+	"u",                    # 88 
+	"v",                    # 89 
+	"w",                    # 90 
+	"x",                    # 91 
+	"y",                    # 92 
+	"z",                    # 93 
+	"braceleft",            # 94 
+	"bar",                  # 95 
+	"braceright",           # 96 
+	"asciitilde",           # 97 
+	"Adieresis",            # 98 
+	"Aring",                # 99 
+	"Ccedilla",             # 100 
+	"Eacute",               # 101 
+	"Ntilde",               # 102 
+	"Odieresis",            # 103 
+	"Udieresis",            # 104 
+	"aacute",               # 105 
+	"agrave",               # 106 
+	"acircumflex",          # 107 
+	"adieresis",            # 108 
+	"atilde",               # 109 
+	"aring",                # 110 
+	"ccedilla",             # 111 
+	"eacute",               # 112 
+	"egrave",               # 113 
+	"ecircumflex",          # 114 
+	"edieresis",            # 115 
+	"iacute",               # 116 
+	"igrave",               # 117 
+	"icircumflex",          # 118 
+	"idieresis",            # 119 
+	"ntilde",               # 120 
+	"oacute",               # 121 
+	"ograve",               # 122 
+	"ocircumflex",          # 123 
+	"odieresis",            # 124 
+	"otilde",               # 125 
+	"uacute",               # 126 
+	"ugrave",               # 127 
+	"ucircumflex",          # 128 
+	"udieresis",            # 129 
+	"dagger",               # 130 
+	"degree",               # 131 
+	"cent",                 # 132 
+	"sterling",             # 133 
+	"section",              # 134 
+	"bullet",               # 135 
+	"paragraph",            # 136 
+	"germandbls",           # 137 
+	"registered",           # 138 
+	"copyright",            # 139 
+	"trademark",            # 140 
+	"acute",                # 141 
+	"dieresis",             # 142 
+	"notequal",             # 143 
+	"AE",                   # 144 
+	"Oslash",               # 145 
+	"infinity",             # 146 
+	"plusminus",            # 147 
+	"lessequal",            # 148 
+	"greaterequal",         # 149 
+	"yen",                  # 150 
+	"mu",                   # 151 
+	"partialdiff",          # 152 
+	"summation",            # 153 
+	"product",              # 154 
+	"pi",                   # 155 
+	"integral",             # 156 
+	"ordfeminine",          # 157 
+	"ordmasculine",         # 158 
+	"Omega",                # 159 
+	"ae",                   # 160 
+	"oslash",               # 161 
+	"questiondown",         # 162 
+	"exclamdown",           # 163 
+	"logicalnot",           # 164 
+	"radical",              # 165 
+	"florin",               # 166 
+	"approxequal",          # 167 
+	"Delta",                # 168 
+	"guillemotleft",        # 169 
+	"guillemotright",       # 170 
+	"ellipsis",             # 171 
+	"nonbreakingspace",     # 172 
+	"Agrave",               # 173 
+	"Atilde",               # 174 
+	"Otilde",               # 175 
+	"OE",                   # 176 
+	"oe",                   # 177 
+	"endash",               # 178 
+	"emdash",               # 179 
+	"quotedblleft",         # 180 
+	"quotedblright",        # 181 
+	"quoteleft",            # 182 
+	"quoteright",           # 183 
+	"divide",               # 184 
+	"lozenge",              # 185 
+	"ydieresis",            # 186 
+	"Ydieresis",            # 187 
+	"fraction",             # 188
+	"currency",             # 189
+	"guilsinglleft",        # 190 
+	"guilsinglright",       # 191 
+	"fi",                   # 192 
+	"fl",                   # 193 
+	"daggerdbl",            # 194 
+	"periodcentered",       # 195 
+	"quotesinglbase",       # 196 
+	"quotedblbase",         # 197 
+	"perthousand",          # 198 
+	"Acircumflex",          # 199 
+	"Ecircumflex",          # 200 
+	"Aacute",               # 201 
+	"Edieresis",            # 202 
+	"Egrave",               # 203 
+	"Iacute",               # 204 
+	"Icircumflex",          # 205 
+	"Idieresis",            # 206 
+	"Igrave",               # 207 
+	"Oacute",               # 208 
+	"Ocircumflex",          # 209 
+	"apple",                # 210 
+	"Ograve",               # 211 
+	"Uacute",               # 212 
+	"Ucircumflex",          # 213 
+	"Ugrave",               # 214 
+	"dotlessi",             # 215 
+	"circumflex",           # 216 
+	"tilde",                # 217 
+	"macron",               # 218 
+	"breve",                # 219 
+	"dotaccent",            # 220 
+	"ring",                 # 221 
+	"cedilla",              # 222 
+	"hungarumlaut",         # 223 
+	"ogonek",               # 224 
+	"caron",                # 225 
+	"Lslash",               # 226 
+	"lslash",               # 227 
+	"Scaron",               # 228 
+	"scaron",               # 229 
+	"Zcaron",               # 230 
+	"zcaron",               # 231 
+	"brokenbar",            # 232 
+	"Eth",                  # 233 
+	"eth",                  # 234 
+	"Yacute",               # 235 
+	"yacute",               # 236 
+	"Thorn",                # 237 
+	"thorn",                # 238 
+	"minus",                # 239 
+	"multiply",             # 240 
+	"onesuperior",          # 241 
+	"twosuperior",          # 242 
+	"threesuperior",        # 243 
+	"onehalf",              # 244 
+	"onequarter",           # 245 
+	"threequarters",        # 246 
+	"franc",                # 247 
+	"Gbreve",               # 248 
+	"gbreve",               # 249 
+	"Idotaccent",           # 250 
+	"Scedilla",             # 251 
+	"scedilla",             # 252 
+	"Cacute",               # 253 
+	"cacute",               # 254 
+	"Ccaron",               # 255 
+	"ccaron",               # 256 
+	"dcroat"                # 257 
+]
diff --git a/Lib/fontTools/ttLib/tables/B_A_S_E_.py b/Lib/fontTools/ttLib/tables/B_A_S_E_.py
new file mode 100644
index 0000000..9551e2c
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/B_A_S_E_.py
@@ -0,0 +1,5 @@
+from .otBase import BaseTTXConverter
+
+
+class table_B_A_S_E_(BaseTTXConverter):
+	# OpenType BASE (baseline) table.  All (de)compilation and XML
+	# conversion is driven by the generic otBase converter machinery,
+	# so no table-specific code is needed here.
+	pass
diff --git a/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py b/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
new file mode 100644
index 0000000..dfe86f2
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/BitmapGlyphMetrics.py
@@ -0,0 +1,58 @@
+# Since bitmap glyph metrics are shared between EBLC and EBDT
+# this class gets its own python file.
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+
+
+bigGlyphMetricsFormat = """
+  > # big endian
+  height:       B
+  width:        B
+  horiBearingX: b
+  horiBearingY: b
+  horiAdvance:  B
+  vertBearingX: b
+  vertBearingY: b
+  vertAdvance:  B
+"""
+
+smallGlyphMetricsFormat = """
+  > # big endian
+  height:   B
+  width:    B
+  BearingX: b
+  BearingY: b
+  Advance:  B
+"""
+
+class BitmapGlyphMetrics(object):
+
+	def toXML(self, writer, ttFont):
+		writer.begintag(self.__class__.__name__)
+		writer.newline()
+		for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
+			writer.simpletag(metricName, value=getattr(self, metricName))
+			writer.newline()
+		writer.endtag(self.__class__.__name__)
+		writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attrs, content = element
+			# Make sure this is a metric that is needed by GlyphMetrics.
+			if name in metricNames:
+				vars(self)[name] = safeEval(attrs['value'])
+			else:
+				print("Warning: unknown name '%s' being ignored in %s." % name, self.__class__.__name__)
+
+
+class BigGlyphMetrics(BitmapGlyphMetrics):
+	# Metrics carrying both horizontal and vertical fields
+	# (horiBearingX/Y, horiAdvance, vertBearingX/Y, vertAdvance).
+	binaryFormat = bigGlyphMetricsFormat
+	
+class SmallGlyphMetrics(BitmapGlyphMetrics):
+	# Metrics for a single direction only (BearingX/BearingY/Advance).
+	binaryFormat = smallGlyphMetricsFormat
diff --git a/Lib/fontTools/ttLib/tables/C_B_D_T_.py b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
new file mode 100644
index 0000000..ba02910
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/C_B_D_T_.py
@@ -0,0 +1,93 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Matt Fontaine
+
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from . import E_B_D_T_
+from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+from .E_B_D_T_ import BitmapGlyph, BitmapPlusSmallMetricsMixin, BitmapPlusBigMetricsMixin
+import struct
+
+class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
+	# Color bitmap data table: reuses the EBDT implementation and only
+	# overrides what differs for color bitmaps.
+
+	# Change the data locator table being referenced.
+	locatorName = 'CBLC'
+
+	# Modify the format class accessor for color bitmap use.
+	def getImageFormatClass(self, imageFormat):
+		# Fall back to the CBDT-only formats (17-19, defined below) when
+		# the shared EBDT format table does not know the requested format.
+		try:
+			return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
+		except KeyError:
+			return cbdt_bitmap_classes[imageFormat]
+
+# Helper method for removing export features not supported by color bitmaps.
+# Write data in the parent class will default to raw if an option is unsupported.
+def _removeUnsupportedForColor(dataFunctions):
+	dataFunctions = dict(dataFunctions)
+	del dataFunctions['row']
+	return dataFunctions
+
+class ColorBitmapGlyph(BitmapGlyph):
+	# Shared base for CBDT glyph formats: color glyph images use the
+	# '.png' file extension when exported, and the monochrome row-wise
+	# XML dump mode ('row') is removed from the supported options.
+
+	fileExtension = '.png'
+	xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
+
+class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
+
+	def decompile(self):
+		self.metrics = SmallGlyphMetrics()
+		dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+		(dataLen,) = struct.unpack(">L", data[:4])
+		data = data[4:]
+
+		# For the image data cut it to the size specified by dataLen.
+		assert dataLen <= len(data), "Data overun in format 17"
+		self.imageData = data[:dataLen]
+
+	def compile(self, ttFont):
+		dataList = []
+		dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
+		dataList.append(struct.pack(">L", len(self.imageData)))
+		dataList.append(self.imageData)
+		return bytesjoin(dataList)
+
+class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
+
+	def decompile(self):
+		self.metrics = BigGlyphMetrics()
+		dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+		(dataLen,) = struct.unpack(">L", data[:4])
+		data = data[4:]
+
+		# For the image data cut it to the size specified by dataLen.
+		assert dataLen <= len(data), "Data overun in format 18"
+		self.imageData = data[:dataLen]
+
+	def compile(self, ttFont):
+		dataList = []
+		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+		dataList.append(struct.pack(">L", len(self.imageData)))
+		dataList.append(self.imageData)
+		return bytesjoin(dataList)
+
+class cbdt_bitmap_format_19(ColorBitmapGlyph):
+
+	def decompile(self):
+		(dataLen,) = struct.unpack(">L", self.data[:4])
+		data = self.data[4:]
+
+		assert dataLen <= len(data), "Data overun in format 19"
+		self.imageData = data[:dataLen]
+
+	def compile(self, ttFont):
+		return struct.pack(">L", len(self.imageData)) + self.imageData
+
+# Dict for CBDT extended formats.
+cbdt_bitmap_classes = {
+	17: cbdt_bitmap_format_17,
+	18: cbdt_bitmap_format_18,
+	19: cbdt_bitmap_format_19,
+}
diff --git a/Lib/fontTools/ttLib/tables/C_B_L_C_.py b/Lib/fontTools/ttLib/tables/C_B_L_C_.py
new file mode 100644
index 0000000..2f78571
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/C_B_L_C_.py
@@ -0,0 +1,9 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Matt Fontaine
+
+from . import E_B_L_C_
+
+class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
+	# Color bitmap locator table: structurally identical to EBLC, but
+	# it must be compiled after (and refers into) the CBDT table.
+
+	dependencies = ['CBDT']
diff --git a/Lib/fontTools/ttLib/tables/C_F_F_.py b/Lib/fontTools/ttLib/tables/C_F_F_.py
new file mode 100644
index 0000000..8167fdf
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/C_F_F_.py
@@ -0,0 +1,48 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools import cffLib
+from . import DefaultTable
+
+
+class table_C_F_F_(DefaultTable.DefaultTable):
+	# Wrapper around cffLib.CFFFontSet; only single-font CFF tables
+	# are supported (asserted in decompile).
+	
+	def __init__(self, tag):
+		DefaultTable.DefaultTable.__init__(self, tag)
+		self.cff = cffLib.CFFFontSet()
+		# Guards the one-shot getGlyphOrder() protocol below.
+		self._gaveGlyphOrder = False
+	
+	def decompile(self, data, otFont):
+		self.cff.decompile(StringIO(data), otFont)
+		assert len(self.cff) == 1, "can't deal with multi-font CFF tables."
+	
+	def compile(self, otFont):
+		f = StringIO()
+		self.cff.compile(f, otFont)
+		return f.getvalue()
+	
+	def haveGlyphNames(self):
+		# A ROS entry in the top dict marks a CID-keyed font, which has
+		# no meaningful glyph names.
+		if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
+			return False  # CID-keyed font
+		else:
+			return True
+	
+	def getGlyphOrder(self):
+		# May legally be called only once per table instance; a second
+		# call is an API misuse and raises.
+		if self._gaveGlyphOrder:
+			from fontTools import ttLib
+			raise ttLib.TTLibError("illegal use of getGlyphOrder()")
+		self._gaveGlyphOrder = True
+		return self.cff[self.cff.fontNames[0]].getGlyphOrder()
+	
+	def setGlyphOrder(self, glyphOrder):
+		# Intentionally a no-op; the CFF font keeps its own order.
+		pass
+		# XXX
+		#self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)
+	
+	def toXML(self, writer, otFont, progress=None):
+		self.cff.toXML(writer, progress)
+	
+	def fromXML(self, name, attrs, content, otFont):
+		# Called once per top-level XML element; create the font set
+		# lazily so repeated calls accumulate into the same object.
+		if not hasattr(self, "cff"):
+			self.cff = cffLib.CFFFontSet()
+		self.cff.fromXML(name, attrs, content)
+
diff --git a/Lib/fontTools/ttLib/tables/C_O_L_R_.py b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
new file mode 100644
index 0000000..139de3c
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/C_O_L_R_.py
@@ -0,0 +1,161 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import operator
+import struct
+
+
+class table_C_O_L_R_(DefaultTable.DefaultTable):
+
+	""" This table is structured so that you can treat it like a dictionary keyed by glyph name.
+	ttFont['COLR'][<glyphName>] will return the color layers for any glyph
+	ttFont['COLR'][<glyphName>] = <value> will set the color layers for any glyph.
+	"""
+
+	def decompile(self, data, ttFont):
+		self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
+		self.version, numBaseGlyphRecords, offsetBaseGlyphRecord, offsetLayerRecord, numLayerRecords = struct.unpack(">HHLLH", data[:14])
+		assert (self.version == 0), "Version of COLR table is higher than I know how to handle"
+		glyphOrder = ttFont.getGlyphOrder()
+		gids = []
+		layerLists = []
+		glyphPos = offsetBaseGlyphRecord
+		for i in range(numBaseGlyphRecords):
+			gid, firstLayerIndex, numLayers = struct.unpack(">HHH", data[glyphPos:glyphPos+6])
+			glyphPos += 6
+			gids.append(gid)
+			assert (firstLayerIndex + numLayers <= numLayerRecords)
+			layerPos = offsetLayerRecord + firstLayerIndex * 4
+			layers = []
+			for j in range(numLayers):
+				layerGid, colorID = struct.unpack(">HH", data[layerPos:layerPos+4])
+				try:
+					layerName = glyphOrder[layerGid]
+				except IndexError:
+					layerName = self.getGlyphName(layerGid)
+				layerPos += 4
+				layers.append(LayerRecord(layerName, colorID))
+			layerLists.append(layers)
+
+		self.ColorLayers = colorLayerLists = {}
+		try:
+			names = list(map(operator.getitem, [glyphOrder]*numBaseGlyphRecords, gids))
+		except IndexError:
+			getGlyphName = self.getGlyphName
+			names = list(map(getGlyphName, gids ))
+
+		list(map(operator.setitem, [colorLayerLists]*numBaseGlyphRecords, names, layerLists))
+
+
+	def compile(self, ttFont):
+		ordered = []
+		ttFont.getReverseGlyphMap(rebuild=True)
+		glyphNames = self.ColorLayers.keys()
+		for glyphName in glyphNames:
+			try:
+				gid = ttFont.getGlyphID(glyphName)
+			except:
+				assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
+			ordered.append([gid, glyphName, self.ColorLayers[glyphName]])
+		ordered.sort()
+
+		glyphMap = []
+		layerMap = []
+		for (gid, glyphName, layers) in ordered:
+			glyphMap.append(struct.pack(">HHH", gid, len(layerMap), len(layers)))
+			for layer in layers:
+				layerMap.append(struct.pack(">HH", ttFont.getGlyphID(layer.name), layer.colorID))
+
+		dataList = [struct.pack(">HHLLH", self.version, len(glyphMap), 14, 14+6*len(glyphMap), len(layerMap))]
+		dataList.extend(glyphMap)
+		dataList.extend(layerMap)
+		data = bytesjoin(dataList)
+		return data
+
+	def toXML(self, writer, ttFont):
+		writer.simpletag("version", value=self.version)
+		writer.newline()
+		ordered = []
+		glyphNames = self.ColorLayers.keys()
+		for glyphName in glyphNames:
+			try:
+				gid = ttFont.getGlyphID(glyphName)
+			except:
+				assert 0, "COLR table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
+			ordered.append([gid, glyphName, self.ColorLayers[glyphName]])
+		ordered.sort()
+		for entry in ordered:
+			writer.begintag("ColorGlyph", name=entry[1])
+			writer.newline()
+			for layer in entry[2]:
+				layer.toXML(writer, ttFont)
+			writer.endtag("ColorGlyph")
+			writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		if not hasattr(self, "ColorLayers"):
+			self.ColorLayers = {}
+		self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
+		if name == "ColorGlyph":
+			glyphName = attrs["name"]
+			for element in content:
+				if isinstance(element, basestring):
+					continue
+			layers = []
+			for element in content:
+				if isinstance(element, basestring):
+					continue
+				layer = LayerRecord()
+				layer.fromXML(element[0], element[1], element[2], ttFont)
+				layers.append (layer)
+			operator.setitem(self, glyphName, layers)
+		elif "value" in attrs:
+			setattr(self, name, safeEval(attrs["value"]))
+
+
+	def __getitem__(self, glyphSelector):
+		if isinstance(glyphSelector, int):
+			# its a gid, convert to glyph name
+			glyphSelector = self.getGlyphName(glyphSelector)
+
+		if glyphSelector not in self.ColorLayers:
+			return None
+			
+		return self.ColorLayers[glyphSelector]
+
+	def __setitem__(self, glyphSelector, value):
+		if isinstance(glyphSelector, int):
+			# its a gid, convert to glyph name
+			glyphSelector = self.getGlyphName(glyphSelector)
+
+		if  value:
+			self.ColorLayers[glyphSelector] = value
+		elif glyphSelector in self.ColorLayers:
+			del self.ColorLayers[glyphSelector]
+
+	def __delitem__(self, glyphSelector):
+		del self.ColorLayers[glyphSelector]
+
+class LayerRecord(object):
+	# One layer of a color glyph: a glyph 'name' rendered with the
+	# palette entry selected by 'colorID'.
+
+	def __init__(self, name = None, colorID = None):
+		self.name = name
+		self.colorID = colorID
+
+	def toXML(self, writer, ttFont):
+		writer.simpletag("layer", name=self.name, colorID=self.colorID)
+		writer.newline()
+
+	def fromXML(self, eltname, attrs, content, ttFont):
+		for (name, value) in attrs.items():
+			if name == "name":
+				# A numeric name is a glyph ID; convert it to a name.
+				if isinstance(value, int):
+					value = ttFont.getGlyphName(value)
+				setattr(self, name, value)
+			else:
+				setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/C_P_A_L_.py b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
new file mode 100644
index 0000000..7c2721a
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/C_P_A_L_.py
@@ -0,0 +1,100 @@
+# Copyright 2013 Google, Inc. All Rights Reserved.
+#
+# Google Author(s): Behdad Esfahbod
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import struct
+
+
+class table_C_P_A_L_(DefaultTable.DefaultTable):
+
+	def decompile(self, data, ttFont):
+		self.version, self.numPaletteEntries, numPalettes, numColorRecords, goffsetFirstColorRecord = struct.unpack(">HHHHL", data[:12])
+		assert (self.version == 0), "Version of COLR table is higher than I know how to handle"
+		self.palettes = []
+		pos = 12
+		for i in range(numPalettes):
+			startIndex = struct.unpack(">H", data[pos:pos+2])[0]
+			assert (startIndex + self.numPaletteEntries <= numColorRecords)
+			pos += 2
+			palette = []
+			ppos = goffsetFirstColorRecord + startIndex * 4
+			for j in range(self.numPaletteEntries):
+				palette.append( Color(*struct.unpack(">BBBB", data[ppos:ppos+4])) )
+				ppos += 4
+			self.palettes.append(palette)
+
+	def compile(self, ttFont):
+		dataList = [struct.pack(">HHHHL", self.version, self.numPaletteEntries, len(self.palettes), self.numPaletteEntries * len(self.palettes), 12+2*len(self.palettes))]
+		for i in range(len(self.palettes)):
+			dataList.append(struct.pack(">H", i*self.numPaletteEntries))
+		for palette in self.palettes:
+			assert(len(palette) == self.numPaletteEntries)
+			for color in palette:
+				dataList.append(struct.pack(">BBBB", color.blue,color.green,color.red,color.alpha))
+		data = bytesjoin(dataList)
+		return data
+
+	def toXML(self, writer, ttFont):
+		writer.simpletag("version", value=self.version)
+		writer.newline()
+		writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
+		writer.newline()
+		for index, palette in enumerate(self.palettes):
+			writer.begintag("palette", index=index)
+			writer.newline()
+			assert(len(palette) == self.numPaletteEntries)
+			for cindex, color in enumerate(palette):
+				color.toXML(writer, ttFont, cindex)
+			writer.endtag("palette")
+			writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		if not hasattr(self, "palettes"):
+			self.palettes = []
+		if name == "palette":
+			palette = []
+			for element in content:
+				if isinstance(element, basestring):
+					continue
+			palette = []
+			for element in content:
+				if isinstance(element, basestring):
+					continue
+				color = Color()
+				color.fromXML(element[0], element[1], element[2], ttFont)
+				palette.append (color)
+			self.palettes.append(palette)
+		elif "value" in attrs:
+			value =  safeEval(attrs["value"])
+			setattr(self, name, value)
+
+class Color(object):
+	# One CPAL color record.  Components are kept in table storage order
+	# (blue, green, red, alpha), each 0-255.
+
+	def __init__(self, blue=None, green=None, red=None, alpha=None):
+		self.blue  = blue
+		self.green = green
+		self.red   = red
+		self.alpha = alpha
+
+	def hex(self):
+		# Rendered as #RRGGBBAA -- RGBA order, unlike the storage order.
+		return "#%02X%02X%02X%02X" % (self.red, self.green, self.blue, self.alpha)
+
+	def __repr__(self):
+		return self.hex()
+
+	def toXML(self, writer, ttFont, index=None):
+		writer.simpletag("color", value=self.hex(), index=index)
+		writer.newline()
+
+	def fromXML(self, eltname, attrs, content, ttFont):
+		value = attrs["value"]
+		if value[0] == '#':
+			value = value[1:]
+		self.red   = int(value[0:2], 16)
+		self.green = int(value[2:4], 16)
+		self.blue  = int(value[4:6], 16)
+		# Alpha is optional in the XML form; default to fully opaque.
+		self.alpha = int(value[6:8], 16) if len (value) >= 8 else 0xFF
diff --git a/Lib/fontTools/ttLib/tables/D_S_I_G_.py b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
new file mode 100644
index 0000000..7794bda
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/D_S_I_G_.py
@@ -0,0 +1,131 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from fontTools.misc import sstruct
+from . import DefaultTable
+import base64
+
+DSIG_HeaderFormat = """
+	> # big endian
+	ulVersion:      L
+	usNumSigs:      H
+	usFlag:         H
+"""
+# followed by an array of usNumSigs DSIG_Signature records
+DSIG_SignatureFormat = """
+	> # big endian
+	ulFormat:       L
+	ulLength:       L # length includes DSIG_SignatureBlock header
+	ulOffset:       L
+"""
+# followed by an array of usNumSigs DSIG_SignatureBlock records,
+# each followed immediately by the pkcs7 bytes
+DSIG_SignatureBlockFormat = """
+	> # big endian
+	usReserved1:    H
+	usReserved2:    H
+	cbSignature:    l # length of following raw pkcs7 data
+"""
+
+#
+# NOTE
+# the DSIG table format allows for SignatureBlocks residing
+# anywhere in the table and possibly in a different order as
+# listed in the array after the first table header
+#
+# this implementation does not keep track of any gaps and/or data
+# before or after the actual signature blocks while decompiling,
+# and puts them in the same physical order as listed in the header
+# on compilation with no padding whatsoever.
+#
+
+class table_D_S_I_G_(DefaultTable.DefaultTable):
+	
+	def decompile(self, data, ttFont):
+		dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
+		assert self.ulVersion == 1, "DSIG ulVersion must be 1"
+		assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
+		self.signatureRecords = sigrecs = []
+		for n in range(self.usNumSigs):
+			sigrec, newData = sstruct.unpack2(DSIG_SignatureFormat, newData, SignatureRecord())
+			assert sigrec.ulFormat == 1, "DSIG signature record #%d ulFormat must be 1" % n
+			sigrecs.append(sigrec)
+		for sigrec in sigrecs:
+			dummy, newData = sstruct.unpack2(DSIG_SignatureBlockFormat, data[sigrec.ulOffset:], sigrec)
+			assert sigrec.usReserved1 == 0, "DSIG signature record #%d usReserverd1 must be 0" % n
+			assert sigrec.usReserved2 == 0, "DSIG signature record #%d usReserverd2 must be 0" % n
+			sigrec.pkcs7 = newData[:sigrec.cbSignature]
+	
+	def compile(self, ttFont):
+		packed = sstruct.pack(DSIG_HeaderFormat, self)
+		headers = [packed]
+		offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
+		data = []
+		for sigrec in self.signatureRecords:
+			# first pack signature block
+			sigrec.cbSignature = len(sigrec.pkcs7)
+			packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
+			data.append(packed)
+			# update redundant length field
+			sigrec.ulLength = len(packed)
+			# update running table offset
+			sigrec.ulOffset = offset
+			headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
+			offset += sigrec.ulLength
+		if offset % 2:
+			# Pad to even bytes
+			data.append(b'\0')
+		return bytesjoin(headers+data)
+	
+	def toXML(self, xmlWriter, ttFont):
+		xmlWriter.comment("note that the Digital Signature will be invalid after recompilation!")
+		xmlWriter.newline()
+		xmlWriter.simpletag("tableHeader", version=self.ulVersion, numSigs=self.usNumSigs, flag="0x%X" % self.usFlag)
+		for sigrec in self.signatureRecords:
+			xmlWriter.newline()
+			sigrec.toXML(xmlWriter, ttFont)
+		xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if name == "tableHeader":
+			self.signatureRecords = []
+			self.ulVersion = safeEval(attrs["version"])
+			self.usNumSigs = safeEval(attrs["numSigs"])
+			self.usFlag = safeEval(attrs["flag"])
+			return
+		if name == "SignatureRecord":
+			sigrec = SignatureRecord()
+			sigrec.fromXML(name, attrs, content, ttFont)
+			self.signatureRecords.append(sigrec)
+
+pem_spam = lambda l, spam = {
+	"-----BEGIN PKCS7-----": True, "-----END PKCS7-----": True, "": True
+}: not spam.get(l.strip())
+
+def b64encode(b):
+	s = base64.b64encode(b)
+	# Line-break at 76 chars.
+	items = []
+	while s:
+		items.append(tostr(s[:76]))
+		items.append('\n')
+		s = s[76:]
+	return strjoin(items)
+
+class SignatureRecord(object):
+	# Holds one header entry (ulFormat/ulLength/ulOffset) together with
+	# the signature block fields and the raw pkcs7 payload bytes.
+	def __repr__(self):
+		return "<%s: %s>" % (self.__class__.__name__, self.__dict__)
+	
+	def toXML(self, writer, ttFont):
+		writer.begintag(self.__class__.__name__, format=self.ulFormat)
+		writer.newline()
+		writer.write_noindent("-----BEGIN PKCS7-----\n")
+		writer.write_noindent(b64encode(self.pkcs7))
+		writer.write_noindent("-----END PKCS7-----\n")
+		writer.endtag(self.__class__.__name__)
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		self.ulFormat = safeEval(attrs["format"])
+		# reserved1/reserved2 are optional in the XML; default to 0.
+		self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
+		self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
+		# Strip the PEM armour lines, then decode the base64 payload.
+		self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
diff --git a/Lib/fontTools/ttLib/tables/DefaultTable.py b/Lib/fontTools/ttLib/tables/DefaultTable.py
new file mode 100644
index 0000000..3a6886c
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/DefaultTable.py
@@ -0,0 +1,47 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.ttLib import getClassTag
+
+class DefaultTable(object):
+	
+	dependencies = []
+	
+	def __init__(self, tag=None):
+		if tag is None:
+			tag = getClassTag(self.__class__)
+		self.tableTag = Tag(tag)
+	
+	def decompile(self, data, ttFont):
+		self.data = data
+	
+	def compile(self, ttFont):
+		return self.data
+	
+	def toXML(self, writer, ttFont, progress=None):
+		if hasattr(self, "ERROR"):
+			writer.comment("An error occurred during the decompilation of this table")
+			writer.newline()
+			writer.comment(self.ERROR)
+			writer.newline()
+		writer.begintag("hexdata")
+		writer.newline()
+		writer.dumphex(self.compile(ttFont))
+		writer.endtag("hexdata")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		from fontTools.misc.textTools import readHex
+		from fontTools import ttLib
+		if name != "hexdata":
+			raise ttLib.TTLibError("can't handle '%s' element" % name)
+		self.decompile(readHex(content), ttFont)
+	
+	def __repr__(self):
+		return "<'%s' table at %x>" % (self.tableTag, id(self))
+	
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+		return self.__dict__ == other.__dict__
diff --git a/Lib/fontTools/ttLib/tables/E_B_D_T_.py b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
new file mode 100644
index 0000000..f119291
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/E_B_D_T_.py
@@ -0,0 +1,754 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval, readHex, hexStr, deHexStr
+from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+from . import DefaultTable
+import itertools
+import os
+import struct
+
+ebdtTableVersionFormat = """
+	> # big endian
+	version: 16.16F
+"""
+
+ebdtComponentFormat = """
+	> # big endian
+	glyphCode: H
+	xOffset:   b
+	yOffset:   b
+"""
+
+class table_E_B_D_T_(DefaultTable.DefaultTable):
+
+	# Keep a reference to the name of the data locator table.
+	locatorName = 'EBLC'
+
+	# This method can be overridden in subclasses to support new formats
+	# without changing the other implementation. Also can be used as a
+	# convenience method for converting a font file to an alternative format.
+	def getImageFormatClass(self, imageFormat):
+		return ebdt_bitmap_classes[imageFormat]
+
+	def decompile(self, data, ttFont):
+		# Get the version but don't advance the slice.
+		# Most of the lookup for this table is done relative
+		# to the beginning so slice by the offsets provided
+		# in the EBLC table.
+		sstruct.unpack2(ebdtTableVersionFormat, data, self)
+
+		# Keep a dict of glyphs that have been seen so they aren't remade.
+		# This dict maps intervals of data to the BitmapGlyph.
+		glyphDict = {}
+
+		# Pull out the EBLC table and loop through glyphs.
+		# A strike is a concept that spans both tables.
+		# The actual bitmap data is stored in the EBDT.
+		locator = ttFont[self.__class__.locatorName]
+		self.strikeData = []
+		for curStrike in locator.strikes:
+			bitmapGlyphDict = {}
+			self.strikeData.append(bitmapGlyphDict)
+			for indexSubTable in curStrike.indexSubTables:
+				dataIter = zip(indexSubTable.names, indexSubTable.locations)
+				for curName, curLoc in dataIter:
+					# Don't create duplicate data entries for the same glyphs.
+					# Instead just use the structures that already exist if they exist.
+					if curLoc in glyphDict:
+						curGlyph = glyphDict[curLoc]
+					else:
+						curGlyphData = data[slice(*curLoc)]
+						imageFormatClass = self.getImageFormatClass(indexSubTable.imageFormat)
+						curGlyph = imageFormatClass(curGlyphData, ttFont)
+						glyphDict[curLoc] = curGlyph
+					bitmapGlyphDict[curName] = curGlyph
+
+	def compile(self, ttFont):
+
+		dataList = []
+		dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
+		dataSize = len(dataList[0])
+
+		# Keep a dict of glyphs that have been seen so they aren't remade.
+		# This dict maps the id of the BitmapGlyph to the interval
+		# in the data.
+		glyphDict = {}
+
+		# Go through the bitmap glyph data. Just in case the data for a glyph
+		# changed the size metrics should be recalculated. There are a variety
+		# of formats and they get stored in the EBLC table. That is why
+		# recalculation is deferred to the EblcIndexSubTable class and just
+		# pass what is known about bitmap glyphs from this particular table.
+		locator = ttFont[self.__class__.locatorName]
+		for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
+			for curIndexSubTable in curStrike.indexSubTables:
+				dataLocations = []
+				for curName in curIndexSubTable.names:
+					# Handle the data placement based on seeing the glyph or not.
+					# Just save a reference to the location if the glyph has already
+					# been saved in compile. This code assumes that glyphs will only
+					# be referenced multiple times from indexFormat5. By luck the
+					# code may still work when referencing poorly ordered fonts with
+					# duplicate references. If there is a font that is unlucky the
+					# respective compile methods for the indexSubTables will fail
+					# their assertions. All fonts seem to follow this assumption.
+					# More complicated packing may be needed if a counter-font exists.
+					glyph = curGlyphDict[curName]
+					objectId = id(glyph)
+					if objectId not in glyphDict:
+						data = glyph.compile(ttFont)
+						data = curIndexSubTable.padBitmapData(data)
+						startByte = dataSize
+						dataSize += len(data)
+						endByte = dataSize
+						dataList.append(data)
+						dataLoc = (startByte, endByte)
+						glyphDict[objectId] = dataLoc
+					else:
+						dataLoc = glyphDict[objectId]
+					dataLocations.append(dataLoc)
+				# Just use the new data locations in the indexSubTable.
+				# The respective compile implementations will take care
+				# of any of the problems in the conversion that may arise.
+				curIndexSubTable.locations = dataLocations
+
+		return bytesjoin(dataList)
+
+	def toXML(self, writer, ttFont):
+		# When exporting to XML if one of the data export formats
+		# requires metrics then those metrics may be in the locator.
+		# In this case populate the bitmaps with "export metrics".
+		if ttFont.bitmapGlyphDataFormat in ('row', 'bitwise'):
+			locator = ttFont[self.__class__.locatorName]
+			for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
+				for curIndexSubTable in curStrike.indexSubTables:
+					for curName in curIndexSubTable.names:
+						glyph = curGlyphDict[curName]
+						# I'm not sure which metrics have priority here.
+						# For now if both metrics exist go with glyph metrics.
+						if hasattr(glyph, 'metrics'):
+							glyph.exportMetrics = glyph.metrics
+						else:
+							glyph.exportMetrics = curIndexSubTable.metrics
+						glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth
+
+		writer.simpletag("header", [('version', self.version)])
+		writer.newline()
+		locator = ttFont[self.__class__.locatorName]
+		for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
+			writer.begintag('strikedata', [('index', strikeIndex)])
+			writer.newline()
+			for curName, curBitmap in bitmapGlyphDict.items():
+				curBitmap.toXML(strikeIndex, curName, writer, ttFont)
+			writer.endtag('strikedata')
+			writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		if name == 'header':
+			self.version = safeEval(attrs['version'])
+		elif name == 'strikedata':
+			if not hasattr(self, 'strikeData'):
+				self.strikeData = []
+			strikeIndex = safeEval(attrs['index'])
+
+			bitmapGlyphDict = {}
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				if name[4:].startswith(_bitmapGlyphSubclassPrefix[4:]):
+					imageFormat =	safeEval(name[len(_bitmapGlyphSubclassPrefix):])
+					glyphName = attrs['name']
+					imageFormatClass = self.getImageFormatClass(imageFormat)
+					curGlyph = imageFormatClass(None, None)
+					curGlyph.fromXML(name, attrs, content, ttFont)
+					assert glyphName not in bitmapGlyphDict, "Duplicate glyphs with the same name '%s' in the same strike." % glyphName
+					bitmapGlyphDict[glyphName] = curGlyph
+				else:
+					print("Warning: %s being ignored by %s", name, self.__class__.__name__)
+
+			# Grow the strike data array to the appropriate size. The XML
+			# format allows the strike index value to be out of order.
+			if strikeIndex >= len(self.strikeData):
+				self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
+			assert self.strikeData[strikeIndex] is None, "Duplicate strike EBDT indices."
+			self.strikeData[strikeIndex] = bitmapGlyphDict
+
+class EbdtComponent(object):
+
+	def toXML(self, writer, ttFont):
+		writer.begintag('ebdtComponent', [('name', self.name)])
+		writer.newline()
+		for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
+			writer.simpletag(componentName, value=getattr(self, componentName))
+			writer.newline()
+		writer.endtag('ebdtComponent')
+		writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		self.name = attrs['name']
+		componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attrs, content = element
+			if name in componentNames:
+				vars(self)[name] = safeEval(attrs['value'])
+			else:
+				print("Warning: unknown name '%s' being ignored by EbdtComponent." % name)
+
+# Helper functions for dealing with binary.
+
+def _data2binary(data, numBits):
+	binaryList = []
+	for curByte in data:
+		value = byteord(curByte)
+		numBitsCut = min(8, numBits)
+		for i in range(numBitsCut):
+			if value & 0x1:
+				binaryList.append('1')
+			else:
+				binaryList.append('0')
+			value = value >> 1
+		numBits -= numBitsCut
+	return strjoin(binaryList)
+
+def _binary2data(binary):
+	byteList = []
+	for bitLoc in range(0, len(binary), 8):
+		byteString = binary[bitLoc:bitLoc+8]
+		curByte = 0
+		for curBit in reversed(byteString):
+			curByte = curByte << 1
+			if curBit == '1':
+				curByte |= 1
+		byteList.append(bytechr(curByte))
+	return bytesjoin(byteList)
+
+def _memoize(f):
+	class memodict(dict):
+		def __missing__(self, key):
+			ret = f(key)
+			if len(key) == 1:
+				self[key] = ret
+			return ret
+	return memodict().__getitem__
+
+# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
+# Bitmap data per byte is in the order that binary is written on the page
+# with the least significant bit as far right as possible. This is the
+# opposite of what makes sense algorithmically and hence this function.
+@_memoize
+def _reverseBytes(data):
+	if len(data) != 1:
+		return bytesjoin(map(_reverseBytes, data))
+	byte = byteord(data)
+	result = 0
+	for i in range(8):
+		result = result << 1
+		result |= byte & 1
+		byte = byte >> 1
+	return bytechr(result)
+
+# This section of code is for reading and writing image data to/from XML.
+
+def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
+	writer.begintag('rawimagedata')
+	writer.newline()
+	writer.dumphex(bitmapObject.imageData)
+	writer.endtag('rawimagedata')
+	writer.newline()
+
+def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
+	bitmapObject.imageData = readHex(content)
+
+def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
+	metrics = bitmapObject.exportMetrics
+	del bitmapObject.exportMetrics
+	bitDepth = bitmapObject.exportBitDepth
+	del bitmapObject.exportBitDepth
+
+	writer.begintag('rowimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height)
+	writer.newline()
+	for curRow in range(metrics.height):
+		rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
+		writer.simpletag('row', value=hexStr(rowData))
+		writer.newline()
+	writer.endtag('rowimagedata')
+	writer.newline()
+
+def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
+	bitDepth = safeEval(attrs['bitDepth'])
+	metrics = SmallGlyphMetrics()
+	metrics.width = safeEval(attrs['width'])
+	metrics.height = safeEval(attrs['height'])
+
+	dataRows = []
+	for element in content:
+		if not isinstance(element, tuple):
+			continue
+		name, attr, content = element
+		# Chop off 'imagedata' from the tag to get just the option.
+		if name == 'row':
+			dataRows.append(deHexStr(attr['value']))
+	bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
+
+def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
+	metrics = bitmapObject.exportMetrics
+	del bitmapObject.exportMetrics
+	bitDepth = bitmapObject.exportBitDepth
+	del bitmapObject.exportBitDepth
+
+	# A dict for mapping binary to more readable/artistic ASCII characters.
+	binaryConv = {'0':'.', '1':'@'}
+
+	writer.begintag('bitwiseimagedata', bitDepth=bitDepth, width=metrics.width, height=metrics.height)
+	writer.newline()
+	for curRow in range(metrics.height):
+		rowData = bitmapObject.getRow(curRow, bitDepth=1, metrics=metrics, reverseBytes=True)
+		rowData = _data2binary(rowData, metrics.width)
+		# Make the output a readable ASCII art form.
+		rowData = strjoin(map(binaryConv.get, rowData))
+		writer.simpletag('row', value=rowData)
+		writer.newline()
+	writer.endtag('bitwiseimagedata')
+	writer.newline()
+
+def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
+	bitDepth = safeEval(attrs['bitDepth'])
+	metrics = SmallGlyphMetrics()
+	metrics.width = safeEval(attrs['width'])
+	metrics.height = safeEval(attrs['height'])
+
+	# A dict for mapping from ASCII to binary. All characters are considered
+	# a '1' except space, period and '0' which maps to '0'.
+	binaryConv = {' ':'0', '.':'0', '0':'0'}
+
+	dataRows = []
+	for element in content:
+		if not isinstance(element, tuple):
+			continue
+		name, attr, content = element
+		if name == 'row':
+			mapParams = zip(attr['value'], itertools.repeat('1'))
+			rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
+			dataRows.append(_binary2data(rowData))
+
+	bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True)
+
+def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
+	folder = 'bitmaps/'
+	filename = glyphName + bitmapObject.fileExtension
+	if not os.path.isdir(folder):
+		os.makedirs(folder)
+	folder += 'strike%d/' % strikeIndex
+	if not os.path.isdir(folder):
+		os.makedirs(folder)
+
+	fullPath = folder + filename
+	writer.simpletag('extfileimagedata', value=fullPath)
+	writer.newline()
+
+	with open(fullPath, "wb") as file:
+		file.write(bitmapObject.imageData)
+
+def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
+	fullPath = attrs['value']
+	with open(fullPath, "rb") as file:
+		bitmapObject.imageData = file.read()
+
+# End of XML writing code.
+
+# Important information about the naming scheme. Used for identifying formats
+# in XML.
+_bitmapGlyphSubclassPrefix = 'ebdt_bitmap_format_'
+
+class BitmapGlyph(object):
+
+	# For the external file format. This can be changed in subclasses. This way
+	# when the extfile option is turned on files have the form: glyphName.ext
+	# The default is just a flat binary file with no meaning.
+	fileExtension = '.bin'
+
+	# Keep track of reading and writing of various forms.
+	xmlDataFunctions = {
+		'raw':       (_writeRawImageData, _readRawImageData),
+		'row':       (_writeRowImageData, _readRowImageData),
+		'bitwise':   (_writeBitwiseImageData, _readBitwiseImageData),
+		'extfile':   (_writeExtFileImageData, _readExtFileImageData),
+		}
+
+	def __init__(self, data, ttFont):
+		self.data = data
+		self.ttFont = ttFont
+		# TODO Currently non-lazy decompilation is untested here...
+		#if not ttFont.lazy:
+		#	self.decompile()
+		#	del self.data
+
+	def __getattr__(self, attr):
+		# Allow lazy decompile.
+		if attr[:2] == '__':
+			raise AttributeError(attr)
+		if not hasattr(self, "data"):
+			raise AttributeError(attr)
+		self.decompile()
+		del self.data
+		return getattr(self, attr)
+
+	# Not a fan of this but it is needed for safer safety checking.
+	def getFormat(self):
+		return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix):])
+
+	def toXML(self, strikeIndex, glyphName, writer, ttFont):
+		writer.begintag(self.__class__.__name__, [('name', glyphName)])
+		writer.newline()
+
+		self.writeMetrics(writer, ttFont)
+		# Use the internal write method to write using the correct output format.
+		self.writeData(strikeIndex, glyphName, writer, ttFont)
+
+		writer.endtag(self.__class__.__name__)
+		writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		self.readMetrics(name, attrs, content, ttFont)
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attr, content = element
+			if not name.endswith('imagedata'):
+				continue
+			# Chop off 'imagedata' from the tag to get just the option.
+			option = name[:-len('imagedata')]
+			assert option in self.__class__.xmlDataFunctions
+			self.readData(name, attrs, content, ttFont)
+
+	# Some of the glyphs have the metrics. This allows for metrics to be
+	# added if the glyph format has them. Default behavior is to do nothing.
+	def writeMetrics(self, writer, ttFont):
+		pass
+
+	# The opposite of write metrics.
+	def readMetrics(self, name, attrs, content, ttFont):
+		pass
+
+	def writeData(self, strikeIndex, glyphName, writer, ttFont):
+		try:
+			writeFunc, readFunc = self.__class__.xmlDataFunctions[ttFont.bitmapGlyphDataFormat]
+		except KeyError:
+			writeFunc = _writeRawImageData
+		writeFunc(strikeIndex, glyphName, self, writer, ttFont)
+
+	def readData(self, name, attrs, content, ttFont):
+		# Chop off 'imagedata' from the tag to get just the option.
+		option = name[:-len('imagedata')]
+		writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
+		readFunc(self, name, attrs, content, ttFont)
+
+
+# A closure for creating a mixin for the two types of metrics handling.
+# Most of the code is very similar so it's easier to deal with here.
+# Everything works just by passing the class that the mixin is for.
+def _createBitmapPlusMetricsMixin(metricsClass):
+	# Both metrics names are listed here to make meaningful error messages.
+	metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
+	curMetricsName = metricsClass.__name__
+	# Find which metrics this is for and determine the opposite name.
+	metricsId = metricStrings.index(curMetricsName)
+	oppositeMetricsName = metricStrings[1-metricsId]
+
+	class BitmapPlusMetricsMixin(object):
+
+		def writeMetrics(self, writer, ttFont):
+			self.metrics.toXML(writer, ttFont)
+
+		def readMetrics(self, name, attrs, content, ttFont):
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				if name == curMetricsName:
+					self.metrics = metricsClass()
+					self.metrics.fromXML(name, attrs, content, ttFont)
+				elif name == oppositeMetricsName:
+					print("Warning: %s being ignored in format %d." % oppositeMetricsName, self.getFormat())
+
+	return BitmapPlusMetricsMixin
+
+# Since there are only two types of mixins, just create them here.
+BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
+BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
+
+# Data that is bit aligned can be tricky to deal with. These classes implement
+# helper functionality for dealing with the data and getting a particular row
+# of bitwise data. Also helps implement fancy data export/import in XML.
+class BitAlignedBitmapMixin(object):
+
+	def _getBitRange(self, row, bitDepth, metrics):
+		rowBits = (bitDepth * metrics.width)
+		bitOffset = row * rowBits
+		return (bitOffset, bitOffset+rowBits)
+
+	def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
+		if metrics is None:
+			metrics = self.metrics
+		assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
+
+		# Loop through each byte. This can cover two bytes in the original data or
+		# a single byte if things happen to be aligned. The very last entry might
+		# not be aligned so take care to trim the binary data to size and pad with
+		# zeros in the row data. Bit aligned data is somewhat tricky.
+		#
+		# Example of data cut. Data cut represented in x's.
+		# '|' represents byte boundary.
+		# data = ...0XX|XXXXXX00|000... => XXXXXXXX
+		#		or
+		# data = ...0XX|XXXX0000|000... => XXXXXX00
+		#   or
+		# data = ...000|XXXXXXXX|000... => XXXXXXXX
+		#   or
+		# data = ...000|00XXXX00|000... => XXXX0000
+		#
+		dataList = []
+		bitRange = self._getBitRange(row, bitDepth, metrics)
+		stepRange = bitRange + (8,)
+		for curBit in range(*stepRange):
+			endBit = min(curBit+8, bitRange[1])
+			numBits = endBit - curBit
+			cutPoint = curBit % 8
+			firstByteLoc = curBit // 8
+			secondByteLoc = endBit // 8
+			if firstByteLoc < secondByteLoc:
+				numBitsCut = 8 - cutPoint
+			else:
+				numBitsCut = endBit - curBit
+			curByte = _reverseBytes(self.imageData[firstByteLoc])
+			firstHalf = byteord(curByte) >> cutPoint
+			firstHalf = ((1<<numBitsCut)-1) & firstHalf
+			newByte = firstHalf
+			if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
+				curByte = _reverseBytes(self.imageData[secondByteLoc])
+				secondHalf = byteord(curByte) << numBitsCut
+				newByte = (firstHalf | secondHalf) & ((1<<numBits)-1)
+			dataList.append(bytechr(newByte))
+
+		# The way the data is kept is opposite the algorithm used.
+		data = bytesjoin(dataList)
+		if not reverseBytes:
+			data = _reverseBytes(data)
+		return data
+
+	def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
+		if metrics is None:
+			metrics = self.metrics
+		if not reverseBytes:
+			dataRows = list(map(_reverseBytes, dataRows))
+
+		# Keep track of a list of ordinal values as they are easier to modify
+		# than a list of strings. Map to actual strings later.
+		numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
+		ordDataList = [0] * numBytes
+		for row, data in enumerate(dataRows):
+			bitRange = self._getBitRange(row, bitDepth, metrics)
+			stepRange = bitRange + (8,)
+			for curBit, curByte in zip(range(*stepRange), data):
+				endBit = min(curBit+8, bitRange[1])
+				cutPoint = curBit % 8
+				firstByteLoc = curBit // 8
+				secondByteLoc = endBit // 8
+				if firstByteLoc < secondByteLoc:
+					numBitsCut = 8 - cutPoint
+				else:
+					numBitsCut = endBit - curBit
+				curByte = byteord(curByte)
+				firstByte = curByte & ((1<<numBitsCut)-1)
+				ordDataList[firstByteLoc] |= (firstByte << cutPoint)
+				if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
+					secondByte = (curByte >> numBitsCut) & ((1<<8-numBitsCut)-1)
+					ordDataList[secondByteLoc] |= secondByte
+
+		# Save the image data with the bits going the correct way.
+		self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
+
+class ByteAlignedBitmapMixin(object):
+
+	def _getByteRange(self, row, bitDepth, metrics):
+		rowBytes = (bitDepth * metrics.width + 7) // 8
+		byteOffset = row * rowBytes
+		return (byteOffset, byteOffset+rowBytes)
+
+	def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
+		if metrics is None:
+			metrics = self.metrics
+		assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
+		byteRange = self._getByteRange(row, bitDepth, metrics)
+		data = self.imageData[slice(*byteRange)]
+		if reverseBytes:
+			data = _reverseBytes(data)
+		return data
+
+	def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
+		if metrics is None:
+			metrics = self.metrics
+		if reverseBytes:
+			dataRows = map(_reverseBytes, dataRows)
+		self.imageData = bytesjoin(dataRows)
+
+class ebdt_bitmap_format_1(ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
+
+	def decompile(self):
+		self.metrics = SmallGlyphMetrics()
+		dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+		self.imageData = data
+
+	def compile(self, ttFont):
+		data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
+		return data + self.imageData
+
+
+class ebdt_bitmap_format_2(BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph):
+
+	def decompile(self):
+		self.metrics = SmallGlyphMetrics()
+		dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+		self.imageData = data
+
+	def compile(self, ttFont):
+		data = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
+		return data + self.imageData
+
+
+class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
+
+	def decompile(self):
+		self.imageData = self.data
+
+	def compile(self, ttFont):
+		return self.imageData
+
+class ebdt_bitmap_format_6(ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
+
+	def decompile(self):
+		self.metrics = BigGlyphMetrics()
+		dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+		self.imageData = data
+
+	def compile(self, ttFont):
+		data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
+		return data + self.imageData
+
+
+class ebdt_bitmap_format_7(BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph):
+
+	def decompile(self):
+		self.metrics = BigGlyphMetrics()
+		dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+		self.imageData = data
+
+	def compile(self, ttFont):
+		data = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
+		return data + self.imageData
+
+
+class ComponentBitmapGlyph(BitmapGlyph):
+
+	def toXML(self, strikeIndex, glyphName, writer, ttFont):
+		writer.begintag(self.__class__.__name__, [('name', glyphName)])
+		writer.newline()
+
+		self.writeMetrics(writer, ttFont)
+
+		writer.begintag('components')
+		writer.newline()
+		for curComponent in self.componentArray:
+			curComponent.toXML(writer, ttFont)
+		writer.endtag('components')
+		writer.newline()
+
+		writer.endtag(self.__class__.__name__)
+		writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		self.readMetrics(name, attrs, content, ttFont)
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attr, content = element
+			if name == 'components':
+				self.componentArray = []
+				for compElement in content:
+					if not isinstance(compElement, tuple):
+						continue
+					name, attrs, content = compElement
+					if name == 'ebdtComponent':
+						curComponent = EbdtComponent()
+						curComponent.fromXML(name, attrs, content, ttFont)
+						self.componentArray.append(curComponent)
+					else:
+						print("Warning: '%s' being ignored in component array." % name)
+
+
+class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
+
+	def decompile(self):
+		self.metrics = SmallGlyphMetrics()
+		dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
+		data = data[1:]
+
+		(numComponents,) = struct.unpack(">H", data[:2])
+		data = data[2:]
+		self.componentArray = []
+		for i in range(numComponents):
+			curComponent = EbdtComponent()
+			dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
+			curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
+			self.componentArray.append(curComponent)
+
+	def compile(self, ttFont):
+		dataList = []
+		dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
+		dataList.append(b'\0')
+		dataList.append(struct.pack(">H", len(self.componentArray)))
+		for curComponent in self.componentArray:
+			curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
+			dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
+		return bytesjoin(dataList)
+
+
+class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
+
+	def decompile(self):
+		self.metrics = BigGlyphMetrics()
+		dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
+		(numComponents,) = struct.unpack(">H", data[:2])
+		data = data[2:]
+		self.componentArray = []
+		for i in range(numComponents):
+			curComponent = EbdtComponent()
+			dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
+			curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
+			self.componentArray.append(curComponent)
+
+	def compile(self, ttFont):
+		dataList = []
+		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
+		dataList.append(struct.pack(">H", len(self.componentArray)))
+		for curComponent in self.componentArray:
+			curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
+			dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
+		return bytesjoin(dataList)
+
+
+# Dictionary of bitmap formats to the class representing that format
+# currently only the ones listed in this map are the ones supported.
+ebdt_bitmap_classes = {
+		1: ebdt_bitmap_format_1,
+		2: ebdt_bitmap_format_2,
+		5: ebdt_bitmap_format_5,
+		6: ebdt_bitmap_format_6,
+		7: ebdt_bitmap_format_7,
+		8: ebdt_bitmap_format_8,
+		9: ebdt_bitmap_format_9,
+	}
diff --git a/Lib/fontTools/ttLib/tables/E_B_L_C_.py b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
new file mode 100644
index 0000000..28a2635
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/E_B_L_C_.py
@@ -0,0 +1,617 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from . import DefaultTable
+from fontTools.misc.textTools import safeEval
+from .BitmapGlyphMetrics import BigGlyphMetrics, bigGlyphMetricsFormat, SmallGlyphMetrics, smallGlyphMetricsFormat
+import struct
+import itertools
+from collections import deque
+
# sstruct format for the EBLC table header.
eblcHeaderFormat = """
	> # big endian
	version:  16.16F
	numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
	> # big endian
	indexSubTableArrayOffset: I
	indexTablesSize:          I
	numberOfIndexSubTables:   I
	colorRef:                 I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
	> # big endian
	ascender:              b
	descender:             b
	widthMax:              B
	caretSlopeNumerator:   b
	caretSlopeDenominator: b
	caretOffset:           b
	minOriginSB:           b
	minAdvanceSB:          b
	maxBeforeBL:           b
	minAfterBL:            b
	pad1:                  b
	pad2:                  b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
	> # big endian
	startGlyphIndex: H
	endGlyphIndex:   H
	ppemX:           B
	ppemY:           B
	bitDepth:        B
	flags:           b
"""

# One indexSubTableArray element:
# (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable).
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)

# The indexSubHeader shared by all index subtable formats:
# (indexFormat, imageFormat, imageDataOffset).
indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)

# The (glyphCode, offset) pair used by index format 4.
codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
+
class table_E_B_L_C_(DefaultTable.DefaultTable):
	"""Embedded Bitmap Location table: locates the glyph bitmap data that
	is stored in the companion EBDT table."""

	# Compiling this table needs the glyph data from EBDT.
	dependencies = ['EBDT']

	# This method can be overridden in subclasses to support new formats
	# without changing the other implementation. Also can be used as a
	# convenience method for converting a font file to an alternative format.
	def getIndexFormatClass(self, indexFormat):
		# Maps an indexFormat number to its eblc_index_sub_table_* class.
		return eblc_sub_table_classes[indexFormat]

	def decompile(self, data, ttFont):
		"""Parse the binary table into Strike objects whose index subtables
		decompile lazily (see EblcIndexSubTable.__getattr__)."""

		# Save the original data because offsets are from the start of the table.
		origData = data

		dummy, data = sstruct.unpack2(eblcHeaderFormat, data, self)

		# First pass: the fixed-size bitmapSizeTable records that follow
		# the header, one per strike.
		self.strikes = []
		for curStrikeIndex in range(self.numSizes):
			curStrike = Strike()
			self.strikes.append(curStrike)
			curTable = curStrike.bitmapSizeTable
			dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart1, data, curTable)
			for metric in ('hori', 'vert'):
				metricObj = SbitLineMetrics()
				vars(curTable)[metric] = metricObj
				dummy, data = sstruct.unpack2(sbitLineMetricsFormat, data, metricObj)
			dummy, data = sstruct.unpack2(bitmapSizeTableFormatPart2, data, curTable)

		# Second pass: follow each strike's indexSubTableArrayOffset and
		# instantiate its index subtables.
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			for subtableIndex in range(curTable.numberOfIndexSubTables):
				# One indexSubTableArray element: glyph range plus the
				# additional offset to the actual subtable.
				lowerBound = curTable.indexSubTableArrayOffset + subtableIndex * indexSubTableArraySize
				upperBound = lowerBound + indexSubTableArraySize
				data = origData[lowerBound:upperBound]

				tup = struct.unpack(indexSubTableArrayFormat, data)
				(firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
				offsetToIndexSubTable = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable
				data = origData[offsetToIndexSubTable:]

				tup = struct.unpack(indexSubHeaderFormat, data[:indexSubHeaderSize])
				(indexFormat, imageFormat, imageDataOffset) = tup

				indexFormatClass = self.getIndexFormatClass(indexFormat)
				# The subtable keeps the remaining bytes and decompiles lazily.
				indexSubTable = indexFormatClass(data[indexSubHeaderSize:], ttFont)
				indexSubTable.firstGlyphIndex = firstGlyphIndex
				indexSubTable.lastGlyphIndex = lastGlyphIndex
				indexSubTable.additionalOffsetToIndexSubtable = additionalOffsetToIndexSubtable
				indexSubTable.indexFormat = indexFormat
				indexSubTable.imageFormat = imageFormat
				indexSubTable.imageDataOffset = imageDataOffset
				curStrike.indexSubTables.append(indexSubTable)

	def compile(self, ttFont):
		"""Serialize the table, recomputing every offset, size and glyph
		index range from the current contents."""

		dataList = []
		self.numSizes = len(self.strikes)
		dataList.append(sstruct.pack(eblcHeaderFormat, self))

		# Data size of the header + bitmapSizeTable needs to be calculated
		# in order to form offsets. This value will hold the size of the data
		# in dataList after all the data is consolidated in dataList.
		dataSize = len(dataList[0])

		# The table will be structured in the following order:
		# (0) header
		# (1) Each bitmapSizeTable [1 ... self.numSizes]
		# (2) Alternate between indexSubTableArray and indexSubTable
		#     for each bitmapSizeTable present.
		#
		# The issue is maintaining the proper offsets when table information
		# gets moved around. All offsets and size information must be recalculated
		# when building the table to allow editing within ttLib and also allow easy
		# import/export to and from XML. All of this offset information is lost
		# when exporting to XML so everything must be calculated fresh so importing
		# from XML will work cleanly. Only byte offset and size information is
		# calculated fresh. Count information like numberOfIndexSubTables is
		# checked through assertions. If the information in this table was not
		# touched or was changed properly then these types of values should match.
		#
		# The table will be rebuilt the following way:
		# (0) Precompute the size of all the bitmapSizeTables. This is needed to
		#     compute the offsets properly.
		# (1) For each bitmapSizeTable compute the indexSubTable and
		#     indexSubTableArray pair. The indexSubTable must be computed first
		#     so that the offset information in indexSubTableArray can be
		#     calculated. Update the data size after each pairing.
		# (2) Build each bitmapSizeTable.
		# (3) Consolidate all the data into the main dataList in the correct order.

		for curStrike in self.strikes:
			dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
			dataSize += len(('hori', 'vert')) * sstruct.calcsize(sbitLineMetricsFormat)
			dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)

		indexSubTablePairDataList = []
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
			curTable.indexSubTableArrayOffset = dataSize

			# Precompute the size of the indexSubTableArray. This information
			# is important for correctly calculating the new value for
			# additionalOffsetToIndexSubtable.
			sizeOfSubTableArray = curTable.numberOfIndexSubTables * indexSubTableArraySize
			lowerBound = dataSize
			dataSize += sizeOfSubTableArray
			upperBound = dataSize

			indexSubTableDataList = []
			for indexSubTable in curStrike.indexSubTables:
				indexSubTable.additionalOffsetToIndexSubtable = dataSize - curTable.indexSubTableArrayOffset
				# Glyph ranges are recomputed from the glyph names; the values
				# read from XML are ignored here.
				glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
				indexSubTable.firstGlyphIndex = min(glyphIds)
				indexSubTable.lastGlyphIndex = max(glyphIds)
				data = indexSubTable.compile(ttFont)
				indexSubTableDataList.append(data)
				dataSize += len(data)
			curTable.startGlyphIndex = min(ist.firstGlyphIndex for ist in curStrike.indexSubTables)
			curTable.endGlyphIndex = max(ist.lastGlyphIndex for ist in curStrike.indexSubTables)

			for i in curStrike.indexSubTables:
				# indexSubHeaderFormat and indexSubTableArrayFormat are both
				# ">HHL", so this packs a correct indexSubTableArray element.
				data = struct.pack(indexSubHeaderFormat, i.firstGlyphIndex, i.lastGlyphIndex, i.additionalOffsetToIndexSubtable)
				indexSubTablePairDataList.append(data)
			indexSubTablePairDataList.extend(indexSubTableDataList)
			curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset

		# Step (2)/(3): emit the bitmapSizeTables, then the array/subtable pairs.
		for curStrike in self.strikes:
			curTable = curStrike.bitmapSizeTable
			data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
			dataList.append(data)
			for metric in ('hori', 'vert'):
				metricObj = vars(curTable)[metric]
				data = sstruct.pack(sbitLineMetricsFormat, metricObj)
				dataList.append(data)
			data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
			dataList.append(data)
		dataList.extend(indexSubTablePairDataList)

		return bytesjoin(dataList)

	def toXML(self, writer, ttFont):
		# Offsets/sizes are omitted: they are recomputed by compile().
		writer.simpletag('header', [('version', self.version)])
		writer.newline()
		for curIndex, curStrike in enumerate(self.strikes):
			curStrike.toXML(curIndex, writer, ttFont)

	def fromXML(self, name, attrs, content, ttFont):
		if name == 'header':
			self.version = safeEval(attrs['version'])
		elif name == 'strike':
			if not hasattr(self, 'strikes'):
				self.strikes = []
			strikeIndex = safeEval(attrs['index'])
			curStrike = Strike()
			curStrike.fromXML(name, attrs, content, ttFont, self)

			# Grow the strike array to the appropriate size. The XML format
			# allows for the strike index value to be out of order.
			if strikeIndex >= len(self.strikes):
				self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
			assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
			self.strikes[strikeIndex] = curStrike
+
class Strike(object):
	"""One bitmap strike: a bitmapSizeTable plus its index subtables."""

	def __init__(self):
		self.bitmapSizeTable = BitmapSizeTable()
		self.indexSubTables = []

	def toXML(self, strikeIndex, writer, ttFont):
		writer.begintag('strike', [('index', strikeIndex)])
		writer.newline()
		self.bitmapSizeTable.toXML(writer, ttFont)
		writer.comment('GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler.')
		writer.newline()
		for subTable in self.indexSubTables:
			subTable.toXML(writer, ttFont)
		writer.endtag('strike')
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont, locator):
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, elemContent = element
			if elemName == 'bitmapSizeTable':
				self.bitmapSizeTable.fromXML(elemName, elemAttrs, elemContent, ttFont)
			elif elemName.startswith(_indexSubTableSubclassPrefix):
				# The element name encodes the index format number after the
				# common class-name prefix; the locator resolves the class.
				indexFormat = safeEval(elemName[len(_indexSubTableSubclassPrefix):])
				subTableClass = locator.getIndexFormatClass(indexFormat)
				subTable = subTableClass(None, None)
				subTable.indexFormat = indexFormat
				subTable.fromXML(elemName, elemAttrs, elemContent, ttFont)
				self.indexSubTables.append(subTable)
+
+
class BitmapSizeTable(object):
	"""The per-strike size record of the EBLC table."""

	# Returns all the simple metric names that bitmap size table
	# cares about in terms of XML creation.
	def _getXMLMetricNames(self):
		part1Names = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
		part2Names = sstruct.getformat(bitmapSizeTableFormatPart2)[1]
		# The first 3 names are byte offsets and counts that compile()
		# recalculates, so they are not exposed in the XML.
		return (part1Names + part2Names)[3:]

	def toXML(self, writer, ttFont):
		writer.begintag('bitmapSizeTable')
		writer.newline()
		for direction in ('hori', 'vert'):
			getattr(self, direction).toXML(direction, writer, ttFont)
		for metricName in self._getXMLMetricNames():
			writer.simpletag(metricName, value=getattr(self, metricName))
			writer.newline()
		writer.endtag('bitmapSizeTable')
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		# Only element names this table actually knows about are read.
		knownNames = set(self._getXMLMetricNames())
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, elemContent = element
			if elemName == 'sbitLineMetrics':
				direction = elemAttrs['direction']
				assert direction in ('hori', 'vert'), "SbitLineMetrics direction specified invalid."
				metricObj = SbitLineMetrics()
				metricObj.fromXML(elemName, elemAttrs, elemContent, ttFont)
				vars(self)[direction] = metricObj
			elif elemName in knownNames:
				vars(self)[elemName] = safeEval(elemAttrs['value'])
			else:
				print("Warning: unknown name '%s' being ignored in BitmapSizeTable." % elemName)
+
+
class SbitLineMetrics(object):
	"""One sbitLineMetrics record ('hori' or 'vert') of a bitmapSizeTable."""

	def toXML(self, name, writer, ttFont):
		writer.begintag('sbitLineMetrics', [('direction', name)])
		writer.newline()
		for metricName in sstruct.getformat(sbitLineMetricsFormat)[1]:
			writer.simpletag(metricName, value=getattr(self, metricName))
			writer.newline()
		writer.endtag('sbitLineMetrics')
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		validNames = set(sstruct.getformat(sbitLineMetricsFormat)[1])
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, _ = element
			if elemName in validNames:
				vars(self)[elemName] = safeEval(elemAttrs['value'])
+
# Common prefix of the eblc_index_sub_table_* class names below; Strike.fromXML
# uses it to recognize subtable XML elements and recover the format number.
_indexSubTableSubclassPrefix = 'eblc_index_sub_table_'
+
class EblcIndexSubTable(object):
	"""Base class for EBLC index subtables.

	Instances decompile lazily: the raw bytes stay in ``self.data`` and are
	parsed on the first access of a computed attribute (``names``,
	``locations``, ...), after which ``data`` and ``ttFont`` are dropped.
	"""

	def __init__(self, data, ttFont):
		self.data = data
		self.ttFont = ttFont
		# TODO Currently non-lazy decompiling doesn't work for this class...
		#if not ttFont.lazy:
		#	self.decompile()
		#	del self.data, self.ttFont

	def __getattr__(self, attr):
		# Allow lazy decompile.
		if attr[:2] == '__':
			# Never satisfy dunder lookups lazily (copy/pickle protocols).
			raise AttributeError(attr)
		# Bug fix: probe __dict__ directly. The previous hasattr(self, "data")
		# re-entered __getattr__ and recursed infinitely whenever 'data' was
		# absent (i.e. after decompilation, or when built from XML).
		if "data" not in self.__dict__:
			raise AttributeError(attr)
		self.decompile()
		del self.data, self.ttFont
		return getattr(self, attr)

	# This method just takes care of the indexSubHeader. Implementing subclasses
	# should call it to compile the indexSubHeader and then continue compiling
	# the remainder of their unique format.
	def compile(self, ttFont):
		return struct.pack(indexSubHeaderFormat, self.indexFormat, self.imageFormat, self.imageDataOffset)

	# Creates the XML for bitmap glyphs. Each index sub table basically makes
	# the same XML except for specific metric information that is written
	# out via a method call that a subclass implements optionally.
	def toXML(self, writer, ttFont):
		writer.begintag(self.__class__.__name__, [
				('imageFormat', self.imageFormat),
				('firstGlyphIndex', self.firstGlyphIndex),
				('lastGlyphIndex', self.lastGlyphIndex),
				])
		writer.newline()
		self.writeMetrics(writer, ttFont)
		# Write out the names as thats all thats needed to rebuild etc.
		# For font debugging of consecutive formats the ids are also written.
		# The ids are not read when moving from the XML format.
		glyphIds = map(ttFont.getGlyphID, self.names)
		for glyphName, glyphId in zip(self.names, glyphIds):
			writer.simpletag('glyphLoc', name=glyphName, id=glyphId)
			writer.newline()
		writer.endtag(self.__class__.__name__)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		# Read all the attributes. Even though the glyph indices are
		# recalculated, they are still read in case there needs to
		# be an immediate export of the data.
		self.imageFormat = safeEval(attrs['imageFormat'])
		self.firstGlyphIndex = safeEval(attrs['firstGlyphIndex'])
		self.lastGlyphIndex = safeEval(attrs['lastGlyphIndex'])

		self.readMetrics(name, attrs, content, ttFont)

		self.names = []
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name == 'glyphLoc':
				self.names.append(attrs['name'])

	# A helper method that writes the metrics for the index sub table. It also
	# is responsible for writing the image size for fixed size data since fixed
	# size is not recalculated on compile. Default behavior is to do nothing.
	def writeMetrics(self, writer, ttFont):
		pass

	# A helper method that is the inverse of writeMetrics.
	def readMetrics(self, name, attrs, content, ttFont):
		pass

	# This method is for fixed glyph data sizes. There are formats where
	# the glyph data is fixed but are actually composite glyphs. To handle
	# this the font spec in indexSubTable makes the data the size of the
	# fixed size by padding the component arrays. This function abstracts
	# out this padding process. Input is data unpadded. Output is data
	# padded only in fixed formats. Default behavior is to return the data.
	def padBitmapData(self, data):
		return data

	# Remove any of the glyph locations and names that are flagged as skipped.
	# This only occurs in formats {1,3}.
	def removeSkipGlyphs(self):
		# Determines if a name, location pair is a valid data location.
		# Skip glyphs are marked when the size is equal to zero.
		def isValidLocation(args):
			(name, (startByte, endByte)) = args
			return startByte < endByte
		# Remove all skip glyphs.
		dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
		# Robustness fix: zip(*[]) cannot be unpacked into two lists, so
		# handle the (legal) case where every glyph was a skip glyph.
		if dataPairs:
			self.names, self.locations = list(map(list, zip(*dataPairs)))
		else:
			self.names, self.locations = [], []
+
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):

	# Prep the data size for the offset array data format.
	dataFormat = '>'+formatStringForDataType
	offsetDataSize = struct.calcsize(dataFormat)

	class OffsetArrayIndexSubTableMixin(object):

		def decompile(self):
			# The subtable stores numGlyphs+1 offsets; consecutive offsets
			# delimit each glyph's image data within EBDT.

			numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
			indexingOffsets = [glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs+2)]
			indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
			offsetArray = [struct.unpack(dataFormat, self.data[slice(*loc)])[0] for loc in indexingLocations]

			glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
			# Offsets in the file are relative to imageDataOffset.
			modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
			self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))

			self.names = list(map(self.ttFont.getGlyphName, glyphIds))
			self.removeSkipGlyphs()

		def compile(self, ttFont):
			# First make sure that all the data lines up properly. Formats 1 and 3
			# must have all its data lined up consecutively. If not this will fail.
			for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
				assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable offset formats"

			glyphIds = list(map(ttFont.getGlyphID, self.names))
			# Make sure that all ids are sorted strictly increasing.
			assert all(glyphIds[i] < glyphIds[i+1] for i in range(len(glyphIds)-1))

			# Run a simple algorithm to add skip glyphs to the data locations at
			# the places where an id is not present. A skip glyph is encoded as
			# a zero-length location (start == end).
			idQueue = deque(glyphIds)
			locQueue = deque(self.locations)
			allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
			allLocations = []
			for curId in allGlyphIds:
				if curId != idQueue[0]:
					allLocations.append((locQueue[0][0], locQueue[0][0]))
				else:
					idQueue.popleft()
					allLocations.append(locQueue.popleft())

			# Now that all the locations are collected, pack them appropriately into
			# offsets. This is the form where offset[i] is the location and
			# offset[i+1]-offset[i] is the size of the data location.
			offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
			# Image data offset must be less than or equal to the minimum of locations.
			# This offset may change the value for round tripping but is safer and
			# allows imageDataOffset to not be required to be in the XML version.
			self.imageDataOffset = min(offsets)
			offsetArray = [offset - self.imageDataOffset for offset in offsets]

			dataList = [EblcIndexSubTable.compile(self, ttFont)]
			dataList += [struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray]
			# Take care of any padding issues. Only occurs in format 3.
			# NOTE(review): len(dataList) also counts the 8-byte subheader
			# element, so for format 3 this pads when the offset count is
			# even, not odd — verify against the spec's 32-bit alignment rule.
			if offsetDataSize * len(dataList) % 4 != 0:
				dataList.append(struct.pack(dataFormat, 0))
			return bytesjoin(dataList)

	return OffsetArrayIndexSubTableMixin
+
# A Mixin for functionality shared between the different kinds
# of fixed sized data handling. Both kinds have big metrics so
# that kind of special processing is also handled in this mixin.
class FixedSizeIndexSubTableMixin(object):

	def writeMetrics(self, writer, ttFont):
		# imageSize is not recalculated on compile, so it must round-trip
		# through the XML along with the shared big metrics.
		writer.simpletag('imageSize', value=self.imageSize)
		writer.newline()
		self.metrics.toXML(writer, ttFont)

	def readMetrics(self, name, attrs, content, ttFont):
		for element in content:
			if not isinstance(element, tuple):
				continue
			elemName, elemAttrs, elemContent = element
			if elemName == 'imageSize':
				self.imageSize = safeEval(elemAttrs['value'])
			elif elemName == BigGlyphMetrics.__name__:
				self.metrics = BigGlyphMetrics()
				self.metrics.fromXML(elemName, elemAttrs, elemContent, ttFont)
			elif elemName == SmallGlyphMetrics.__name__:
				print("Warning: SmallGlyphMetrics being ignored in format %d." % self.indexFormat)

	def padBitmapData(self, data):
		# Make sure that the data isn't bigger than the fixed size.
		assert len(data) <= self.imageSize, "Data in indexSubTable format %d must be less than the fixed size." % self.indexFormat
		# Zero-pad the data up to the fixed image size.
		return data + b'\0' * (self.imageSize - len(data))
+
class eblc_index_sub_table_1(_createOffsetArrayIndexSubTableMixin('L'), EblcIndexSubTable):
	# Format 1: 4-byte (ULONG) offsets; all behavior comes from the mixin.
	pass
+
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
	"""Index format 2: all glyph images share one fixed size and one set of
	big metrics, stored consecutively with no per-glyph offsets."""

	def decompile(self):
		(self.imageSize,) = struct.unpack(">L", self.data[:4])
		self.metrics = BigGlyphMetrics()
		sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
		glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex+1))
		# Locations are synthesized: glyph i occupies one imageSize slot.
		offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = list(map(self.ttFont.getGlyphName, glyphIds))

	def compile(self, ttFont):
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		# Make sure all the ids are consecutive. This is required by Format 2.
		assert glyphIds == list(range(self.firstGlyphIndex, self.lastGlyphIndex+1)), "Format 2 ids must be consecutive."
		# Bug fix: zip() returns an iterator in Python 3, so the original
		# zip(*self.locations)[0] raised TypeError; take the minimum of the
		# start offsets directly instead.
		self.imageDataOffset = min(loc[0] for loc in self.locations)

		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", self.imageSize))
		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
		return bytesjoin(dataList)
+
class eblc_index_sub_table_3(_createOffsetArrayIndexSubTableMixin('H'), EblcIndexSubTable):
	# Format 3: 2-byte (USHORT) offsets; all behavior comes from the mixin.
	pass
+
class eblc_index_sub_table_4(EblcIndexSubTable):
	# Format 4: a sparse, sorted list of (glyphCode, offset) pairs; the data
	# for pair i runs from offset[i] to offset[i+1], so one extra pair
	# terminates the array.

	def decompile(self):

		(numGlyphs,) = struct.unpack(">L", self.data[:4])
		data = self.data[4:]
		# numGlyphs+1 codeOffsetPairs are stored (the extra one terminates).
		indexingOffsets = [glyphIndex * codeOffsetPairSize for glyphIndex in range(numGlyphs+2)]
		indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
		glyphArray = [struct.unpack(codeOffsetPairFormat, data[slice(*loc)]) for loc in indexingLocations]
		glyphIds, offsets = list(map(list, zip(*glyphArray)))
		# There are one too many glyph ids. Get rid of the last one.
		glyphIds.pop()

		# File offsets are relative to imageDataOffset.
		offsets = [offset + self.imageDataOffset for offset in offsets]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = list(map(self.ttFont.getGlyphName, glyphIds))

	def compile(self, ttFont):
		# First make sure that all the data lines up properly. Format 4
		# must have all its data lined up consecutively. If not this will fail.
		for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
			assert curLoc[1] == nxtLoc[0], "Data must be consecutive in indexSubTable format 4"

		offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
		# Image data offset must be less than or equal to the minimum of locations.
		# Resetting this offset may change the value for round tripping but is safer
		# and allows imageDataOffset to not be required to be in the XML version.
		self.imageDataOffset = min(offsets)
		offsets = [offset - self.imageDataOffset for offset in offsets]
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		# Create an iterator over the ids plus a padding value.
		idsPlusPad = list(itertools.chain(glyphIds, [0]))

		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", len(glyphIds)))
		tmp = [struct.pack(codeOffsetPairFormat, *cop) for cop in zip(idsPlusPad, offsets)]
		dataList += tmp
		data = bytesjoin(dataList)
		return data
+
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
	"""Index format 5: constant glyph image size plus a sparse, sorted array
	of glyph ids (no per-glyph offsets)."""

	def decompile(self):
		# Kept for API parity; nothing reads this back during decompilation.
		self.origDataLen = 0
		(self.imageSize,) = struct.unpack(">L", self.data[:4])
		data = self.data[4:]
		self.metrics, data = sstruct.unpack2(bigGlyphMetricsFormat, data, BigGlyphMetrics())
		(numGlyphs,) = struct.unpack(">L", data[:4])
		data = data[4:]
		glyphIds = [struct.unpack(">H", data[2*i:2*(i+1)])[0] for i in range(numGlyphs)]

		# Every glyph image is exactly imageSize bytes, packed consecutively.
		offsets = [self.imageSize * i + self.imageDataOffset for i in range(len(glyphIds)+1)]
		self.locations = list(zip(offsets, offsets[1:]))
		self.names = list(map(self.ttFont.getGlyphName, glyphIds))

	def compile(self, ttFont):
		# Bug fix: zip() returns an iterator in Python 3, so the original
		# zip(*self.locations)[0] raised TypeError; take the minimum of the
		# start offsets directly instead.
		self.imageDataOffset = min(loc[0] for loc in self.locations)
		dataList = [EblcIndexSubTable.compile(self, ttFont)]
		dataList.append(struct.pack(">L", self.imageSize))
		dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
		glyphIds = list(map(ttFont.getGlyphID, self.names))
		dataList.append(struct.pack(">L", len(glyphIds)))
		dataList += [struct.pack(">H", curId) for curId in glyphIds]
		# Pad the glyph id array with one extra USHORT to keep it 32-bit aligned.
		if len(glyphIds) % 2 == 1:
			dataList.append(struct.pack(">H", 0))
		return bytesjoin(dataList)
+
# Dictionary mapping an indexFormat number to the class representing that
# format; getIndexFormatClass() resolves formats through this map.
eblc_sub_table_classes = {
		1: eblc_index_sub_table_1,
		2: eblc_index_sub_table_2,
		3: eblc_index_sub_table_3,
		4: eblc_index_sub_table_4,
		5: eblc_index_sub_table_5,
	}
diff --git a/Lib/fontTools/ttLib/tables/F_F_T_M_.py b/Lib/fontTools/ttLib/tables/F_F_T_M_.py
new file mode 100644
index 0000000..e8b1d29
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/F_F_T_M_.py
@@ -0,0 +1,46 @@
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from ._h_e_a_d import mac_epoch_diff
+from . import DefaultTable
+import time
+import calendar
+
# sstruct format for the FFTM table: a version word followed by three 64-bit
# timestamps (FontForge's own, and the source file's creation/modification
# dates), serialized as seconds relative to the Mac epoch (see toXML below).
FFTMFormat = """
		>	# big endian
		version:        I
		FFTimeStamp:    Q
		sourceCreated:  Q
		sourceModified: Q
"""
+
class table_F_F_T_M_(DefaultTable.DefaultTable):
  """FontForge's 'FFTM' timestamp table."""

  def decompile(self, data, ttFont):
    sstruct.unpack2(FFTMFormat, data, self)

  def compile(self, ttFont):
    return sstruct.pack(FFTMFormat, self)

  def toXML(self, writer, ttFont):
    writer.comment("FontForge's timestamp, font source creation and modification dates")
    writer.newline()
    formatstring, names, fixes = sstruct.getformat(FFTMFormat)
    for name in names:
      value = getattr(self, name)
      if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
        # Render the Mac-epoch seconds as a human-readable UTC date,
        # clamping negative values and falling back to the epoch when
        # gmtime() rejects an out-of-range value.
        try:
          value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff)))
        except ValueError:
          value = time.asctime(time.gmtime(0))
      writer.simpletag(name, value=value)
      writer.newline()

  def fromXML(self, name, attrs, content, ttFont):
    value = attrs["value"]
    if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
      # Inverse of toXML: parse the asctime() string back into
      # Mac-epoch seconds.
      value = calendar.timegm(time.strptime(value)) - mac_epoch_diff
    else:
      value = safeEval(value)
    setattr(self, name, value)
\ No newline at end of file
diff --git a/Lib/fontTools/ttLib/tables/G_D_E_F_.py b/Lib/fontTools/ttLib/tables/G_D_E_F_.py
new file mode 100644
index 0000000..d4a5741
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/G_D_E_F_.py
@@ -0,0 +1,5 @@
+from .otBase import BaseTTXConverter
+
+
class table_G_D_E_F_(BaseTTXConverter):
	"""Glyph Definition table; parsing and serialization are handled
	entirely by the generic BaseTTXConverter machinery."""
	pass
diff --git a/Lib/fontTools/ttLib/tables/G_M_A_P_.py b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
new file mode 100644
index 0000000..5db94d9
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/G_M_A_P_.py
@@ -0,0 +1,133 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+
# sstruct format for the GMAP table header.
GMAPFormat = """
		>	# big endian
		tableVersionMajor:	H
		tableVersionMinor: 	H
		flags:	H
		recordsCount:		H
		recordsOffset:		H
		fontNameLength:		H
"""
# psFontName is a byte string which follows the header above. It is zero
# padded up to the start of the records array; recordsOffset is 32-bit aligned.

# sstruct format for one GMAP record; 'name' is a fixed 32-byte field.
GMAPRecordFormat1 = """
		>	# big endian
		UV:			L
		cid:		H
		gid:		H
		ggid:		H
		name:		32s
"""
+		
+
+
class GMAPRecord(object):
	"""One GMAP record: a Unicode value (UV) mapped to cid, gid, ggid
	(glyphlet glyph id) and a glyphlet name of at most 32 bytes."""

	def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
		self.UV = uv
		self.cid = cid
		self.gid = gid
		self.ggid = ggid
		self.name = name

	def toXML(self, writer, ttFont):
		writer.begintag("GMAPRecord")
		writer.newline()
		writer.simpletag("UV", value=self.UV)
		writer.newline()
		writer.simpletag("cid", value=self.cid)
		writer.newline()
		writer.simpletag("gid", value=self.gid)
		writer.newline()
		# Bug fix: this previously wrote self.gid; the glyphletGid element
		# must carry the glyphlet glyph id (ggid).
		writer.simpletag("glyphletGid", value=self.ggid)
		writer.newline()
		writer.simpletag("GlyphletName", value=self.name)
		writer.newline()
		writer.endtag("GMAPRecord")
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		value = attrs["value"]
		if name == "GlyphletName":
			self.name = value
		elif name == "glyphletGid":
			# Map the XML element name back onto the ggid attribute so the
			# value survives a round trip (setattr would otherwise create a
			# stray 'glyphletGid' attribute that compile() never reads).
			self.ggid = safeEval(value)
		else:
			setattr(self, name, safeEval(value))

	def compile(self, ttFont):
		if self.UV is None:
			self.UV = 0
		# Zero-pad the name to the fixed 32-byte '32s' field of
		# GMAPRecordFormat1 before packing.
		nameLen = len(self.name)
		if nameLen < 32:
			self.name = self.name + "\0" * (32 - nameLen)
		data = sstruct.pack(GMAPRecordFormat1, self)
		return data

	def __repr__(self):
		return "GMAPRecord[ UV: " + str(self.UV) + ", cid: " + str(self.cid) + ", gid: " + str(self.gid) + ", ggid: " + str(self.ggid) + ", Glyphlet Name: " + str(self.name) + " ]"
+
+
class table_G_M_A_P_(DefaultTable.DefaultTable):
	"""The GMAP table: a PostScript font name plus a list of GMAPRecords."""

	dependencies = []

	def decompile(self, data, ttFont):
		dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
		self.psFontName = tostr(newData[:self.fontNameLength])
		assert (self.recordsOffset % 4) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
		# Records are located from the start of the whole table, not from
		# the end of the header.
		newData = data[self.recordsOffset:]
		self.gmapRecords = []
		for i in range (self.recordsCount):
			gmapRecord, newData = sstruct.unpack2(GMAPRecordFormat1, newData, GMAPRecord())
			# NOTE(review): under Python 3 the '32s' field likely unpacks to
			# bytes, in which case .strip('\0') would raise TypeError —
			# verify and convert with tostr()/b'\0' if so.
			gmapRecord.name = gmapRecord.name.strip('\0')
			self.gmapRecords.append(gmapRecord)

	def compile(self, ttFont):
		"""Serialize header, zero-padded psFontName, then the records
		(recordsOffset is recomputed and 32-bit aligned)."""
		self.recordsCount = len(self.gmapRecords)
		self.fontNameLength = len(self.psFontName)
		# 12 is the packed size of GMAPFormat; round up to a 4-byte boundary.
		self.recordsOffset = 4 *(((self.fontNameLength + 12)  + 3) // 4)
		data = sstruct.pack(GMAPFormat, self)
		data = data + tobytes(self.psFontName)
		data = data + b"\0" * (self.recordsOffset - len(data))
		for record in self.gmapRecords:
			data = data + record.compile(ttFont)
		return data

	def toXML(self, writer, ttFont):
		writer.comment("Most of this table will be recalculated by the compiler")
		writer.newline()
		formatstring, names, fixes = sstruct.getformat(GMAPFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
		writer.simpletag("PSFontName", value=self.psFontName)
		writer.newline()
		for gmapRecord in self.gmapRecords:
			gmapRecord.toXML(writer, ttFont)

	def fromXML(self, name, attrs, content, ttFont):
		if name == "GMAPRecord":
			if not hasattr(self, "gmapRecords"):
				self.gmapRecords = []
			gmapRecord = GMAPRecord()
			self.gmapRecords.append(gmapRecord)
			for element in content:
				if isinstance(element, basestring):
					continue
				name, attrs, content = element
				gmapRecord.fromXML(name, attrs, content, ttFont)
		else:
			value = attrs["value"]
			if name == "PSFontName":
				self.psFontName = value
			else:	
				setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/G_P_K_G_.py b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
new file mode 100644
index 0000000..4df666f
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/G_P_K_G_.py
@@ -0,0 +1,130 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval, readHex
+from . import DefaultTable
+import sys
+import array
+
GPKGFormat = """
		>	# big endian
		version:	H
		flags:	H
		numGMAPs:		H
		numGlyplets:		H
"""
# The header is followed by two ULONG offset arrays (numGMAPs+1 and
# numGlyplets+1 entries), then by the GMAP blobs and the glyphlet blobs
# they point at. All offsets are relative to the start of the table.


class table_G_P_K_G_(DefaultTable.DefaultTable):

	"""'GPKG' glyphlet package table.

	Stores a list of embedded GMAP blobs (self.GMAPs) and a list of
	glyphlet blobs (self.glyphlets), each addressed through an offset
	array that follows the fixed header.
	"""

	def decompile(self, data, ttFont):
		# Fill version/flags/numGMAPs/numGlyplets from the fixed header.
		dummy, newData = sstruct.unpack2(GPKGFormat, data, self)

		GMAPoffsets = array.array("I")
		endPos = (self.numGMAPs+1) * 4
		GMAPoffsets.fromstring(newData[:endPos])
		if sys.byteorder != "big":
			GMAPoffsets.byteswap()
		self.GMAPs = []
		for i in range(self.numGMAPs):
			start = GMAPoffsets[i]
			end = GMAPoffsets[i+1]
			# Offsets are relative to the start of the complete table data.
			self.GMAPs.append(data[start:end])
		pos = endPos
		endPos = pos + (self.numGlyplets + 1)*4
		glyphletOffsets = array.array("I")
		glyphletOffsets.fromstring(newData[pos:endPos])
		if sys.byteorder != "big":
			glyphletOffsets.byteswap()
		self.glyphlets = []
		for i in range(self.numGlyplets):
			start = glyphletOffsets[i]
			end = glyphletOffsets[i+1]
			self.glyphlets.append(data[start:end])

	def compile(self, ttFont):
		"""Pack header, both offset arrays, then the raw blobs."""
		self.numGMAPs = len(self.GMAPs)
		self.numGlyplets = len(self.glyphlets)
		GMAPoffsets = [0]*(self.numGMAPs + 1)
		glyphletOffsets = [0]*(self.numGlyplets + 1)

		dataList = [sstruct.pack(GPKGFormat, self)]

		# First GMAP blob starts right after the header and both offset arrays.
		pos = len(dataList[0]) + (self.numGMAPs + 1)*4 + (self.numGlyplets + 1)*4
		GMAPoffsets[0] = pos
		for i in range(1, self.numGMAPs +1):
			pos += len(self.GMAPs[i-1])
			GMAPoffsets[i] = pos
		gmapArray = array.array("I", GMAPoffsets)
		if sys.byteorder != "big":
			gmapArray.byteswap()
		dataList.append(gmapArray.tostring())

		# Glyphlet blobs follow the GMAP blobs; continue from the same position.
		glyphletOffsets[0] = pos
		for i in range(1, self.numGlyplets +1):
			pos += len(self.glyphlets[i-1])
			glyphletOffsets[i] = pos
		glyphletArray = array.array("I", glyphletOffsets)
		if sys.byteorder != "big":
			glyphletArray.byteswap()
		dataList.append(glyphletArray.tostring())
		dataList += self.GMAPs
		dataList += self.glyphlets
		data = bytesjoin(dataList)
		return data

	def toXML(self, writer, ttFont):
		"""Dump header fields, then every blob as a hexdata element."""
		writer.comment("Most of this table will be recalculated by the compiler")
		writer.newline()
		formatstring, names, fixes = sstruct.getformat(GPKGFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()

		writer.begintag("GMAPs")
		writer.newline()
		for gmapData in self.GMAPs:
			writer.begintag("hexdata")
			writer.newline()
			writer.dumphex(gmapData)
			writer.endtag("hexdata")
			writer.newline()
		writer.endtag("GMAPs")
		writer.newline()

		writer.begintag("glyphlets")
		writer.newline()
		for glyphletData in self.glyphlets:
			writer.begintag("hexdata")
			writer.newline()
			writer.dumphex(glyphletData)
			writer.endtag("hexdata")
			writer.newline()
		writer.endtag("glyphlets")
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild the table from TTX XML."""
		if name == "GMAPs":
			if not hasattr(self, "GMAPs"):
				self.GMAPs = []
			for element in content:
				if isinstance(element, basestring):
					continue
				itemName, itemAttrs, itemContent = element
				if itemName == "hexdata":
					self.GMAPs.append(readHex(itemContent))
		elif name == "glyphlets":
			if not hasattr(self, "glyphlets"):
				self.glyphlets = []
			for element in content:
				if isinstance(element, basestring):
					continue
				itemName, itemAttrs, itemContent = element
				if itemName == "hexdata":
					self.glyphlets.append(readHex(itemContent))
		else:
			# Bug fix: the original referenced an undefined name `value`
			# here (NameError); header fields come from attrs["value"].
			setattr(self, name, safeEval(attrs["value"]))
diff --git a/Lib/fontTools/ttLib/tables/G_P_O_S_.py b/Lib/fontTools/ttLib/tables/G_P_O_S_.py
new file mode 100644
index 0000000..013c820
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/G_P_O_S_.py
@@ -0,0 +1,5 @@
+from .otBase import BaseTTXConverter
+
+
class table_G_P_O_S_(BaseTTXConverter):
	"""Glyph Positioning ('GPOS') table; all behavior is inherited from
	BaseTTXConverter."""
	pass
diff --git a/Lib/fontTools/ttLib/tables/G_S_U_B_.py b/Lib/fontTools/ttLib/tables/G_S_U_B_.py
new file mode 100644
index 0000000..4403649
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/G_S_U_B_.py
@@ -0,0 +1,5 @@
+from .otBase import BaseTTXConverter
+
+
class table_G_S_U_B_(BaseTTXConverter):
	"""Glyph Substitution ('GSUB') table; all behavior is inherited from
	BaseTTXConverter."""
	pass
diff --git a/Lib/fontTools/ttLib/tables/J_S_T_F_.py b/Lib/fontTools/ttLib/tables/J_S_T_F_.py
new file mode 100644
index 0000000..ddf5405
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/J_S_T_F_.py
@@ -0,0 +1,5 @@
+from .otBase import BaseTTXConverter
+
+
class table_J_S_T_F_(BaseTTXConverter):
	"""Justification ('JSTF') table; all behavior is inherited from
	BaseTTXConverter."""
	pass
diff --git a/Lib/fontTools/ttLib/tables/L_T_S_H_.py b/Lib/fontTools/ttLib/tables/L_T_S_H_.py
new file mode 100644
index 0000000..de79236
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/L_T_S_H_.py
@@ -0,0 +1,51 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import struct
+import array
+
+# XXX I've lowered the strictness, to make sure Apple's own Chicago
+# XXX gets through. They're looking into it, I hope to raise the standards
+# XXX back to normal eventually.
+
class table_L_T_S_H_(DefaultTable.DefaultTable):
	
	"""Linear Threshold ('LTSH') table.

	self.yPels maps glyph name -> one yPel byte value per glyph.
	"""
	
	def decompile(self, data, ttFont):
		version, numGlyphs = struct.unpack(">HH", data[:4])
		data = data[4:]
		assert version == 0, "unknown version: %s" % version
		# Deliberately lax length check (see the module note above): allow a
		# few bytes of slack instead of requiring an exact match.
		assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
		# ouch: the assertion is not true in Chicago!
		#assert numGlyphs == ttFont['maxp'].numGlyphs
		yPels = array.array("B")
		yPels.fromstring(data)
		self.yPels = {}
		# Index by glyph name so the dict survives glyph reordering.
		for i in range(numGlyphs):
			self.yPels[ttFont.getGlyphName(i)] = yPels[i]
	
	def compile(self, ttFont):
		# Pack a version-0 header followed by one byte per glyph, in glyph ID order.
		version = 0
		names = list(self.yPels.keys())
		numGlyphs = len(names)
		yPels = [0] * numGlyphs
		# ouch: the assertion is not true in Chicago!
		#assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
		for name in names:
			yPels[ttFont.getGlyphID(name)] = self.yPels[name]
		yPels = array.array("B", yPels)
		return struct.pack(">HH", version, numGlyphs) + yPels.tostring()
	
	def toXML(self, writer, ttFont):
		# One <yPel> element per glyph, sorted by name for stable output.
		names = sorted(self.yPels.keys())
		for name in names:
			writer.simpletag("yPel", name=name, value=self.yPels[name])
			writer.newline()
	
	def fromXML(self, name, attrs, content, ttFont):
		if not hasattr(self, "yPels"):
			self.yPels = {}
		if name != "yPel":
			return # ignore unknown tags
		self.yPels[attrs["name"]] = safeEval(attrs["value"])
+
diff --git a/Lib/fontTools/ttLib/tables/M_A_T_H_.py b/Lib/fontTools/ttLib/tables/M_A_T_H_.py
new file mode 100644
index 0000000..d894c08
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/M_A_T_H_.py
@@ -0,0 +1,5 @@
+from .otBase import BaseTTXConverter
+
+
class table_M_A_T_H_(BaseTTXConverter):
	"""Mathematical typesetting ('MATH') table; all behavior is inherited
	from BaseTTXConverter."""
	pass
diff --git a/Lib/fontTools/ttLib/tables/M_E_T_A_.py b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
new file mode 100644
index 0000000..60214e8
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/M_E_T_A_.py
@@ -0,0 +1,307 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import struct
+
+
METAHeaderFormat = """
		>	# big endian
		tableVersionMajor:			H
		tableVersionMinor:			H
		metaEntriesVersionMajor:	H
		metaEntriesVersionMinor:	H
		unicodeVersion:				L
		metaFlags:					H
		nMetaRecs:					H
"""
# This record is followed by nMetaRecs of METAGlyphRecordFormat.
# This in turn is followed by as many METAStringRecordFormat entries
# as specified by the METAGlyphRecordFormat entries.
# This is followed by the strings specified in the METAStringRecordFormat
# entries.
METAGlyphRecordFormat = """
		>	# big endian
		glyphID:			H
		nMetaEntry:			H
"""
# This record is followed by a variable length data field:
# 	USHORT or ULONG	hdrOffset
# Offset from start of META table to the beginning
# of this glyphs array of ns Metadata string entries.
# Size determined by metaFlags field.
# METAGlyphRecordFormat entries must be sorted by glyph ID.

METAStringRecordFormat = """
		>	# big endian
		labelID:			H
		stringLen:			H
"""
# This record is followed by a variable length data field:
# 	USHORT or ULONG	stringOffset
# METAStringRecordFormat entries must be sorted in order of labelID.
# There may be more than one entry with the same labelID.
# There may be more than one string with the same content.

# Strings shall be Unicode UTF-8 encoded, and null-terminated.

METALabelDict = {
	0 : "MojikumiX4051", # An integer in the range 1-20
	1 : "UNIUnifiedBaseChars",
	2 : "BaseFontName",
	3 : "Language",
	4 : "CreationDate",
	5 : "FoundryName",
	6 : "FoundryCopyright",
	7 : "OwnerURI",
	8 : "WritingScript",
	10 : "StrokeCount",
	11 : "IndexingRadical",
}


def getLabelString(labelID):
	"""Return the descriptive name for a META label ID, or "Unknown label"."""
	return str(METALabelDict.get(labelID, "Unknown label"))
+
+
class table_M_E_T_A_(DefaultTable.DefaultTable):
	
	"""SING glyphlet metadata ('META') table.

	self.glyphRecords holds GlyphRecord objects, each with a list of
	StringRecord objects. metaFlags bit 0 selects the width of all
	offset fields: 0 -> USHORT, 1 -> ULONG.
	"""
	
	dependencies = []
	
	def decompile(self, data, ttFont):
		"""Unpack the header, the glyph records and their string records."""
		dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
		self.glyphRecords = []
		for i in range(self.nMetaRecs):
			glyphRecord, newData = sstruct.unpack2(METAGlyphRecordFormat, newData, GlyphRecord())
			if self.metaFlags == 0:
				[glyphRecord.offset] = struct.unpack(">H", newData[:2])
				newData = newData[2:]
			elif self.metaFlags == 1:
				# Bug fix: a 4-byte offset must be unpacked as ULONG (">L");
				# the original used ">H" with a 4-byte slice, which raises
				# struct.error.
				[glyphRecord.offset] = struct.unpack(">L", newData[:4])
				newData = newData[4:]
			else:
				assert 0, "The metaFlags field in the META table header has a value other than 0 or 1 :" + str(self.metaFlags)
			glyphRecord.stringRecs = []
			# Bug fix: walk this glyph's string records through a separate
			# cursor. The original clobbered newData here, so for more than
			# one glyph record the next METAGlyphRecordFormat was unpacked
			# from the wrong position (the end of the previous glyph's
			# string records instead of the glyph record array).
			stringRecData = data[glyphRecord.offset:]
			for j in range(glyphRecord.nMetaEntry):
				stringRec, stringRecData = sstruct.unpack2(METAStringRecordFormat, stringRecData, StringRecord())
				if self.metaFlags == 0:
					[stringRec.offset] = struct.unpack(">H", stringRecData[:2])
					stringRecData = stringRecData[2:]
				else:
					# Bug fix: ULONG offset, as above.
					[stringRec.offset] = struct.unpack(">L", stringRecData[:4])
					stringRecData = stringRecData[4:]
				stringRec.string = data[stringRec.offset:stringRec.offset + stringRec.stringLen]
				glyphRecord.stringRecs.append(stringRec)
			self.glyphRecords.append(glyphRecord)

	def compile(self, ttFont):
		"""Pack the table, retrying with wider/narrower offsets as needed.

		Each pass recomputes all offsets; if an offset overflows a USHORT
		the metaFlags width bit is set and the layout is redone (and it is
		cleared again if ULONG offsets turn out to be unnecessary).
		"""
		offsetOK = 0
		self.nMetaRecs = len(self.glyphRecords)
		count = 0
		while ( offsetOK != 1):
			count = count + 1
			if count > 4:
				# A couple of passes must converge; the original called the
				# misspelled name pdb_set_trace() here, which raised NameError.
				raise RuntimeError("META table compile failed to converge on an offset size")
			metaData = sstruct.pack(METAHeaderFormat, self)
			stringRecsOffset = len(metaData) + self.nMetaRecs * (6 + 2*(self.metaFlags & 1))
			stringRecSize = (6 + 2*(self.metaFlags & 1))
			for glyphRec in self.glyphRecords:
				glyphRec.offset = stringRecsOffset
				if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
					self.metaFlags = self.metaFlags + 1
					offsetOK = -1
					break
				metaData = metaData + glyphRec.compile(self)
				stringRecsOffset = stringRecsOffset + (glyphRec.nMetaEntry * stringRecSize)
				# this will be the String Record offset for the next GlyphRecord.
			if offsetOK == -1:
				offsetOK = 0
				continue

			# metaData now contains the header and all of the GlyphRecords. Its length should be
			# the offset to the first StringRecord.
			stringOffset = stringRecsOffset
			for glyphRec in self.glyphRecords:
				assert (glyphRec.offset == len(metaData)), "Glyph record offset did not compile correctly! for rec:" + str(glyphRec)
				for stringRec in glyphRec.stringRecs:
					stringRec.offset = stringOffset
					if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
						self.metaFlags = self.metaFlags + 1
						offsetOK = -1
						break
					metaData = metaData + stringRec.compile(self)
					stringOffset = stringOffset + stringRec.stringLen
			if offsetOK == -1:
				offsetOK = 0
				continue

			# If ULONG offsets were used but everything fits in USHORTs,
			# recompile with the narrower offsets.
			if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
				self.metaFlags = self.metaFlags - 1
				continue
			else:
				offsetOK = 1

			# metaData now contains the header and all of the GlyphRecords and all of the String Records.
			# Its length should be the offset to the first string datum.
			for glyphRec in self.glyphRecords:
				for stringRec in glyphRec.stringRecs:
					assert (stringRec.offset == len(metaData)), "String offset did not compile correctly! for string:" + str(stringRec.string)
					metaData = metaData + stringRec.string

		return metaData

	def toXML(self, writer, ttFont):
		"""Dump the header fields, then each GlyphRecord."""
		writer.comment("Lengths and number of entries in this table will be recalculated by the compiler")
		writer.newline()
		formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
		for glyphRec in self.glyphRecords:
			glyphRec.toXML(writer, ttFont)

	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild the table from TTX XML."""
		if name == "GlyphRecord":
			if not hasattr(self, "glyphRecords"):
				self.glyphRecords = []
			glyphRec = GlyphRecord()
			self.glyphRecords.append(glyphRec)
			for element in content:
				if isinstance(element, basestring):
					continue
				name, attrs, content = element
				glyphRec.fromXML(name, attrs, content, ttFont)
			# Offsets and counts are recomputed by compile().
			glyphRec.offset = -1
			glyphRec.nMetaEntry = len(glyphRec.stringRecs)
		else:
			setattr(self, name, safeEval(attrs["value"]))
+
+
class GlyphRecord(object):

	"""One META glyph record: a glyph ID plus its list of StringRecords."""

	def __init__(self):
		# -1 marks fields that decompile()/compile() will fill in.
		self.glyphID = -1
		self.nMetaEntry = -1
		self.offset = -1
		self.stringRecs = []

	def toXML(self, writer, ttFont):
		"""Write this record and its string records as a <GlyphRecord>."""
		writer.begintag("GlyphRecord")
		writer.newline()
		writer.simpletag("glyphID", value=self.glyphID)
		writer.newline()
		writer.simpletag("nMetaEntry", value=self.nMetaEntry)
		writer.newline()
		for stringRec in self.stringRecs:
			stringRec.toXML(writer, ttFont)
		writer.endtag("GlyphRecord")
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		if name == "StringRecord":
			stringRec = StringRecord()
			self.stringRecs.append(stringRec)
			# StringRecord.fromXML walks the whole content list itself, so
			# one call parses every child. The original invoked it once per
			# non-text child element, redundantly re-parsing all children
			# each time (same final state, wasted work).
			stringRec.fromXML(name, attrs, content, ttFont)
			stringRec.stringLen = len(stringRec.string)
		else:
			setattr(self, name, safeEval(attrs["value"]))

	def compile(self, parentTable):
		"""Pack glyphID/nMetaEntry plus the offset, sized per metaFlags."""
		data = sstruct.pack(METAGlyphRecordFormat, self)
		if parentTable.metaFlags == 0:
			datum = struct.pack(">H", self.offset)
		elif parentTable.metaFlags == 1:
			datum = struct.pack(">L", self.offset)
		data = data + datum
		return data

	def __repr__(self):
		return "GlyphRecord[ glyphID: " + str(self.glyphID) + ", nMetaEntry: " + str(self.nMetaEntry) + ", offset: " + str(self.offset) + " ]"
+
+# XXX The following two functions are really broken around UTF-8 vs Unicode
+
def mapXMLToUTF8(string):
	"""Convert an XML-escaped string back to UTF-8 encoded bytes.

	Decodes "&#x...;" (and "&amp;#x...;") hexadecimal character
	references; every other character is passed through by code point.
	NOTE(review): as the XXX above says, this is broken around UTF-8 vs
	Unicode — pass-through input is treated per byte/character via
	byteord, not decoded as UTF-8; confirm intended input encoding.
	"""
	uString = unicode()
	strLen = len(string)
	i = 0
	while i < strLen:
		prefixLen = 0
		if  (string[i:i+3] == "&#x"):
			prefixLen = 3
		elif  (string[i:i+7] == "&amp;#x"):
			prefixLen = 7
		if prefixLen:
			i = i+prefixLen
			j= i
			# Scan forward to the ";" terminating the character reference.
			while string[i] != ";":
				i = i+1
			valStr = string[j:i]
			
			# eval of "0x<digits>" parses the hex code point value.
			uString = uString + unichr(eval('0x' + valStr))
		else:
			uString = uString + unichr(byteord(string[i]))
		i = i +1
			
	return uString.encode('utf8')
+
+
def mapUTF8toXML(string):
	"""Decode UTF-8 bytes and escape control and non-ASCII characters
	as "&#x...;" hexadecimal character references."""
	parts = []
	for uChar in string.decode('utf8'):
		code = ord(uChar)
		if 0x1F < code < 0x80:
			# Printable ASCII passes through unchanged.
			parts.append(uChar)
		else:
			parts.append("&#x" + hex(code)[2:] + ";")
	return "".join(parts)
+
+
class StringRecord(object):

	"""One META string record: a labelID plus its (UTF-8) string data."""

	def toXML(self, writer, ttFont):
		"""Write this record as a <StringRecord> element."""
		writer.begintag("StringRecord")
		writer.newline()
		writer.simpletag("labelID", value=self.labelID)
		writer.comment(getLabelString(self.labelID))
		writer.newline()
		writer.newline()
		writer.simpletag("string", value=mapUTF8toXML(self.string))
		writer.newline()
		writer.endtag("StringRecord")
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		# Walks the full content list itself; callers may pass the whole
		# <StringRecord> element's children in one call.
		for element in content:
			if isinstance(element, basestring):
				continue
			name, attrs, content = element
			value = attrs["value"]
			if name == "string":
				self.string = mapXMLToUTF8(value)
			else:
				setattr(self, name, safeEval(value))

	def compile(self, parentTable):
		"""Pack labelID/stringLen plus the offset, sized per metaFlags.
		NOTE(review): if metaFlags is neither 0 nor 1, `datum` is unbound
		here and this raises NameError — confirm callers guarantee 0/1."""
		data = sstruct.pack(METAStringRecordFormat, self)
		if parentTable.metaFlags == 0:
			datum = struct.pack(">H", self.offset)
		elif parentTable.metaFlags == 1:
			datum = struct.pack(">L", self.offset)
		data = data + datum
		return data
	
	def __repr__(self):
		# NOTE(review): concatenating self.string assumes it is a str; on
		# Python 3 it is bytes after decompile — verify before relying on repr.
		return "StringRecord [ labelID: " + str(self.labelID) + " aka " + getLabelString(self.labelID) \
			+ ", offset: " + str(self.offset) + ", length: " + str(self.stringLen) + ", string: " +self.string + " ]"
+
diff --git a/Lib/fontTools/ttLib/tables/O_S_2f_2.py b/Lib/fontTools/ttLib/tables/O_S_2f_2.py
new file mode 100644
index 0000000..d29212f
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/O_S_2f_2.py
@@ -0,0 +1,189 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval, num2binary, binary2num
+from . import DefaultTable
+import warnings
+
+
+# panose classification
+
# Ten unsigned byte fields of the PANOSE classification.
panoseFormat = """
	bFamilyType:        B
	bSerifStyle:        B
	bWeight:            B
	bProportion:        B
	bContrast:          B
	bStrokeVariation:   B
	bArmStyle:          B
	bLetterForm:        B
	bMidline:           B
	bXHeight:           B
"""

class Panose(object):
	
	"""Parsed PANOSE classification; one attribute per panoseFormat field."""
	
	def toXML(self, writer, ttFont):
		# One simple tag per PANOSE byte, in declaration order.
		formatstring, names, fixes = sstruct.getformat(panoseFormat)
		for name in names:
			writer.simpletag(name, value=getattr(self, name))
			writer.newline()
	
	def fromXML(self, name, attrs, content, ttFont):
		setattr(self, name, safeEval(attrs["value"]))
+
+
+# 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
+
# Version 0 layout, common to all OS/2 table versions.
OS2_format_0 = """
	>   # big endian
	version:                H       # version
	xAvgCharWidth:          h       # average character width
	usWeightClass:          H       # degree of thickness of strokes
	usWidthClass:           H       # aspect ratio
	fsType:                 h       # type flags
	ySubscriptXSize:        h       # subscript horizontal font size
	ySubscriptYSize:        h       # subscript vertical font size
	ySubscriptXOffset:      h       # subscript x offset
	ySubscriptYOffset:      h       # subscript y offset
	ySuperscriptXSize:      h       # superscript horizontal font size
	ySuperscriptYSize:      h       # superscript vertical font size
	ySuperscriptXOffset:    h       # superscript x offset
	ySuperscriptYOffset:    h       # superscript y offset
	yStrikeoutSize:         h       # strikeout size
	yStrikeoutPosition:     h       # strikeout position
	sFamilyClass:           h       # font family class and subclass
	panose:                 10s     # panose classification number
	ulUnicodeRange1:        L       # character range
	ulUnicodeRange2:        L       # character range
	ulUnicodeRange3:        L       # character range
	ulUnicodeRange4:        L       # character range
	achVendID:              4s      # font vendor identification
	fsSelection:            H       # font selection flags
	fsFirstCharIndex:       H       # first unicode character index
	fsLastCharIndex:        H       # last unicode character index
	sTypoAscender:          h       # typographic ascender
	sTypoDescender:         h       # typographic descender
	sTypoLineGap:           h       # typographic line gap
	usWinAscent:            H       # Windows ascender
	usWinDescent:           H       # Windows descender
"""

# Extra fields appended by each later version.
OS2_format_1_addition =  """
	ulCodePageRange1:   L
	ulCodePageRange2:   L
"""

# (sic: "usMaxContex" is kept as-is for XML round-trip compatibility.)
OS2_format_2_addition =  OS2_format_1_addition + """
	sxHeight:           h
	sCapHeight:         h
	usDefaultChar:      H
	usBreakChar:        H
	usMaxContex:        H
"""

OS2_format_5_addition =  OS2_format_2_addition + """
	usLowerOpticalPointSize:    H
	usUpperOpticalPointSize:    H
"""

bigendian = "	>	# big endian\n"

# Full per-version formats: format 0 plus the matching addition.
OS2_format_1 = OS2_format_0 + OS2_format_1_addition
OS2_format_2 = OS2_format_0 + OS2_format_2_addition
OS2_format_5 = OS2_format_0 + OS2_format_5_addition
# The bare additions get their own endianness header so decompile() can
# unpack just the version-specific tail after the format-0 part.
OS2_format_1_addition = bigendian + OS2_format_1_addition
OS2_format_2_addition = bigendian + OS2_format_2_addition
OS2_format_5_addition = bigendian + OS2_format_5_addition
+
+
class table_O_S_2f_2(DefaultTable.DefaultTable):
	
	"""The OS/2 and Windows metrics table; handles versions 0 through 5."""
	
	def decompile(self, data, ttFont):
		# Format 0 is common to every version; version-specific fields follow.
		dummy, data = sstruct.unpack2(OS2_format_0, data, self)

		if self.version == 1:
			dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
		elif self.version in (2, 3, 4):
			dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
		elif self.version == 5:
			dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
			# Stored as twentieths of a point; expose the values in points.
			self.usLowerOpticalPointSize /= 20
			self.usUpperOpticalPointSize /= 20
		elif self.version != 0:
			from fontTools import ttLib
			raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
		if len(data):
			warnings.warn("too much 'OS/2' table data")

		# Replace the raw 10-byte panose string with a parsed Panose object.
		self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())
	
	def compile(self, ttFont):
		# Temporarily swap the Panose object for its packed bytes, restore after.
		panose = self.panose
		self.panose = sstruct.pack(panoseFormat, self.panose)
		if self.version == 0:
			data = sstruct.pack(OS2_format_0, self)
		elif self.version == 1:
			data = sstruct.pack(OS2_format_1, self)
		elif self.version in (2, 3, 4):
			data = sstruct.pack(OS2_format_2, self)
		elif self.version == 5:
			# Pack from a copy so the attributes stay expressed in points.
			d = self.__dict__.copy()
			d['usLowerOpticalPointSize'] = int(round(self.usLowerOpticalPointSize * 20))
			d['usUpperOpticalPointSize'] = int(round(self.usUpperOpticalPointSize * 20))
			data = sstruct.pack(OS2_format_5, d)
		else:
			from fontTools import ttLib
			raise ttLib.TTLibError("unknown format for OS/2 table: version %s" % self.version)
		self.panose = panose
		return data
	
	def toXML(self, writer, ttFont):
		# Pick the field list matching this table's version.
		if self.version == 1:
			format = OS2_format_1
		elif self.version in (2, 3, 4):
			format = OS2_format_2
		elif self.version == 5:
			format = OS2_format_5
		else:
			format = OS2_format_0
		formatstring, names, fixes = sstruct.getformat(format)
		for name in names:
			value = getattr(self, name)
			if name=="panose":
				writer.begintag("panose")
				writer.newline()
				value.toXML(writer, ttFont)
				writer.endtag("panose")
			elif name in ("ulUnicodeRange1", "ulUnicodeRange2", 
					"ulUnicodeRange3", "ulUnicodeRange4",
					"ulCodePageRange1", "ulCodePageRange2"):
				# Bit-field values are dumped in binary notation.
				writer.simpletag(name, value=num2binary(value))
			elif name in ("fsType", "fsSelection"):
				writer.simpletag(name, value=num2binary(value, 16))
			elif name == "achVendID":
				# NOTE(review): repr()-trimming looks py2-oriented; on py3 a
				# bytes value would serialize with a stray quote — verify.
				writer.simpletag(name, value=repr(value)[1:-1])
			else:
				writer.simpletag(name, value=value)
			writer.newline()
	
	def fromXML(self, name, attrs, content, ttFont):
		if name == "panose":
			self.panose = panose = Panose()
			for element in content:
				if isinstance(element, tuple):
					name, attrs, content = element
					panose.fromXML(name, attrs, content, ttFont)
		elif name in ("ulUnicodeRange1", "ulUnicodeRange2", 
				"ulUnicodeRange3", "ulUnicodeRange4",
				"ulCodePageRange1", "ulCodePageRange2",
				"fsType", "fsSelection"):
			setattr(self, name, binary2num(attrs["value"]))
		elif name == "achVendID":
			setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
		else:
			setattr(self, name, safeEval(attrs["value"]))
+
+
diff --git a/Lib/fontTools/ttLib/tables/S_I_N_G_.py b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
new file mode 100644
index 0000000..d9177e0
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/S_I_N_G_.py
@@ -0,0 +1,98 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+
SINGFormat = """
		>	# big endian
		tableVersionMajor:	H
		tableVersionMinor: 	H
		glyphletVersion:	H
		permissions:		h
		mainGID:			H
		unitsPerEm:			H
		vertAdvance:		h
		vertOrigin:			h
		uniqueName:			28s
		METAMD5:			16s
		nameLength:			1s
"""
# baseGlyphName is a byte string which follows the record above.


class table_S_I_N_G_(DefaultTable.DefaultTable):

	"""SING glyphlet metadata ('SING') table."""

	dependencies = []

	def decompile(self, data, ttFont):
		dummy, rest = sstruct.unpack2(SINGFormat, data, self)
		self.uniqueName = self.decompileUniqueName(self.uniqueName)
		self.nameLength = byteord(self.nameLength)
		assert len(rest) == self.nameLength
		self.baseGlyphName = tostr(rest)

		# Expose the 16-byte MD5 as a readable "[0x.., 0x.., ...]" string;
		# compile() evaluates this text form back into bytes.
		rawMETAMD5 = self.METAMD5
		self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
		for char in rawMETAMD5[1:]:
			self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
		self.METAMD5 = self.METAMD5 + "]"

	def decompileUniqueName(self, data):
		"""Turn the zero-terminated uniqueName field into a printable
		string, escaping non-printable bytes as \\NNN octal sequences."""
		name = ""
		for char in data:
			val = byteord(char)
			if val == 0:
				break
			# Bug fix: the original tested (val > 31) or (val < 128), which
			# is always true, so the octal-escape branch was unreachable.
			if 31 < val < 128:
				name += chr(val)
			else:
				# Bug fix: emit exactly three octal digits. The original's
				# oct()/zfill logic discarded the zfill() result and chopped
				# the wrong prefix character on Python 3 ("0o...").
				name += "\\" + ("%03o" % val)
		return name

	def compile(self, ttFont):
		d = self.__dict__.copy()
		d["nameLength"] = bytechr(len(self.baseGlyphName))
		d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
		# Round-trip the textual MD5 form created in decompile().
		# NOTE: eval() here only ever sees data produced by this module.
		METAMD5List = eval(self.METAMD5)
		d["METAMD5"] = b""
		for val in METAMD5List:
			d["METAMD5"] += bytechr(val)
		assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
		data = sstruct.pack(SINGFormat, d)
		data = data + tobytes(self.baseGlyphName)
		return data

	def compilecompileUniqueName(self, name, length):
		"""Truncate or NUL-pad `name` to exactly `length` bytes.
		(Historical method name is kept for interface compatibility.)"""
		nameLen = len(name)
		if length <= nameLen:
			name = name[:length-1] + "\000"
		else:
			# Bug fix: pad with (length - nameLen) NULs; the original
			# multiplied by the negative (nameLen - length), adding nothing.
			name += (length - nameLen) * "\000"
		return name

	def toXML(self, writer, ttFont):
		"""Dump header fields, then the baseGlyphName."""
		writer.comment("Most of this table will be recalculated by the compiler")
		writer.newline()
		formatstring, names, fixes = sstruct.getformat(SINGFormat)
		for name in names:
			value = getattr(self, name)
			writer.simpletag(name, value=value)
			writer.newline()
		writer.simpletag("baseGlyphName", value=self.baseGlyphName)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		value = attrs["value"]
		# String-valued fields are stored verbatim; the rest are evaluated.
		if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
			setattr(self, name, value)
		else:
			setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/S_V_G_.py b/Lib/fontTools/ttLib/tables/S_V_G_.py
new file mode 100644
index 0000000..c3f00dd
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/S_V_G_.py
@@ -0,0 +1,370 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from . import DefaultTable
+try:
+    import xml.etree.cElementTree as ET
+except ImportError:
+    import xml.etree.ElementTree as ET
+import struct
+import re
+
+__doc__="""
+Compiles/decompiles version 0 and 1 SVG tables from/to XML.
+
+Version 1 is the first SVG definition, implemented in Mozilla before Aug 2013, now deprecated.
+This module will decompile this correctly, but will compile a version 1 table
+only if you add the secret element "<version1/>" to the SVG element in the TTF file.
+
+Version 0 is the joint Adobe-Mozilla proposal, which supports color palettes.
+
+The XML format is:
+  <SVG>
+    <svgDoc endGlyphID="1" startGlyphID="1">
+      <![CDATA[ <complete SVG doc> ]]>
+    </svgDoc>
+...
+	<svgDoc endGlyphID="n" startGlyphID="m">
+      <![CDATA[ <complete SVG doc> ]]>
+    </svgDoc>
+
+    <colorPalettes>
+    	<colorParamUINameID>n</colorParamUINameID>
+    	...
+    	<colorParamUINameID>m</colorParamUINameID>
+    	<colorPalette uiNameID="n">
+    		<colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
+    		...
+    		<colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
+    	</colorPalette>
+    	...
+    	<colorPalette uiNameID="m">
+    		<colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
+    		...
+    		<colorRecord red="<int>" green="<int>" blue="<int>" alpha="<int>" />
+    	</colorPalette>
+    </colorPalettes>
+</SVG>
+
+Color values must be less than 256. 
+
+The number of color records in each </colorPalette> must be the same as
+the number of <colorParamUINameID> elements.
+
+"""
+
+XML = ET.XML
+XMLElement = ET.Element
+xmlToString = ET.tostring
+
+SVG_format_0 = """
+	>   # big endian
+	version:                  H
+	offsetToSVGDocIndex:      L
+	offsetToColorPalettes:    L
+"""
+
+SVG_format_0Size = sstruct.calcsize(SVG_format_0)
+
+SVG_format_1 = """
+	>   # big endian
+	version:                  H
+	numIndicies:              H
+"""
+
+SVG_format_1Size = sstruct.calcsize(SVG_format_1)
+
+doc_index_entry_format_0 = """
+	>   # big endian
+	startGlyphID:             H
+	endGlyphID:               H
+	svgDocOffset:             L
+	svgDocLength:             L
+"""
+
+doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
+
+colorRecord_format_0 = """
+	red:                      B
+	green:                    B
+	blue:                     B
+	alpha:                    B
+"""
+
+
+class table_S_V_G_(DefaultTable.DefaultTable):
+	
+	def decompile(self, data, ttFont):
+		self.docList = None
+		self.colorPalettes = None
+		pos = 0
+		self.version = struct.unpack(">H", data[pos:pos+2])[0]
+		
+		if self.version == 1:
+			self.decompile_format_1(data, ttFont)
+		else:
+			if self.version != 0:
+				print("Unknown SVG table version '%s'. Decompiling as version 0." % (self.version))
+			self.decompile_format_0(data, ttFont)
+
+
+	def decompile_format_0(self, data, ttFont):
+		dummy, data2 = sstruct.unpack2(SVG_format_0, data, self)
+		# read in SVG Documents Index
+		self.decompileEntryList(data)
+
+		# read in colorPalettes table.
+		self.colorPalettes = colorPalettes = ColorPalettes()
+		pos = self.offsetToColorPalettes
+		if pos > 0:
+			colorPalettes.numColorParams = numColorParams = struct.unpack(">H", data[pos:pos+2])[0]
+			if numColorParams > 0:
+				colorPalettes.colorParamUINameIDs = colorParamUINameIDs = []
+				pos = pos + 2
+				i = 0
+				while i < numColorParams:
+					nameID = struct.unpack(">H", data[pos:pos+2])[0]
+					colorParamUINameIDs.append(nameID)
+					pos = pos + 2
+					i += 1
+
+				colorPalettes.numColorPalettes = numColorPalettes = struct.unpack(">H", data[pos:pos+2])[0]
+				pos = pos + 2
+				if numColorPalettes > 0:
+					colorPalettes.colorPaletteList = colorPaletteList = []
+					i = 0
+					while i < numColorPalettes:
+						colorPalette = ColorPalette()
+						colorPaletteList.append(colorPalette)
+						colorPalette.uiNameID = struct.unpack(">H", data[pos:pos+2])[0]
+						pos = pos + 2
+						colorPalette.paletteColors = paletteColors = []
+						j = 0
+						while j < numColorParams:
+							colorRecord, colorPaletteData = sstruct.unpack2(colorRecord_format_0, data[pos:], ColorRecord())
+							paletteColors.append(colorRecord)
+							j += 1
+							pos += 4
+						i += 1
+
+	def decompile_format_1(self, data, ttFont):
+		pos = 2
+		self.numEntries = struct.unpack(">H", data[pos:pos+2])[0]
+		pos += 2
+		self.decompileEntryList(data, pos)
+
+	def decompileEntryList(self, data):
+		# data starts with the first entry of the entry list.
+		pos = subTableStart = self.offsetToSVGDocIndex
+		self.numEntries = numEntries = struct.unpack(">H", data[pos:pos+2])[0]
+		pos += 2
+		if self.numEntries > 0:
+			data2 = data[pos:]
+			self.docList = []
+			self.entries = entries = []
+			i = 0
+			while i < self.numEntries:
+				docIndexEntry, data2 = sstruct.unpack2(doc_index_entry_format_0, data2, DocumentIndexEntry())
+				entries.append(docIndexEntry)
+				i += 1
+
+			for entry in entries:
+				start = entry.svgDocOffset + subTableStart
+				end = start + entry.svgDocLength
+				doc = tostr(data[start:end], "utf-8")
+				self.docList.append( [doc, entry.startGlyphID, entry.endGlyphID] )
+
+	def compile(self, ttFont):
+		if hasattr(self, "version1"):
+			data = self.compileFormat1(ttFont)
+		else:
+			data = self.compileFormat0(ttFont)
+		return data
+
+	def compileFormat0(self, ttFont):
+		version = 0
+		offsetToSVGDocIndex = SVG_format_0Size # I start the SVGDocIndex right after the header.
+		# get SGVDoc info.
+		docList = []
+		entryList = []
+		numEntries = len(self.docList)
+		datum = struct.pack(">H",numEntries)
+		entryList.append(datum)
+		curOffset = len(datum) + doc_index_entry_format_0Size*numEntries
+		for doc, startGlyphID, endGlyphID in self.docList:
+			docOffset = curOffset
+			docLength = len(doc)
+			curOffset += docLength
+			entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
+			entryList.append(entry)
+			docList.append(tobytes(doc, encoding="utf-8"))
+		entryList.extend(docList)
+		svgDocData = bytesjoin(entryList)
+
+		# get colorpalette info.
+		if self.colorPalettes is None:
+			offsetToColorPalettes = 0
+			palettesData = ""
+		else:
+			offsetToColorPalettes = SVG_format_0Size + len(svgDocData)
+			dataList = []
+			numColorParams = len(self.colorPalettes.colorParamUINameIDs)
+			datum = struct.pack(">H", numColorParams)
+			dataList.append(datum)
+			for uiNameId in self.colorPalettes.colorParamUINameIDs:
+				datum = struct.pack(">H", uiNameId)
+				dataList.append(datum)
+			numColorPalettes = len(self.colorPalettes.colorPaletteList)
+			datum = struct.pack(">H", numColorPalettes)
+			dataList.append(datum)
+			for colorPalette in self.colorPalettes.colorPaletteList:
+				datum = struct.pack(">H", colorPalette.uiNameID)
+				dataList.append(datum)
+				for colorRecord in colorPalette.paletteColors:
+					data = struct.pack(">BBBB", colorRecord.red, colorRecord.green, colorRecord.blue, colorRecord.alpha)
+					dataList.append(data)
+			palettesData = bytesjoin(dataList)
+
+		header = struct.pack(">HLL", version, offsetToSVGDocIndex, offsetToColorPalettes)
+		data = [header, svgDocData, palettesData]
+		data = bytesjoin(data)
+		return data
+
+	def compileFormat1(self, ttFont):
+		version = 1
+		numEntries = len(self.docList)
+		header = struct.pack(">HH", version, numEntries)
+		dataList = [header]
+		docList = []
+		curOffset = SVG_format_1Size + doc_index_entry_format_0Size*numEntries
+		for doc, startGlyphID, endGlyphID in self.docList:
+			docOffset = curOffset
+			docLength = len(doc)
+			curOffset += docLength
+			entry = struct.pack(">HHLL", startGlyphID, endGlyphID, docOffset, docLength)
+			dataList.append(entry)
+			docList.append(tobytes(doc, encoding="utf-8"))
+		dataList.extend(docList)
+		data = bytesjoin(dataList)
+		return data
+
+	def toXML(self, writer, ttFont):
+		writer.newline()
+		for doc, startGID, endGID in self.docList:
+			writer.begintag("svgDoc", startGlyphID=startGID, endGlyphID=endGID)
+			writer.newline()
+			writer.writecdata(doc)
+			writer.newline()
+			writer.endtag("svgDoc")
+			writer.newline()
+
+		if (self.colorPalettes is not None) and (self.colorPalettes.numColorParams is not None):
+			writer.begintag("colorPalettes")
+			writer.newline()
+			for uiNameID in self.colorPalettes.colorParamUINameIDs:
+				writer.begintag("colorParamUINameID")
+				writer.writeraw(str(uiNameID))
+				writer.endtag("colorParamUINameID")
+				writer.newline()
+			for colorPalette in self.colorPalettes.colorPaletteList:
+				writer.begintag("colorPalette", [("uiNameID", str(colorPalette.uiNameID))])
+				writer.newline()
+				for colorRecord in colorPalette.paletteColors:
+					colorAttributes = [
+							("red", hex(colorRecord.red)),
+							("green", hex(colorRecord.green)),
+							("blue", hex(colorRecord.blue)),
+							("alpha", hex(colorRecord.alpha)),
+						]
+					writer.begintag("colorRecord", colorAttributes)
+					writer.endtag("colorRecord")
+					writer.newline()
+				writer.endtag("colorPalette")
+				writer.newline()
+
+			writer.endtag("colorPalettes")
+			writer.newline()
+		else:
+			writer.begintag("colorPalettes")
+			writer.endtag("colorPalettes")
+			writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		import re
+		if name == "svgDoc":
+			if not hasattr(self, "docList"):
+				self.docList = []
+			doc = strjoin(content)
+			doc = doc.strip()
+			startGID = int(attrs["startGlyphID"])
+			endGID = int(attrs["endGlyphID"])
+			self.docList.append( [doc, startGID, endGID] )
+		elif  name == "colorPalettes":
+			self.colorPalettes = ColorPalettes()
+			self.colorPalettes.fromXML(name, attrs, content, ttFont)
+			if self.colorPalettes.numColorParams == 0:
+				self.colorPalettes = None
+		else:
+			print("Unknown", name, content)
+
+class DocumentIndexEntry(object):
+	def __init__(self):
+		self.startGlyphID = None # USHORT
+		self.endGlyphID = None # USHORT
+		self.svgDocOffset = None # ULONG
+		self.svgDocLength = None # ULONG
+
+	def __repr__(self):
+		return "startGlyphID: %s, endGlyphID: %s, svgDocOffset: %s, svgDocLength: %s" % (self.startGlyphID, self.endGlyphID, self.svgDocOffset, self.svgDocLength)
+
+class ColorPalettes(object):
+	def __init__(self):
+		self.numColorParams = None # USHORT
+		self.colorParamUINameIDs = [] # list of name table name ID values that provide UI description of each color palette.
+		self.numColorPalettes = None # USHORT
+		self.colorPaletteList = [] # list of ColorPalette records
+
+	def fromXML(self, name, attrs, content, ttFont):
+		for element in content:
+			if isinstance(element, type("")):
+				continue
+			name, attrib, content = element
+			if name == "colorParamUINameID":
+				uiNameID = int(content[0])
+				self.colorParamUINameIDs.append(uiNameID)
+			elif name == "colorPalette":
+				colorPalette = ColorPalette()
+				self.colorPaletteList.append(colorPalette)
+				colorPalette.fromXML((name, attrib, content), ttFont)
+
+		self.numColorParams = len(self.colorParamUINameIDs)
+		self.numColorPalettes = len(self.colorPaletteList)
+		for colorPalette in self.colorPaletteList:
+			if len(colorPalette.paletteColors) != self.numColorParams:
+				raise ValueError("Number of color records in a colorPalette ('%s') does not match the number of colorParamUINameIDs elements ('%s')." % (len(colorPalette.paletteColors), self.numColorParams))
+
+class ColorPalette(object):
+	def __init__(self):
+		self.uiNameID = None # USHORT. name table ID that describes user interface strings associated with this color palette. 
+		self.paletteColors = [] # list of ColorRecords
+
+	def fromXML(self, name, attrs, content, ttFont):
+		self.uiNameID = int(attrs["uiNameID"])
+		for element in content:
+			if isinstance(element, type("")):
+				continue
+			name, attrib, content = element
+			if name == "colorRecord":
+				colorRecord = ColorRecord()
+				self.paletteColors.append(colorRecord)
+				colorRecord.red = eval(attrib["red"])
+				colorRecord.green = eval(attrib["green"])
+				colorRecord.blue = eval(attrib["blue"])
+				colorRecord.alpha = eval(attrib["alpha"])
+
+class ColorRecord(object):
+	def __init__(self):
+		self.red = 255 # all are one byte values.
+		self.green = 255
+		self.blue = 255
+		self.alpha = 255
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_B_.py b/Lib/fontTools/ttLib/tables/T_S_I_B_.py
new file mode 100644
index 0000000..5cc54e2
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I_B_.py
@@ -0,0 +1,5 @@
+from . import asciiTable
+
+class table_T_S_I_B_(asciiTable.asciiTable):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_D_.py b/Lib/fontTools/ttLib/tables/T_S_I_D_.py
new file mode 100644
index 0000000..8228f8a
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I_D_.py
@@ -0,0 +1,5 @@
+from . import asciiTable
+
+class table_T_S_I_D_(asciiTable.asciiTable):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_J_.py b/Lib/fontTools/ttLib/tables/T_S_I_J_.py
new file mode 100644
index 0000000..0983b57
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I_J_.py
@@ -0,0 +1,5 @@
+from . import asciiTable
+
+class table_T_S_I_J_(asciiTable.asciiTable):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_P_.py b/Lib/fontTools/ttLib/tables/T_S_I_P_.py
new file mode 100644
index 0000000..e34a18c
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I_P_.py
@@ -0,0 +1,5 @@
+from . import asciiTable
+
+class table_T_S_I_P_(asciiTable.asciiTable):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_S_.py b/Lib/fontTools/ttLib/tables/T_S_I_S_.py
new file mode 100644
index 0000000..56373e6
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I_S_.py
@@ -0,0 +1,5 @@
+from . import asciiTable
+
+class table_T_S_I_S_(asciiTable.asciiTable):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I_V_.py b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
new file mode 100644
index 0000000..a87e3f7
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I_V_.py
@@ -0,0 +1,5 @@
+from . import asciiTable
+
+class table_T_S_I_V_(asciiTable.asciiTable):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__0.py b/Lib/fontTools/ttLib/tables/T_S_I__0.py
new file mode 100644
index 0000000..bcd6d15
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I__0.py
@@ -0,0 +1,50 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from . import DefaultTable
+import struct
+
+tsi0Format = '>HHl'
+
+def fixlongs(glyphID, textLength, textOffset):
+	return int(glyphID), int(textLength), textOffset	
+
+
+class table_T_S_I__0(DefaultTable.DefaultTable):
+	
+	dependencies = ["TSI1"]
+	
+	def decompile(self, data, ttFont):
+		numGlyphs = ttFont['maxp'].numGlyphs
+		indices = []
+		size = struct.calcsize(tsi0Format)
+		for i in range(numGlyphs + 5):
+			glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size]))
+			indices.append((glyphID, textLength, textOffset))
+			data = data[size:]
+		assert len(data) == 0
+		assert indices[-5] == (0XFFFE, 0, -1409540300), "bad magic number"  # 0xABFC1F34
+		self.indices = indices[:-5]
+		self.extra_indices = indices[-4:]
+	
+	def compile(self, ttFont):
+		if not hasattr(self, "indices"):
+			# We have no corresponding table (TSI1 or TSI3); let's return
+			# no data, which effectively means "ignore us".
+			return ""
+		data = b""
+		for index, textLength, textOffset in self.indices:
+			data = data + struct.pack(tsi0Format, index, textLength, textOffset)
+		data = data + struct.pack(tsi0Format, 0XFFFE, 0, -1409540300)  # 0xABFC1F34
+		for index, textLength, textOffset in self.extra_indices:
+			data = data + struct.pack(tsi0Format, index, textLength, textOffset)
+		return data
+	
+	def set(self, indices, extra_indices):
+		# gets called by 'TSI1' or 'TSI3'
+		self.indices = indices
+		self.extra_indices = extra_indices
+	
+	def toXML(self, writer, ttFont):
+		writer.comment("This table will be calculated by the compiler")
+		writer.newline()
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__1.py b/Lib/fontTools/ttLib/tables/T_S_I__1.py
new file mode 100644
index 0000000..558ce9d
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I__1.py
@@ -0,0 +1,117 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from . import DefaultTable
+
+class table_T_S_I__1(DefaultTable.DefaultTable):
+	
+	extras = {0xfffa: "ppgm", 0xfffb: "cvt", 0xfffc: "reserved", 0xfffd: "fpgm"}
+	
+	indextable = "TSI0"
+	
+	def decompile(self, data, ttFont):
+		indextable = ttFont[self.indextable]
+		self.glyphPrograms = {}
+		for i in range(len(indextable.indices)):
+			glyphID, textLength, textOffset = indextable.indices[i]
+			if textLength == 0x8000:
+				# Ugh. Hi Beat!
+				textLength = indextable.indices[i+1][1]
+			if textLength > 0x8000:
+				pass  # XXX Hmmm.
+			text = data[textOffset:textOffset+textLength]
+			assert len(text) == textLength
+			if text:
+				self.glyphPrograms[ttFont.getGlyphName(glyphID)] = text
+		
+		self.extraPrograms = {}
+		for i in range(len(indextable.extra_indices)):
+			extraCode, textLength, textOffset = indextable.extra_indices[i]
+			if textLength == 0x8000:
+				if self.extras[extraCode] == "fpgm":	# this is the last one
+					textLength = len(data) - textOffset
+				else:
+					textLength = indextable.extra_indices[i+1][1]
+			text = data[textOffset:textOffset+textLength]
+			assert len(text) == textLength
+			if text:
+				self.extraPrograms[self.extras[extraCode]] = text
+	
+	def compile(self, ttFont):
+		if not hasattr(self, "glyphPrograms"):
+			self.glyphPrograms = {}
+			self.extraPrograms = {}
+		data = b''
+		indextable = ttFont[self.indextable]
+		glyphNames = ttFont.getGlyphOrder()
+		
+		indices = []
+		for i in range(len(glyphNames)):
+			if len(data) % 2:
+				data = data + b"\015"  # align on 2-byte boundaries, fill with return chars. Yum.
+			name = glyphNames[i]
+			if name in self.glyphPrograms:
+				text = self.glyphPrograms[name]
+			else:
+				text = b""
+			textLength = len(text)
+			if textLength >= 0x8000:
+				textLength = 0x8000  # XXX ???
+			indices.append((i, textLength, len(data)))
+			data = data + text
+		
+		extra_indices = []
+		codes = sorted(self.extras.items())
+		for i in range(len(codes)):
+			if len(data) % 2:
+				data = data + b"\015"  # align on 2-byte boundaries, fill with return chars.
+			code, name = codes[i]
+			if name in self.extraPrograms:
+				text = self.extraPrograms[name]
+			else:
+				text = b""
+			textLength = len(text)
+			if textLength >= 0x8000:
+				textLength = 0x8000  # XXX ???
+			extra_indices.append((code, textLength, len(data)))
+			data = data + text
+		indextable.set(indices, extra_indices)
+		return data
+	
+	def toXML(self, writer, ttFont):
+		names = sorted(self.glyphPrograms.keys())
+		writer.newline()
+		for name in names:
+			text = self.glyphPrograms[name]
+			if not text:
+				continue
+			writer.begintag("glyphProgram", name=name)
+			writer.newline()
+			writer.write_noindent(text.replace("\r", "\n"))
+			writer.newline()
+			writer.endtag("glyphProgram")
+			writer.newline()
+			writer.newline()
+		extra_names = sorted(self.extraPrograms.keys())
+		for name in extra_names:
+			text = self.extraPrograms[name]
+			if not text:
+				continue
+			writer.begintag("extraProgram", name=name)
+			writer.newline()
+			writer.write_noindent(text.replace("\r", "\n"))
+			writer.newline()
+			writer.endtag("extraProgram")
+			writer.newline()
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if not hasattr(self, "glyphPrograms"):
+			self.glyphPrograms = {}
+			self.extraPrograms = {}
+		lines = strjoin(content).replace("\r", "\n").split("\n")
+		text = '\r'.join(lines[1:-1])
+		if name == "glyphProgram":
+			self.glyphPrograms[attrs["name"]] = text
+		elif name == "extraProgram":
+			self.extraPrograms[attrs["name"]] = text
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__2.py b/Lib/fontTools/ttLib/tables/T_S_I__2.py
new file mode 100644
index 0000000..15c02ab
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I__2.py
@@ -0,0 +1,8 @@
+from fontTools import ttLib
+
+superclass = ttLib.getTableClass("TSI0")
+
+class table_T_S_I__2(superclass):
+	
+	dependencies = ["TSI3"]
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__3.py b/Lib/fontTools/ttLib/tables/T_S_I__3.py
new file mode 100644
index 0000000..eb4087c
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I__3.py
@@ -0,0 +1,11 @@
+from fontTools import ttLib
+
+superclass = ttLib.getTableClass("TSI1")
+
+class table_T_S_I__3(superclass):
+	
+	extras = {0xfffa: "reserved0", 0xfffb: "reserved1", 0xfffc: "reserved2", 0xfffd: "reserved3"}
+	
+	indextable = "TSI2"
+
+
diff --git a/Lib/fontTools/ttLib/tables/T_S_I__5.py b/Lib/fontTools/ttLib/tables/T_S_I__5.py
new file mode 100644
index 0000000..8fa801b
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/T_S_I__5.py
@@ -0,0 +1,43 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import sys
+import array
+
+
+class table_T_S_I__5(DefaultTable.DefaultTable):
+	
+	def decompile(self, data, ttFont):
+		numGlyphs = ttFont['maxp'].numGlyphs
+		assert len(data) == 2 * numGlyphs
+		a = array.array("H")
+		a.fromstring(data)
+		if sys.byteorder != "big":
+			a.byteswap()
+		self.glyphGrouping = {}
+		for i in range(numGlyphs):
+			self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]
+	
+	def compile(self, ttFont):
+		glyphNames = ttFont.getGlyphOrder()
+		a = array.array("H")
+		for i in range(len(glyphNames)):
+			a.append(self.glyphGrouping[glyphNames[i]])
+		if sys.byteorder != "big":
+			a.byteswap()
+		return a.tostring()
+	
+	def toXML(self, writer, ttFont):
+		names = sorted(self.glyphGrouping.keys())
+		for glyphName in names:
+			writer.simpletag("glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName])
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if not hasattr(self, "glyphGrouping"):
+			self.glyphGrouping = {}
+		if name != "glyphgroup":
+			return
+		self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
+
diff --git a/Lib/fontTools/ttLib/tables/V_O_R_G_.py b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
new file mode 100644
index 0000000..19f25b5
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/V_O_R_G_.py
@@ -0,0 +1,142 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import operator
+import struct
+
+
+class table_V_O_R_G_(DefaultTable.DefaultTable):
+
+	""" This table is structured so that you can treat it like a dictionary keyed by glyph name.
+	ttFont['VORG'][<glyphName>] will return the vertical origin for any glyph
+	ttFont['VORG'][<glyphName>] = <value> will set the vertical origin for any glyph.
+	"""
+
+	def decompile(self, data, ttFont):
+		self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
+		self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics = struct.unpack(">HHhH", data[:8])
+		assert (self.majorVersion <= 1), "Major version of VORG table is higher than I know how to handle"
+		data = data[8:]
+		vids = []
+		gids = []
+		pos = 0
+		for i in range(self.numVertOriginYMetrics):
+			gid, vOrigin = struct.unpack(">Hh", data[pos:pos+4])
+			pos += 4
+			gids.append(gid)
+			vids.append(vOrigin)
+
+		self.VOriginRecords = vOrig = {}
+		glyphOrder = ttFont.getGlyphOrder()
+		try:
+			names = map(operator.getitem, [glyphOrder]*self.numVertOriginYMetrics, gids)
+		except IndexError:
+			getGlyphName = self.getGlyphName
+			names = map(getGlyphName, gids )
+
+		list(map(operator.setitem, [vOrig]*self.numVertOriginYMetrics, names, vids))
+
+
+	def compile(self, ttFont):
+		vorgs = list(self.VOriginRecords.values())
+		names = list(self.VOriginRecords.keys())
+		nameMap = ttFont.getReverseGlyphMap()
+		lenRecords = len(vorgs) 
+		try:
+			gids = map(operator.getitem, [nameMap]*lenRecords, names)
+		except KeyError:
+			nameMap = ttFont.getReverseGlyphMap(rebuild=True)
+			gids = map(operator.getitem, [nameMap]*lenRecords, names)
+		vOriginTable = list(zip(gids, vorgs))
+		self.numVertOriginYMetrics = lenRecords
+		vOriginTable.sort() # must be in ascending GID order
+		dataList = [ struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
+		header = struct.pack(">HHhH", self.majorVersion, self.minorVersion, self.defaultVertOriginY, self.numVertOriginYMetrics)
+		dataList.insert(0, header)
+		data = bytesjoin(dataList)
+		return data
+
+	def toXML(self, writer, ttFont):
+		writer.simpletag("majorVersion", value=self.majorVersion)
+		writer.newline()
+		writer.simpletag("minorVersion", value=self.minorVersion)
+		writer.newline()
+		writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
+		writer.newline()
+		writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
+		writer.newline()
+		vOriginTable = []
+		glyphNames = self.VOriginRecords.keys()
+		for glyphName in glyphNames:
+			try:
+				gid = ttFont.getGlyphID(glyphName)
+			except:
+				assert 0, "VORG table contains a glyph name not in ttFont.getGlyphNames(): " + str(glyphName)
+			vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
+		vOriginTable.sort()
+		for entry in vOriginTable:
+			vOriginRec = VOriginRecord(entry[1], entry[2])
+			vOriginRec.toXML(writer, ttFont)
+
+	def fromXML(self, name, attrs, content, ttFont):
+		if not hasattr(self, "VOriginRecords"):
+			self.VOriginRecords = {}
+		self.getGlyphName = ttFont.getGlyphName # for use in get/set item functions, for access by GID
+		if name == "VOriginRecord":
+			vOriginRec = VOriginRecord()
+			for element in content:
+				if isinstance(element, basestring):
+					continue
+				name, attrs, content = element
+				vOriginRec.fromXML(name, attrs, content, ttFont)
+			self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
+		elif "value" in attrs:
+			setattr(self, name, safeEval(attrs["value"]))
+
+
+	def __getitem__(self, glyphSelector):
+		if isinstance(glyphSelector, int):
+			# its a gid, convert to glyph name
+			glyphSelector = self.getGlyphName(glyphSelector)
+
+		if glyphSelector not in self.VOriginRecords:
+			return self.defaultVertOriginY
+			
+		return self.VOriginRecords[glyphSelector]
+
+	def __setitem__(self, glyphSelector, value):
+		if isinstance(glyphSelector, int):
+			# its a gid, convert to glyph name
+			glyphSelector = self.getGlyphName(glyphSelector)
+
+		if  value != self.defaultVertOriginY:
+			self.VOriginRecords[glyphSelector] = value
+		elif glyphSelector in self.VOriginRecords:
+			del self.VOriginRecords[glyphSelector]
+
+	def __delitem__(self, glyphSelector):
+		del self.VOriginRecords[glyphSelector]
+
+class VOriginRecord(object):
+
+	def __init__(self, name = None, vOrigin = None):
+		self.glyphName = name
+		self.vOrigin = vOrigin
+
+	def toXML(self, writer, ttFont):
+		writer.begintag("VOriginRecord")
+		writer.newline()
+		writer.simpletag("glyphName", value=self.glyphName)
+		writer.newline()
+		writer.simpletag("vOrigin", value=self.vOrigin)
+		writer.newline()
+		writer.endtag("VOriginRecord")
+		writer.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		value = attrs["value"]
+		if name == "glyphName":
+			setattr(self, name, value)
+		else:
+			setattr(self, name, safeEval(value))
diff --git a/Lib/fontTools/ttLib/tables/__init__.py b/Lib/fontTools/ttLib/tables/__init__.py
new file mode 100644
index 0000000..bdf8d96
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/__init__.py
@@ -0,0 +1,56 @@
+# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
+def _moduleFinderHint():
+	"""Dummy function to let modulefinder know what tables may be
+	dynamically imported. Generated by MetaTools/buildTableList.py.
+	"""
+	from . import B_A_S_E_
+	from . import C_B_D_T_
+	from . import C_B_L_C_
+	from . import C_F_F_
+	from . import C_O_L_R_
+	from . import C_P_A_L_
+	from . import D_S_I_G_
+	from . import E_B_D_T_
+	from . import E_B_L_C_
+	from . import F_F_T_M_
+	from . import G_D_E_F_
+	from . import G_M_A_P_
+	from . import G_P_K_G_
+	from . import G_P_O_S_
+	from . import G_S_U_B_
+	from . import J_S_T_F_
+	from . import L_T_S_H_
+	from . import M_E_T_A_
+	from . import O_S_2f_2
+	from . import S_I_N_G_
+	from . import S_V_G_
+	from . import T_S_I_B_
+	from . import T_S_I_D_
+	from . import T_S_I_J_
+	from . import T_S_I_P_
+	from . import T_S_I_S_
+	from . import T_S_I_V_
+	from . import T_S_I__0
+	from . import T_S_I__1
+	from . import T_S_I__2
+	from . import T_S_I__3
+	from . import T_S_I__5
+	from . import V_O_R_G_
+	from . import _c_m_a_p
+	from . import _c_v_t
+	from . import _f_p_g_m
+	from . import _g_a_s_p
+	from . import _g_l_y_f
+	from . import _h_d_m_x
+	from . import _h_e_a_d
+	from . import _h_h_e_a
+	from . import _h_m_t_x
+	from . import _k_e_r_n
+	from . import _l_o_c_a
+	from . import _m_a_x_p
+	from . import _n_a_m_e
+	from . import _p_o_s_t
+	from . import _p_r_e_p
+	from . import _s_b_i_x
+	from . import _v_h_e_a
+	from . import _v_m_t_x
diff --git a/Lib/fontTools/ttLib/tables/_c_m_a_p.py b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
new file mode 100644
index 0000000..fbfd2ee
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_c_m_a_p.py
@@ -0,0 +1,1303 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval, readHex
+from fontTools.unicode import Unicode
+from . import DefaultTable
+import sys
+import struct
+import array
+import operator
+
+
class table__c_m_a_p(DefaultTable.DefaultTable):
	
	"""The 'cmap' table: maps character codes to glyphs through a list of
	platform/encoding-specific subtables.
	"""
	
	def getcmap(self, platformID, platEncID):
		# Return the first subtable matching (platformID, platEncID),
		# or None when the font has no such subtable.
		for subtable in self.tables:
			if (subtable.platformID == platformID and 
					subtable.platEncID == platEncID):
				return subtable
		return None # not found
	
	def decompile(self, data, ttFont):
		"""Parse the cmap header and per-subtable records from binary data.

		Subtable payloads are only header-decompiled here; the rest of
		each subtable is decompiled lazily on first attribute access
		(see CmapSubtable.__getattr__).
		"""
		tableVersion, numSubTables = struct.unpack(">HH", data[:4])
		self.tableVersion = int(tableVersion)
		self.tables = tables = []
		seenOffsets = {}
		for i in range(numSubTables):
			platformID, platEncID, offset = struct.unpack(
					">HHl", data[4+i*8:4+(i+1)*8])
			platformID, platEncID = int(platformID), int(platEncID)
			format, length = struct.unpack(">HH", data[offset:offset+4])
			# Formats 8/10/12/13 store a 32-bit length after a 16-bit
			# reserved word; format 14 stores a 32-bit length right after
			# the format word.
			if format in [8,10,12,13]:
				format, reserved, length = struct.unpack(">HHL", data[offset:offset+8])
			elif format in [14]:
				format, length = struct.unpack(">HL", data[offset:offset+6])
				
			if not length:
				print("Error: cmap subtable is reported as having zero length: platformID %s, platEncID %s,  format %s offset %s. Skipping table." % (platformID, platEncID,format, offset))
				continue
			if format not in cmap_classes:
				table = cmap_format_unknown(format)
			else:
				table = cmap_classes[format](format)
			table.platformID = platformID
			table.platEncID = platEncID
			# Note that by default we decompile only the subtable header info;
			# any other data gets decompiled only when an attribute of the
			# subtable is referenced.
			table.decompileHeader(data[offset:offset+int(length)], ttFont)
			# Multiple records may point at the same subtable data; share
			# the decoded mapping instead of decompiling it twice.
			if offset in seenOffsets:
				table.cmap = tables[seenOffsets[offset]].cmap
			else:
				seenOffsets[offset] = i
			tables.append(table)
	
	def compile(self, ttFont):
		"""Compile the table back to binary, deduplicating subtables that
		share a mapping (same object) or compile to identical bytes."""
		self.tables.sort()    # sort according to the spec; see CmapSubtable.__lt__()
		numSubTables = len(self.tables)
		totalOffset = 4 + 8 * numSubTables
		data = struct.pack(">HH", self.tableVersion, numSubTables)
		tableData = b""
		seen = {}  # Some tables are the same object reference. Don't compile them twice.
		done = {}  # Some tables are different objects, but compile to the same data chunk
		for table in self.tables:
			try:
				offset = seen[id(table.cmap)]
			except KeyError:
				chunk = table.compile(ttFont)
				if chunk in done:
					offset = done[chunk]
				else:
					offset = seen[id(table.cmap)] = done[chunk] = totalOffset + len(tableData)
					tableData = tableData + chunk
			data = data + struct.pack(">HHl", table.platformID, table.platEncID, offset)
		return data + tableData
	
	def toXML(self, writer, ttFont):
		# Emit the table version followed by each subtable's XML.
		writer.simpletag("tableVersion", version=self.tableVersion)
		writer.newline()
		for table in self.tables:
			table.toXML(writer, ttFont)
	
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.tables from TTX XML elements named cmap_format_N."""
		if name == "tableVersion":
			self.tableVersion = safeEval(attrs["version"])
			return
		if name[:12] != "cmap_format_":
			return
		if not hasattr(self, "tables"):
			self.tables = []
		format = safeEval(name[12:])
		if format not in cmap_classes:
			table = cmap_format_unknown(format)
		else:
			table = cmap_classes[format](format)
		table.platformID = safeEval(attrs["platformID"])
		table.platEncID = safeEval(attrs["platEncID"])
		table.fromXML(name, attrs, content, ttFont)
		self.tables.append(table)
+
+
class CmapSubtable(object):
	
	"""Base class for all cmap subtable formats.

	Subtables are decompiled lazily: table__c_m_a_p.decompile() only calls
	decompileHeader(), which stashes the raw remainder in self.data; the
	first access to any other attribute triggers the full decompile via
	__getattr__.
	"""
	
	def __init__(self, format):
		self.format = format
		self.data = None   # raw payload; not-None means "not yet decompiled"
		self.ttFont = None

	def __getattr__(self, attr):
		# allow lazy decompilation of subtables.
		if attr[:2] == '__': # don't handle requests for member functions like '__lt__'
			raise AttributeError(attr)
		if self.data is None:
			raise AttributeError(attr)
		self.decompile(None, None) # use saved data.
		self.data = None # Once this table has been decompiled, make sure we don't
						# just return the original data. Also avoids recursion when
						# called with an attribute that the cmap subtable doesn't have.
		return getattr(self, attr)
	
	def decompileHeader(self, data, ttFont):
		"""Parse the common 6-byte (format, length, language) header and
		keep the remaining bytes in self.data for lazy decompilation."""
		format, length, language = struct.unpack(">HHH", data[:6])
		assert len(data) == length, "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
		self.format = int(format)
		self.length = int(length)
		self.language = int(language)
		self.data = data[6:]
		self.ttFont = ttFont

	def toXML(self, writer, ttFont):
		"""Write this subtable as XML: a container tag and one <map>
		element per (code, glyph name) pair."""
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				("language", self.language),
				])
		writer.newline()
		codes = sorted(self.cmap.items())
		self._writeCodes(codes, writer)
		writer.endtag(self.__class__.__name__)
		writer.newline()

	def isUnicode(self):
		# Platform 0 is Unicode; platform 3 (Windows) encodings 1 (BMP)
		# and 10 (full repertoire) are Unicode as well.
		return (self.platformID == 0 or
			(self.platformID == 3 and self.platEncID in [1, 10]))

	def isSymbol(self):
		# Windows symbol encoding.
		return self.platformID == 3 and self.platEncID == 0

	def _writeCodes(self, codes, writer):
		# Emit one <map> element per pair; annotate with the Unicode
		# character name when this is a Unicode subtable.
		isUnicode = self.isUnicode()
		for code, name in codes:
			writer.simpletag("map", code=hex(code), name=name)
			if isUnicode:
				writer.comment(Unicode[code])
			writer.newline()
	
	def __lt__(self, other):
		"""Order subtables as the spec requires: by platformID, then
		platEncID, then language.

		Bug fix: the original implementation appended self.__dict__ and
		other.__dict__ to the comparison tuples as a tie-breaker.  Dicts
		are not orderable in Python 3, so sorting two subtables with equal
		(platformID, platEncID, language) raised TypeError.  Equal keys
		now simply compare as not-less-than in either direction.
		"""
		if not isinstance(other, CmapSubtable):
			return NotImplemented
		selfTuple = (
			getattr(self, "platformID", None),
			getattr(self, "platEncID", None),
			getattr(self, "language", None))
		otherTuple = (
			getattr(other, "platformID", None),
			getattr(other, "platEncID", None),
			getattr(other, "language", None))
		return selfTuple < otherTuple
+
+
class cmap_format_0(CmapSubtable):
	
	"""Format 0: the classic 256-entry byte-encoding table (Macintosh)."""
	
	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling  the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			# Bug fix: this branch referenced the undefined names 'offset'
			# and 'length' (a guaranteed NameError); the caller already
			# passes exactly the subtable's slice of the font data.
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"
		data = self.data # decompileHeader assigns the data after the header to self.data
		assert 262 == self.length, "Format 0 cmap subtable not 262 bytes"
		glyphIdArray = array.array("B")
		glyphIdArray.fromstring(self.data)
		self.cmap = cmap = {}
		lenArray = len(glyphIdArray)
		charCodes = list(range(lenArray))
		names = map(self.ttFont.getGlyphName, glyphIdArray)
		list(map(operator.setitem, [cmap]*lenArray, charCodes, names))

	def compile(self, ttFont):
		"""Compile to binary; echo the raw data back unchanged when the
		subtable was never decompiled."""
		if self.data:
			return struct.pack(">HHH", 0, 262, self.language) + self.data

		charCodeList = sorted(self.cmap.items())
		charCodes = [entry[0] for entry in charCodeList]
		valueList = [entry[1] for entry in charCodeList]
		# Format 0 must map exactly the 256 one-byte codes.
		assert charCodes == list(range(256))
		valueList = map(ttFont.getGlyphID, valueList)

		glyphIdArray = array.array("B", valueList)
		data = struct.pack(">HHH", 0, 262, self.language) + glyphIdArray.tostring()
		assert len(data) == 262
		return data
	
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap from TTX <map> elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			cmap[safeEval(attrs["code"])] = attrs["name"]
+
+
subHeaderFormat = ">HHhH"


class SubHeader(object):
	"""Mutable record for one format 2 sub-header entry
	(firstCode, entryCount, idDelta, idRangeOffset + its glyph subarray)."""

	def __init__(self):
		# All scalar fields start unset; decompile()/compile() fill them in.
		self.firstCode = self.entryCount = None
		self.idDelta = self.idRangeOffset = None
		# Per-instance list of glyph indices covered by this sub-header.
		self.glyphIndexArray = []
+		
class cmap_format_2(CmapSubtable):
	
	"""Format 2: 'high-byte mapping through table', used for legacy CJK
	encodings that mix one-byte and two-byte character codes.
	"""
	
	def setIDDelta(self, subHeader):
		"""Choose subHeader.idDelta so the lowest nonzero glyph index in
		subHeader.glyphIndexArray is stored as 1, rewriting the array in
		place.  This maximizes the chance that glyph-index subranges can be
		shared between sub-headers at compile time."""
		subHeader.idDelta = 0
		# find the minGI which is not zero.
		minGI = subHeader.glyphIndexArray[0]
		for gid in subHeader.glyphIndexArray:
			if (gid != 0) and (gid < minGI):
				minGI = gid
		# The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
		# idDelta is a short, and must be between -32K and 32K. minGI can be between 1 and 64K.
		# We would like to pick an idDelta such that the first glyphArray GID is 1, 
		# so that we are more likely to be able to combine glypharray GID subranges.
		# This means that we have a problem when minGI is > 32K
		# Since the final gi is reconstructed from the glyphArray GID by:
		#    (short)finalGID = (gid +  idDelta) % 0x10000),
		# we can get from a glypharray GID of 1 to a final GID of 65K by subtracting 2, and casting the
		# negative number to an unsigned short. 

		if  (minGI > 1):
			if  minGI > 0x7FFF:
				subHeader.idDelta = -(0x10000 - minGI) -1
			else:
				subHeader.idDelta =  minGI -1
			idDelta = subHeader.idDelta
			for i in range(subHeader.entryCount):
				gid = subHeader.glyphIndexArray[i]
				if gid > 0: 
					subHeader.glyphIndexArray[i] = gid - idDelta 


	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling  the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			# Bug fix: this branch referenced the undefined names 'offset'
			# and 'length' (a guaranteed NameError); the caller already
			# passes exactly the subtable's slice of the font data.
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"

		data = self.data # decompileHeader assigns the data after the header to self.data
		subHeaderKeys = []
		maxSubHeaderindex = 0
		# get the key array, and determine the number of subHeaders.
		allKeys = array.array("H")
		allKeys.fromstring(data[:512])
		data = data[512:]
		if sys.byteorder != "big":
			allKeys.byteswap()
		subHeaderKeys = [ key//8 for key in allKeys]
		maxSubHeaderindex = max(subHeaderKeys)
	
		#Load subHeaders
		subHeaderList = []
		pos = 0
		for i in range(maxSubHeaderindex + 1):
			subHeader = SubHeader()
			(subHeader.firstCode, subHeader.entryCount, subHeader.idDelta, \
				subHeader.idRangeOffset) = struct.unpack(subHeaderFormat, data[pos:pos + 8])
			pos += 8
			giDataPos = pos + subHeader.idRangeOffset-2
			giList = array.array("H")
			giList.fromstring(data[giDataPos:giDataPos + subHeader.entryCount*2])
			if sys.byteorder != "big":
				giList.byteswap()
			subHeader.glyphIndexArray = giList
			subHeaderList.append(subHeader)
		# How this gets processed. 
		# Charcodes may be one or two bytes.
		# The first byte of a charcode is mapped through the  subHeaderKeys, to select
		# a subHeader. For any subheader but 0, the next byte is then mapped through the
		# selected subheader. If subheader Index 0 is selected, then the byte itself is 
		# mapped through the subheader, and there is no second byte.
		# Then assume that the subsequent byte is the first byte of the next charcode,and repeat.
		# 
		# Each subheader references a range in the glyphIndexArray whose length is entryCount.
		# The range in glyphIndexArray referenced by a subheader may overlap with the range in glyphIndexArray
		# referenced by another subheader.
		# The only subheader that will be referenced by more than one first-byte value is the subheader
		# that maps the entire range of glyphID values to glyphIndex 0, e.g notdef:
		#	 {firstChar 0, EntryCount 0,idDelta 0,idRangeOffset xx}
		# A byte being mapped though a subheader is treated as in index into a mapping of array index to font glyphIndex.
		# A subheader specifies a subrange within (0...256) by the
		# firstChar and EntryCount values. If the byte value is outside the subrange, then the glyphIndex is zero
		# (e.g. glyph not in font).
		# If the byte index is in the subrange, then an offset index is calculated as (byteIndex - firstChar).
		# The index to glyphIndex mapping is a subrange of the glyphIndexArray. You find the start of the subrange by 
		# counting idRangeOffset bytes from the idRangeOffset word. The first value in this subrange is the
		# glyphIndex for the index firstChar. The offset index should then be used in this array to get the glyphIndex.
		# Example for Logocut-Medium
		# first byte of charcode = 129; selects subheader 1.
		# subheader 1 = {firstChar 64, EntryCount 108,idDelta 42,idRangeOffset 0252}
		# second byte of charCode = 66
		# the index offset = 66-64 = 2.
		# The subrange of the glyphIndexArray starting at 0x0252 bytes from the idRangeOffset word is:
		# [glyphIndexArray index], [subrange array index] = glyphIndex
		# [256], [0]=1 	from charcode [129, 64]
		# [257], [1]=2  	from charcode [129, 65]
		# [258], [2]=3  	from charcode [129, 66]
		# [259], [3]=4  	from charcode [129, 67]
		# So, the glyphIndex = 3 from the array. Then if idDelta is not zero and the glyph ID is not zero, 
		# add it to the glyphID to get the final glyphIndex
		# value. In this case the final glyph index = 3+ 42 -> 45 for the final glyphIndex. Whew!
		
		self.data = b""
		self.cmap = cmap = {}
		notdefGI = 0
		for firstByte in range(256):
			subHeadindex = subHeaderKeys[firstByte]
			subHeader = subHeaderList[subHeadindex]
			if subHeadindex == 0:
				if (firstByte < subHeader.firstCode) or (firstByte >= subHeader.firstCode + subHeader.entryCount):
					continue # gi is notdef.
				else:
					charCode = firstByte
					offsetIndex = firstByte - subHeader.firstCode
					gi = subHeader.glyphIndexArray[offsetIndex]
					if gi != 0:
						gi = (gi + subHeader.idDelta) % 0x10000
					else:
						continue # gi is notdef.
				cmap[charCode] = gi
			else:
				if subHeader.entryCount:
					charCodeOffset = firstByte * 256 + subHeader.firstCode
					for offsetIndex in range(subHeader.entryCount):
						charCode = charCodeOffset + offsetIndex
						gi = subHeader.glyphIndexArray[offsetIndex]
						if gi != 0:
							gi = (gi + subHeader.idDelta) % 0x10000
						else:
							continue
						cmap[charCode] = gi
				# If not subHeader.entryCount, then all char codes with this first byte are
				# mapped to .notdef. We can skip this subtable, and leave the glyphs un-encoded, which is the 
				# same as mapping it to .notdef.
		# cmap values are GID's.
		glyphOrder = self.ttFont.getGlyphOrder()
		gids = list(cmap.values())
		charCodes = list(cmap.keys())
		lenCmap = len(gids)
		try:
			names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
		except IndexError:
			getGlyphName = self.ttFont.getGlyphName
			names = list(map(getGlyphName, gids ))
		list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
	
		
	def compile(self, ttFont):
		"""Compile to binary.  Echoes raw data back unchanged when the
		subtable was never decompiled."""
		if self.data:
			return struct.pack(">HHH", self.format, self.length, self.language) + self.data
		kEmptyTwoCharCodeRange = -1
		notdefGI = 0

		items = sorted(self.cmap.items())
		charCodes = [item[0] for item in items]
		names = [item[1] for item in items]
		nameMap = ttFont.getReverseGlyphMap()
		lenCharCodes = len(charCodes) 
		try:
			gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
		except KeyError:
			nameMap = ttFont.getReverseGlyphMap(rebuild=True)
			try:
				gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
			except KeyError:
				# allow virtual GIDs in format 2 tables
				gids = []
				for name in names:
					try:
						gid = nameMap[name]
					except KeyError:
						try:
							if (name[:3] == 'gid'):
								# Bug fix: was eval(name[3:]); int() parses the
								# numeric virtual-gid suffix safely.
								gid = int(name[3:])
							else:
								gid = ttFont.getGlyphID(name)
						except Exception:
							raise KeyError(name)

					gids.append(gid)

		# Process the (char code to gid) item list  in char code order.
		# By definition, all one byte char codes map to subheader 0. 
		# For all the two byte char codes, we assume that the first byte maps maps to the empty subhead (with an entry count of 0, 
		# which defines all char codes in its range to map to notdef) unless proven otherwise.
		# Note that since the char code items are processed in char code order, all the char codes with the
		# same first byte are in sequential order.

		subHeaderKeys = [ kEmptyTwoCharCodeRange for x in  range(256)] # list of indices into subHeaderList.
		subHeaderList = []

		# We force this subheader entry 0  to exist in the subHeaderList in the case where some one comes up
		# with a cmap where all the one byte char codes map to notdef,
		# with the result that the subhead 0 would not get created just by processing the item list.
		charCode = charCodes[0]
		if charCode > 255:
			subHeader = SubHeader()
			subHeader.firstCode = 0
			subHeader.entryCount = 0
			subHeader.idDelta = 0
			subHeader.idRangeOffset = 0
			subHeaderList.append(subHeader)
			
		
		lastFirstByte = -1
		items = zip(charCodes, gids)
		for charCode, gid in items:
			if gid == 0:
				continue
			firstbyte = charCode >> 8
			secondByte = charCode & 0x00FF

			if firstbyte != lastFirstByte: # Need to update the current subhead, and start a new one.
				if lastFirstByte > -1:
					# fix GI's and iDelta of current subheader.
					self.setIDDelta(subHeader)

					# If it was subheader 0 for one-byte charCodes, then we need to set the subHeaderKeys value to zero
					# for the indices matching the char codes.
					if lastFirstByte == 0:
						for index in range(subHeader.entryCount):
							charCode = subHeader.firstCode + index
							subHeaderKeys[charCode] = 0

					assert (subHeader.entryCount == len(subHeader.glyphIndexArray)), "Error - subhead entry count does not match len of glyphID subrange."
				# init new subheader
				subHeader = SubHeader()
				subHeader.firstCode = secondByte
				subHeader.entryCount = 1
				subHeader.glyphIndexArray.append(gid)
				subHeaderList.append(subHeader)
				subHeaderKeys[firstbyte] = len(subHeaderList) -1
				lastFirstByte = firstbyte
			else:
				# need to fill in with notdefs all the code points between the last charCode and the current charCode.
				codeDiff = secondByte - (subHeader.firstCode + subHeader.entryCount)
				for i in range(codeDiff):
					subHeader.glyphIndexArray.append(notdefGI)
				subHeader.glyphIndexArray.append(gid)
				subHeader.entryCount = subHeader.entryCount + codeDiff + 1
					
		# fix GI's and iDelta of last subheader that we we added to the subheader array.
		self.setIDDelta(subHeader)

		# Now we add a final subheader for the subHeaderKeys which maps to empty two byte charcode ranges.
		subHeader = SubHeader()
		subHeader.firstCode = 0
		subHeader.entryCount = 0
		subHeader.idDelta = 0
		subHeader.idRangeOffset = 2
		subHeaderList.append(subHeader)
		emptySubheadIndex = len(subHeaderList) - 1
		for index in range(256):
			if subHeaderKeys[index] == kEmptyTwoCharCodeRange:
				subHeaderKeys[index] = emptySubheadIndex
		# Since this is the last subheader, the GlyphIndex Array starts two bytes after the start of the
		# idRangeOffset word of this subHeader. We can safely point to the first entry in the GlyphIndexArray,
		# since the first subrange of the GlyphIndexArray is for subHeader 0, which always starts with 
		# charcode 0 and GID 0.
		
		idRangeOffset = (len(subHeaderList)-1)*8  + 2 # offset to beginning of glyphIDArray from first subheader idRangeOffset.
		subheadRangeLen = len(subHeaderList) -1 # skip last special empty-set subheader; we've already hardcoded its idRangeOffset to 2.
		for index in range(subheadRangeLen): 
			subHeader = subHeaderList[index]
			subHeader.idRangeOffset = 0
			for j  in range(index):
				prevSubhead = subHeaderList[j]
				if prevSubhead.glyphIndexArray == subHeader.glyphIndexArray: # use the glyphIndexArray subarray
					subHeader.idRangeOffset = prevSubhead.idRangeOffset - (index-j)*8
					subHeader.glyphIndexArray = []
					break
			if subHeader.idRangeOffset == 0: # didn't find one. 
				subHeader.idRangeOffset = idRangeOffset
				idRangeOffset = (idRangeOffset - 8) + subHeader.entryCount*2 # one less subheader, one more subArray.
			else:
				idRangeOffset = idRangeOffset - 8  # one less subheader

		# Now we can write out the data!
		length = 6 + 512 + 8*len(subHeaderList) # header, 256 subHeaderKeys, and subheader array.
		for subhead in subHeaderList[:-1]:
			length = length + len(subhead.glyphIndexArray)*2  # We can't use subhead.entryCount, as some of the subhead may share subArrays.
		dataList = [struct.pack(">HHH", 2, length, self.language)]
		for index in subHeaderKeys:
			dataList.append(struct.pack(">H", index*8))
		for subhead in subHeaderList:
			dataList.append(struct.pack(subHeaderFormat, subhead.firstCode, subhead.entryCount, subhead.idDelta, subhead.idRangeOffset))
		for subhead in subHeaderList[:-1]:
			for gi in subhead.glyphIndexArray:
				dataList.append(struct.pack(">H", gi))
		data = bytesjoin(dataList)
		assert (len(data) == length), "Error: cmap format 2 is not same length as calculated! actual: " + str(len(data))+ " calc : " + str(length)
		return data


	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap from TTX <map> elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap

		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			cmap[safeEval(attrs["code"])] = attrs["name"]
+
+
cmap_format_4_format = ">7H"

# Format 4 layout, after the seven header words:
#   uint16  endCode[segCount]          # Ending character code for each segment, last = 0xFFFF.
#   uint16  reservedPad                # This value should be zero
#   uint16  startCode[segCount]        # Starting character code for each segment
#   uint16  idDelta[segCount]          # Delta for all character codes in segment
#   uint16  idRangeOffset[segCount]    # Offset in bytes to glyph indexArray, or 0
#   uint16  glyphIndexArray[variable]  # Glyph index array

def splitRange(startCode, endCode, cmap):
	"""Split the code range [startCode, endCode] into segments so the
	format 4 subtable can store it compactly.

	Returns (start, end): the extra segment start codes (the caller
	already knows the overall startCode) and all segment end codes.
	Heuristic rather than provably optimal, but in practice never
	produces a larger encoding than a single segment would.
	"""
	if startCode == endCode:
		return [], [endCode]

	# Pass 1: collect the maximal subranges over which glyph IDs are
	# consecutive.
	consecutive = []
	runBegin = None
	prevCode, prevID = startCode, cmap[startCode]
	for code in range(startCode + 1, endCode + 1):
		gid = cmap[code]
		if gid - 1 == prevID:
			if runBegin is None:
				runBegin = prevCode
		elif runBegin is not None:
			consecutive.append((runBegin, prevCode))
			runBegin = None
		prevCode, prevID = code, gid
	if runBegin is not None:
		consecutive.append((runBegin, prevCode))
	assert prevCode == endCode

	# Pass 2: drop subranges that would only make the data bigger.  A new
	# segment costs 8 bytes; staying in the old one costs 2 bytes per
	# character.
	kept = []
	for begin, end in consecutive:
		if begin == startCode and end == endCode:
			kept = []
			break  # the whole range is consecutive; one segment suffices
		onEdge = (begin == startCode) or (end == endCode)
		threshold = 4 if onEdge else 8  # edge split adds one segment, interior adds two
		if end - begin + 1 > threshold:
			kept.append((begin, end))

	if not kept:
		return [], [endCode]

	# Pad out to the full range if the kept subranges don't reach the edges.
	if kept[0][0] != startCode:
		kept.insert(0, (startCode, kept[0][0] - 1))
	if kept[-1][1] != endCode:
		kept.append((kept[-1][1] + 1, endCode))

	# Pass 3: fill the holes between kept subranges -- the segments whose
	# glyph IDs are NOT consecutive.
	i = 1
	while i < len(kept):
		if kept[i - 1][1] + 1 != kept[i][0]:
			kept.insert(i, (kept[i - 1][1] + 1, kept[i][0] - 1))
			i += 1
		i += 1

	# Transform the (begin, end) pairs into the start/end code lists the
	# caller expects; the first start code is implicit.
	start = [b for b, _ in kept[1:]]
	end = [e for _, e in kept]
	assert len(start) + 1 == len(end)
	return start, end
+
+
class cmap_format_4(CmapSubtable):
	
	"""Format 4: segment mapping to delta values; the standard BMP
	Unicode subtable."""
	
	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling  the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			# Bug fix: this branch sliced self.data with the undefined names
			# 'offset' and 'length' (a guaranteed NameError); the caller
			# already passes exactly the subtable's slice of the font data.
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"

		data = self.data # decompileHeader assigns the data after the header to self.data
		(segCountX2, searchRange, entrySelector, rangeShift) = \
					struct.unpack(">4H", data[:8])
		data = data[8:]
		segCount = segCountX2 // 2
		
		allCodes = array.array("H")
		allCodes.fromstring(data)
		self.data = data = None

		if sys.byteorder != "big":
			allCodes.byteswap()
		
		# divide the data
		endCode = allCodes[:segCount]
		allCodes = allCodes[segCount+1:]  # the +1 is skipping the reservedPad field
		startCode = allCodes[:segCount]
		allCodes = allCodes[segCount:]
		idDelta = allCodes[:segCount]
		allCodes = allCodes[segCount:]
		idRangeOffset = allCodes[:segCount]
		glyphIndexArray = allCodes[segCount:]
		lenGIArray = len(glyphIndexArray)

		# build 2-byte character mapping
		charCodes = []
		gids = []
		for i in range(len(startCode) - 1):	# don't do 0xffff!
			start = startCode[i]
			delta = idDelta[i]
			rangeOffset = idRangeOffset[i]
			# idRangeOffset is a byte offset relative to its own position
			# in the idRangeOffset array; fold that self-relative offset
			# into a single additive constant for indexing glyphIndexArray.
			partial = rangeOffset // 2 - start + i - len(idRangeOffset)

			rangeCharCodes = list(range(startCode[i], endCode[i] + 1))
			charCodes.extend(rangeCharCodes)
			if rangeOffset == 0:
				gids.extend([(charCode + delta) & 0xFFFF for charCode in rangeCharCodes])
			else:
				for charCode in rangeCharCodes:
					index = charCode + partial
					assert (index < lenGIArray), "In format 4 cmap, range (%d), the calculated index (%d) into the glyph index array  is not less than the length of the array (%d) !" % (i, index, lenGIArray)
					if glyphIndexArray[index] != 0:  # if not missing glyph
						glyphID = glyphIndexArray[index] + delta
					else:
						glyphID = 0  # missing glyph
					gids.append(glyphID & 0xFFFF)

		self.cmap = cmap = {}
		lenCmap = len(gids)
		glyphOrder = self.ttFont.getGlyphOrder()
		try:
			names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
		except IndexError:
			getGlyphName = self.ttFont.getGlyphName
			names = list(map(getGlyphName, gids ))
		list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))
		


	def setIDDelta(self, idDelta):
		"""Fold idDelta into the signed 16-bit range expected by the
		binary format (modulo-0x10000 arithmetic makes this lossless)."""
		# The lowest gid in glyphIndexArray, after subtracting idDelta, must be 1.
		# idDelta is a short, and must be between -32K and 32K
		# startCode can be between 0 and 64K-1, and the first glyph index can be between 1 and 64K-1
		# This means that we have a problem because we can need to assign to idDelta values
		# between -(64K-2) and 64K -1.
		# Since the final gi is reconstructed from the glyphArray GID by:
		#    (short)finalGID = (gid +  idDelta) % 0x10000),
		# we can get from a startCode of 0 to a final GID of 64 -1K by subtracting 1, and casting the
		# negative number to an unsigned short.
		# Similarly , we can get from a startCode of 64K-1 to a final GID of 1 by adding 2, because of
		# the modulo arithmetic.

		if idDelta > 0x7FFF:
			idDelta = idDelta - 0x10000
		elif idDelta <  -0x7FFF:
			idDelta = idDelta + 0x10000

		return idDelta


	def compile(self, ttFont):
		"""Compile to binary.  Echoes raw data back unchanged when the
		subtable was never decompiled."""
		if self.data:
			return struct.pack(">HHH", self.format, self.length, self.language) + self.data

		from fontTools.ttLib.sfnt import maxPowerOfTwo
		
		charCodes = list(self.cmap.keys())
		lenCharCodes = len(charCodes)
		if lenCharCodes == 0:
			startCode = [0xffff]
			endCode = [0xffff]
		else:
			charCodes.sort()
			names = list(map(operator.getitem, [self.cmap]*lenCharCodes, charCodes))
			nameMap = ttFont.getReverseGlyphMap()
			try:
				gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
			except KeyError:
				nameMap = ttFont.getReverseGlyphMap(rebuild=True)
				try:
					gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
				except KeyError:
					# allow virtual GIDs in format 4 tables
					gids = []
					for name in names:
						try:
							gid = nameMap[name]
						except KeyError:
							try:
								if (name[:3] == 'gid'):
									# Bug fix: was eval(name[3:]); int() parses
									# the numeric virtual-gid suffix safely.
									gid = int(name[3:])
								else:
									gid = ttFont.getGlyphID(name)
							except Exception:
								raise KeyError(name)
	
						gids.append(gid)
			cmap = {}  # code:glyphID mapping
			list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))
		
			# Build startCode and endCode lists.
			# Split the char codes in ranges of consecutive char codes, then split
			# each range in more ranges of consecutive/not consecutive glyph IDs.
			# See splitRange().
			lastCode = charCodes[0]
			endCode = []
			startCode = [lastCode]
			for charCode in charCodes[1:]:  # skip the first code, it's the first start code
				if charCode == lastCode + 1:
					lastCode = charCode
					continue
				start, end = splitRange(startCode[-1], lastCode, cmap)
				startCode.extend(start)
				endCode.extend(end)
				startCode.append(charCode)
				lastCode = charCode
			endCode.append(lastCode)
			startCode.append(0xffff)
			endCode.append(0xffff)
		
		# build up rest of cruft
		idDelta = []
		idRangeOffset = []
		glyphIndexArray = []
		for i in range(len(endCode)-1):  # skip the closing codes (0xffff)
			indices = []
			for charCode in range(startCode[i], endCode[i] + 1):
				indices.append(cmap[charCode])
			if  (indices == list(range(indices[0], indices[0] + len(indices)))):
				# Glyph IDs are consecutive: encode the whole segment as a delta.
				idDeltaTemp = self.setIDDelta(indices[0] - startCode[i])
				idDelta.append( idDeltaTemp)
				idRangeOffset.append(0)
			else:
				# Non-consecutive glyph IDs: store them explicitly via the
				# self-relative idRangeOffset indirection.
				idDelta.append(0)
				idRangeOffset.append(2 * (len(endCode) + len(glyphIndexArray) - i))
				glyphIndexArray.extend(indices)
		idDelta.append(1)  # 0xffff + 1 == (tadaa!) 0. So this end code maps to .notdef
		idRangeOffset.append(0)
		
		# Binary-search helper fields required by the spec header.
		segCount = len(endCode)
		segCountX2 = segCount * 2
		maxExponent = maxPowerOfTwo(segCount)
		searchRange = 2 * (2 ** maxExponent)
		entrySelector = maxExponent
		rangeShift = 2 * segCount - searchRange
		
		charCodeArray = array.array("H", endCode + [0] + startCode)
		idDeltaeArray = array.array("h", idDelta)
		restArray = array.array("H", idRangeOffset + glyphIndexArray)
		if sys.byteorder != "big":
			charCodeArray.byteswap()
			idDeltaeArray.byteswap()
			restArray.byteswap()
		data = charCodeArray.tostring() + idDeltaeArray.tostring() + restArray.tostring()

		length = struct.calcsize(cmap_format_4_format) + len(data)
		header = struct.pack(cmap_format_4_format, self.format, length, self.language, 
				segCountX2, searchRange, entrySelector, rangeShift)
		return header + data
	
	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap from TTX <map> elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap

		for element in content:
			if not isinstance(element, tuple):
				continue
			nameMap, attrsMap, dummyContent = element
			if nameMap != "map":
				assert 0, "Unrecognized keyword in cmap subtable"
			cmap[safeEval(attrsMap["code"])] = attrsMap["name"]
+
class cmap_format_6(CmapSubtable):
	"""'cmap' format 6: trimmed table mapping one contiguous character range."""

	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			# Bug fix: the caller passes the subtable data already sliced out
			# of the 'cmap' table; the previous code referenced undefined
			# 'offset'/'length' names here and raised NameError.
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"

		data = self.data # decompileHeader assigns the data after the header to self.data
		firstCode, entryCount = struct.unpack(">HH", data[:4])
		firstCode = int(firstCode)
		data = data[4:]
		#assert len(data) == 2 * entryCount  # XXX not true in Apple's Helvetica!!!
		glyphIndexArray = array.array("H")
		glyphIndexArray.fromstring(data[:2 * int(entryCount)])
		if sys.byteorder != "big":
			glyphIndexArray.byteswap()
		self.data = data = None

		self.cmap = cmap = {}

		lenArray = len(glyphIndexArray)
		charCodes = list(range(firstCode, firstCode + lenArray))
		glyphOrder = self.ttFont.getGlyphOrder()
		try:
			names = list(map(operator.getitem, [glyphOrder]*lenArray, glyphIndexArray ))
		except IndexError:
			getGlyphName = self.ttFont.getGlyphName
			names = list(map(getGlyphName, glyphIndexArray ))
		list(map(operator.setitem, [cmap]*lenArray, charCodes, names))

	def compile(self, ttFont):
		if self.data:
			return struct.pack(">HHH", self.format, self.length, self.language) + self.data
		cmap = self.cmap
		# Bug fix: sort the codes; dict key order is arbitrary, and the format
		# requires a contiguous range starting at the lowest character code.
		codes = sorted(cmap.keys())
		if codes: # yes, there are empty cmap tables.
			codes = list(range(codes[0], codes[-1] + 1))
			firstCode = codes[0]
			valueList = [cmap.get(code, ".notdef") for code in codes]
			valueList = map(ttFont.getGlyphID, valueList)
			glyphIndexArray = array.array("H", valueList)
			if sys.byteorder != "big":
				glyphIndexArray.byteswap()
			data = glyphIndexArray.tostring()
		else:
			data = b""
			firstCode = 0
		header = struct.pack(">HHHHH",
				6, len(data) + 10, self.language, firstCode, len(codes))
		return header + data

	def fromXML(self, name, attrs, content, ttFont):
		"""Rebuild self.cmap (and self.language) from TTX <map> child elements."""
		self.language = safeEval(attrs["language"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			cmap[safeEval(attrs["code"])] = attrs["name"]
+
+
class cmap_format_12_or_13(CmapSubtable):
	"""Shared implementation of 'cmap' formats 12 and 13 (32-bit group tables).

	Subclasses supply _computeGIDs/_IsInSameRun/_format_step to choose between
	sequential (format 12) and many-to-one (format 13) group semantics.
	"""

	def __init__(self, format):
		self.format = format
		self.reserved = 0
		self.data = None
		self.ttFont = None

	def decompileHeader(self, data, ttFont):
		format, reserved, length, language, nGroups = struct.unpack(">HHLLL", data[:16])
		assert len(data) == (16 + nGroups*12) == (length), "corrupt cmap table format %d (data length: %d, header length: %d)" % (format, len(data), length)
		self.format = format
		self.reserved = reserved
		self.length = length
		self.language = language
		self.nGroups = nGroups
		self.data = data[16:]
		self.ttFont = ttFont

	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			# Bug fix: the data is already sliced out of the 'cmap' table by
			# the caller; the previous code referenced undefined
			# 'offset'/'length' names here and raised NameError.
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"

		data = self.data # decompileHeader assigns the data after the header to self.data
		charCodes = []
		gids = []
		pos = 0
		for i in range(self.nGroups):
			startCharCode, endCharCode, glyphID = struct.unpack(">LLL", data[pos:pos+12])
			pos += 12
			lenGroup = 1 + endCharCode - startCharCode
			charCodes.extend(list(range(startCharCode, endCharCode + 1)))
			gids.extend(self._computeGIDs(glyphID, lenGroup))
		self.data = data = None
		self.cmap = cmap = {}
		lenCmap = len(gids)
		glyphOrder = self.ttFont.getGlyphOrder()
		try:
			names = list(map(operator.getitem, [glyphOrder]*lenCmap, gids ))
		except IndexError:
			getGlyphName = self.ttFont.getGlyphName
			names = list(map(getGlyphName, gids ))
		list(map(operator.setitem, [cmap]*lenCmap, charCodes, names))

	def compile(self, ttFont):
		if self.data:
			return struct.pack(">HHLLL", self.format, self.reserved, self.length, self.language, self.nGroups) + self.data
		charCodes = list(self.cmap.keys())
		lenCharCodes = len(charCodes)
		names = list(self.cmap.values())
		nameMap = ttFont.getReverseGlyphMap()
		try:
			gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
		except KeyError:
			nameMap = ttFont.getReverseGlyphMap(rebuild=True)
			try:
				gids = list(map(operator.getitem, [nameMap]*lenCharCodes, names))
			except KeyError:
				# allow virtual GIDs in format 12 tables
				gids = []
				for name in names:
					try:
						gid = nameMap[name]
					except KeyError:
						try:
							if (name[:3] == 'gid'):
								# Bug fix: parse the numeric suffix with int();
								# eval() would execute arbitrary glyph-name text.
								gid = int(name[3:])
							else:
								gid = ttFont.getGlyphID(name)
						except Exception:
							raise KeyError(name)

					gids.append(gid)

		cmap = {}  # code:glyphID mapping
		list(map(operator.setitem, [cmap]*len(charCodes), charCodes, gids))

		charCodes.sort()
		index = 0
		startCharCode = charCodes[0]
		startGlyphID = cmap[startCharCode]
		# Seed the "previous" values so the first code is always in-run.
		lastGlyphID = startGlyphID - self._format_step
		lastCharCode = startCharCode - 1
		nGroups = 0
		dataList = []
		maxIndex = len(charCodes)
		for index in range(maxIndex):
			charCode = charCodes[index]
			glyphID = cmap[charCode]
			if not self._IsInSameRun(glyphID, lastGlyphID, charCode, lastCharCode):
				dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
				startCharCode = charCode
				startGlyphID = glyphID
				nGroups = nGroups + 1
			lastGlyphID = glyphID
			lastCharCode = charCode
		dataList.append(struct.pack(">LLL", startCharCode, lastCharCode, startGlyphID))
		nGroups = nGroups + 1
		data = bytesjoin(dataList)
		lengthSubtable = len(data) + 16
		assert len(data) == (nGroups*12) == (lengthSubtable-16)
		return struct.pack(">HHLLL", self.format, self.reserved, lengthSubtable, self.language, nGroups) + data

	def toXML(self, writer, ttFont):
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				("format", self.format),
				("reserved", self.reserved),
				("length", self.length),
				("language", self.language),
				("nGroups", self.nGroups),
				])
		writer.newline()
		codes = sorted(self.cmap.items())
		self._writeCodes(codes, writer)
		writer.endtag(self.__class__.__name__)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		self.format = safeEval(attrs["format"])
		self.reserved = safeEval(attrs["reserved"])
		self.length = safeEval(attrs["length"])
		self.language = safeEval(attrs["language"])
		self.nGroups = safeEval(attrs["nGroups"])
		if not hasattr(self, "cmap"):
			self.cmap = {}
		cmap = self.cmap

		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			cmap[safeEval(attrs["code"])] = attrs["name"]
+
+
class cmap_format_12(cmap_format_12_or_13):
	"""Format 12: segmented coverage; glyph IDs run sequentially in each group."""

	def __init__(self, format):
		cmap_format_12_or_13.__init__(self, format)
		self._format_step = 1

	def _computeGIDs(self, startingGlyph, numberOfGlyphs):
		# One consecutive glyph ID per character in the group.
		return [startingGlyph + i for i in range(numberOfGlyphs)]

	def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
		# Both the glyph ID and the character code must advance by exactly one.
		return glyphID - lastGlyphID == 1 and charCode - lastCharCode == 1
+
+
class cmap_format_13(cmap_format_12_or_13):
	"""Format 13: many-to-one; every character in a group maps to one glyph."""

	def __init__(self, format):
		cmap_format_12_or_13.__init__(self, format)
		self._format_step = 0

	def _computeGIDs(self, startingGlyph, numberOfGlyphs):
		# The same glyph ID repeated for each character in the group.
		return [startingGlyph for _ in range(numberOfGlyphs)]

	def _IsInSameRun(self, glyphID, lastGlyphID, charCode, lastCharCode):
		# The glyph ID stays fixed while the character code advances by one.
		return glyphID == lastGlyphID and charCode - lastCharCode == 1
+
+
def cvtToUVS(threeByteString):
	"""Return the integer value of a 3-byte big-endian string."""
	value, = struct.unpack(">L", b"\0" + threeByteString)
	return value
+
def cvtFromUVS(val):
	"""Return *val* (0 <= val < 2**24) packed as a 3-byte big-endian string."""
	assert 0 <= val < 0x1000000
	packed = struct.pack(">L", val)
	return packed[1:]
+
+
class cmap_format_14(CmapSubtable):
	"""'cmap' format 14: Unicode Variation Sequences (UVS).

	self.uvsDict maps a variation-selector code point to a list of
	[baseUV, glyphName] pairs; glyphName is None for "default UVS" entries
	(the glyph comes from the font's regular cmap).
	"""

	def decompileHeader(self, data, ttFont):
		format, length, numVarSelectorRecords = struct.unpack(">HLL", data[:10])
		self.data = data[10:]
		self.length = length
		self.numVarSelectorRecords = numVarSelectorRecords
		self.ttFont = ttFont
		self.language = 0xFF # has no language.

	def decompile(self, data, ttFont):
		# NOTE(review): unlike the other subtables, this only accepts real
		# arguments when ttFont.lazy is set; with a non-lazy font both args
		# must be None (decompileHeader has stashed the data already).
		if data is not None and ttFont is not None and ttFont.lazy:
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"
		data = self.data

		self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
		uvsDict = {}
		recOffset = 0
		for n in range(self.numVarSelectorRecords):
			uvs, defOVSOffset, nonDefUVSOffset = struct.unpack(">3sLL", data[recOffset:recOffset + 11])
			recOffset += 11
			varUVS = cvtToUVS(uvs)
			if defOVSOffset:
				# Default UVS table: ranges of base characters whose glyph
				# comes from the regular cmap (recorded with glyph None).
				startOffset = defOVSOffset - 10
				numValues, = struct.unpack(">L", data[startOffset:startOffset+4])
				startOffset += 4
				for r in range(numValues):
					uv, addtlCnt = struct.unpack(">3sB", data[startOffset:startOffset+4])
					startOffset += 4
					firstBaseUV = cvtToUVS(uv)
					cnt = addtlCnt + 1
					baseUVList = list(range(firstBaseUV, firstBaseUV + cnt))
					glyphList = [None]*cnt
					localUVList = zip(baseUVList, glyphList)
					try:
						uvsDict[varUVS].extend(localUVList)
					except KeyError:
						uvsDict[varUVS] = list(localUVList)

			if nonDefUVSOffset:
				# Non-default UVS table: explicit (uv, glyph name) pairs.
				startOffset = nonDefUVSOffset - 10
				numRecs, = struct.unpack(">L", data[startOffset:startOffset+4])
				startOffset += 4
				localUVList = []
				for r in range(numRecs):
					uv, gid = struct.unpack(">3sH", data[startOffset:startOffset+5])
					startOffset += 5
					uv = cvtToUVS(uv)
					glyphName = self.ttFont.getGlyphName(gid)
					localUVList.append( [uv, glyphName] )
				try:
					uvsDict[varUVS].extend(localUVList)
				except KeyError:
					uvsDict[varUVS] = localUVList

		self.uvsDict = uvsDict

	def toXML(self, writer, ttFont):
		writer.begintag(self.__class__.__name__, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				("format", self.format),
				("length", self.length),
				("numVarSelectorRecords", self.numVarSelectorRecords),
				])
		writer.newline()
		uvsDict = self.uvsDict
		uvsList = sorted(uvsDict.keys())
		for uvs in uvsList:
			uvList = uvsDict[uvs]
			# Default entries (glyph None) sort first, then by (uv, name).
			uvList.sort(key=lambda item: (item[1] is not None, item[0], item[1]))
			for uv, gname in uvList:
				if gname is None:
					gname = "None"
				# I use the arg rather than th keyword syntax in order to preserve the attribute order.
				writer.simpletag("map", [ ("uvs",hex(uvs)), ("uv",hex(uv)), ("name", gname)]  )
				writer.newline()
		writer.endtag(self.__class__.__name__)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		self.format = safeEval(attrs["format"])
		self.length = safeEval(attrs["length"])
		self.numVarSelectorRecords = safeEval(attrs["numVarSelectorRecords"])
		self.language = 0xFF # provide a value so that  CmapSubtable.__lt__() won't fail
		if not hasattr(self, "cmap"):
			self.cmap = {} # so that clients that expect this to exist in a cmap table won't fail.
		if not hasattr(self, "uvsDict"):
			self.uvsDict = {}
		# Bug fix: bind the local OUTSIDE the hasattr guard; previously a
		# second fromXML() call on the same object raised NameError because
		# 'uvsDict' was only assigned when self.uvsDict did not yet exist.
		uvsDict = self.uvsDict

		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			if name != "map":
				continue
			uvs = safeEval(attrs["uvs"])
			uv = safeEval(attrs["uv"])
			gname = attrs["name"]
			if gname == "None":
				gname = None
			try:
				uvsDict[uvs].append( [uv, gname])
			except KeyError:
				uvsDict[uvs] = [ [uv, gname] ]

	def compile(self, ttFont):
		if self.data:
			return struct.pack(">HLL", self.format, self.length , self.numVarSelectorRecords) + self.data

		uvsDict = self.uvsDict
		uvsList = sorted(uvsDict.keys())
		self.numVarSelectorRecords = len(uvsList)
		offset = 10 + self.numVarSelectorRecords*11 # current value is end of VarSelectorRecords block.
		data = []
		varSelectorRecords = []
		for uvs in uvsList:
			entryList = uvsDict[uvs]

			defList = [entry for entry in entryList if entry[1] is None]
			if defList:
				defList = [entry[0] for entry in defList]
				defOVSOffset = offset
				defList.sort()

				# Collapse sorted base UVs into (firstUV, additionalCount) runs.
				lastUV = defList[0]
				cnt = -1
				defRecs = []
				for defEntry in defList:
					cnt += 1
					if (lastUV + cnt) != defEntry:
						rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt-1)
						lastUV = defEntry
						defRecs.append(rec)
						cnt = 0

				rec = struct.pack(">3sB", cvtFromUVS(lastUV), cnt)
				defRecs.append(rec)

				numDefRecs = len(defRecs)
				data.append(struct.pack(">L", numDefRecs))
				data.extend(defRecs)
				offset += 4 + numDefRecs*4
			else:
				defOVSOffset = 0

			ndefList = [entry for entry in entryList if entry[1] is not None]
			if ndefList:
				nonDefUVSOffset = offset
				ndefList.sort()
				numNonDefRecs = len(ndefList)
				data.append(struct.pack(">L", numNonDefRecs))
				offset += 4 + numNonDefRecs*5

				for uv, gname in ndefList:
					gid = ttFont.getGlyphID(gname)
					ndrec = struct.pack(">3sH", cvtFromUVS(uv), gid)
					data.append(ndrec)
			else:
				nonDefUVSOffset = 0

			vrec = struct.pack(">3sLL", cvtFromUVS(uvs), defOVSOffset, nonDefUVSOffset)
			varSelectorRecords.append(vrec)

		data = bytesjoin(varSelectorRecords) + bytesjoin(data)
		self.length = 10 + len(data)
		headerdata = struct.pack(">HLL", self.format, self.length , self.numVarSelectorRecords)
		self.data = headerdata + data

		return self.data
+		
+		
class cmap_format_unknown(CmapSubtable):
	"""Fallback for unrecognized 'cmap' subtable formats: round-trips raw data as hex."""

	def toXML(self, writer, ttFont):
		# "cmap_format_unknown"[:12] == "cmap_format_", so the tag becomes
		# e.g. "cmap_format_5" for an unknown format 5 subtable.
		cmapName = self.__class__.__name__[:12] + str(self.format)
		writer.begintag(cmapName, [
				("platformID", self.platformID),
				("platEncID", self.platEncID),
				])
		writer.newline()
		writer.dumphex(self.data)
		writer.endtag(cmapName)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		self.data = readHex(content)
		self.cmap = {}

	def decompileHeader(self, data, ttFont):
		self.language = 0  # dummy value
		self.data = data

	def decompile(self, data, ttFont):
		# we usually get here indirectly from the subtable __getattr__ function, in which case both args must be None.
		# If not, someone is calling the subtable decompile() directly, and must provide both args.
		if data is not None and ttFont is not None:
			# Bug fix: 'offset' and 'length' were referenced here but never
			# defined (NameError); the data is already sliced by the caller.
			self.decompileHeader(data, ttFont)
		else:
			assert (data is None and ttFont is None), "Need both data and ttFont arguments"

	def compile(self, ttFont):
		if self.data:
			return self.data
		else:
			return None
+
# Dispatch table: 'cmap' subtable format number -> implementation class.
# Formats not listed here are presumably handled by cmap_format_unknown
# at the call site — TODO confirm against the table reader.
cmap_classes = {
		0: cmap_format_0,
		2: cmap_format_2,
		4: cmap_format_4,
		6: cmap_format_6,
		12: cmap_format_12,
		13: cmap_format_13,
		14: cmap_format_14,
		}
diff --git a/Lib/fontTools/ttLib/tables/_c_v_t.py b/Lib/fontTools/ttLib/tables/_c_v_t.py
new file mode 100644
index 0000000..f9f8186
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_c_v_t.py
@@ -0,0 +1,50 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import sys
+import array
+
class table__c_v_t(DefaultTable.DefaultTable):
	"""The Control Value Table: a flat array of signed 16-bit values."""

	def decompile(self, data, ttFont):
		cvt = array.array("h")
		cvt.fromstring(data)
		if sys.byteorder != "big":
			cvt.byteswap()
		self.values = cvt

	def compile(self, ttFont):
		# Work on a copy so byteswapping does not disturb self.values.
		cvt = self.values[:]
		if sys.byteorder != "big":
			cvt.byteswap()
		return cvt.tostring()

	def toXML(self, writer, ttFont):
		for index, value in enumerate(self.values):
			writer.simpletag("cv", value=value, index=index)
			writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		if not hasattr(self, "values"):
			self.values = array.array("h")
		if name != "cv":
			return
		index = safeEval(attrs["index"])
		value = safeEval(attrs["value"])
		# Pad with zeros until 'index' is a valid position, then store.
		while len(self.values) <= index:
			self.values.append(0)
		self.values[index] = value

	def __len__(self):
		return len(self.values)

	def __getitem__(self, index):
		return self.values[index]

	def __setitem__(self, index, value):
		self.values[index] = value

	def __delitem__(self, index):
		del self.values[index]
+
diff --git a/Lib/fontTools/ttLib/tables/_f_p_g_m.py b/Lib/fontTools/ttLib/tables/_f_p_g_m.py
new file mode 100644
index 0000000..e4bd5f7
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_f_p_g_m.py
@@ -0,0 +1,27 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from . import DefaultTable
+from . import ttProgram
+
class table__f_p_g_m(DefaultTable.DefaultTable):
	"""The font program table: wraps a ttProgram.Program of TrueType bytecode."""

	def decompile(self, data, ttFont):
		self.program = ttProgram.Program()
		self.program.fromBytecode(data)

	def compile(self, ttFont):
		return self.program.getBytecode()

	def toXML(self, writer, ttFont):
		self.program.toXML(writer, ttFont)
		writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		self.program = ttProgram.Program()
		self.program.fromXML(name, attrs, content, ttFont)

	def __len__(self):
		return len(self.program)
+
diff --git a/Lib/fontTools/ttLib/tables/_g_a_s_p.py b/Lib/fontTools/ttLib/tables/_g_a_s_p.py
new file mode 100644
index 0000000..54fef90
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_g_a_s_p.py
@@ -0,0 +1,52 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import struct
+
+
# Bit flags for the rangeGaspBehavior field of a 'gasp' range record.
# NOTE(review): the two SYMMETRIC_* bits appear to require 'gasp' version 1
# (compile() below bumps the version when they are set) — confirm against
# the OpenType 'gasp' specification.
GASP_SYMMETRIC_GRIDFIT = 0x0004
GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001
+
class table__g_a_s_p(DefaultTable.DefaultTable):
	"""The grid-fitting and scan-conversion procedure ('gasp') table."""

	def decompile(self, data, ttFont):
		self.version, numRanges = struct.unpack(">HH", data[:4])
		assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
		self.gaspRange = {}
		pos = 4
		for dummy in range(numRanges):
			rangeMaxPPEM, rangeGaspBehavior = struct.unpack(">HH", data[pos:pos+4])
			self.gaspRange[int(rangeMaxPPEM)] = int(rangeGaspBehavior)
			pos += 4
		assert pos == len(data), "too much data"

	def compile(self, ttFont):
		# The version is recomputed from the behavior bits; self.version is
		# deliberately ignored (matches the original "# ignore self.version").
		version = 0
		records = []
		for rangeMaxPPEM, rangeGaspBehavior in sorted(self.gaspRange.items()):
			records.append(struct.pack(">HH", rangeMaxPPEM, rangeGaspBehavior))
			if rangeGaspBehavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
				version = 1
		return struct.pack(">HH", version, len(self.gaspRange)) + b"".join(records)

	def toXML(self, writer, ttFont):
		for rangeMaxPPEM, rangeGaspBehavior in sorted(self.gaspRange.items()):
			writer.simpletag("gaspRange", [
					("rangeMaxPPEM", rangeMaxPPEM),
					("rangeGaspBehavior", rangeGaspBehavior)])
			writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		if name != "gaspRange":
			return
		if not hasattr(self, "gaspRange"):
			self.gaspRange = {}
		self.gaspRange[safeEval(attrs["rangeMaxPPEM"])] = safeEval(attrs["rangeGaspBehavior"])
+
diff --git a/Lib/fontTools/ttLib/tables/_g_l_y_f.py b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
new file mode 100644
index 0000000..970980b
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_g_l_y_f.py
@@ -0,0 +1,1068 @@
+"""_g_l_y_f.py -- Converter classes for the 'glyf' table."""
+
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools import ttLib
+from fontTools.misc.textTools import safeEval
+from fontTools.misc.arrayTools import calcBounds, calcIntBounds, pointInRect
+from fontTools.misc.bezierTools import calcQuadraticBounds
+from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
+from . import DefaultTable
+from . import ttProgram
+import sys
+import struct
+import array
+import warnings
+
+#
+# The Apple and MS rasterizers behave differently for 
+# scaled composite components: one does scale first and then translate
+# and the other does it vice versa. MS defined some flags to indicate
+# the difference, but it seems nobody actually _sets_ those flags.
+#
+# Funny thing: Apple seems to _only_ do their thing in the
+# WE_HAVE_A_SCALE (eg. Chicago) case, and not when it's WE_HAVE_AN_X_AND_Y_SCALE 
+# (eg. Charcoal)...
+#
+SCALE_COMPONENT_OFFSET_DEFAULT = 0   # 0 == MS, 1 == Apple
+
+
class table__g_l_y_f(DefaultTable.DefaultTable):
	"""The 'glyf' table: TrueType glyph outline data, indexed via the 'loca' table."""

	def decompile(self, data, ttFont):
		loca = ttFont['loca']
		last = int(loca[0])
		noname = 0
		self.glyphs = {}
		self.glyphOrder = glyphOrder = ttFont.getGlyphOrder()
		for i in range(0, len(loca)-1):
			try:
				glyphName = glyphOrder[i]
			except IndexError:
				noname = noname + 1
				glyphName = 'ttxautoglyph%s' % i
			# Renamed from 'next' to avoid shadowing the builtin.
			nextOffset = int(loca[i+1])
			glyphdata = data[last:nextOffset]
			if len(glyphdata) != (nextOffset - last):
				raise ttLib.TTLibError("not enough 'glyf' table data")
			glyph = Glyph(glyphdata)
			self.glyphs[glyphName] = glyph
			last = nextOffset
		# 'last' is the end offset of the final glyph, and is defined even
		# when the loop body never ran (previously 'next' could be unbound).
		if len(data) > last:
			warnings.warn("too much 'glyf' table data")
		if noname:
			# Bug fix: report how many glyphs lacked a name ('noname'),
			# not the index of the last glyph processed ('i').
			warnings.warn('%s glyphs have no name' % noname)
		if not ttFont.lazy:
			for glyph in self.glyphs.values():
				glyph.expand(self)

	def compile(self, ttFont):
		if not hasattr(self, "glyphOrder"):
			self.glyphOrder = ttFont.getGlyphOrder()
		locations = []
		currentLocation = 0
		dataList = []
		recalcBBoxes = ttFont.recalcBBoxes
		for glyphName in self.glyphOrder:
			glyph = self.glyphs[glyphName]
			glyphData = glyph.compile(self, recalcBBoxes)
			locations.append(currentLocation)
			currentLocation = currentLocation + len(glyphData)
			dataList.append(glyphData)
		locations.append(currentLocation)
		data = bytesjoin(dataList)
		# Hand the computed offsets to 'loca' so the two tables stay in sync.
		if 'loca' in ttFont:
			ttFont['loca'].set(locations)
		ttFont['maxp'].numGlyphs = len(self.glyphs)
		return data

	def toXML(self, writer, ttFont, progress=None):
		writer.newline()
		glyphNames = ttFont.getGlyphNames()
		writer.comment("The xMin, yMin, xMax and yMax values\nwill be recalculated by the compiler.")
		writer.newline()
		writer.newline()
		counter = 0
		progressStep = 10
		numGlyphs = len(glyphNames)
		for glyphName in glyphNames:
			if not counter % progressStep and progress is not None:
				progress.setLabel("Dumping 'glyf' table... (%s)" % glyphName)
				progress.increment(progressStep / numGlyphs)
			counter = counter + 1
			glyph = self[glyphName]
			if glyph.numberOfContours:
				writer.begintag('TTGlyph', [
						("name", glyphName),
						("xMin", glyph.xMin),
						("yMin", glyph.yMin),
						("xMax", glyph.xMax),
						("yMax", glyph.yMax),
						])
				writer.newline()
				glyph.toXML(writer, ttFont)
				writer.endtag('TTGlyph')
				writer.newline()
			else:
				writer.simpletag('TTGlyph', name=glyphName)
				writer.comment("contains no outline data")
				writer.newline()
			writer.newline()

	def fromXML(self, name, attrs, content, ttFont):
		if name != "TTGlyph":
			return
		if not hasattr(self, "glyphs"):
			self.glyphs = {}
		if not hasattr(self, "glyphOrder"):
			self.glyphOrder = ttFont.getGlyphOrder()
		glyphName = attrs["name"]
		if ttFont.verbose:
			ttLib.debugmsg("unpacking glyph '%s'" % glyphName)
		glyph = Glyph()
		for attr in ['xMin', 'yMin', 'xMax', 'yMax']:
			setattr(glyph, attr, safeEval(attrs.get(attr, '0')))
		self.glyphs[glyphName] = glyph
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			glyph.fromXML(name, attrs, content, ttFont)
		if not ttFont.recalcBBoxes:
			glyph.compact(self, 0)

	def setGlyphOrder(self, glyphOrder):
		self.glyphOrder = glyphOrder

	def getGlyphName(self, glyphID):
		return self.glyphOrder[glyphID]

	def getGlyphID(self, glyphName):
		# XXX optimize with reverse dict!!!
		return self.glyphOrder.index(glyphName)

	def keys(self):
		return self.glyphs.keys()

	def has_key(self, glyphName):
		return glyphName in self.glyphs

	__contains__ = has_key

	def __getitem__(self, glyphName):
		glyph = self.glyphs[glyphName]
		glyph.expand(self)
		return glyph

	def __setitem__(self, glyphName, glyph):
		self.glyphs[glyphName] = glyph
		if glyphName not in self.glyphOrder:
			self.glyphOrder.append(glyphName)

	def __delitem__(self, glyphName):
		del self.glyphs[glyphName]
		self.glyphOrder.remove(glyphName)

	def __len__(self):
		assert len(self.glyphOrder) == len(self.glyphs)
		return len(self.glyphs)
+
+
# sstruct format for the 5-word header that starts every glyph record.
glyphHeaderFormat = """
		>	# big endian
		numberOfContours:	h
		xMin:				h
		yMin:				h
		xMax:				h
		yMax:				h
"""

# flags
# Per-point flag bits used by simple-glyph coordinate data.
flagOnCurve = 0x01  # point lies on the curve (off-curve points are control points)
flagXShort = 0x02  # x delta is stored as one unsigned byte
flagYShort = 0x04  # y delta is stored as one unsigned byte
flagRepeat = 0x08  # next byte is a repeat count for this flag
flagXsame =  0x10  # sign bit when flagXShort is set; otherwise "x delta is zero"
flagYsame = 0x20  # sign bit when flagYShort is set; otherwise "y delta is zero"
flagReserved1 = 0x40
flagReserved2 = 0x80

# Composite-glyph component flag bits.
ARG_1_AND_2_ARE_WORDS      = 0x0001  # if set args are words otherwise they are bytes 
ARGS_ARE_XY_VALUES         = 0x0002  # if set args are xy values, otherwise they are points 
ROUND_XY_TO_GRID           = 0x0004  # for the xy values if above is true 
WE_HAVE_A_SCALE            = 0x0008  # Sx = Sy, otherwise scale == 1.0 
NON_OVERLAPPING            = 0x0010  # set to same value for all components (obsolete!)
MORE_COMPONENTS            = 0x0020  # indicates at least one more glyph after this one 
WE_HAVE_AN_X_AND_Y_SCALE   = 0x0040  # Sx, Sy 
WE_HAVE_A_TWO_BY_TWO       = 0x0080  # t00, t01, t10, t11 
WE_HAVE_INSTRUCTIONS       = 0x0100  # instructions follow 
USE_MY_METRICS             = 0x0200  # apply these metrics to parent glyph 
OVERLAP_COMPOUND           = 0x0400  # used by Apple in GX fonts 
SCALED_COMPONENT_OFFSET    = 0x0800  # composite designed to have the component offset scaled (designed for Apple) 
UNSCALED_COMPONENT_OFFSET  = 0x1000  # composite designed not to have the component offset scaled (designed for MS) 
+
+
+class Glyph(object):
+	
+	def __init__(self, data=""):
+		if not data:
+			# empty char
+			self.numberOfContours = 0
+			return
+		self.data = data
+	
+	def compact(self, glyfTable, recalcBBoxes=True):
+		data = self.compile(glyfTable, recalcBBoxes)
+		self.__dict__.clear()
+		self.data = data
+	
+	def expand(self, glyfTable):
+		if not hasattr(self, "data"):
+			# already unpacked
+			return
+		if not self.data:
+			# empty char
+			self.numberOfContours = 0
+			return
+		dummy, data = sstruct.unpack2(glyphHeaderFormat, self.data, self)
+		del self.data
+		if self.isComposite():
+			self.decompileComponents(data, glyfTable)
+		else:
+			self.decompileCoordinates(data)
+	
+	def compile(self, glyfTable, recalcBBoxes=True):
+		if hasattr(self, "data"):
+			return self.data
+		if self.numberOfContours == 0:
+			return ""
+		if recalcBBoxes:
+			self.recalcBounds(glyfTable)
+		data = sstruct.pack(glyphHeaderFormat, self)
+		if self.isComposite():
+			data = data + self.compileComponents(glyfTable)
+		else:
+			data = data + self.compileCoordinates()
+		# From the spec: "Note that the local offsets should be word-aligned"
+		# From a later MS spec: "Note that the local offsets should be long-aligned"
+		# Let's be modern and align on 4-byte boundaries.
+		if len(data) % 4:
+			# add pad bytes
+			nPadBytes = 4 - (len(data) % 4)
+			data = data + b"\0" * nPadBytes
+		return data
+	
+	def toXML(self, writer, ttFont):
+		if self.isComposite():
+			for compo in self.components:
+				compo.toXML(writer, ttFont)
+			if hasattr(self, "program"):
+				writer.begintag("instructions")
+				self.program.toXML(writer, ttFont)
+				writer.endtag("instructions")
+				writer.newline()
+		else:
+			last = 0
+			for i in range(self.numberOfContours):
+				writer.begintag("contour")
+				writer.newline()
+				for j in range(last, self.endPtsOfContours[i] + 1):
+					writer.simpletag("pt", [
+							("x", self.coordinates[j][0]), 
+							("y", self.coordinates[j][1]),
+							("on", self.flags[j] & flagOnCurve)])
+					writer.newline()
+				last = self.endPtsOfContours[i] + 1
+				writer.endtag("contour")
+				writer.newline()
+			if self.numberOfContours:
+				writer.begintag("instructions")
+				self.program.toXML(writer, ttFont)
+				writer.endtag("instructions")
+				writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if name == "contour":
+			if self.numberOfContours < 0:
+				raise ttLib.TTLibError("can't mix composites and contours in glyph")
+			self.numberOfContours = self.numberOfContours + 1
+			coordinates = GlyphCoordinates()
+			flags = []
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				if name != "pt":
+					continue  # ignore anything but "pt"
+				coordinates.append((safeEval(attrs["x"]), safeEval(attrs["y"])))
+				flags.append(not not safeEval(attrs["on"]))
+			flags = array.array("B", flags)
+			if not hasattr(self, "coordinates"):
+				self.coordinates = coordinates
+				self.flags = flags
+				self.endPtsOfContours = [len(coordinates)-1]
+			else:
+				self.coordinates.extend (coordinates)
+				self.flags.extend(flags)
+				self.endPtsOfContours.append(len(self.coordinates)-1)
+		elif name == "component":
+			if self.numberOfContours > 0:
+				raise ttLib.TTLibError("can't mix composites and contours in glyph")
+			self.numberOfContours = -1
+			if not hasattr(self, "components"):
+				self.components = []
+			component = GlyphComponent()
+			self.components.append(component)
+			component.fromXML(name, attrs, content, ttFont)
+		elif name == "instructions":
+			self.program = ttProgram.Program()
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				self.program.fromXML(name, attrs, content, ttFont)
+	
+	def getCompositeMaxpValues(self, glyfTable, maxComponentDepth=1):
+		assert self.isComposite()
+		nContours = 0
+		nPoints = 0
+		for compo in self.components:
+			baseGlyph = glyfTable[compo.glyphName]
+			if baseGlyph.numberOfContours == 0:
+				continue
+			elif baseGlyph.numberOfContours > 0:
+				nP, nC = baseGlyph.getMaxpValues()
+			else:
+				nP, nC, maxComponentDepth = baseGlyph.getCompositeMaxpValues(
+						glyfTable, maxComponentDepth + 1)
+			nPoints = nPoints + nP
+			nContours = nContours + nC
+		return nPoints, nContours, maxComponentDepth
+	
+	def getMaxpValues(self):
+		assert self.numberOfContours > 0
+		return len(self.coordinates), len(self.endPtsOfContours)
+	
+	def decompileComponents(self, data, glyfTable):
+		self.components = []
+		more = 1
+		haveInstructions = 0
+		while more:
+			component = GlyphComponent()
+			more, haveInstr, data = component.decompile(data, glyfTable)
+			haveInstructions = haveInstructions | haveInstr
+			self.components.append(component)
+		if haveInstructions:
+			numInstructions, = struct.unpack(">h", data[:2])
+			data = data[2:]
+			self.program = ttProgram.Program()
+			self.program.fromBytecode(data[:numInstructions])
+			data = data[numInstructions:]
+			assert len(data) < 4, "bad composite data"
+	
+	def decompileCoordinates(self, data):
+		"""Decompile endPtsOfContours, instructions, flags and coordinates
+		of a simple glyph from binary glyf data."""
+		endPtsOfContours = array.array("h")
+		endPtsOfContours.fromstring(data[:2*self.numberOfContours])
+		if sys.byteorder != "big":
+			endPtsOfContours.byteswap()
+		self.endPtsOfContours = endPtsOfContours.tolist()
+		
+		data = data[2*self.numberOfContours:]
+		
+		instructionLength, = struct.unpack(">h", data[:2])
+		data = data[2:]
+		self.program = ttProgram.Program()
+		self.program.fromBytecode(data[:instructionLength])
+		data = data[instructionLength:]
+		# The last end-point index determines the total number of points.
+		nCoordinates = self.endPtsOfContours[-1] + 1
+		flags, xCoordinates, yCoordinates = \
+				self.decompileCoordinatesRaw(nCoordinates, data)
+		
+		# fill in repetitions and apply signs
+		self.coordinates = coordinates = GlyphCoordinates.zeros(nCoordinates)
+		xIndex = 0
+		yIndex = 0
+		for i in range(nCoordinates):
+			flag = flags[i]
+			# x coordinate
+			if flag & flagXShort:
+				# Short deltas are unsigned bytes; flagXsame doubles as the sign bit.
+				if flag & flagXsame:
+					x = xCoordinates[xIndex]
+				else:
+					x = -xCoordinates[xIndex]
+				xIndex = xIndex + 1
+			elif flag & flagXsame:
+				# No entry consumed: delta of zero.
+				x = 0
+			else:
+				x = xCoordinates[xIndex]
+				xIndex = xIndex + 1
+			# y coordinate
+			if flag & flagYShort:
+				if flag & flagYsame:
+					y = yCoordinates[yIndex]
+				else:
+					y = -yCoordinates[yIndex]
+				yIndex = yIndex + 1
+			elif flag & flagYsame:
+				y = 0
+			else:
+				y = yCoordinates[yIndex]
+				yIndex = yIndex + 1
+			coordinates[i] = (x, y)
+		assert xIndex == len(xCoordinates)
+		assert yIndex == len(yCoordinates)
+		# Deltas were stored; convert to absolute positions.
+		coordinates.relativeToAbsolute()
+		# discard all flags but for "flagOnCurve"
+		self.flags = array.array("B", (f & flagOnCurve for f in flags))
+
+	def decompileCoordinatesRaw(self, nCoordinates, data):
+		"""Expand the run-length-encoded flags and raw-unpack the x/y delta
+		streams; returns (flags, xCoordinates, yCoordinates) with signs and
+		repetitions still unapplied."""
+		# unpack flags and prepare unpacking of coordinates
+		flags = array.array("B", [0] * nCoordinates)
+		# Warning: deep Python trickery going on. We use the struct module to unpack
+		# the coordinates. We build a format string based on the flags, so we can
+		# unpack the coordinates in one struct.unpack() call.
+		xFormat = ">" # big endian
+		yFormat = ">" # big endian
+		i = j = 0
+		while True:
+			flag = byteord(data[i])
+			i = i + 1
+			repeat = 1
+			if flag & flagRepeat:
+				# Next byte says how many extra times this flag repeats.
+				repeat = byteord(data[i]) + 1
+				i = i + 1
+			for k in range(repeat):
+				if flag & flagXShort:
+					xFormat = xFormat + 'B'
+				elif not (flag & flagXsame):
+					xFormat = xFormat + 'h'
+				if flag & flagYShort:
+					yFormat = yFormat + 'B'
+				elif not (flag & flagYsame):
+					yFormat = yFormat + 'h'
+				flags[j] = flag
+				j = j + 1
+			if j >= nCoordinates:
+				break
+		assert j == nCoordinates, "bad glyph flags"
+		data = data[i:]
+		# unpack raw coordinates, krrrrrr-tching!
+		xDataLen = struct.calcsize(xFormat)
+		yDataLen = struct.calcsize(yFormat)
+		# Up to 3 excess bytes are tolerated as padding; more is suspicious.
+		if len(data) - (xDataLen + yDataLen) >= 4:
+			warnings.warn("too much glyph data: %d excess bytes" % (len(data) - (xDataLen + yDataLen)))
+		xCoordinates = struct.unpack(xFormat, data[:xDataLen])
+		yCoordinates = struct.unpack(yFormat, data[xDataLen:xDataLen+yDataLen])
+		return flags, xCoordinates, yCoordinates
+	
+	def compileComponents(self, glyfTable):
+		"""Compile the component records of a composite glyph to binary
+		data, appending the instruction stream (if any) after the last one."""
+		data = b""
+		lastcomponent = len(self.components) - 1
+		more = 1
+		haveInstructions = 0
+		for i in range(len(self.components)):
+			if i == lastcomponent:
+				# Only the last record clears MORE and carries the
+				# WE_HAVE_INSTRUCTIONS flag.
+				haveInstructions = hasattr(self, "program")
+				more = 0
+			compo = self.components[i]
+			data = data + compo.compile(more, haveInstructions, glyfTable)
+		if haveInstructions:
+			instructions = self.program.getBytecode()
+			data = data + struct.pack(">h", len(instructions)) + instructions
+		return data
+			
+	
+	def compileCoordinates(self):
+		assert len(self.coordinates) == len(self.flags)
+		data = b""
+		endPtsOfContours = array.array("h", self.endPtsOfContours)
+		if sys.byteorder != "big":
+			endPtsOfContours.byteswap()
+		data = data + endPtsOfContours.tostring()
+		instructions = self.program.getBytecode()
+		data = data + struct.pack(">h", len(instructions)) + instructions
+		nCoordinates = len(self.coordinates)
+		
+		coordinates = self.coordinates.copy()
+		coordinates.absoluteToRelative()
+		flags = self.flags
+		compressedflags = []
+		xPoints = []
+		yPoints = []
+		xFormat = ">"
+		yFormat = ">"
+		lastflag = None
+		repeat = 0
+		for i in range(len(coordinates)):
+			# Oh, the horrors of TrueType
+			flag = flags[i]
+			x, y = coordinates[i]
+			# do x
+			if x == 0:
+				flag = flag | flagXsame
+			elif -255 <= x <= 255:
+				flag = flag | flagXShort
+				if x > 0:
+					flag = flag | flagXsame
+				else:
+					x = -x
+				xPoints.append(x)
+				xFormat = xFormat + 'B'
+			else:
+				xPoints.append(x)
+				xFormat = xFormat + 'h'
+			# do y
+			if y == 0:
+				flag = flag | flagYsame
+			elif -255 <= y <= 255:
+				flag = flag | flagYShort
+				if y > 0:
+					flag = flag | flagYsame
+				else:
+					y = -y
+				yPoints.append(y)
+				yFormat = yFormat + 'B'
+			else:
+				yPoints.append(y)
+				yFormat = yFormat + 'h'
+			# handle repeating flags
+			if flag == lastflag and repeat != 255:
+				repeat = repeat + 1
+				if repeat == 1:
+					compressedflags.append(flag)
+				else:
+					compressedflags[-2] = flag | flagRepeat
+					compressedflags[-1] = repeat
+			else:
+				repeat = 0
+				compressedflags.append(flag)
+			lastflag = flag
+		data = data + array.array("B", compressedflags).tostring()
+		if coordinates.isFloat():
+			# Warn?
+			xPoints = [int(round(x)) for x in xPoints]
+			yPoints = [int(round(y)) for y in xPoints]
+		data = data + struct.pack(*(xFormat,)+tuple(xPoints))
+		data = data + struct.pack(*(yFormat,)+tuple(yPoints))
+		return data
+	
+	def recalcBounds(self, glyfTable):
+		"""Recalculate xMin/yMin/xMax/yMax as the integer extrema of all
+		coordinate points (on- and off-curve), per the glyf header spec."""
+		coords, endPts, flags = self.getCoordinates(glyfTable)
+		if len(coords) > 0:
+			if 0:
+				# This branch calculates exact glyph outline bounds
+				# analytically, handling cases without on-curve
+				# extremas, etc.  However, the glyf table header
+				# simply says that the bounds should be min/max x/y
+				# "for coordinate data", so I suppose that means no
+				# fancy thing here, just get extremas of all coord
+				# points (on and off).  As such, this branch is
+				# disabled.
+
+				# Collect on-curve points
+				onCurveCoords = [coords[j] for j in range(len(coords))
+						 if flags[j] & flagOnCurve]
+				# Add implicit on-curve points
+				start = 0
+				for end in endPts:
+					last = end
+					for j in range(start, end + 1):
+						if not ((flags[j] | flags[last]) & flagOnCurve):
+							x = (coords[last][0] + coords[j][0]) / 2
+							y = (coords[last][1] + coords[j][1]) / 2
+							onCurveCoords.append((x,y))
+						last = j
+					start = end + 1
+				# Add bounds for curves without an explicit extrema
+				start = 0
+				for end in endPts:
+					last = end
+					for j in range(start, end + 1):
+						if not (flags[j] & flagOnCurve):
+							next = j + 1 if j < end else start
+							bbox = calcBounds([coords[last], coords[next]])
+							if not pointInRect(coords[j], bbox):
+								# Ouch!
+								warnings.warn("Outline has curve with implicit extrema.")
+								# Ouch!  Find analytical curve bounds.
+								pthis = coords[j]
+								plast = coords[last]
+								if not (flags[last] & flagOnCurve):
+									plast = ((pthis[0]+plast[0])/2, (pthis[1]+plast[1])/2)
+								pnext = coords[next]
+								if not (flags[next] & flagOnCurve):
+									pnext = ((pthis[0]+pnext[0])/2, (pthis[1]+pnext[1])/2)
+								bbox = calcQuadraticBounds(plast, pthis, pnext)
+								onCurveCoords.append((bbox[0],bbox[1]))
+								onCurveCoords.append((bbox[2],bbox[3]))
+						last = j
+					start = end + 1
+
+				self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(onCurveCoords)
+			else:
+				self.xMin, self.yMin, self.xMax, self.yMax = calcIntBounds(coords)
+		else:
+			# No outline at all: conventional all-zero bounds.
+			self.xMin, self.yMin, self.xMax, self.yMax = (0, 0, 0, 0)
+	
+	def isComposite(self):
+		"""Can be called on compact or expanded glyph."""
+		if hasattr(self, "data"):
+			return struct.unpack(">h", self.data[:2])[0] == -1
+		else:
+			return self.numberOfContours == -1
+	
+	def __getitem__(self, componentIndex):
+		# Sequence protocol: index the components of a composite glyph.
+		if not self.isComposite():
+			raise ttLib.TTLibError("can't use glyph as sequence")
+		return self.components[componentIndex]
+	
+	def getCoordinates(self, glyfTable):
+		"""Return (coordinates, endPtsOfContours, flags); composites are
+		flattened by applying each component's offset or point-matching
+		placement and optional transform."""
+		if self.numberOfContours > 0:
+			return self.coordinates, self.endPtsOfContours, self.flags
+		elif self.isComposite():
+			# it's a composite
+			allCoords = GlyphCoordinates()
+			allFlags = array.array("B")
+			allEndPts = []
+			for compo in self.components:
+				g = glyfTable[compo.glyphName]
+				coordinates, endPts, flags = g.getCoordinates(glyfTable)
+				if hasattr(compo, "firstPt"):
+					# move according to two reference points
+					x1,y1 = allCoords[compo.firstPt]
+					x2,y2 = coordinates[compo.secondPt]
+					move = x1-x2, y1-y2
+				else:
+					move = compo.x, compo.y
+				
+				coordinates = GlyphCoordinates(coordinates)
+				if not hasattr(compo, "transform"):
+					coordinates.translate(move)
+				else:
+					apple_way = compo.flags & SCALED_COMPONENT_OFFSET
+					ms_way = compo.flags & UNSCALED_COMPONENT_OFFSET
+					assert not (apple_way and ms_way)
+					if not (apple_way or ms_way):
+						scale_component_offset = SCALE_COMPONENT_OFFSET_DEFAULT  # see top of this file
+					else:
+						scale_component_offset = apple_way
+					if scale_component_offset:
+						# the Apple way: first move, then scale (ie. scale the component offset)
+						coordinates.translate(move)
+						coordinates.transform(compo.transform)
+					else:
+						# the MS way: first scale, then move
+						coordinates.transform(compo.transform)
+						coordinates.translate(move)
+				offset = len(allCoords)
+				# Contour end indices must be shifted past the points
+				# already accumulated from earlier components.
+				allEndPts.extend(e + offset for e in endPts)
+				allCoords.extend(coordinates)
+				allFlags.extend(flags)
+			return allCoords, allEndPts, allFlags
+		else:
+			# Empty glyph: no outline data at all.
+			return GlyphCoordinates(), [], array.array("B")
+
+	def getComponentNames(self, glyfTable):
+		"""Return the list of base-glyph names referenced by this glyph
+		(empty for non-composites), without expanding compact data."""
+		if not hasattr(self, "data"):
+			if self.isComposite():
+				return [c.glyphName for c in self.components]
+			else:
+				return []
+
+		# Extract components without expanding glyph
+
+		if not self.data or struct.unpack(">h", self.data[:2])[0] >= 0:
+			return []  # Not composite
+
+		data = self.data
+		# Skip the 10-byte glyph header (numberOfContours + bounds).
+		i = 10
+		components = []
+		more = 1
+		while more:
+			flags, glyphID = struct.unpack(">HH", data[i:i+4])
+			i += 4
+			flags = int(flags)
+			components.append(glyfTable.getGlyphName(int(glyphID)))
+
+			# Skip this record's arguments and optional transform.
+			if flags & ARG_1_AND_2_ARE_WORDS: i += 4
+			else: i += 2
+			if flags & WE_HAVE_A_SCALE: i += 2
+			elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4
+			elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8
+			more = flags & MORE_COMPONENTS
+
+		return components
+
+	def removeHinting(self):
+		"""Strip TrueType instructions from this glyph, operating directly
+		on the compact binary data when the glyph is not expanded."""
+		if not hasattr(self, "data"):
+			self.program = ttProgram.Program()
+			self.program.fromBytecode([])
+			return
+
+		# Remove instructions without expanding glyph
+
+		if not self.data:
+			return
+		numContours = struct.unpack(">h", self.data[:2])[0]
+		data = array.array("B", self.data)
+		i = 10
+		if numContours >= 0:
+			i += 2 * numContours # endPtsOfContours
+			nCoordinates = ((data[i-2] << 8) | data[i-1]) + 1
+			instructionLen = (data[i] << 8) | data[i+1]
+			# Zero it
+			data[i] = data [i+1] = 0
+			i += 2
+			if instructionLen:
+				# Splice it out
+				data = data[:i] + data[i+instructionLen:]
+				if instructionLen % 4:
+					# We now have to go ahead and drop
+					# the old padding.  Otherwise with
+					# padding we have to add, we may
+					# end up with more than 3 bytes of
+					# padding.
+					# Walk the flag stream to find where the coordinate
+					# data really ends.
+					coordBytes = 0
+					j = 0
+					while True:
+						flag = data[i]
+						i = i + 1
+						repeat = 1
+						if flag & flagRepeat:
+							repeat = data[i] + 1
+							i = i + 1
+						xBytes = yBytes = 0
+						if flag & flagXShort:
+							xBytes = 1
+						elif not (flag & flagXsame):
+							xBytes = 2
+						if flag & flagYShort:
+							yBytes = 1
+						elif not (flag & flagYsame):
+							yBytes = 2
+						coordBytes += (xBytes + yBytes) * repeat
+						j += repeat
+						if j >= nCoordinates:
+							break
+					assert j == nCoordinates, "bad glyph flags"
+					data = data[:i + coordBytes]
+		else:
+			# Composite: clear WE_HAVE_INSTRUCTIONS on every record and
+			# truncate after the last one.
+			more = 1
+			while more:
+				flags =(data[i] << 8) | data[i+1]
+				# Turn instruction flag off
+				flags &= ~WE_HAVE_INSTRUCTIONS
+				data[i+0] = flags >> 8
+				data[i+1] = flags & 0xFF
+				i += 4
+				flags = int(flags)
+
+				if flags & ARG_1_AND_2_ARE_WORDS: i += 4
+				else: i += 2
+				if flags & WE_HAVE_A_SCALE: i += 2
+				elif flags & WE_HAVE_AN_X_AND_Y_SCALE: i += 4
+				elif flags & WE_HAVE_A_TWO_BY_TWO: i += 8
+				more = flags & MORE_COMPONENTS
+
+			# Cut off
+			data = data[:i]
+
+		data = data.tostring()
+
+		if len(data) % 4:
+			# add pad bytes
+			nPadBytes = 4 - (len(data) % 4)
+			data = data + b"\0" * nPadBytes
+
+		self.data = data
+
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+		return self.__dict__ == other.__dict__
+
+
+class GlyphComponent(object):
+	
+	def __init__(self):
+		pass
+	
+	def getComponentInfo(self):
+		"""Return the base glyph name and a transform."""
+		# XXX Ignoring self.firstPt & self.lastpt for now: I need to implement
+		# something equivalent in fontTools.objects.glyph (I'd rather not 
+		# convert it to an absolute offset, since it is valuable information).
+		# This method will now raise "AttributeError: x" on glyphs that use
+		# this TT feature.
+		if hasattr(self, "transform"):
+			[[xx, xy], [yx, yy]] = self.transform
+			trans = (xx, xy, yx, yy, self.x, self.y)
+		else:
+			trans = (1, 0, 0, 1, self.x, self.y)
+		return self.glyphName, trans
+	
+	def decompile(self, data, glyfTable):
+		flags, glyphID = struct.unpack(">HH", data[:4])
+		self.flags = int(flags)
+		glyphID = int(glyphID)
+		self.glyphName = glyfTable.getGlyphName(int(glyphID))
+		#print ">>", reprflag(self.flags)
+		data = data[4:]
+		
+		if self.flags & ARG_1_AND_2_ARE_WORDS:
+			if self.flags & ARGS_ARE_XY_VALUES:
+				self.x, self.y = struct.unpack(">hh", data[:4])
+			else:
+				x, y = struct.unpack(">HH", data[:4])
+				self.firstPt, self.secondPt = int(x), int(y)
+			data = data[4:]
+		else:
+			if self.flags & ARGS_ARE_XY_VALUES:
+				self.x, self.y = struct.unpack(">bb", data[:2])
+			else:
+				x, y = struct.unpack(">BB", data[:2])
+				self.firstPt, self.secondPt = int(x), int(y)
+			data = data[2:]
+		
+		if self.flags & WE_HAVE_A_SCALE:
+			scale, = struct.unpack(">h", data[:2])
+			self.transform = [[fi2fl(scale,14), 0], [0, fi2fl(scale,14)]]  # fixed 2.14
+			data = data[2:]
+		elif self.flags & WE_HAVE_AN_X_AND_Y_SCALE:
+			xscale, yscale = struct.unpack(">hh", data[:4])
+			self.transform = [[fi2fl(xscale,14), 0], [0, fi2fl(yscale,14)]]  # fixed 2.14
+			data = data[4:]
+		elif self.flags & WE_HAVE_A_TWO_BY_TWO:
+			(xscale, scale01, 
+					scale10, yscale) = struct.unpack(">hhhh", data[:8])
+			self.transform = [[fi2fl(xscale,14), fi2fl(scale01,14)],
+					  [fi2fl(scale10,14), fi2fl(yscale,14)]] # fixed 2.14
+			data = data[8:]
+		more = self.flags & MORE_COMPONENTS
+		haveInstructions = self.flags & WE_HAVE_INSTRUCTIONS
+		self.flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | 
+				SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
+				NON_OVERLAPPING)
+		return more, haveInstructions, data
+	
+	def compile(self, more, haveInstructions, glyfTable):
+		data = b""
+		
+		# reset all flags we will calculate ourselves
+		flags = self.flags & (ROUND_XY_TO_GRID | USE_MY_METRICS | 
+				SCALED_COMPONENT_OFFSET | UNSCALED_COMPONENT_OFFSET |
+				NON_OVERLAPPING)
+		if more:
+			flags = flags | MORE_COMPONENTS
+		if haveInstructions:
+			flags = flags | WE_HAVE_INSTRUCTIONS
+		
+		if hasattr(self, "firstPt"):
+			if (0 <= self.firstPt <= 255) and (0 <= self.secondPt <= 255):
+				data = data + struct.pack(">BB", self.firstPt, self.secondPt)
+			else:
+				data = data + struct.pack(">HH", self.firstPt, self.secondPt)
+				flags = flags | ARG_1_AND_2_ARE_WORDS
+		else:
+			flags = flags | ARGS_ARE_XY_VALUES
+			if (-128 <= self.x <= 127) and (-128 <= self.y <= 127):
+				data = data + struct.pack(">bb", self.x, self.y)
+			else:
+				data = data + struct.pack(">hh", self.x, self.y)
+				flags = flags | ARG_1_AND_2_ARE_WORDS
+		
+		if hasattr(self, "transform"):
+			transform = [[fl2fi(x,14) for x in row] for row in self.transform]
+			if transform[0][1] or transform[1][0]:
+				flags = flags | WE_HAVE_A_TWO_BY_TWO
+				data = data + struct.pack(">hhhh", 
+						transform[0][0], transform[0][1],
+						transform[1][0], transform[1][1])
+			elif transform[0][0] != transform[1][1]:
+				flags = flags | WE_HAVE_AN_X_AND_Y_SCALE
+				data = data + struct.pack(">hh", 
+						transform[0][0], transform[1][1])
+			else:
+				flags = flags | WE_HAVE_A_SCALE
+				data = data + struct.pack(">h", 
+						transform[0][0])
+		
+		glyphID = glyfTable.getGlyphID(self.glyphName)
+		return struct.pack(">HH", flags, glyphID) + data
+	
+	def toXML(self, writer, ttFont):
+		attrs = [("glyphName", self.glyphName)]
+		if not hasattr(self, "firstPt"):
+			attrs = attrs + [("x", self.x), ("y", self.y)]
+		else:
+			attrs = attrs + [("firstPt", self.firstPt), ("secondPt", self.secondPt)]
+		
+		if hasattr(self, "transform"):
+			transform = self.transform
+			if transform[0][1] or transform[1][0]:
+				attrs = attrs + [
+						("scalex", transform[0][0]), ("scale01", transform[0][1]),
+						("scale10", transform[1][0]), ("scaley", transform[1][1]),
+						]
+			elif transform[0][0] != transform[1][1]:
+				attrs = attrs + [
+						("scalex", transform[0][0]), ("scaley", transform[1][1]),
+						]
+			else:
+				attrs = attrs + [("scale", transform[0][0])]
+		attrs = attrs + [("flags", hex(self.flags))]
+		writer.simpletag("component", attrs)
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		self.glyphName = attrs["glyphName"]
+		if "firstPt" in attrs:
+			self.firstPt = safeEval(attrs["firstPt"])
+			self.secondPt = safeEval(attrs["secondPt"])
+		else:
+			self.x = safeEval(attrs["x"])
+			self.y = safeEval(attrs["y"])
+		if "scale01" in attrs:
+			scalex = safeEval(attrs["scalex"])
+			scale01 = safeEval(attrs["scale01"])
+			scale10 = safeEval(attrs["scale10"])
+			scaley = safeEval(attrs["scaley"])
+			self.transform = [[scalex, scale01], [scale10, scaley]]
+		elif "scalex" in attrs:
+			scalex = safeEval(attrs["scalex"])
+			scaley = safeEval(attrs["scaley"])
+			self.transform = [[scalex, 0], [0, scaley]]
+		elif "scale" in attrs:
+			scale = safeEval(attrs["scale"])
+			self.transform = [[scale, 0], [0, scale]]
+		self.flags = safeEval(attrs["flags"])
+	
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+		return self.__dict__ == other.__dict__
+
+class GlyphCoordinates(object):
+
+	def __init__(self, iterable=[]):
+		self._a = array.array("h")
+		self.extend(iterable)
+
+	def isFloat(self):
+		return self._a.typecode == 'f'
+
+	def _ensureFloat(self):
+		if self.isFloat():
+			return
+		self._a = array.array("f", self._a)
+
+	def _checkFloat(self, p):
+		if any(isinstance(v, float) for v in p):
+			p = [int(v) if int(v) == v else v for v in p]
+			if any(isinstance(v, float) for v in p):
+				self._ensureFloat()
+		return p
+
+	@staticmethod
+	def zeros(count):
+		return GlyphCoordinates([(0,0)] * count)
+
+	def copy(self):
+		c = GlyphCoordinates()
+		c._a.extend(self._a)
+		return c
+
+	def __len__(self):
+		return len(self._a) // 2
+
+	def __getitem__(self, k):
+		if isinstance(k, slice):
+			indices = range(*k.indices(len(self)))
+			return [self[i] for i in indices]
+		return self._a[2*k],self._a[2*k+1]
+
+	def __setitem__(self, k, v):
+		if isinstance(k, slice):
+			indices = range(*k.indices(len(self)))
+			# XXX This only works if len(v) == len(indices)
+			# TODO Implement __delitem__
+			for j,i in enumerate(indices):
+				self[i] = v[j]
+			return
+		v = self._checkFloat(v)
+		self._a[2*k],self._a[2*k+1] = v
+
+	def __repr__(self):
+		return 'GlyphCoordinates(['+','.join(str(c) for c in self)+'])'
+
+	def append(self, p):
+		p = self._checkFloat(p)
+		self._a.extend(tuple(p))
+
+	def extend(self, iterable):
+		for p in iterable:
+			p = self._checkFloat(p)
+			self._a.extend(p)
+
+	def relativeToAbsolute(self):
+		a = self._a
+		x,y = 0,0
+		for i in range(len(a) // 2):
+			a[2*i  ] = x = a[2*i  ] + x
+			a[2*i+1] = y = a[2*i+1] + y
+
+	def absoluteToRelative(self):
+		a = self._a
+		x,y = 0,0
+		for i in range(len(a) // 2):
+			dx = a[2*i  ] - x
+			dy = a[2*i+1] - y
+			x = a[2*i  ]
+			y = a[2*i+1]
+			a[2*i  ] = dx
+			a[2*i+1] = dy
+
+	def translate(self, p):
+		(x,y) = p
+		a = self._a
+		for i in range(len(a) // 2):
+			a[2*i  ] += x
+			a[2*i+1] += y
+
+	def transform(self, t):
+		a = self._a
+		for i in range(len(a) // 2):
+			x = a[2*i  ]
+			y = a[2*i+1]
+			px = x * t[0][0] + y * t[1][0]
+			py = x * t[0][1] + y * t[1][1]
+			self[i] = (px, py)
+
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+		return self._a == other._a
+
+
+def reprflag(flag):
+	bin = ""
+	if isinstance(flag, str):
+		flag = byteord(flag)
+	while flag:
+		if flag & 0x01:
+			bin = "1" + bin
+		else:
+			bin = "0" + bin
+		flag = flag >> 1
+	bin = (14 - len(bin)) * "0" + bin
+	return bin
+
diff --git a/Lib/fontTools/ttLib/tables/_h_d_m_x.py b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
new file mode 100644
index 0000000..06fca7d
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_h_d_m_x.py
@@ -0,0 +1,100 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from . import DefaultTable
+
+hdmxHeaderFormat = """
+	>   # big endian!
+	version:	H
+	numRecords:	H
+	recordSize:	l
+"""
+
+class table__h_d_m_x(DefaultTable.DefaultTable):
+	
+	"""The 'hdmx' (horizontal device metrics) table: per-ppem integer
+	advance widths, stored as self.hdmx = {ppem: {glyphName: width}}."""
+	
+	def decompile(self, data, ttFont):
+		numGlyphs = ttFont['maxp'].numGlyphs
+		glyphOrder = ttFont.getGlyphOrder()
+		dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
+		self.hdmx = {}
+		for i in range(self.numRecords):
+			# Each record: ppem byte, max-width byte, then one width byte
+			# per glyph, padded to recordSize.
+			ppem = byteord(data[0])
+			maxSize = byteord(data[1])
+			widths = {}
+			for glyphID in range(numGlyphs):
+				widths[glyphOrder[glyphID]] = byteord(data[glyphID+2])
+			self.hdmx[ppem] = widths
+			data = data[self.recordSize:]
+		assert len(data) == 0, "too much hdmx data"
+	
+	def compile(self, ttFont):
+		self.version = 0
+		numGlyphs = ttFont['maxp'].numGlyphs
+		glyphOrder = ttFont.getGlyphOrder()
+		# Records are padded to a 4-byte ("long") boundary.
+		self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
+		pad = (self.recordSize - 2 - numGlyphs) * b"\0"
+		self.numRecords = len(self.hdmx)
+		data = sstruct.pack(hdmxHeaderFormat, self)
+		items = sorted(self.hdmx.items())
+		for ppem, widths in items:
+			data = data + bytechr(ppem) + bytechr(max(widths.values()))
+			for glyphID in range(len(glyphOrder)):
+				width = widths[glyphOrder[glyphID]]
+				data = data + bytechr(width)
+			data = data + pad
+		return data
+	
+	def toXML(self, writer, ttFont):
+		"""Dump as a compact fixed-width text matrix: one column per ppem,
+		one row per (sorted) glyph name."""
+		writer.begintag("hdmxData")
+		writer.newline()
+		ppems = sorted(self.hdmx.keys())
+		records = []
+		format = ""
+		for ppem in ppems:
+			widths = self.hdmx[ppem]
+			records.append(widths)
+			format = format + "%4d"
+		glyphNames = ttFont.getGlyphOrder()[:]
+		glyphNames.sort()
+		maxNameLen = max(map(len, glyphNames))
+		format = "%" + repr(maxNameLen) + 's:' + format + ' ;'
+		writer.write(format % (("ppem",) + tuple(ppems)))
+		writer.newline()
+		writer.newline()
+		for glyphName in glyphNames:
+			row = []
+			for ppem in ppems:
+				widths = self.hdmx[ppem]
+				row.append(widths[glyphName])
+			if ";" in glyphName:
+				# ";" is the row terminator in this format; escape it.
+				glyphName = "\\x3b".join(glyphName.split(";"))
+			writer.write(format % ((glyphName,) + tuple(row)))
+			writer.newline()
+		writer.endtag("hdmxData")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if name != "hdmxData":
+			return
+		content = strjoin(content)
+		lines = content.split(";")
+		topRow = lines[0].split()
+		assert topRow[0] == "ppem:", "illegal hdmx format"
+		ppems = list(map(int, topRow[1:]))
+		self.hdmx = hdmx = {}
+		for ppem in ppems:
+			hdmx[ppem] = {}
+		lines = (line.split() for line in lines[1:])
+		for line in lines:
+			if not line:
+				continue
+			assert line[0][-1] == ":", "illegal hdmx format"
+			glyphName = line[0][:-1]
+			if "\\" in glyphName:
+				# Undo the "\x3b" escaping applied in toXML.
+				from fontTools.misc.textTools import safeEval
+				glyphName = safeEval('"""' + glyphName + '"""')
+			line = list(map(int, line[1:]))
+			assert len(line) == len(ppems), "illegal hdmx format"
+			for i in range(len(ppems)):
+				hdmx[ppems[i]][glyphName] = line[i]
+
diff --git a/Lib/fontTools/ttLib/tables/_h_e_a_d.py b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
new file mode 100644
index 0000000..bf4116d
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_h_e_a_d.py
@@ -0,0 +1,81 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval, num2binary, binary2num
+from . import DefaultTable
+import time
+import calendar
+
+
+headFormat = """
+		>	# big endian
+		tableVersion:       16.16F
+		fontRevision:       16.16F
+		checkSumAdjustment: I
+		magicNumber:        I
+		flags:              H
+		unitsPerEm:         H
+		created:            Q
+		modified:           Q
+		xMin:               h
+		yMin:               h
+		xMax:               h
+		yMax:               h
+		macStyle:           H
+		lowestRecPPEM:      H
+		fontDirectionHint:  h
+		indexToLocFormat:   h
+		glyphDataFormat:    h
+"""
+
+class table__h_e_a_d(DefaultTable.DefaultTable):
+	
+	dependencies = ['maxp', 'loca']
+	
+	def decompile(self, data, ttFont):
+		dummy, rest = sstruct.unpack2(headFormat, data, self)
+		if rest:
+			# this is quite illegal, but there seem to be fonts out there that do this
+			assert rest == "\0\0"
+	
+	def compile(self, ttFont):
+		if ttFont.recalcTimestamp:
+			self.modified = int(time.time() - mac_epoch_diff)
+		data = sstruct.pack(headFormat, self)
+		return data
+	
+	def toXML(self, writer, ttFont):
+		writer.comment("Most of this table will be recalculated by the compiler")
+		writer.newline()
+		formatstring, names, fixes = sstruct.getformat(headFormat)
+		for name in names:
+			value = getattr(self, name)
+			if name in ("created", "modified"):
+				try:
+					value = time.asctime(time.gmtime(max(0, value + mac_epoch_diff)))
+				except ValueError:
+					value = time.asctime(time.gmtime(0))
+			if name in ("magicNumber", "checkSumAdjustment"):
+				if value < 0:
+					value = value + 0x100000000
+				value = hex(value)
+				if value[-1:] == "L":
+					value = value[:-1]
+			elif name in ("macStyle", "flags"):
+				value = num2binary(value, 16)
+			writer.simpletag(name, value=value)
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		value = attrs["value"]
+		if name in ("created", "modified"):
+			value = calendar.timegm(time.strptime(value)) - mac_epoch_diff
+		elif name in ("macStyle", "flags"):
+			value = binary2num(value)
+		else:
+			value = safeEval(value)
+		setattr(self, name, value)
+
+
+# Difference between the original Mac epoch (1904) to the epoch on this machine.
+mac_epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
diff --git a/Lib/fontTools/ttLib/tables/_h_h_e_a.py b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
new file mode 100644
index 0000000..f8b7eb3
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_h_h_e_a.py
@@ -0,0 +1,91 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+
+
+hheaFormat = """
+		>  # big endian
+		tableVersion:           16.16F
+		ascent:                 h
+		descent:                h
+		lineGap:                h
+		advanceWidthMax:        H
+		minLeftSideBearing:     h
+		minRightSideBearing:    h
+		xMaxExtent:             h
+		caretSlopeRise:         h
+		caretSlopeRun:          h
+		caretOffset:            h
+		reserved0:              h
+		reserved1:              h
+		reserved2:              h
+		reserved3:              h
+		metricDataFormat:       h
+		numberOfHMetrics:       H
+"""
+
+
+class table__h_h_e_a(DefaultTable.DefaultTable):
+	
+	"""The 'hhea' (horizontal header) table: global horizontal metrics,
+	recalculated from 'hmtx'/'glyf' at compile time when requested."""
+	
+	dependencies = ['hmtx', 'glyf']
+	
+	def decompile(self, data, ttFont):
+		sstruct.unpack(hheaFormat, data, self)
+	
+	def compile(self, ttFont):
+		# Only recalc when the glyf table is loaded and the font asks for it.
+		if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
+			self.recalc(ttFont)
+		return sstruct.pack(hheaFormat, self)
+	
+	def recalc(self, ttFont):
+		"""Recompute advanceWidthMax, min side bearings and xMaxExtent
+		from the per-glyph metrics and outlines."""
+		hmtxTable = ttFont['hmtx']
+		if 'glyf' in ttFont:
+			glyfTable = ttFont['glyf']
+			# Sentinel larger than any plausible font-unit value.
+			INFINITY = 100000
+			advanceWidthMax = 0
+			minLeftSideBearing = +INFINITY  # arbitrary big number
+			minRightSideBearing = +INFINITY # arbitrary big number
+			xMaxExtent = -INFINITY          # arbitrary big negative number
+			
+			for name in ttFont.getGlyphOrder():
+				width, lsb = hmtxTable[name]
+				advanceWidthMax = max(advanceWidthMax, width)
+				g = glyfTable[name]
+				if g.numberOfContours == 0:
+					continue
+				if g.numberOfContours < 0 and not hasattr(g, "xMax"):
+					# Composite glyph without extents set.
+					# Calculate those.
+					g.recalcBounds(glyfTable)
+				minLeftSideBearing = min(minLeftSideBearing, lsb)
+				rsb = width - lsb - (g.xMax - g.xMin)
+				minRightSideBearing = min(minRightSideBearing, rsb)
+				extent = lsb + (g.xMax - g.xMin)
+				xMaxExtent = max(xMaxExtent, extent)
+
+			if xMaxExtent == -INFINITY:
+				# No glyph has outlines.
+				minLeftSideBearing = 0
+				minRightSideBearing = 0
+				xMaxExtent = 0
+
+			self.advanceWidthMax = advanceWidthMax
+			self.minLeftSideBearing = minLeftSideBearing
+			self.minRightSideBearing = minRightSideBearing
+			self.xMaxExtent = xMaxExtent
+		else:
+			# XXX CFF recalc...
+			pass
+	
+	def toXML(self, writer, ttFont):
+		formatstring, names, fixes = sstruct.getformat(hheaFormat)
+		for name in names:
+			value = getattr(self, name)
+			writer.simpletag(name, value=value)
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		setattr(self, name, safeEval(attrs["value"]))
+
diff --git a/Lib/fontTools/ttLib/tables/_h_m_t_x.py b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
new file mode 100644
index 0000000..c7b5ee9
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_h_m_t_x.py
@@ -0,0 +1,102 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import sys
+import array
+import warnings
+
+
+class table__h_m_t_x(DefaultTable.DefaultTable):
+	
+	headerTag = 'hhea'
+	advanceName = 'width'
+	sideBearingName = 'lsb'
+	numberOfMetricsName = 'numberOfHMetrics'
+	
+	def decompile(self, data, ttFont):
+		numGlyphs = ttFont['maxp'].numGlyphs
+		numberOfMetrics = int(getattr(ttFont[self.headerTag], self.numberOfMetricsName))
+		if numberOfMetrics > numGlyphs:
+			numberOfMetrics = numGlyphs # We warn later.
+		# Note: advanceWidth is unsigned, but we read/write as signed.
+		metrics = array.array("h", data[:4 * numberOfMetrics])
+		if sys.byteorder != "big":
+			metrics.byteswap()
+		data = data[4 * numberOfMetrics:]
+		numberOfSideBearings = numGlyphs - numberOfMetrics
+		sideBearings = array.array("h", data[:2 * numberOfSideBearings])
+		data = data[2 * numberOfSideBearings:]
+
+		if sys.byteorder != "big":
+			sideBearings.byteswap()
+		if data:
+			warnings.warn("too much 'hmtx'/'vmtx' table data")
+		self.metrics = {}
+		glyphOrder = ttFont.getGlyphOrder()
+		for i in range(numberOfMetrics):
+			glyphName = glyphOrder[i]
+			self.metrics[glyphName] = list(metrics[i*2:i*2+2])
+		lastAdvance = metrics[-2]
+		for i in range(numberOfSideBearings):
+			glyphName = glyphOrder[i + numberOfMetrics]
+			self.metrics[glyphName] = [lastAdvance, sideBearings[i]]
+	
+	def compile(self, ttFont):
+		metrics = []
+		for glyphName in ttFont.getGlyphOrder():
+			metrics.append(self.metrics[glyphName])
+		lastAdvance = metrics[-1][0]
+		lastIndex = len(metrics)
+		while metrics[lastIndex-2][0] == lastAdvance:
+			lastIndex -= 1
+			if lastIndex <= 1:
+				# all advances are equal
+				lastIndex = 1
+				break
+		additionalMetrics = metrics[lastIndex:]
+		additionalMetrics = [sb for advance, sb in additionalMetrics]
+		metrics = metrics[:lastIndex]
+		setattr(ttFont[self.headerTag], self.numberOfMetricsName, len(metrics))
+		
+		allMetrics = []
+		for item in metrics:
+			allMetrics.extend(item)
+		allMetrics = array.array("h", allMetrics)
+		if sys.byteorder != "big":
+			allMetrics.byteswap()
+		data = allMetrics.tostring()
+		
+		additionalMetrics = array.array("h", additionalMetrics)
+		if sys.byteorder != "big":
+			additionalMetrics.byteswap()
+		data = data + additionalMetrics.tostring()
+		return data
+	
+	def toXML(self, writer, ttFont):
+		names = sorted(self.metrics.keys())
+		for glyphName in names:
+			advance, sb = self.metrics[glyphName]
+			writer.simpletag("mtx", [
+					("name", glyphName), 
+					(self.advanceName, advance), 
+					(self.sideBearingName, sb),
+					])
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if not hasattr(self, "metrics"):
+			self.metrics = {}
+		if name == "mtx":
+			self.metrics[attrs["name"]] = [safeEval(attrs[self.advanceName]), 
+					safeEval(attrs[self.sideBearingName])]
+
+	def __delitem__(self, glyphName):
+		del self.metrics[glyphName]
+	
+	def __getitem__(self, glyphName):
+		return self.metrics[glyphName]
+	
+	def __setitem__(self, glyphName, advance_sb_pair):
+		self.metrics[glyphName] = tuple(advance_sb_pair)
+
diff --git a/Lib/fontTools/ttLib/tables/_k_e_r_n.py b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
new file mode 100644
index 0000000..9fa9dd8
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_k_e_r_n.py
@@ -0,0 +1,210 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.ttLib import sfnt
+from fontTools.misc.textTools import safeEval, readHex
+from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
+from . import DefaultTable
+import struct
+import warnings
+
+
+class table__k_e_r_n(DefaultTable.DefaultTable):
+	
+	def getkern(self, format):
+		for subtable in self.kernTables:
+			if subtable.version == format:
+				return subtable
+		return None  # not found
+	
+	def decompile(self, data, ttFont):
+		version, nTables = struct.unpack(">HH", data[:4])
+		apple = False
+		if (len(data) >= 8) and (version == 1):
+			# AAT Apple's "new" format. Hm.
+			version, nTables = struct.unpack(">LL", data[:8])
+			self.version = fi2fl(version, 16)
+			data = data[8:]
+			apple = True
+		else:
+			self.version = version
+			data = data[4:]
+		tablesIndex = []
+		self.kernTables = []
+		for i in range(nTables):
+			if self.version == 1.0:
+				# Apple
+				length, coverage, tupleIndex = struct.unpack(">lHH", data[:8])
+				version = coverage & 0xff
+			else:
+				version, length = struct.unpack(">HH", data[:4])
+			length = int(length)
+			if version not in kern_classes:
+				subtable = KernTable_format_unkown(version)
+			else:
+				subtable = kern_classes[version]()
+			subtable.apple = apple
+			subtable.decompile(data[:length], ttFont)
+			self.kernTables.append(subtable)
+			data = data[length:]
+	
+	def compile(self, ttFont):
+		if hasattr(self, "kernTables"):
+			nTables = len(self.kernTables)
+		else:
+			nTables = 0
+		if self.version == 1.0:
+			# AAT Apple's "new" format.
+			data = struct.pack(">ll", fl2fi(self.version, 16), nTables)
+		else:
+			data = struct.pack(">HH", self.version, nTables)
+		if hasattr(self, "kernTables"):
+			for subtable in self.kernTables:
+				data = data + subtable.compile(ttFont)
+		return data
+	
+	def toXML(self, writer, ttFont):
+		writer.simpletag("version", value=self.version)
+		writer.newline()
+		for subtable in self.kernTables:
+			subtable.toXML(writer, ttFont)
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if name == "version":
+			self.version = safeEval(attrs["value"])
+			return
+		if name != "kernsubtable":
+			return
+		if not hasattr(self, "kernTables"):
+			self.kernTables = []
+		format = safeEval(attrs["format"])
+		if format not in kern_classes:
+			subtable = KernTable_format_unkown(format)
+		else:
+			subtable = kern_classes[format]()
+		self.kernTables.append(subtable)
+		subtable.fromXML(name, attrs, content, ttFont)
+
+
+class KernTable_format_0(object):
+	
+	def decompile(self, data, ttFont):
+		version, length, coverage = (0,0,0)
+		if not self.apple:
+			version, length, coverage = struct.unpack(">HHH", data[:6])
+			data = data[6:]
+		else:
+			version, length, coverage = struct.unpack(">LHH", data[:8])
+			data = data[8:]
+		self.version, self.coverage = int(version), int(coverage)
+		
+		self.kernTable = kernTable = {}
+		
+		nPairs, searchRange, entrySelector, rangeShift = struct.unpack(">HHHH", data[:8])
+		data = data[8:]
+		
+		for k in range(nPairs):
+			if len(data) < 6:
+				# buggy kern table
+				data = b""
+				break
+			left, right, value = struct.unpack(">HHh", data[:6])
+			data = data[6:]
+			left, right = int(left), int(right)
+			kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = value
+		if len(data):
+			warnings.warn("excess data in 'kern' subtable: %d bytes" % len(data))
+	
+	def compile(self, ttFont):
+		nPairs = len(self.kernTable)
+		entrySelector = sfnt.maxPowerOfTwo(nPairs)
+		searchRange = (2 ** entrySelector) * 6
+		rangeShift = (nPairs - (2 ** entrySelector)) * 6
+		data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)
+		
+		# yeehee! (I mean, turn names into indices)
+		getGlyphID = ttFont.getGlyphID
+		kernTable = sorted((getGlyphID(left), getGlyphID(right), value) for ((left,right),value) in self.kernTable.items())
+		for left, right, value in kernTable:
+			data = data + struct.pack(">HHh", left, right, value)
+		return struct.pack(">HHH", self.version, len(data) + 6, self.coverage) + data
+	
+	def toXML(self, writer, ttFont):
+		writer.begintag("kernsubtable", coverage=self.coverage, format=0)
+		writer.newline()
+		items = sorted(self.kernTable.items())
+		for (left, right), value in items:
+			writer.simpletag("pair", [
+					("l", left),
+					("r", right),
+					("v", value)
+					])
+			writer.newline()
+		writer.endtag("kernsubtable")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		self.coverage = safeEval(attrs["coverage"])
+		self.version = safeEval(attrs["format"])
+		if not hasattr(self, "kernTable"):
+			self.kernTable = {}
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attrs, content = element
+			self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])
+	
+	def __getitem__(self, pair):
+		return self.kernTable[pair]
+	
+	def __setitem__(self, pair, value):
+		self.kernTable[pair] = value
+	
+	def __delitem__(self, pair):
+		del self.kernTable[pair]
+
+
+class KernTable_format_2(object):
+	
+	def decompile(self, data, ttFont):
+		self.data = data
+	
+	def compile(self, ttFont):
+		return self.data
+	
+	def toXML(self, writer, ttFont):  # ttFont required: table__k_e_r_n.toXML calls subtable.toXML(writer, ttFont)
+		writer.begintag("kernsubtable", format=2)
+		writer.newline()
+		writer.dumphex(self.data)
+		writer.endtag("kernsubtable")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		self.decompile(readHex(content), ttFont)
+
+
+class KernTable_format_unkown(object):
+	
+	def __init__(self, format):
+		self.format = format
+	
+	def decompile(self, data, ttFont):
+		self.data = data
+	
+	def compile(self, ttFont):
+		return self.data
+	
+	def toXML(self, writer, ttFont):
+		writer.begintag("kernsubtable", format=self.format)
+		writer.newline()
+		writer.comment("unknown 'kern' subtable format")
+		writer.newline()
+		writer.dumphex(self.data)
+		writer.endtag("kernsubtable")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		self.decompile(readHex(content), ttFont)
+
+
+
+kern_classes = {0: KernTable_format_0, 2: KernTable_format_2}
diff --git a/Lib/fontTools/ttLib/tables/_l_o_c_a.py b/Lib/fontTools/ttLib/tables/_l_o_c_a.py
new file mode 100644
index 0000000..1ce9cab
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_l_o_c_a.py
@@ -0,0 +1,61 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from . import DefaultTable
+import sys
+import array
+import warnings
+
+class table__l_o_c_a(DefaultTable.DefaultTable):
+	
+	dependencies = ['glyf']
+	
+	def decompile(self, data, ttFont):
+		longFormat = ttFont['head'].indexToLocFormat
+		if longFormat:
+			format = "I"
+		else:
+			format = "H"
+		locations = array.array(format)
+		locations.fromstring(data)
+		if sys.byteorder != "big":
+			locations.byteswap()
+		if not longFormat:
+			l = array.array("I")
+			for i in range(len(locations)):
+				l.append(locations[i] * 2)
+			locations = l
+		if len(locations) < (ttFont['maxp'].numGlyphs + 1):
+			warnings.warn("corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d" % (len(locations) - 1, ttFont['maxp'].numGlyphs))
+		self.locations = locations
+	
+	def compile(self, ttFont):
+		try:
+			max_location = max(self.locations)
+		except AttributeError:
+			self.set([])
+			max_location = 0
+		if max_location < 0x20000:
+			locations = array.array("H")
+			for i in range(len(self.locations)):
+				locations.append(self.locations[i] // 2)
+			ttFont['head'].indexToLocFormat = 0
+		else:
+			locations = array.array("I", self.locations)
+			ttFont['head'].indexToLocFormat = 1
+		if sys.byteorder != "big":
+			locations.byteswap()
+		return locations.tostring()
+	
+	def set(self, locations):
+		self.locations = array.array("I", locations)
+	
+	def toXML(self, writer, ttFont):
+		writer.comment("The 'loca' table will be calculated by the compiler")
+		writer.newline()
+	
+	def __getitem__(self, index):
+		return self.locations[index]
+	
+	def __len__(self):
+		return len(self.locations)
+
diff --git a/Lib/fontTools/ttLib/tables/_m_a_x_p.py b/Lib/fontTools/ttLib/tables/_m_a_x_p.py
new file mode 100644
index 0000000..1089d64
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_m_a_x_p.py
@@ -0,0 +1,141 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+
+maxpFormat_0_5 = """
+		>	# big endian
+		tableVersion:           i
+		numGlyphs:              H
+"""
+
+maxpFormat_1_0_add = """
+		>	# big endian
+		maxPoints:              H
+		maxContours:            H
+		maxCompositePoints:     H
+		maxCompositeContours:   H
+		maxZones:               H
+		maxTwilightPoints:      H
+		maxStorage:             H
+		maxFunctionDefs:        H
+		maxInstructionDefs:     H
+		maxStackElements:       H
+		maxSizeOfInstructions:  H
+		maxComponentElements:   H
+		maxComponentDepth:      H
+"""
+
+
+class table__m_a_x_p(DefaultTable.DefaultTable):
+	
+	dependencies = ['glyf']
+	
+	def decompile(self, data, ttFont):
+		dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
+		self.numGlyphs = int(self.numGlyphs)
+		if self.tableVersion != 0x00005000:
+			dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
+		assert len(data) == 0
+	
+	def compile(self, ttFont):
+		if 'glyf' in ttFont:
+			if ttFont.isLoaded('glyf') and ttFont.recalcBBoxes:
+				self.recalc(ttFont)
+		else:
+			pass  # CFF
+		self.numGlyphs = len(ttFont.getGlyphOrder())
+		if self.tableVersion != 0x00005000:
+			self.tableVersion = 0x00010000
+		data = sstruct.pack(maxpFormat_0_5, self)
+		if self.tableVersion == 0x00010000:
+			data = data + sstruct.pack(maxpFormat_1_0_add, self)
+		return data
+	
+	def recalc(self, ttFont):
+		"""Recalculate the font bounding box, and most other maxp values except
+		for the TT instructions values. Also recalculate the value of bit 1
+		of the flags field and the font bounding box of the 'head' table.
+		"""
+		glyfTable = ttFont['glyf']
+		hmtxTable = ttFont['hmtx']
+		headTable = ttFont['head']
+		self.numGlyphs = len(glyfTable)
+		INFINITY = 100000
+		xMin = +INFINITY
+		yMin = +INFINITY
+		xMax = -INFINITY
+		yMax = -INFINITY
+		maxPoints = 0
+		maxContours = 0
+		maxCompositePoints = 0
+		maxCompositeContours = 0
+		maxComponentElements = 0
+		maxComponentDepth = 0
+		allXMaxIsLsb = 1
+		for glyphName in ttFont.getGlyphOrder():
+			g = glyfTable[glyphName]
+			if g.numberOfContours:
+				if hmtxTable[glyphName][1] != g.xMin:
+					allXMaxIsLsb = 0
+				xMin = min(xMin, g.xMin)
+				yMin = min(yMin, g.yMin)
+				xMax = max(xMax, g.xMax)
+				yMax = max(yMax, g.yMax)
+				if g.numberOfContours > 0:
+					nPoints, nContours = g.getMaxpValues()
+					maxPoints = max(maxPoints, nPoints)
+					maxContours = max(maxContours, nContours)
+				else:
+					nPoints, nContours, componentDepth = g.getCompositeMaxpValues(glyfTable)
+					maxCompositePoints = max(maxCompositePoints, nPoints)
+					maxCompositeContours = max(maxCompositeContours, nContours)
+					maxComponentElements = max(maxComponentElements, len(g.components))
+					maxComponentDepth = max(maxComponentDepth, componentDepth)
+		if xMin == +INFINITY:
+			headTable.xMin = 0
+			headTable.yMin = 0
+			headTable.xMax = 0
+			headTable.yMax = 0
+		else:
+			headTable.xMin = xMin
+			headTable.yMin = yMin
+			headTable.xMax = xMax
+			headTable.yMax = yMax
+		self.maxPoints = maxPoints
+		self.maxContours = maxContours
+		self.maxCompositePoints = maxCompositePoints
+		self.maxCompositeContours = maxCompositeContours
+		self.maxComponentElements = maxComponentElements; self.maxComponentDepth = maxComponentDepth  # was dropped: stale value survived recalc
+		if allXMaxIsLsb:
+			headTable.flags = headTable.flags | 0x2
+		else:
+			headTable.flags = headTable.flags & ~0x2
+	
+	def testrepr(self):
+		items = sorted(self.__dict__.items())
+		print(". . . . . . . . .")
+		for combo in items:
+			print("  %s: %s" % combo)
+		print(". . . . . . . . .")
+	
+	def toXML(self, writer, ttFont):
+		if self.tableVersion != 0x00005000:
+			writer.comment("Most of this table will be recalculated by the compiler")
+			writer.newline()
+		formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
+		if self.tableVersion != 0x00005000:
+			formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
+			names = names + names_1_0
+		for name in names:
+			value = getattr(self, name)
+			if name == "tableVersion":
+				value = hex(value)
+			writer.simpletag(name, value=value)
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		setattr(self, name, safeEval(attrs["value"]))
+		
+
diff --git a/Lib/fontTools/ttLib/tables/_n_a_m_e.py b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
new file mode 100644
index 0000000..53fde4d
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_n_a_m_e.py
@@ -0,0 +1,153 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+import struct
+
+nameRecordFormat = """
+		>	# big endian
+		platformID:	H
+		platEncID:	H
+		langID:		H
+		nameID:		H
+		length:		H
+		offset:		H
+"""
+
+nameRecordSize = sstruct.calcsize(nameRecordFormat)
+
+
+class table__n_a_m_e(DefaultTable.DefaultTable):
+	
+	def decompile(self, data, ttFont):
+		format, n, stringOffset = struct.unpack(">HHH", data[:6])
+		expectedStringOffset = 6 + n * nameRecordSize
+		if stringOffset != expectedStringOffset:
+			# XXX we need a warn function
+			print("Warning: 'name' table stringOffset incorrect. Expected: %s; Actual: %s" % (expectedStringOffset, stringOffset))
+		stringData = data[stringOffset:]
+		data = data[6:]
+		self.names = []
+		for i in range(n):
+			if len(data) < 12:
+				# compensate for buggy font
+				break
+			name, data = sstruct.unpack2(nameRecordFormat, data, NameRecord())
+			name.string = stringData[name.offset:name.offset+name.length]
+			assert len(name.string) == name.length
+			#if (name.platEncID, name.platformID) in ((0, 0), (1, 3)):
+			#	if len(name.string) % 2:
+			#		print "2-byte string doesn't have even length!"
+			#		print name.__dict__
+			del name.offset, name.length
+			self.names.append(name)
+	
+	def compile(self, ttFont):
+		if not hasattr(self, "names"):
+			# only happens when there are NO name table entries read
+			# from the TTX file
+			self.names = []
+		self.names.sort()  # sort according to the spec; see NameRecord.__lt__()
+		stringData = b""
+		format = 0
+		n = len(self.names)
+		stringOffset = 6 + n * sstruct.calcsize(nameRecordFormat)
+		data = struct.pack(">HHH", format, n, stringOffset)
+		lastoffset = 0
+		done = {}  # remember the data so we can reuse the "pointers"
+		for name in self.names:
+			if name.string in done:
+				name.offset, name.length = done[name.string]
+			else:
+				name.offset, name.length = done[name.string] = len(stringData), len(name.string)
+				stringData = stringData + name.string
+			data = data + sstruct.pack(nameRecordFormat, name)
+		return data + stringData
+	
+	def toXML(self, writer, ttFont):
+		for name in self.names:
+			name.toXML(writer, ttFont)
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if name != "namerecord":
+			return # ignore unknown tags
+		if not hasattr(self, "names"):
+			self.names = []
+		name = NameRecord()
+		self.names.append(name)
+		name.fromXML(name, attrs, content, ttFont)
+	
+	def getName(self, nameID, platformID, platEncID, langID=None):
+		for namerecord in self.names:
+			if (	namerecord.nameID == nameID and 
+					namerecord.platformID == platformID and 
+					namerecord.platEncID == platEncID):
+				if langID is None or namerecord.langID == langID:
+					return namerecord
+		return None # not found
+
+
+class NameRecord(object):
+	
+	def isUnicode(self):
+		return (self.platformID == 0 or
+			(self.platformID == 3 and self.platEncID in [0, 1, 10]))
+
+	def toXML(self, writer, ttFont):
+		writer.begintag("namerecord", [
+				("nameID", self.nameID),
+				("platformID", self.platformID),
+				("platEncID", self.platEncID),
+				("langID", hex(self.langID)),
+						])
+		writer.newline()
+		if self.isUnicode():
+			if len(self.string) % 2:
+				# no, shouldn't happen, but some of the Apple
+				# tools cause this anyway :-(
+				writer.write16bit(self.string + b"\0", strip=True)
+			else:
+				writer.write16bit(self.string, strip=True)
+		else:
+			writer.write8bit(self.string, strip=True)
+		writer.newline()
+		writer.endtag("namerecord")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		self.nameID = safeEval(attrs["nameID"])
+		self.platformID = safeEval(attrs["platformID"])
+		self.platEncID = safeEval(attrs["platEncID"])
+		self.langID =  safeEval(attrs["langID"])
+		s = strjoin(content).strip()
+		if self.isUnicode():
+			self.string = s.encode("utf_16_be")
+		else:
+			# This is the inverse of write8bit...
+			self.string = s.encode("latin1")
+	
+	def __lt__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+
+		# implemented so that list.sort() sorts according to the spec.
+		selfTuple = (
+			getattr(self, "platformID", None),
+			getattr(self, "platEncID", None),
+			getattr(self, "langID", None),
+			getattr(self, "nameID", None),
+			getattr(self, "string", None),
+		)
+		otherTuple = (
+			getattr(other, "platformID", None),
+			getattr(other, "platEncID", None),
+			getattr(other, "langID", None),
+			getattr(other, "nameID", None),
+			getattr(other, "string", None),
+		)
+		return selfTuple < otherTuple
+	
+	def __repr__(self):
+		return "<NameRecord NameID=%d; PlatformID=%d; LanguageID=%d>" % (
+				self.nameID, self.platformID, self.langID)
diff --git a/Lib/fontTools/ttLib/tables/_p_o_s_t.py b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
new file mode 100644
index 0000000..248983f
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_p_o_s_t.py
@@ -0,0 +1,272 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools import ttLib
+from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval, readHex
+from . import DefaultTable
+import sys
+import struct
+import array
+
+
+postFormat = """
+	>
+	formatType:			16.16F
+	italicAngle:		16.16F		# italic angle in degrees			
+	underlinePosition:	h
+	underlineThickness:	h
+	isFixedPitch:		L
+	minMemType42:		L			# minimum memory if TrueType font is downloaded
+	maxMemType42:		L			# maximum memory if TrueType font is downloaded
+	minMemType1:		L			# minimum memory if Type1 font is downloaded
+	maxMemType1:		L			# maximum memory if Type1 font is downloaded
+"""
+
+postFormatSize = sstruct.calcsize(postFormat)
+
+
+class table__p_o_s_t(DefaultTable.DefaultTable):
+	
+	def decompile(self, data, ttFont):
+		sstruct.unpack(postFormat, data[:postFormatSize], self)
+		data = data[postFormatSize:]
+		if self.formatType == 1.0:
+			self.decode_format_1_0(data, ttFont)
+		elif self.formatType == 2.0:
+			self.decode_format_2_0(data, ttFont)
+		elif self.formatType == 3.0:
+			self.decode_format_3_0(data, ttFont)
+		elif self.formatType == 4.0:
+			self.decode_format_4_0(data, ttFont)
+		else:
+			# unsupported format
+			raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
+	
+	def compile(self, ttFont):
+		data = sstruct.pack(postFormat, self)
+		if self.formatType == 1.0:
+			pass # we're done
+		elif self.formatType == 2.0:
+			data = data + self.encode_format_2_0(ttFont)
+		elif self.formatType == 3.0:
+			pass # we're done
+		elif self.formatType == 4.0:
+			data = data + self.encode_format_4_0(ttFont)
+		else:
+			# unsupported format
+			raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
+		return data
+	
+	def getGlyphOrder(self):
+		"""This function will get called by a ttLib.TTFont instance.
+		Do not call this function yourself, use TTFont().getGlyphOrder()
+		or its relatives instead!
+		"""
+		if not hasattr(self, "glyphOrder"):
+			raise ttLib.TTLibError("illegal use of getGlyphOrder()")
+		glyphOrder = self.glyphOrder
+		del self.glyphOrder
+		return glyphOrder
+	
+	def decode_format_1_0(self, data, ttFont):
+		self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs]
+	
+	def decode_format_2_0(self, data, ttFont):
+		numGlyphs, = struct.unpack(">H", data[:2])
+		numGlyphs = int(numGlyphs)
+		if numGlyphs > ttFont['maxp'].numGlyphs:
+			# Assume the numGlyphs field is bogus, so sync with maxp.
+			# I've seen this in one font, and if the assumption is
+			# wrong elsewhere, well, so be it: it's hard enough to
+			# work around _one_ non-conforming post format...
+			numGlyphs = ttFont['maxp'].numGlyphs
+		data = data[2:]
+		indices = array.array("H")
+		indices.fromstring(data[:2*numGlyphs])
+		if sys.byteorder != "big":
+			indices.byteswap()
+		data = data[2*numGlyphs:]
+		self.extraNames = extraNames = unpackPStrings(data)
+		self.glyphOrder = glyphOrder = [None] * int(ttFont['maxp'].numGlyphs)
+		for glyphID in range(numGlyphs):
+			index = indices[glyphID]
+			if index > 257:
+				name = extraNames[index-258]
+			else:
+				# fetch names from standard list
+				name = standardGlyphOrder[index]
+			glyphOrder[glyphID] = name
+		#AL990511: code added to handle the case of new glyphs without
+		#          entries into the 'post' table
+		if numGlyphs < ttFont['maxp'].numGlyphs:
+			for i in range(numGlyphs, ttFont['maxp'].numGlyphs):
+				glyphOrder[i] = "glyph#%.5d" % i
+				self.extraNames.append(glyphOrder[i])
+		self.build_psNameMapping(ttFont)
+	
+	def build_psNameMapping(self, ttFont):
+		mapping = {}
+		allNames = {}
+		for i in range(ttFont['maxp'].numGlyphs):
+			glyphName = psName = self.glyphOrder[i]
+			if glyphName in allNames:
+				# make up a new glyphName that's unique
+				n = allNames[glyphName]
+				allNames[glyphName] = n + 1
+				glyphName = glyphName + "#" + repr(n)
+				self.glyphOrder[i] = glyphName
+				mapping[glyphName] = psName
+			else:
+				allNames[glyphName] = 1
+		self.mapping = mapping
+	
+	def decode_format_3_0(self, data, ttFont):
+		# Setting self.glyphOrder to None will cause the TTFont object
+		# try and construct glyph names from a Unicode cmap table.
+		self.glyphOrder = None
+	
+	def decode_format_4_0(self, data, ttFont):
+		from fontTools import agl
+		numGlyphs = ttFont['maxp'].numGlyphs
+		indices = array.array("H")
+		indices.fromstring(data)
+		if sys.byteorder != "big":
+			indices.byteswap()
+		# In some older fonts, the size of the post table doesn't match
+		# the number of glyphs. Sometimes it's bigger, sometimes smaller.
+		self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
+		for i in range(min(len(indices),numGlyphs)):
+			if indices[i] == 0xFFFF:
+				self.glyphOrder[i] = ''
+			elif indices[i] in agl.UV2AGL:
+				self.glyphOrder[i] = agl.UV2AGL[indices[i]]
+			else:
+				self.glyphOrder[i] = "uni%04X" % indices[i]
+		self.build_psNameMapping(ttFont)
+
+	def encode_format_2_0(self, ttFont):
+		numGlyphs = ttFont['maxp'].numGlyphs
+		glyphOrder = ttFont.getGlyphOrder()
+		assert len(glyphOrder) == numGlyphs
+		indices = array.array("H")
+		extraDict = {}
+		extraNames = self.extraNames
+		for i in range(len(extraNames)):
+			extraDict[extraNames[i]] = i
+		for glyphID in range(numGlyphs):
+			glyphName = glyphOrder[glyphID]
+			if glyphName in self.mapping:
+				psName = self.mapping[glyphName]
+			else:
+				psName = glyphName
+			if psName in extraDict:
+				index = 258 + extraDict[psName]
+			elif psName in standardGlyphOrder:
+				index = standardGlyphOrder.index(psName)
+			else:
+				index = 258 + len(extraNames)
+				extraDict[psName] = len(extraNames)
+				extraNames.append(psName)
+			indices.append(index)
+		if sys.byteorder != "big":
+			indices.byteswap()
+		return struct.pack(">H", numGlyphs) + indices.tostring() + packPStrings(extraNames)
+	
+	def encode_format_4_0(self, ttFont):
+		from fontTools import agl
+		numGlyphs = ttFont['maxp'].numGlyphs
+		glyphOrder = ttFont.getGlyphOrder()
+		assert len(glyphOrder) == numGlyphs
+		indices = array.array("H")
+		for glyphID in glyphOrder:
+			glyphID = glyphID.split('#')[0]
+			if glyphID in agl.AGL2UV:
+				indices.append(agl.AGL2UV[glyphID])
+			elif len(glyphID) == 7 and glyphID[:3] == 'uni':
+				indices.append(int(glyphID[3:],16))
+			else:
+				indices.append(0xFFFF)
+		if sys.byteorder != "big":
+			indices.byteswap()
+		return indices.tostring()
+
+	def toXML(self, writer, ttFont):
+		formatstring, names, fixes = sstruct.getformat(postFormat)
+		for name in names:
+			value = getattr(self, name)
+			writer.simpletag(name, value=value)
+			writer.newline()
+		if hasattr(self, "mapping"):
+			writer.begintag("psNames")
+			writer.newline()
+			writer.comment("This file uses unique glyph names based on the information\n"
+						"found in the 'post' table. Since these names might not be unique,\n"
+						"we have to invent artificial names in case of clashes. In order to\n"
+						"be able to retain the original information, we need a name to\n"
+						"ps name mapping for those cases where they differ. That's what\n"
+						"you see below.\n")
+			writer.newline()
+			items = sorted(self.mapping.items())
+			for name, psName in items:
+				writer.simpletag("psName", name=name, psName=psName)
+				writer.newline()
+			writer.endtag("psNames")
+			writer.newline()
+		if hasattr(self, "extraNames"):
+			writer.begintag("extraNames")
+			writer.newline()
+			writer.comment("following are the name that are not taken from the standard Mac glyph order")
+			writer.newline()
+			for name in self.extraNames:
+				writer.simpletag("psName", name=name)
+				writer.newline()
+			writer.endtag("extraNames")
+			writer.newline()
+		if hasattr(self, "data"):
+			writer.begintag("hexdata")
+			writer.newline()
+			writer.dumphex(self.data)
+			writer.endtag("hexdata")
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		if name not in ("psNames", "extraNames", "hexdata"):
+			setattr(self, name, safeEval(attrs["value"]))
+		elif name == "psNames":
+			self.mapping = {}
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				if name == "psName":
+					self.mapping[attrs["name"]] = attrs["psName"]
+		elif name == "extraNames":
+			self.extraNames = []
+			for element in content:
+				if not isinstance(element, tuple):
+					continue
+				name, attrs, content = element
+				if name == "psName":
+					self.extraNames.append(attrs["name"])
+		else:
+			self.data = readHex(content)
+
+
+def unpackPStrings(data):
+	strings = []
+	index = 0
+	dataLen = len(data)
+	while index < dataLen:
+		length = byteord(data[index])
+		strings.append(tostr(data[index+1:index+1+length], encoding="latin1"))
+		index = index + 1 + length
+	return strings
+
+
+def packPStrings(strings):
+	data = b""
+	for s in strings:
+		data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
+	return data
+
diff --git a/Lib/fontTools/ttLib/tables/_p_r_e_p.py b/Lib/fontTools/ttLib/tables/_p_r_e_p.py
new file mode 100644
index 0000000..fc92665
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_p_r_e_p.py
@@ -0,0 +1,7 @@
+from fontTools import ttLib
+
+superclass = ttLib.getTableClass("fpgm")
+
+class table__p_r_e_p(superclass):
+	pass
+
diff --git a/Lib/fontTools/ttLib/tables/_s_b_i_x.py b/Lib/fontTools/ttLib/tables/_s_b_i_x.py
new file mode 100644
index 0000000..23cb6df
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_s_b_i_x.py
@@ -0,0 +1,142 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import readHex
+from . import DefaultTable
+from .sbixBitmap import *
+from .sbixBitmapSet import *
+import struct
+
+"""
+sbix Table organization:
+
+USHORT        version?
+USHORT        version?
+USHORT        count                    number of bitmap sets
+offsetEntry   offsetEntry[count]       offsetEntries
+(Variable)    storage for bitmap sets
+
+
+offsetEntry:
+
+ULONG         offset                   offset from table start to bitmap set
+
+
+bitmap set:
+
+USHORT        size                     height and width in pixels
+USHORT        resolution               ?
+offsetRecord  offsetRecord[]
+(Variable)    storage for bitmaps
+
+
+offsetRecord:
+
+ULONG         bitmapOffset             offset from start of bitmap set to individual bitmap
+
+
+bitmap:
+
+ULONG         reserved                 00 00 00 00
+char[4]       format                   data type, e.g. "png "
+(Variable)    bitmap data
+"""
+
+sbixHeaderFormat = """
+	>
+	usVal1:          H    # 00 01
+	usVal2:          H    #       00 01
+	numSets:         L    # 00 00 00 02 # number of bitmap sets
+"""
+sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat)
+
+
+sbixBitmapSetOffsetFormat = """
+	>
+	offset:          L    # 00 00 00 10 # offset from table start to each bitmap set
+"""
+sbixBitmapSetOffsetFormatSize = sstruct.calcsize(sbixBitmapSetOffsetFormat)
+
+
+class table__s_b_i_x(DefaultTable.DefaultTable):
+	def __init__(self, tag):
+		self.tableTag = tag
+		self.usVal1 = 1
+		self.usVal2 = 1
+		self.numSets = 0
+		self.bitmapSets = {}
+		self.bitmapSetOffsets = []
+
+	def decompile(self, data, ttFont):
+		# read table header
+		sstruct.unpack(sbixHeaderFormat, data[ : sbixHeaderFormatSize], self)
+		# collect offsets to individual bitmap sets in self.bitmapSetOffsets
+		for i in range(self.numSets):
+			myOffset = sbixHeaderFormatSize + i * sbixBitmapSetOffsetFormatSize
+			offsetEntry = sbixBitmapSetOffset()
+			sstruct.unpack(sbixBitmapSetOffsetFormat, \
+				data[myOffset : myOffset+sbixBitmapSetOffsetFormatSize], \
+				offsetEntry)
+			self.bitmapSetOffsets.append(offsetEntry.offset)
+
+		# decompile BitmapSets
+		for i in range(self.numSets-1, -1, -1):
+			myBitmapSet = BitmapSet(rawdata=data[self.bitmapSetOffsets[i]:])
+			data = data[:self.bitmapSetOffsets[i]]
+			myBitmapSet.decompile(ttFont)
+			#print "  BitmapSet length: %xh" % len(bitmapSetData)
+			#print "Number of Bitmaps:", myBitmapSet.numBitmaps
+			if myBitmapSet.size in self.bitmapSets:
+				from fontTools import ttLib
+				raise ttLib.TTLibError("Pixel 'size' must be unique for each BitmapSet")
+			self.bitmapSets[myBitmapSet.size] = myBitmapSet
+
+		# after the bitmaps have been extracted, we don't need the offsets anymore
+		del self.bitmapSetOffsets
+
+	def compile(self, ttFont):
+		sbixData = ""
+		self.numSets = len(self.bitmapSets)
+		sbixHeader = sstruct.pack(sbixHeaderFormat, self)
+
+		# calculate offset to start of first bitmap set
+		setOffset = sbixHeaderFormatSize + sbixBitmapSetOffsetFormatSize * self.numSets
+
+		for si in sorted(self.bitmapSets.keys()):
+			myBitmapSet = self.bitmapSets[si]
+			myBitmapSet.compile(ttFont)
+			# append offset to this bitmap set to table header
+			myBitmapSet.offset = setOffset
+			sbixHeader += sstruct.pack(sbixBitmapSetOffsetFormat, myBitmapSet)
+			setOffset += sbixBitmapSetHeaderFormatSize + len(myBitmapSet.data)
+			sbixData += myBitmapSet.data
+
+		return sbixHeader + sbixData
+
+	def toXML(self, xmlWriter, ttFont):
+		xmlWriter.simpletag("usVal1", value=self.usVal1)
+		xmlWriter.newline()
+		xmlWriter.simpletag("usVal2", value=self.usVal2)
+		xmlWriter.newline()
+		for i in sorted(self.bitmapSets.keys()):
+			self.bitmapSets[i].toXML(xmlWriter, ttFont)
+
+	def fromXML(self, name, attrs, content, ttFont):
+		if name in ["usVal1", "usVal2"]:
+			setattr(self, name, int(attrs["value"]))
+		elif name == "bitmapSet":
+			myBitmapSet = BitmapSet()
+			for element in content:
+				if isinstance(element, tuple):
+					name, attrs, content = element
+					myBitmapSet.fromXML(name, attrs, content, ttFont)
+			self.bitmapSets[myBitmapSet.size] = myBitmapSet
+		else:
+			from fontTools import ttLib
+			raise ttLib.TTLibError("can't handle '%s' element" % name)
+
+
+# Helper classes
+
+class sbixBitmapSetOffset(object):
+	pass
diff --git a/Lib/fontTools/ttLib/tables/_v_h_e_a.py b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
new file mode 100644
index 0000000..8131ad3
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_v_h_e_a.py
@@ -0,0 +1,78 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import safeEval
+from . import DefaultTable
+
+vheaFormat = """
+		>	# big endian
+		tableVersion:			16.16F
+		ascent:					h
+		descent:				h
+		lineGap:				h
+		advanceHeightMax:		H
+		minTopSideBearing:		h
+		minBottomSideBearing:	h
+		yMaxExtent:				h
+		caretSlopeRise:			h
+		caretSlopeRun:			h
+		reserved0:				h
+		reserved1:				h
+		reserved2:				h
+		reserved3:				h
+		reserved4:				h
+		metricDataFormat:		h
+		numberOfVMetrics:		H
+"""
+
+class table__v_h_e_a(DefaultTable.DefaultTable):
+	
+	dependencies = ['vmtx', 'glyf']
+	
+	def decompile(self, data, ttFont):
+		sstruct.unpack(vheaFormat, data, self)
+	
+	def compile(self, ttFont):
+		self.recalc(ttFont)
+		return sstruct.pack(vheaFormat, self)
+	
+	def recalc(self, ttFont):
+		vtmxTable = ttFont['vmtx']
+		if 'glyf' in ttFont:
+			if not ttFont.isLoaded('glyf'):
+				return
+			glyfTable = ttFont['glyf']
+			advanceHeightMax = -100000    # arbitrary big negative number
+			minTopSideBearing = 100000    # arbitrary big number
+			minBottomSideBearing = 100000 # arbitrary big number
+			yMaxExtent = -100000          # arbitrary big negative number
+			
+			for name in ttFont.getGlyphOrder():
+				height, tsb = vtmxTable[name]
+				g = glyfTable[name]
+				if g.numberOfContours <= 0:
+					continue
+				advanceHeightMax = max(advanceHeightMax, height)
+				minTopSideBearing = min(minTopSideBearing, tsb)
+				rsb = height - tsb - (g.yMax - g.yMin)
+				minBottomSideBearing = min(minBottomSideBearing, rsb)
+				extent = tsb + (g.yMax - g.yMin)
+				yMaxExtent = max(yMaxExtent, extent)
+			self.advanceHeightMax = advanceHeightMax
+			self.minTopSideBearing = minTopSideBearing
+			self.minBottomSideBearing = minBottomSideBearing
+			self.yMaxExtent = yMaxExtent
+		else:
+			# XXX CFF recalc...
+			pass
+	
+	def toXML(self, writer, ttFont):
+		formatstring, names, fixes = sstruct.getformat(vheaFormat)
+		for name in names:
+			value = getattr(self, name)
+			writer.simpletag(name, value=value)
+			writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		setattr(self, name, safeEval(attrs["value"]))
+
diff --git a/Lib/fontTools/ttLib/tables/_v_m_t_x.py b/Lib/fontTools/ttLib/tables/_v_m_t_x.py
new file mode 100644
index 0000000..c204de6
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/_v_m_t_x.py
@@ -0,0 +1,10 @@
+from fontTools import ttLib
+
+superclass = ttLib.getTableClass("hmtx")
+
+class table__v_m_t_x(superclass):
+	
+	headerTag = 'vhea'
+	advanceName = 'height'
+	sideBearingName = 'tsb'
+	numberOfMetricsName = 'numberOfVMetrics'
diff --git a/Lib/fontTools/ttLib/tables/asciiTable.py b/Lib/fontTools/ttLib/tables/asciiTable.py
new file mode 100644
index 0000000..e5f3136
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/asciiTable.py
@@ -0,0 +1,23 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from . import DefaultTable
+
+
+class asciiTable(DefaultTable.DefaultTable):
+	
+	def toXML(self, writer, ttFont):
+		data = tostr(self.data)
+		# removing null bytes. XXX needed??
+		data = data.split('\0')
+		data = strjoin(data)
+		writer.begintag("source")
+		writer.newline()
+		writer.write_noindent(data.replace("\r", "\n"))
+		writer.newline()
+		writer.endtag("source")
+		writer.newline()
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		lines = strjoin(content).replace("\r", "\n").split("\n")
+		self.data = tobytes("\r".join(lines[1:-1]))
+
diff --git a/Lib/fontTools/ttLib/tables/otBase.py b/Lib/fontTools/ttLib/tables/otBase.py
new file mode 100644
index 0000000..cbe574a
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/otBase.py
@@ -0,0 +1,864 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from .DefaultTable import DefaultTable
+import struct
+
+class OverflowErrorRecord(object):
+	def __init__(self, overflowTuple):
+		self.tableType = overflowTuple[0]
+		self.LookupListIndex = overflowTuple[1]
+		self.SubTableIndex = overflowTuple[2]
+		self.itemName = overflowTuple[3]
+		self.itemIndex = overflowTuple[4]
+
+	def __repr__(self):
+		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))
+
+class OTLOffsetOverflowError(Exception):
+	def __init__(self, overflowErrorRecord):
+		self.value = overflowErrorRecord
+
+	def __str__(self):
+		return repr(self.value)
+
+
+class BaseTTXConverter(DefaultTable):
+	
+	"""Generic base class for TTX table converters. It functions as an
+	adapter between the TTX (ttLib actually) table model and the model
+	we use for OpenType tables, which is necessarily subtly different.
+	"""
+	
+	def decompile(self, data, font):
+		from . import otTables
+		cachingStats = None if True else {}
+		class GlobalState(object):
+			def __init__(self, tableType, cachingStats):
+				self.tableType = tableType
+				self.cachingStats = cachingStats
+		globalState = GlobalState(tableType=self.tableTag,
+					  cachingStats=cachingStats)
+		reader = OTTableReader(data, globalState)
+		tableClass = getattr(otTables, self.tableTag)
+		self.table = tableClass()
+		self.table.decompile(reader, font)
+		if cachingStats:
+			stats = sorted([(v, k) for k, v in cachingStats.items()])
+			stats.reverse()
+			print("cachingsstats for ", self.tableTag)
+			for v, k in stats:
+				if v < 2:
+					break
+				print(v, k)
+			print("---", len(stats))
+	
+	def compile(self, font):
+		""" Create a top-level OTFWriter for the GPOS/GSUB table.
+			Call the compile method for the table
+				for each 'converter' record in the table converter list
+					call converter's write method for each item in the value. 
+						- For simple items, the write method adds a string to the
+						writer's self.items list. 
+						- For Struct/Table/Subtable items, it first adds a new writer
+						to the writer's self.items, then calls the item's compile method.
+						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
+						each writer representing a table, and the writer.items list containing
+						the child data strings and writers.
+			call the getAllData method
+				call _doneWriting, which removes duplicates
+				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
+				Traverse the flat list of tables, calling getDataLength on each to update their position
+				Traverse the flat list of tables again, calling getData on each to get the data in the table, now that
+				pos's and offset are known.
+
+				If a lookup subtable overflows an offset, we have to start all over. 
+		"""
+		class GlobalState(object):
+			def __init__(self, tableType):
+				self.tableType = tableType
+		globalState = GlobalState(tableType=self.tableTag)
+		overflowRecord = None
+
+		while True:
+			try:
+				writer = OTTableWriter(globalState)
+				self.table.compile(writer, font)
+				return writer.getAllData()
+
+			except OTLOffsetOverflowError as e:
+
+				if overflowRecord == e.value:
+					raise # Oh well...
+
+				overflowRecord = e.value
+				print("Attempting to fix OTLOffsetOverflowError", e)
+				lastItem = overflowRecord
+
+				ok = 0
+				if overflowRecord.itemName is None:
+					from .otTables import fixLookupOverFlows
+					ok = fixLookupOverFlows(font, overflowRecord)
+				else:
+					from .otTables import fixSubTableOverFlows
+					ok = fixSubTableOverFlows(font, overflowRecord)
+				if not ok:
+					raise
+
+	def toXML(self, writer, font):
+		self.table.toXML2(writer, font)
+	
+	def fromXML(self, name, attrs, content, font):
+		from . import otTables
+		if not hasattr(self, "table"):
+			tableClass = getattr(otTables, self.tableTag)
+			self.table = tableClass()
+		self.table.fromXML(name, attrs, content, font)
+
+
+class OTTableReader(object):
+
+	"""Helper class to retrieve data from an OpenType table."""
+
+	__slots__ = ('data', 'offset', 'pos', 'globalState', 'localState')
+
+	def __init__(self, data, globalState={}, localState=None, offset=0):
+		self.data = data
+		self.offset = offset
+		self.pos = offset
+		self.globalState = globalState
+		self.localState = localState
+
+	def getSubReader(self, offset):
+		offset = self.offset + offset
+		cachingStats = self.globalState.cachingStats
+		if cachingStats is not None:
+			cachingStats[offset] = cachingStats.get(offset, 0) + 1
+		return self.__class__(self.data, self.globalState, self.localState, offset)
+
+	def readUShort(self):
+		pos = self.pos
+		newpos = pos + 2
+		value, = struct.unpack(">H", self.data[pos:newpos])
+		self.pos = newpos
+		return value
+
+	def readShort(self):
+		pos = self.pos
+		newpos = pos + 2
+		value, = struct.unpack(">h", self.data[pos:newpos])
+		self.pos = newpos
+		return value
+
+	def readLong(self):
+		pos = self.pos
+		newpos = pos + 4
+		value, = struct.unpack(">l", self.data[pos:newpos])
+		self.pos = newpos
+		return value
+
+	def readUInt24(self):
+		pos = self.pos
+		newpos = pos + 3
+		value, = struct.unpack(">l", b'\0'+self.data[pos:newpos])
+		self.pos = newpos
+		return value
+
+	def readULong(self):
+		pos = self.pos
+		newpos = pos + 4
+		value, = struct.unpack(">L", self.data[pos:newpos])
+		self.pos = newpos
+		return value
+	
+	def readTag(self):
+		pos = self.pos
+		newpos = pos + 4
+		value = Tag(self.data[pos:newpos])
+		assert len(value) == 4
+		self.pos = newpos
+		return value
+
+	def __setitem__(self, name, value):
+		state = self.localState.copy() if self.localState else dict()
+		state[name] = value
+		self.localState = state
+
+	def __getitem__(self, name):
+		return self.localState[name]
+
+
+class OTTableWriter(object):
+	
+	"""Helper class to gather and assemble data for OpenType tables."""
+	
+	def __init__(self, globalState, localState=None):
+		self.items = []
+		self.pos = None
+		self.globalState = globalState
+		self.localState = localState
+		self.parent = None
+
+	def __setitem__(self, name, value):
+		state = self.localState.copy() if self.localState else dict()
+		state[name] = value
+		self.localState = state
+
+	def __getitem__(self, name):
+		return self.localState[name]
+
+	# assembler interface
+	
+	def getAllData(self):
+		"""Assemble all data, including all subtables."""
+		self._doneWriting()
+		tables, extTables = self._gatherTables()
+		tables.reverse()
+		extTables.reverse()
+		# Gather all data in two passes: the absolute positions of all
+		# subtables are needed before the actual data can be assembled.
+		pos = 0
+		for table in tables:
+			table.pos = pos
+			pos = pos + table.getDataLength()
+
+		for table in extTables:
+			table.pos = pos
+			pos = pos + table.getDataLength()
+
+
+		data = []
+		for table in tables:
+			tableData = table.getData()
+			data.append(tableData)
+
+		for table in extTables:
+			tableData = table.getData()
+			data.append(tableData)
+
+		return bytesjoin(data)
+	
+	def getDataLength(self):
+		"""Return the length of this table in bytes, without subtables."""
+		l = 0
+		for item in self.items:
+			if hasattr(item, "getData") or hasattr(item, "getCountData"):
+				if item.longOffset:
+					l = l + 4  # sizeof(ULong)
+				else:
+					l = l + 2  # sizeof(UShort)
+			else:
+				l = l + len(item)
+		return l
+	
+	def getData(self):
+		"""Assemble the data for this writer/table, without subtables."""
+		items = list(self.items)  # make a shallow copy
+		pos = self.pos
+		numItems = len(items)
+		for i in range(numItems):
+			item = items[i]
+			
+			if hasattr(item, "getData"):
+				if item.longOffset:
+					items[i] = packULong(item.pos - pos)
+				else:
+					try:
+						items[i] = packUShort(item.pos - pos)
+					except struct.error:
+						# provide data to fix overflow problem.
+						# If the overflow is to a lookup, or from a lookup to a subtable,
+						# just report the current item.  Otherwise...
+						if self.name not in [ 'LookupList', 'Lookup']:
+							# overflow is within a subTable. Life is more complicated.
+							# If we split the sub-table just before the current item, we may still suffer overflow.
+							# This is because duplicate table merging is done only within an Extension subTable tree;
+							# when we split the subtable in two, some items may no longer be duplicates. 
+							# Get worst case by adding up all the item lengths, depth first traversal.
+							# and then report the first item that overflows a short.
+							def getDeepItemLength(table):
+								if hasattr(table, "getDataLength"):
+									length = 0
+									for item in table.items:
+										length = length + getDeepItemLength(item)
+								else:
+									length = len(table)
+								return length
+	
+							length = self.getDataLength()
+							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
+								# Coverage is first in the item list, but last in the table list,
+								# The original overflow is really in the item list. Skip the Coverage 
+								# table in the following test.
+								items = items[i+1:]
+	
+							for j in range(len(items)):
+								item = items[j]
+								length = length + getDeepItemLength(item)
+								if length > 65535:
+									break
+						overflowErrorRecord = self.getOverflowErrorRecord(item)
+						
+						
+						raise OTLOffsetOverflowError(overflowErrorRecord)
+
+		return bytesjoin(items)
+	
+	def __hash__(self):
+		# only works after self._doneWriting() has been called
+		return hash(self.items)
+	
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+		return self.items == other.items
+	
+	def _doneWriting(self, internedTables=None):
+		# Convert CountData references to data string items
+		# collapse duplicate table references to a unique entry
+		# "tables" are OTTableWriter objects.
+
+		# For Extension Lookup types, we can
+		# eliminate duplicates only within the tree under the Extension Lookup,
+		# as offsets may exceed 64K even between Extension LookupTable subtables.
+		if internedTables is None:
+			internedTables = {}
+		items = self.items
+		iRange = list(range(len(items)))
+		
+		if hasattr(self, "Extension"):
+			newTree = 1
+		else:
+			newTree = 0
+		for i in iRange:
+			item = items[i]
+			if hasattr(item, "getCountData"):
+				items[i] = item.getCountData()
+			elif hasattr(item, "getData"):
+				if newTree:
+					item._doneWriting()
+				else:
+					item._doneWriting(internedTables)
+					internedItem = internedTables.get(item)
+					if internedItem:
+						items[i] = item = internedItem
+					else:
+						internedTables[item] = item
+		self.items = tuple(items)
+	
+	def _gatherTables(self, tables=None, extTables=None, done=None):
+		# Convert table references in self.items tree to a flat
+		# list of tables in depth-first traversal order.
+		# "tables" are OTTableWriter objects.
+		# We do the traversal in reverse order at each level, in order to 
+		# resolve duplicate references to be the last reference in the list of tables.
+		# For extension lookups, duplicate references can be merged only within the
+		# writer tree under the  extension lookup.
+		if tables is None: # init call for first time.
+			tables = []
+			extTables = []
+			done = {}
+
+		done[self] = 1
+
+		numItems = len(self.items)
+		iRange = list(range(numItems))
+		iRange.reverse()
+
+		if hasattr(self, "Extension"):
+			appendExtensions = 1
+		else:
+			appendExtensions = 0
+
+		# add Coverage table if it is sorted last.
+		sortCoverageLast = 0
+		if hasattr(self, "sortCoverageLast"):
+			# Find coverage table
+			for i in range(numItems):
+				item = self.items[i]
+				if hasattr(item, "name") and (item.name == "Coverage"):
+					sortCoverageLast = 1
+					break
+			if item not in done:
+				item._gatherTables(tables, extTables, done)
+			else:
+				# We're a new parent of item
+				pass
+
+		for i in iRange:
+			item = self.items[i]
+			if not hasattr(item, "getData"):
+				continue
+
+			if sortCoverageLast and (i==1) and item.name == 'Coverage':
+				# we've already 'gathered' it above
+				continue
+
+			if appendExtensions:
+				assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
+				newDone = {}
+				item._gatherTables(extTables, None, newDone)
+
+			elif item not in done:
+				item._gatherTables(tables, extTables, done)
+			else:
+				# We're a new parent of item
+				pass
+
+
+		tables.append(self)
+		return tables, extTables
+	
+	# interface for gathering data, as used by table.compile()
+	
+	def getSubWriter(self):
+		subwriter = self.__class__(self.globalState, self.localState)
+		subwriter.parent = self # because some subtables have identical values, we discard
+					# the duplicates under the getAllData method. Hence some
+					# subtable writers can have more than one parent writer.
+					# But we just care about first one right now.
+		return subwriter
+	
+	def writeUShort(self, value):
+		assert 0 <= value < 0x10000
+		self.items.append(struct.pack(">H", value))
+	
+	def writeShort(self, value):
+		self.items.append(struct.pack(">h", value))
+
+	def writeUInt24(self, value):
+		assert 0 <= value < 0x1000000
+		b = struct.pack(">L", value)
+		self.items.append(b[1:])
+	
+	def writeLong(self, value):
+		self.items.append(struct.pack(">l", value))
+	
+	def writeULong(self, value):
+		self.items.append(struct.pack(">L", value))
+	
+	def writeTag(self, tag):
+		tag = Tag(tag).tobytes()
+		assert len(tag) == 4
+		self.items.append(tag)
+	
+	def writeSubTable(self, subWriter):
+		self.items.append(subWriter)
+	
+	def writeCountReference(self, table, name):
+		ref = CountReference(table, name)
+		self.items.append(ref)
+		return ref
+	
+	def writeStruct(self, format, values):
+		data = struct.pack(*(format,) + values)
+		self.items.append(data)
+	
+	def writeData(self, data):
+		self.items.append(data)
+
+	def	getOverflowErrorRecord(self, item):
+		LookupListIndex = SubTableIndex = itemName = itemIndex = None
+		if self.name == 'LookupList':
+			LookupListIndex = item.repeatIndex
+		elif self.name == 'Lookup':
+			LookupListIndex = self.repeatIndex
+			SubTableIndex = item.repeatIndex
+		else:
+			itemName = item.name
+			if hasattr(item, 'repeatIndex'):
+				itemIndex = item.repeatIndex
+			if self.name == 'SubTable':
+				LookupListIndex = self.parent.repeatIndex
+				SubTableIndex = self.repeatIndex
+			elif self.name == 'ExtSubTable':
+				LookupListIndex = self.parent.parent.repeatIndex
+				SubTableIndex = self.parent.repeatIndex
+			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
+				itemName = ".".join([self.name, item.name])
+				p1 = self.parent
+				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
+					itemName = ".".join([p1.name, item.name])
+					p1 = p1.parent
+				if p1:
+					if p1.name == 'ExtSubTable':
+						LookupListIndex = p1.parent.parent.repeatIndex
+						SubTableIndex = p1.parent.repeatIndex
+					else:
+						LookupListIndex = p1.parent.repeatIndex
+						SubTableIndex = p1.repeatIndex
+
+		return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )
+
+
+class CountReference(object):
+	"""A reference to a Count value, not a count of references."""
+	def __init__(self, table, name):
+		self.table = table
+		self.name = name
+	def setValue(self, value):
+		table = self.table
+		name = self.name
+		if table[name] is None:
+			table[name] = value
+		else:
+			assert table[name] == value, (name, table[name], value)
+	def getCountData(self):
+		return packUShort(self.table[self.name])
+
+
+def packUShort(value):
+	return struct.pack(">H", value)
+
+
+def packULong(value):
+	assert 0 <= value < 0x100000000, value
+	return struct.pack(">L", value)
+
+
+class BaseTable(object):
+
+	def __getattr__(self, attr):
+		reader = self.__dict__.get("reader")
+		if reader:
+			del self.reader
+			font = self.font
+			del self.font
+			self.decompile(reader, font)
+			return getattr(self, attr)
+
+		raise AttributeError(attr)
+
+	"""Generic base class for all OpenType (sub)tables."""
+	
+	def getConverters(self):
+		return self.converters
+	
+	def getConverterByName(self, name):
+		return self.convertersByName[name]
+	
+	def decompile(self, reader, font):
+		self.readFormat(reader)
+		table = {}
+		self.__rawTable = table  # for debugging
+		converters = self.getConverters()
+		for conv in converters:
+			if conv.name == "SubTable":
+				conv = conv.getConverter(reader.globalState.tableType,
+						table["LookupType"])
+			if conv.name == "ExtSubTable":
+				conv = conv.getConverter(reader.globalState.tableType,
+						table["ExtensionLookupType"])
+			if conv.name == "FeatureParams":
+				conv = conv.getConverter(reader["FeatureTag"])
+			if conv.repeat:
+				l = []
+				if conv.repeat in table:
+					countValue = table[conv.repeat]
+				else:
+					# conv.repeat is a propagated count
+					countValue = reader[conv.repeat]
+				for i in range(countValue + conv.aux):
+					l.append(conv.read(reader, font, table))
+				table[conv.name] = l
+			else:
+				if conv.aux and not eval(conv.aux, None, table):
+					continue
+				table[conv.name] = conv.read(reader, font, table)
+				if conv.isPropagated:
+					reader[conv.name] = table[conv.name]
+
+		self.postRead(table, font)
+
+		del self.__rawTable  # succeeded, get rid of debugging info
+
+	def ensureDecompiled(self):
+		reader = self.__dict__.get("reader")
+		if reader:
+			del self.reader
+			font = self.font
+			del self.font
+			self.decompile(reader, font)
+
+	def compile(self, writer, font):
+		self.ensureDecompiled()
+		table = self.preWrite(font)
+
+		if hasattr(self, 'sortCoverageLast'):
+			writer.sortCoverageLast = 1
+
+		if hasattr(self.__class__, 'LookupType'):
+			writer['LookupType'].setValue(self.__class__.LookupType)
+
+		self.writeFormat(writer)
+		for conv in self.getConverters():
+			value = table.get(conv.name)
+			if conv.repeat:
+				if value is None:
+					value = []
+				countValue = len(value) - conv.aux
+				if conv.repeat in table:
+					CountReference(table, conv.repeat).setValue(countValue)
+				else:
+					# conv.repeat is a propagated count
+					writer[conv.repeat].setValue(countValue)
+				for i in range(len(value)):
+					conv.write(writer, font, table, value[i], i)
+			elif conv.isCount:
+				# Special-case Count values.
+				# Assumption: a Count field will *always* precede
+				# the actual array(s).
+				# We need a default value, as it may be set later by a nested
+				# table. We will later store it here.
+				# We add a reference: by the time the data is assembled
+				# the Count value will be filled in.
+				ref = writer.writeCountReference(table, conv.name)
+				table[conv.name] = None
+				if conv.isPropagated:
+					writer[conv.name] = ref
+			elif conv.isLookupType:
+				ref = writer.writeCountReference(table, conv.name)
+				table[conv.name] = None
+				writer['LookupType'] = ref
+			else:
+				if conv.aux and not eval(conv.aux, None, table):
+					continue
+				conv.write(writer, font, table, value)
+				if conv.isPropagated:
+					writer[conv.name] = value
+	
+	def readFormat(self, reader):
+		pass
+	
+	def writeFormat(self, writer):
+		pass
+	
+	def postRead(self, table, font):
+		self.__dict__.update(table)
+	
+	def preWrite(self, font):
+		return self.__dict__.copy()
+	
+	def toXML(self, xmlWriter, font, attrs=None, name=None):
+		tableName = name if name else self.__class__.__name__
+		if attrs is None:
+			attrs = []
+		if hasattr(self, "Format"):
+			attrs = attrs + [("Format", self.Format)]
+		xmlWriter.begintag(tableName, attrs)
+		xmlWriter.newline()
+		self.toXML2(xmlWriter, font)
+		xmlWriter.endtag(tableName)
+		xmlWriter.newline()
+	
+	def toXML2(self, xmlWriter, font):
+		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
+		# This is because in TTX our parent writes our main tag, and in otBase.py we
+		# do it ourselves. I think I'm getting schizophrenic...
+		for conv in self.getConverters():
+			if conv.repeat:
+				value = getattr(self, conv.name)
+				for i in range(len(value)):
+					item = value[i]
+					conv.xmlWrite(xmlWriter, font, item, conv.name,
+							[("index", i)])
+			else:
+				if conv.aux and not eval(conv.aux, None, vars(self)):
+					continue
+				value = getattr(self, conv.name)
+				conv.xmlWrite(xmlWriter, font, value, conv.name, [])
+	
+	def fromXML(self, name, attrs, content, font):
+		try:
+			conv = self.getConverterByName(name)
+		except KeyError:
+			raise    # XXX on KeyError, raise nice error
+		value = conv.xmlRead(attrs, content, font)
+		if conv.repeat:
+			seq = getattr(self, conv.name, None)
+			if seq is None:
+				seq = []
+				setattr(self, conv.name, seq)
+			seq.append(value)
+		else:
+			setattr(self, conv.name, value)
+	
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+
+		self.ensureDecompiled()
+		other.ensureDecompiled()
+
+		return self.__dict__ == other.__dict__
+
+
+class FormatSwitchingBaseTable(BaseTable):
+	
+	"""Minor specialization of BaseTable, for tables that have multiple
+	formats, eg. CoverageFormat1 vs. CoverageFormat2."""
+	
+	def getConverters(self):
+		return self.converters[self.Format]
+	
+	def getConverterByName(self, name):
+		return self.convertersByName[self.Format][name]
+	
+	def readFormat(self, reader):
+		self.Format = reader.readUShort()
+		assert self.Format != 0, (self, reader.pos, len(reader.data))
+	
+	def writeFormat(self, writer):
+		writer.writeUShort(self.Format)
+
+	def toXML(self, xmlWriter, font, attrs=None, name=None):
+		BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__)
+
+
+#
+# Support for ValueRecords
+#
+# This data type is so different from all other OpenType data types that
+# it requires quite a bit of code for itself. It even has special support
+# in OTTableReader and OTTableWriter...
+#
+
+valueRecordFormat = [
+#	Mask	 Name            isDevice  signed
+	(0x0001, "XPlacement",   0,        1),
+	(0x0002, "YPlacement",   0,        1),
+	(0x0004, "XAdvance",     0,        1),
+	(0x0008, "YAdvance",     0,        1),
+	(0x0010, "XPlaDevice",   1,        0),
+	(0x0020, "YPlaDevice",   1,        0),
+	(0x0040, "XAdvDevice",   1,        0),
+	(0x0080, "YAdvDevice",   1,        0),
+# 	reserved:
+	(0x0100, "Reserved1",    0,        0),
+	(0x0200, "Reserved2",    0,        0),
+	(0x0400, "Reserved3",    0,        0),
+	(0x0800, "Reserved4",    0,        0),
+	(0x1000, "Reserved5",    0,        0),
+	(0x2000, "Reserved6",    0,        0),
+	(0x4000, "Reserved7",    0,        0),
+	(0x8000, "Reserved8",    0,        0),
+]
+
+def _buildDict():
+	d = {}
+	for mask, name, isDevice, signed in valueRecordFormat:
+		d[name] = mask, isDevice, signed
+	return d
+
+valueRecordFormatDict = _buildDict()
+
+
+class ValueRecordFactory(object):
+	
+	"""Given a format code, this object convert ValueRecords."""
+
+	def __init__(self, valueFormat):
+		format = []
+		for mask, name, isDevice, signed in valueRecordFormat:
+			if valueFormat & mask:
+				format.append((name, isDevice, signed))
+		self.format = format
+	
+	def readValueRecord(self, reader, font):
+		format = self.format
+		if not format:
+			return None
+		valueRecord = ValueRecord()
+		for name, isDevice, signed in format:
+			if signed:
+				value = reader.readShort()
+			else:
+				value = reader.readUShort()
+			if isDevice:
+				if value:
+					from . import otTables
+					subReader = reader.getSubReader(value)
+					value = getattr(otTables, name)()
+					value.decompile(subReader, font)
+				else:
+					value = None
+			setattr(valueRecord, name, value)
+		return valueRecord
+	
+	def writeValueRecord(self, writer, font, valueRecord):
+		for name, isDevice, signed in self.format:
+			value = getattr(valueRecord, name, 0)
+			if isDevice:
+				if value:
+					subWriter = writer.getSubWriter()
+					writer.writeSubTable(subWriter)
+					value.compile(subWriter, font)
+				else:
+					writer.writeUShort(0)
+			elif signed:
+				writer.writeShort(value)
+			else:
+				writer.writeUShort(value)
+
+
+class ValueRecord(object):
+	
+	# see ValueRecordFactory
+	
+	def getFormat(self):
+		format = 0
+		for name in self.__dict__.keys():
+			format = format | valueRecordFormatDict[name][0]
+		return format
+	
+	def toXML(self, xmlWriter, font, valueName, attrs=None):
+		if attrs is None:
+			simpleItems = []
+		else:
+			simpleItems = list(attrs)
+		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
+			if hasattr(self, name):
+				simpleItems.append((name, getattr(self, name)))
+		deviceItems = []
+		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
+			if hasattr(self, name):
+				device = getattr(self, name)
+				if device is not None:
+					deviceItems.append((name, device))
+		if deviceItems:
+			xmlWriter.begintag(valueName, simpleItems)
+			xmlWriter.newline()
+			for name, deviceRecord in deviceItems:
+				if deviceRecord is not None:
+					deviceRecord.toXML(xmlWriter, font)
+			xmlWriter.endtag(valueName)
+			xmlWriter.newline()
+		else:
+			xmlWriter.simpletag(valueName, simpleItems)
+			xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, font):
+		from . import otTables
+		for k, v in attrs.items():
+			setattr(self, k, int(v))
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attrs, content = element
+			value = getattr(otTables, name)()
+			for elem2 in content:
+				if not isinstance(elem2, tuple):
+					continue
+				name2, attrs2, content2 = elem2
+				value.fromXML(name2, attrs2, content2, font)
+			setattr(self, name, value)
+	
+	def __ne__(self, other):
+		return not self.__eq__(other)
+	def __eq__(self, other):
+		if type(self) != type(other):
+			return NotImplemented
+		return self.__dict__ == other.__dict__
diff --git a/Lib/fontTools/ttLib/tables/otConverters.py b/Lib/fontTools/ttLib/tables/otConverters.py
new file mode 100644
index 0000000..d6ac461
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/otConverters.py
@@ -0,0 +1,385 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import safeEval
+from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
+from .otBase import ValueRecordFactory
+
+
def buildConverters(tableSpec, tableNamespace):
	"""Given a table spec from otData.py, build a converter object for each
	field of the table. This is called for each table in otData.py, and
	the results are assigned to the corresponding class in otTables.py.

	Returns (converters, convertersByName): the ordered converter list
	plus a name -> converter mapping, augmented with Table converters for
	every known lookup-subtable and FeatureParams class.
	"""
	converters = []
	convertersByName = {}
	for tp, name, repeat, aux, descr in tableSpec:
		tableName = name
		if name.startswith("ValueFormat"):
			assert tp == "uint16"
			converterClass = ValueFormat
		elif name.endswith("Count") or name.endswith("LookupType"):
			assert tp == "uint16"
			converterClass = ComputedUShort
		elif name == "SubTable":
			converterClass = SubTable
		elif name == "ExtSubTable":
			converterClass = ExtSubTable
		elif name == "FeatureParams":
			converterClass = FeatureParams
		elif tp not in converterMapping:
			# Unknown type string: treat it as an inline struct whose
			# class is looked up by the type name itself.
			tableName = tp
			converterClass = Struct
		else:
			converterClass = converterMapping[tp]
		tableClass = tableNamespace.get(tableName)
		conv = converterClass(name, repeat, aux, tableClass)
		if name in ["SubTable", "ExtSubTable"]:
			conv.lookupTypes = tableNamespace['lookupTypes']
			# also create reverse mapping: subtable class name -> converter
			for t in conv.lookupTypes.values():
				for cls in t.values():
					convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
		if name == "FeatureParams":
			conv.featureParamTypes = tableNamespace['featureParamTypes']
			conv.defaultFeatureParams = tableNamespace['FeatureParams']
			for cls in conv.featureParamTypes.values():
				convertersByName[cls.__name__] = Table(name, repeat, aux, cls)
		converters.append(conv)
		assert name not in convertersByName, name
		convertersByName[name] = conv
	return converters, convertersByName
+
+
class BaseConverter(object):

	"""Abstract base for all converter objects.  Concrete subclasses
	implement the four read/write hooks declared below; only the
	constructor is usable directly."""

	def __init__(self, name, repeat, aux, tableClass):
		self.name = name
		self.repeat = repeat
		self.aux = aux
		self.tableClass = tableClass
		# Classification flags derived from the field name, consulted by
		# the (de)compilation machinery.
		self.isCount = name.endswith("Count")
		self.isLookupType = name.endswith("LookupType")
		self.isPropagated = name in ("ClassCount", "Class2Count", "FeatureTag")

	def read(self, reader, font, tableDict):
		"""Read a value from the reader."""
		raise NotImplementedError(self)

	def write(self, writer, font, tableDict, value, repeatIndex=None):
		"""Write a value to the writer."""
		raise NotImplementedError(self)

	def xmlRead(self, attrs, content, font):
		"""Read a value from XML."""
		raise NotImplementedError(self)

	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		"""Write a value to XML."""
		raise NotImplementedError(self)
+
+
class SimpleValue(BaseConverter):

	"""Converter whose value round-trips through a single 'value'
	attribute on a simple (childless) XML element."""

	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		attributes = attrs + [("value", value)]
		xmlWriter.simpletag(name, attributes)
		xmlWriter.newline()

	def xmlRead(self, attrs, content, font):
		return attrs["value"]
+
class IntValue(SimpleValue):
	def xmlRead(self, attrs, content, font):
		# Base 0: accepts plain decimal as well as 0x/0o/0b literals.
		return int(attrs["value"], 0)
+
class Long(IntValue):
	# 32-bit signed integer field.
	def read(self, reader, font, tableDict):
		return reader.readLong()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeLong(value)
+
class Version(BaseConverter):

	"""Converter for 16.16 fixed-point version fields (0x00010000 == 1.0).
	Only major version 1 is accepted on the binary side; values are
	exposed as floats."""

	def read(self, reader, font, tableDict):
		value = reader.readLong()
		assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
		return fi2fl(value, 16)

	def write(self, writer, font, tableDict, value, repeatIndex=None):
		if value < 0x10000:
			value = fl2fi(value, 16)
		value = int(round(value))
		assert (value >> 16) == 1, "Unsupported version 0x%08x" % value
		writer.writeLong(value)

	def xmlRead(self, attrs, content, font):
		value = attrs["value"]
		# Only a "0x..." string is a raw fixed-point literal; anything
		# else goes through float().  (Testing startswith("0") here, as
		# before, sent decimal strings like "0.5" into int(value, 0)
		# and raised ValueError.)
		value = float(int(value, 0)) if value.startswith("0x") else float(value)
		if value >= 0x10000:
			# Raw fixed-point literal: scale down to a float version.
			value = fi2fl(value, 16)
		return value

	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		if value >= 0x10000:
			value = fi2fl(value, 16)
		if value % 1 != 0:
			# Fractional versions are written in raw fixed-point hex so
			# they round-trip exactly.
			value = "0x%08x" % fl2fi(value, 16)
		xmlWriter.simpletag(name, attrs + [("value", value)])
		xmlWriter.newline()
+
class Short(IntValue):
	# 16-bit signed integer field.
	def read(self, reader, font, tableDict):
		return reader.readShort()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeShort(value)
+
class UShort(IntValue):
	# 16-bit unsigned integer field.
	def read(self, reader, font, tableDict):
		return reader.readUShort()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUShort(value)
+
class UInt24(IntValue):
	# 24-bit unsigned integer field.
	def read(self, reader, font, tableDict):
		return reader.readUInt24()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUInt24(value)
+
class ComputedUShort(UShort):
	# Used for fields (counts, lookup types) that are recomputed when
	# compiling, so the value is only echoed into the XML as a comment.
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		xmlWriter.comment("%s=%s" % (name, value))
		xmlWriter.newline()
+
class Tag(SimpleValue):
	# 4-byte tag field (script/feature/language tags).
	def read(self, reader, font, tableDict):
		return reader.readTag()
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeTag(value)
+
class GlyphID(SimpleValue):

	"""Glyph reference: a numeric glyph index on disk, a glyph name in
	memory and in the XML dump."""

	def read(self, reader, font, tableDict):
		return font.getGlyphName(reader.readUShort())

	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.writeUShort(font.getGlyphID(value))
+
class FloatValue(SimpleValue):
	def xmlRead(self, attrs, content, font):
		# XML side always parses as a plain float.
		return float(attrs["value"])
+
class DeciPoints(FloatValue):

	"""Length stored on disk as tenths of a point (uint16), exposed as
	a float number of points."""

	def read(self, reader, font, tableDict):
		tenths = reader.readUShort()
		return tenths / 10

	def write(self, writer, font, tableDict, value, repeatIndex=None):
		tenths = int(round(value * 10))
		writer.writeUShort(tenths)
+
class Struct(BaseConverter):

	"""Converter for a sub-structure stored inline (not behind an
	offset)."""

	def read(self, reader, font, tableDict):
		table = self.tableClass()
		table.decompile(reader, font)
		return table

	def write(self, writer, font, tableDict, value, repeatIndex=None):
		value.compile(writer, font)

	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		if value is not None:
			value.toXML(xmlWriter, font, attrs, name=name)
			return
		if not attrs:
			return  # NULL table with no attributes: nothing to emit
		# There are attributes (probably an index): keep a placeholder
		# element so that array positions in the containing element
		# stay intact.
		xmlWriter.simpletag(name, attrs + [("empty", True)])
		xmlWriter.newline()

	def xmlRead(self, attrs, content, font):
		table = self.tableClass()
		if attrs.get("empty"):
			return None  # placeholder written by xmlWrite above
		Format = attrs.get("Format")
		if Format is not None:
			table.Format = int(Format)
		for element in content:
			if not isinstance(element, tuple):
				continue  # character data between elements
			# Distinct names, so the method parameters aren't shadowed.
			elemName, elemAttrs, elemContent = element
			table.fromXML(elemName, elemAttrs, elemContent, font)
		return table
+
+
class Table(Struct):

	"""Converter for a sub-table referenced through a 16-bit offset.
	LTable overrides longOffset/readOffset for 32-bit offsets."""

	longOffset = False

	def readOffset(self, reader):
		return reader.readUShort()

	def writeNullOffset(self, writer):
		# A NULL reference is encoded as offset 0, at whichever width
		# this converter uses.
		if self.longOffset:
			writer.writeULong(0)
		else:
			writer.writeUShort(0)
	
	def read(self, reader, font, tableDict):
		offset = self.readOffset(reader)
		if offset == 0:
			# NULL offset: no sub-table present.
			return None
		if offset <= 3:
			# XXX hack to work around buggy pala.ttf
			print("*** Warning: offset is not 0, yet suspiciously low (%s). table: %s" \
					% (offset, self.tableClass.__name__))
			return None
		table = self.tableClass()
		reader = reader.getSubReader(offset)
		if font.lazy:
			# Lazy mode: don't decompile now, just stash the sub-reader
			# and font on the table for later decompilation.
			table.reader = reader
			table.font = font
		else:
			table.decompile(reader, font)
		return table
	
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		if value is None:
			self.writeNullOffset(writer)
		else:
			# Register a sub-writer with the parent (writeSubTable), then
			# compile the referenced table's content into it.
			subWriter = writer.getSubWriter()
			subWriter.longOffset = self.longOffset
			subWriter.name = self.name
			if repeatIndex is not None:
				subWriter.repeatIndex = repeatIndex
			writer.writeSubTable(subWriter)
			value.compile(subWriter, font)
+
class LTable(Table):

	# Table referenced through a 32-bit ("long") offset.
	longOffset = True

	def readOffset(self, reader):
		return reader.readULong()
+
+
class SubTable(Table):
	def getConverter(self, tableType, lookupType):
		# Specialize this converter to the concrete subtable class for
		# the given lookup type; self.lookupTypes is installed by
		# buildConverters().
		tableClass = self.lookupTypes[tableType][lookupType]
		return self.__class__(self.name, self.repeat, self.aux, tableClass)
+
+
class ExtSubTable(LTable, SubTable):
	
	# Extension subtable: a lookup subtable behind a 32-bit offset.
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer.Extension = 1 # actually, mere presence of the field flags it as an Ext Subtable writer.
		Table.write(self, writer, font, tableDict, value, repeatIndex)
+
class FeatureParams(Table):
	def getConverter(self, featureTag):
		# Pick the tag-specific FeatureParams class if one is registered,
		# else the generic default; both mappings are installed by
		# buildConverters().
		tableClass = self.featureParamTypes.get(featureTag, self.defaultFeatureParams)
		return self.__class__(self.name, self.repeat, self.aux, tableClass)
+
+
class ValueFormat(IntValue):
	# Reads/writes the uint16 ValueFormat field and, as a side effect,
	# installs a matching ValueRecordFactory on the reader/writer (keyed
	# "ValueFormat1" or "ValueFormat2") for the ValueRecord converter.
	def __init__(self, name, repeat, aux, tableClass):
		BaseConverter.__init__(self, name, repeat, aux, tableClass)
		# Field name's trailing digit selects which factory slot to use.
		self.which = "ValueFormat" + ("2" if name[-1] == "2" else "1")
	def read(self, reader, font, tableDict):
		format = reader.readUShort()
		reader[self.which] = ValueRecordFactory(format)
		return format
	def write(self, writer, font, tableDict, format, repeatIndex=None):
		writer.writeUShort(format)
		writer[self.which] = ValueRecordFactory(format)
+
+
class ValueRecord(ValueFormat):
	# Delegates (de)serialization to the ValueRecordFactory previously
	# stashed on the reader/writer by the ValueFormat converter.
	def read(self, reader, font, tableDict):
		return reader[self.which].readValueRecord(reader, font)
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		writer[self.which].writeValueRecord(writer, font, value)
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		if value is None:
			pass  # NULL table, ignore
		else:
			value.toXML(xmlWriter, font, self.name, attrs)
	def xmlRead(self, attrs, content, font):
		# Local import avoids a circular import with otBase.
		from .otBase import ValueRecord
		value = ValueRecord()
		value.fromXML(None, attrs, content, font)
		return value
+
+
class DeltaValue(BaseConverter):
	
	"""Converter for the packed DeltaValue array of a Device table.

	DeltaFormat 1/2/3 packs each delta into 2/4/8 signed bits, filled
	most-significant-first into consecutive 16-bit words.  StartSize,
	EndSize and DeltaFormat are read from sibling fields via tableDict.
	"""
	
	def read(self, reader, font, tableDict):
		StartSize = tableDict["StartSize"]
		EndSize = tableDict["EndSize"]
		DeltaFormat = tableDict["DeltaFormat"]
		assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
		nItems = EndSize - StartSize + 1
		nBits = 1 << DeltaFormat
		minusOffset = 1 << nBits
		mask = (1 << nBits) - 1
		signMask = 1 << (nBits - 1)
		
		DeltaValue = []
		tmp, shift = 0, 0
		for i in range(nItems):
			if shift == 0:
				# Previous word exhausted: fetch the next 16 bits.
				tmp, shift = reader.readUShort(), 16
			shift = shift - nBits
			value = (tmp >> shift) & mask
			if value & signMask:
				value = value - minusOffset  # sign-extend
			DeltaValue.append(value)
		return DeltaValue
	
	def write(self, writer, font, tableDict, value, repeatIndex=None):
		StartSize = tableDict["StartSize"]
		EndSize = tableDict["EndSize"]
		DeltaFormat = tableDict["DeltaFormat"]
		DeltaValue = value
		assert DeltaFormat in (1, 2, 3), "illegal DeltaFormat"
		nItems = EndSize - StartSize + 1
		nBits = 1 << DeltaFormat
		assert len(DeltaValue) == nItems
		mask = (1 << nBits) - 1
		
		tmp, shift = 0, 16
		for value in DeltaValue:
			shift = shift - nBits
			tmp = tmp | ((value & mask) << shift)
			if shift == 0:
				writer.writeUShort(tmp)
				tmp, shift = 0, 16
		if shift != 16:
			# Flush the final, partially filled word.
			writer.writeUShort(tmp)
	
	def xmlWrite(self, xmlWriter, font, value, name, attrs):
		xmlWriter.simpletag(name, attrs + [("value", value)])
		xmlWriter.newline()
	
	def xmlRead(self, attrs, content, font):
		# The attribute holds a Python list literal; safeEval parses it.
		return safeEval(attrs["value"])
+
+
# Maps otData.py field-type strings to converter classes; types not
# listed here fall back to Struct in buildConverters().
converterMapping = {
	# type         class
	"int16":       Short,
	"uint16":      UShort,
	"uint24":      UInt24,
	"Version":     Version,
	"Tag":         Tag,
	"GlyphID":     GlyphID,
	"DeciPoints":  DeciPoints,
	"struct":      Struct,
	"Offset":      Table,
	"LOffset":     LTable,
	"ValueRecord": ValueRecord,
	"DeltaValue":  DeltaValue,
}
+
diff --git a/Lib/fontTools/ttLib/tables/otData.py b/Lib/fontTools/ttLib/tables/otData.py
new file mode 100644
index 0000000..10046d8
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/otData.py
@@ -0,0 +1,982 @@
+otData = [
+
+	#
+	# common
+	#
+
+	('ScriptList', [
+		('uint16', 'ScriptCount', None, None, 'Number of ScriptRecords'),
+		('struct', 'ScriptRecord', 'ScriptCount', 0, 'Array of ScriptRecords -listed alphabetically by ScriptTag'),
+	]),
+
+	('ScriptRecord', [
+		('Tag', 'ScriptTag', None, None, '4-byte ScriptTag identifier'),
+		('Offset', 'Script', None, None, 'Offset to Script table-from beginning of ScriptList'),
+	]),
+
+	('Script', [
+		('Offset', 'DefaultLangSys', None, None, 'Offset to DefaultLangSys table-from beginning of Script table-may be NULL'),
+		('uint16', 'LangSysCount', None, None, 'Number of LangSysRecords for this script-excluding the DefaultLangSys'),
+		('struct', 'LangSysRecord', 'LangSysCount', 0, 'Array of LangSysRecords-listed alphabetically by LangSysTag'),
+	]),
+
+	('LangSysRecord', [
+		('Tag', 'LangSysTag', None, None, '4-byte LangSysTag identifier'),
+		('Offset', 'LangSys', None, None, 'Offset to LangSys table-from beginning of Script table'),
+	]),
+
+	('LangSys', [
+		('Offset', 'LookupOrder', None, None, '= NULL (reserved for an offset to a reordering table)'),
+		('uint16', 'ReqFeatureIndex', None, None, 'Index of a feature required for this language system- if no required features = 0xFFFF'),
+		('uint16', 'FeatureCount', None, None, 'Number of FeatureIndex values for this language system-excludes the required feature'),
+		('uint16', 'FeatureIndex', 'FeatureCount', 0, 'Array of indices into the FeatureList-in arbitrary order'),
+	]),
+
+	('FeatureList', [
+		('uint16', 'FeatureCount', None, None, 'Number of FeatureRecords in this table'),
+		('struct', 'FeatureRecord', 'FeatureCount', 0, 'Array of FeatureRecords-zero-based (first feature has FeatureIndex = 0)-listed alphabetically by FeatureTag'),
+	]),
+
+	('FeatureRecord', [
+		('Tag', 'FeatureTag', None, None, '4-byte feature identification tag'),
+		('Offset', 'Feature', None, None, 'Offset to Feature table-from beginning of FeatureList'),
+	]),
+
+	('Feature', [
+		('Offset', 'FeatureParams', None, None, '= NULL (reserved for offset to FeatureParams)'),
+		('uint16', 'LookupCount', None, None, 'Number of LookupList indices for this feature'),
+		('uint16', 'LookupListIndex', 'LookupCount', 0, 'Array of LookupList indices for this feature -zero-based (first lookup is LookupListIndex = 0)'),
+	]),
+
+	('FeatureParams', [
+	]),
+
+	('FeatureParamsSize', [
+		('DeciPoints', 'DesignSize', None, None, 'The design size in 720/inch units (decipoints).'),
+		('uint16', 'SubfamilyID', None, None, 'Serves as an identifier that associates fonts in a subfamily.'),
+		('uint16', 'SubfamilyNameID', None, None, 'Subfamily NameID.'),
+		('DeciPoints', 'RangeStart', None, None, 'Small end of recommended usage range (exclusive) in 720/inch units.'),
+		('DeciPoints', 'RangeEnd', None, None, 'Large end of recommended usage range (inclusive) in 720/inch units.'),
+	]),
+
+	('FeatureParamsStylisticSet', [
+		('uint16', 'Version', None, None, 'Set to 0.'),
+		('uint16', 'UINameID', None, None, 'UI NameID.'),
+	]),
+
+	('FeatureParamsCharacterVariants', [
+		('uint16', 'Format', None, None, 'Set to 0.'),
+		('uint16', 'FeatUILabelNameID', None, None, 'Feature UI label NameID.'),
+		('uint16', 'FeatUITooltipTextNameID', None, None, 'Feature UI tooltip text NameID.'),
+		('uint16', 'SampleTextNameID', None, None, 'Sample text NameID.'),
+		('uint16', 'NumNamedParameters', None, None, 'Number of named parameters.'),
+		('uint16', 'FirstParamUILabelNameID', None, None, 'First NameID of UI feature parameters.'),
+		('uint16', 'CharCount', None, None, 'Count of characters this feature provides glyph variants for.'),
+		('uint24', 'Character', 'CharCount', 0, 'Unicode characters for which this feature provides glyph variants.'),
+	]),
+
+	('LookupList', [
+		('uint16', 'LookupCount', None, None, 'Number of lookups in this table'),
+		('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'),
+	]),
+
+	('Lookup', [
+		('uint16', 'LookupType', None, None, 'Different enumerations for GSUB and GPOS'),
+		('uint16', 'LookupFlag', None, None, 'Lookup qualifiers'),
+		('uint16', 'SubTableCount', None, None, 'Number of SubTables for this lookup'),
+		('Offset', 'SubTable', 'SubTableCount', 0, 'Array of offsets to SubTables-from beginning of Lookup table'),
+		('uint16', 'MarkFilteringSet', None, 'LookupFlag & 0x0010', 'If set, indicates that the lookup table structure is followed by a MarkFilteringSet field. The layout engine skips over all mark glyphs not in the mark filtering set indicated.'),
+	]),
+
+	('CoverageFormat1', [
+		('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 1'),
+		('uint16', 'GlyphCount', None, None, 'Number of glyphs in the GlyphArray'),
+		('GlyphID', 'GlyphArray', 'GlyphCount', 0, 'Array of GlyphIDs-in numerical order'),
+	]),
+
+	('CoverageFormat2', [
+		('uint16', 'CoverageFormat', None, None, 'Format identifier-format = 2'),
+		('uint16', 'RangeCount', None, None, 'Number of RangeRecords'),
+		('struct', 'RangeRecord', 'RangeCount', 0, 'Array of glyph ranges-ordered by Start GlyphID'),
+	]),
+
+	('RangeRecord', [
+		('GlyphID', 'Start', None, None, 'First GlyphID in the range'),
+		('GlyphID', 'End', None, None, 'Last GlyphID in the range'),
+		('uint16', 'StartCoverageIndex', None, None, 'Coverage Index of first GlyphID in range'),
+	]),
+
+	('ClassDefFormat1', [
+		('uint16', 'ClassFormat', None, None, 'Format identifier-format = 1'),
+		('GlyphID', 'StartGlyph', None, None, 'First GlyphID of the ClassValueArray'),
+		('uint16', 'GlyphCount', None, None, 'Size of the ClassValueArray'),
+		('uint16', 'ClassValueArray', 'GlyphCount', 0, 'Array of Class Values-one per GlyphID'),
+	]),
+
+	('ClassDefFormat2', [
+		('uint16', 'ClassFormat', None, None, 'Format identifier-format = 2'),
+		('uint16', 'ClassRangeCount', None, None, 'Number of ClassRangeRecords'),
+		('struct', 'ClassRangeRecord', 'ClassRangeCount', 0, 'Array of ClassRangeRecords-ordered by Start GlyphID'),
+	]),
+
+	('ClassRangeRecord', [
+		('GlyphID', 'Start', None, None, 'First GlyphID in the range'),
+		('GlyphID', 'End', None, None, 'Last GlyphID in the range'),
+		('uint16', 'Class', None, None, 'Applied to all glyphs in the range'),
+	]),
+
+	('Device', [
+		('uint16', 'StartSize', None, None, 'Smallest size to correct-in ppem'),
+		('uint16', 'EndSize', None, None, 'Largest size to correct-in ppem'),
+		('uint16', 'DeltaFormat', None, None, 'Format of DeltaValue array data: 1, 2, or 3'),
+		('DeltaValue', 'DeltaValue', '', 0, 'Array of compressed data'),
+	]),
+
+
+	#
+	# gpos
+	#
+
+	('GPOS', [
+		('Version', 'Version', None, None, 'Version of the GPOS table-initially = 0x00010000'),
+		('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GPOS table'),
+		('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GPOS table'),
+		('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GPOS table'),
+	]),
+
+	('SinglePosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'),
+		('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'),
+		('ValueRecord', 'Value', None, None, 'Defines positioning value(s)-applied to all glyphs in the Coverage table'),
+	]),
+
+	('SinglePosFormat2', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of SinglePos subtable'),
+		('uint16', 'ValueFormat', None, None, 'Defines the types of data in the ValueRecord'),
+		('uint16', 'ValueCount', None, None, 'Number of ValueRecords'),
+		('ValueRecord', 'Value', 'ValueCount', 0, 'Array of ValueRecords-positioning values applied to glyphs'),
+	]),
+
+	('PairPosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-only the first glyph in each pair'),
+		('uint16', 'ValueFormat1', None, None, 'Defines the types of data in ValueRecord1-for the first glyph in the pair -may be zero (0)'),
+		('uint16', 'ValueFormat2', None, None, 'Defines the types of data in ValueRecord2-for the second glyph in the pair -may be zero (0)'),
+		('uint16', 'PairSetCount', None, None, 'Number of PairSet tables'),
+		('Offset', 'PairSet', 'PairSetCount', 0, 'Array of offsets to PairSet tables-from beginning of PairPos subtable-ordered by Coverage Index'),
+	]),
+
+	('PairSet', [
+		('uint16', 'PairValueCount', None, None, 'Number of PairValueRecords'),
+		('struct', 'PairValueRecord', 'PairValueCount', 0, 'Array of PairValueRecords-ordered by GlyphID of the second glyph'),
+	]),
+
+	('PairValueRecord', [
+		('GlyphID', 'SecondGlyph', None, None, 'GlyphID of second glyph in the pair-first glyph is listed in the Coverage table'),
+		('ValueRecord', 'Value1', None, None, 'Positioning data for the first glyph in the pair'),
+		('ValueRecord', 'Value2', None, None, 'Positioning data for the second glyph in the pair'),
+	]),
+
+	('PairPosFormat2', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of PairPos subtable-for the first glyph of the pair'),
+		('uint16', 'ValueFormat1', None, None, 'ValueRecord definition-for the first glyph of the pair-may be zero (0)'),
+		('uint16', 'ValueFormat2', None, None, 'ValueRecord definition-for the second glyph of the pair-may be zero (0)'),
+		('Offset', 'ClassDef1', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the first glyph of the pair'),
+		('Offset', 'ClassDef2', None, None, 'Offset to ClassDef table-from beginning of PairPos subtable-for the second glyph of the pair'),
+		('uint16', 'Class1Count', None, None, 'Number of classes in ClassDef1 table-includes Class0'),
+		('uint16', 'Class2Count', None, None, 'Number of classes in ClassDef2 table-includes Class0'),
+		('struct', 'Class1Record', 'Class1Count', 0, 'Array of Class1 records-ordered by Class1'),
+	]),
+
+	('Class1Record', [
+		('struct', 'Class2Record', 'Class2Count', 0, 'Array of Class2 records-ordered by Class2'),
+	]),
+
+	('Class2Record', [
+		('ValueRecord', 'Value1', None, None, 'Positioning for first glyph-empty if ValueFormat1 = 0'),
+		('ValueRecord', 'Value2', None, None, 'Positioning for second glyph-empty if ValueFormat2 = 0'),
+	]),
+
+	('CursivePosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of CursivePos subtable'),
+		('uint16', 'EntryExitCount', None, None, 'Number of EntryExit records'),
+		('struct', 'EntryExitRecord', 'EntryExitCount', 0, 'Array of EntryExit records-in Coverage Index order'),
+	]),
+
+	('EntryExitRecord', [
+		('Offset', 'EntryAnchor', None, None, 'Offset to EntryAnchor table-from beginning of CursivePos subtable-may be NULL'),
+		('Offset', 'ExitAnchor', None, None, 'Offset to ExitAnchor table-from beginning of CursivePos subtable-may be NULL'),
+	]),
+
+	('MarkBasePosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'MarkCoverage', None, None, 'Offset to MarkCoverage table-from beginning of MarkBasePos subtable'),
+		('Offset', 'BaseCoverage', None, None, 'Offset to BaseCoverage table-from beginning of MarkBasePos subtable'),
+		('uint16', 'ClassCount', None, None, 'Number of classes defined for marks'),
+		('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkBasePos subtable'),
+		('Offset', 'BaseArray', None, None, 'Offset to BaseArray table-from beginning of MarkBasePos subtable'),
+	]),
+
+	('BaseArray', [
+		('uint16', 'BaseCount', None, None, 'Number of BaseRecords'),
+		('struct', 'BaseRecord', 'BaseCount', 0, 'Array of BaseRecords-in order of BaseCoverage Index'),
+	]),
+
+	('BaseRecord', [
+		('Offset', 'BaseAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of BaseArray table-ordered by class-zero-based'),
+	]),
+
+	('MarkLigPosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'MarkCoverage', None, None, 'Offset to Mark Coverage table-from beginning of MarkLigPos subtable'),
+		('Offset', 'LigatureCoverage', None, None, 'Offset to Ligature Coverage table-from beginning of MarkLigPos subtable'),
+		('uint16', 'ClassCount', None, None, 'Number of defined mark classes'),
+		('Offset', 'MarkArray', None, None, 'Offset to MarkArray table-from beginning of MarkLigPos subtable'),
+		('Offset', 'LigatureArray', None, None, 'Offset to LigatureArray table-from beginning of MarkLigPos subtable'),
+	]),
+
+	('LigatureArray', [
+		('uint16', 'LigatureCount', None, None, 'Number of LigatureAttach table offsets'),
+		('Offset', 'LigatureAttach', 'LigatureCount', 0, 'Array of offsets to LigatureAttach tables-from beginning of LigatureArray table-ordered by LigatureCoverage Index'),
+	]),
+
+	('LigatureAttach', [
+		('uint16', 'ComponentCount', None, None, 'Number of ComponentRecords in this ligature'),
+		('struct', 'ComponentRecord', 'ComponentCount', 0, 'Array of Component records-ordered in writing direction'),
+	]),
+
+	('ComponentRecord', [
+		('Offset', 'LigatureAnchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of LigatureAttach table-ordered by class-NULL if a component does not have an attachment for a class-zero-based array'),
+	]),
+
+	('MarkMarkPosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Mark1Coverage', None, None, 'Offset to Combining Mark Coverage table-from beginning of MarkMarkPos subtable'),
+		('Offset', 'Mark2Coverage', None, None, 'Offset to Base Mark Coverage table-from beginning of MarkMarkPos subtable'),
+		('uint16', 'ClassCount', None, None, 'Number of Combining Mark classes defined'),
+		('Offset', 'Mark1Array', None, None, 'Offset to MarkArray table for Mark1-from beginning of MarkMarkPos subtable'),
+		('Offset', 'Mark2Array', None, None, 'Offset to Mark2Array table for Mark2-from beginning of MarkMarkPos subtable'),
+	]),
+
+	('Mark2Array', [
+		('uint16', 'Mark2Count', None, None, 'Number of Mark2 records'),
+		('struct', 'Mark2Record', 'Mark2Count', 0, 'Array of Mark2 records-in Coverage order'),
+	]),
+
+	('Mark2Record', [
+		('Offset', 'Mark2Anchor', 'ClassCount', 0, 'Array of offsets (one per class) to Anchor tables-from beginning of Mark2Array table-zero-based array'),
+	]),
+
+	('PosLookupRecord', [
+		('uint16', 'SequenceIndex', None, None, 'Index to input glyph sequence-first glyph = 0'),
+		('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'),
+	]),
+
+	('ContextPosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
+		('uint16', 'PosRuleSetCount', None, None, 'Number of PosRuleSet tables'),
+		('Offset', 'PosRuleSet', 'PosRuleSetCount', 0, 'Array of offsets to PosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'),
+	]),
+
+	('PosRuleSet', [
+		('uint16', 'PosRuleCount', None, None, 'Number of PosRule tables'),
+		('Offset', 'PosRule', 'PosRuleCount', 0, 'Array of offsets to PosRule tables-from beginning of PosRuleSet-ordered by preference'),
+	]),
+
+	('PosRule', [
+		('uint16', 'GlyphCount', None, None, 'Number of glyphs in the Input glyph sequence'),
+		('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
+		('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-starting with the second glyph'),
+		('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
+	]),
+
+	('ContextPosFormat2', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
+		('Offset', 'ClassDef', None, None, 'Offset to ClassDef table-from beginning of ContextPos subtable'),
+		('uint16', 'PosClassSetCount', None, None, 'Number of PosClassSet tables'),
+		('Offset', 'PosClassSet', 'PosClassSetCount', 0, 'Array of offsets to PosClassSet tables-from beginning of ContextPos subtable-ordered by class-may be NULL'),
+	]),
+
+	('PosClassSet', [
+		('uint16', 'PosClassRuleCount', None, None, 'Number of PosClassRule tables'),
+		('Offset', 'PosClassRule', 'PosClassRuleCount', 0, 'Array of offsets to PosClassRule tables-from beginning of PosClassSet-ordered by preference'),
+	]),
+
+	('PosClassRule', [
+		('uint16', 'GlyphCount', None, None, 'Number of glyphs to be matched'),
+		('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
+		('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph sequence'),
+		('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
+	]),
+
+	('ContextPosFormat3', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'),
+		('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input sequence'),
+		('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
+		('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage tables-from beginning of ContextPos subtable'),
+		('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of positioning lookups-in design order'),
+	]),
+
+	('ChainContextPosFormat1', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ContextPos subtable'),
+		('uint16', 'ChainPosRuleSetCount', None, None, 'Number of ChainPosRuleSet tables'),
+		('Offset', 'ChainPosRuleSet', 'ChainPosRuleSetCount', 0, 'Array of offsets to ChainPosRuleSet tables-from beginning of ContextPos subtable-ordered by Coverage Index'),
+	]),
+
+	('ChainPosRuleSet', [
+		('uint16', 'ChainPosRuleCount', None, None, 'Number of ChainPosRule tables'),
+		('Offset', 'ChainPosRule', 'ChainPosRuleCount', 0, 'Array of offsets to ChainPosRule tables-from beginning of ChainPosRuleSet-ordered by preference'),
+	]),
+
+	('ChainPosRule', [
+		('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
+		('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"),
+		('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'),
+		('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'),
+		('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"),
+		('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
+		('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'),
+	]),
+
+	('ChainContextPosFormat2', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of ChainContextPos subtable'),
+		('Offset', 'BacktrackClassDef', None, None, 'Offset to ClassDef table containing backtrack sequence context-from beginning of ChainContextPos subtable'),
+		('Offset', 'InputClassDef', None, None, 'Offset to ClassDef table containing input sequence context-from beginning of ChainContextPos subtable'),
+		('Offset', 'LookAheadClassDef', None, None, 'Offset to ClassDef table containing lookahead sequence context-from beginning of ChainContextPos subtable'),
+		('uint16', 'ChainPosClassSetCount', None, None, 'Number of ChainPosClassSet tables'),
+		('Offset', 'ChainPosClassSet', 'ChainPosClassSetCount', 0, 'Array of offsets to ChainPosClassSet tables-from beginning of ChainContextPos subtable-ordered by input class-may be NULL'),
+	]),
+
+	('ChainPosClassSet', [
+		('uint16', 'ChainPosClassRuleCount', None, None, 'Number of ChainPosClassRule tables'),
+		('Offset', 'ChainPosClassRule', 'ChainPosClassRuleCount', 0, 'Array of offsets to ChainPosClassRule tables-from beginning of ChainPosClassSet-ordered by preference'),
+	]),
+
+	('ChainPosClassRule', [
+		('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
+		('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'),
+		('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'),
+		('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'),
+		('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'),
+		('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
+		('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords (in design order)'),
+	]),
+
+	('ChainContextPosFormat3', [
+		('uint16', 'PosFormat', None, None, 'Format identifier-format = 3'),
+		('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
+		('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
+		('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'),
+		('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
+		('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
+		('uint16', 'PosCount', None, None, 'Number of PosLookupRecords'),
+		('struct', 'PosLookupRecord', 'PosCount', 0, 'Array of PosLookupRecords,in design order'),
+	]),
+
+	('ExtensionPosFormat1', [
+		('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'),
+		('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'),
+		('LOffset', 'ExtSubTable', None, None, 'Offset to SubTable'),
+	]),
+
+	('ValueRecord', [
+		('int16', 'XPlacement', None, None, 'Horizontal adjustment for placement-in design units'),
+		('int16', 'YPlacement', None, None, 'Vertical adjustment for placement-in design units'),
+		('int16', 'XAdvance', None, None, 'Horizontal adjustment for advance-in design units (only used for horizontal writing)'),
+		('int16', 'YAdvance', None, None, 'Vertical adjustment for advance-in design units (only used for vertical writing)'),
+		('Offset', 'XPlaDevice', None, None, 'Offset to Device table for horizontal placement-measured from beginning of PosTable (may be NULL)'),
+		('Offset', 'YPlaDevice', None, None, 'Offset to Device table for vertical placement-measured from beginning of PosTable (may be NULL)'),
+		('Offset', 'XAdvDevice', None, None, 'Offset to Device table for horizontal advance-measured from beginning of PosTable (may be NULL)'),
+		('Offset', 'YAdvDevice', None, None, 'Offset to Device table for vertical advance-measured from beginning of PosTable (may be NULL)'),
+	]),
+
+	('AnchorFormat1', [
+		('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 1'),
+		('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
+		('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
+	]),
+
+	('AnchorFormat2', [
+		('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 2'),
+		('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
+		('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
+		('uint16', 'AnchorPoint', None, None, 'Index to glyph contour point'),
+	]),
+
+	('AnchorFormat3', [
+		('uint16', 'AnchorFormat', None, None, 'Format identifier-format = 3'),
+		('int16', 'XCoordinate', None, None, 'Horizontal value-in design units'),
+		('int16', 'YCoordinate', None, None, 'Vertical value-in design units'),
+		('Offset', 'XDeviceTable', None, None, 'Offset to Device table for X coordinate- from beginning of Anchor table (may be NULL)'),
+		('Offset', 'YDeviceTable', None, None, 'Offset to Device table for Y coordinate- from beginning of Anchor table (may be NULL)'),
+	]),
+
+	('MarkArray', [
+		('uint16', 'MarkCount', None, None, 'Number of MarkRecords'),
+		('struct', 'MarkRecord', 'MarkCount', 0, 'Array of MarkRecords-in Coverage order'),
+	]),
+
+	('MarkRecord', [
+		('uint16', 'Class', None, None, 'Class defined for this mark'),
+		('Offset', 'MarkAnchor', None, None, 'Offset to Anchor table-from beginning of MarkArray table'),
+	]),
+
+
+	#
+	# gsub
+	#
+
+	('GSUB', [
+		('Version', 'Version', None, None, 'Version of the GSUB table-initially set to 0x00010000'),
+		('Offset', 'ScriptList', None, None, 'Offset to ScriptList table-from beginning of GSUB table'),
+		('Offset', 'FeatureList', None, None, 'Offset to FeatureList table-from beginning of GSUB table'),
+		('Offset', 'LookupList', None, None, 'Offset to LookupList table-from beginning of GSUB table'),
+	]),
+
+	('SingleSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('int16', 'DeltaGlyphID', None, None, 'Add to original GlyphID to get substitute GlyphID'),
+	]),
+
+	('SingleSubstFormat2', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'),
+		('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage Index'),
+	]),
+
+	('MultipleSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('uint16', 'SequenceCount', None, None, 'Number of Sequence table offsets in the Sequence array'),
+		('Offset', 'Sequence', 'SequenceCount', 0, 'Array of offsets to Sequence tables-from beginning of Substitution table-ordered by Coverage Index'),
+	]),
+
+	('Sequence', [
+		('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array. This should always be greater than 0.'),
+		('GlyphID', 'Substitute', 'GlyphCount', 0, 'String of GlyphIDs to substitute'),
+	]),
+
+	('AlternateSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('uint16', 'AlternateSetCount', None, None, 'Number of AlternateSet tables'),
+		('Offset', 'AlternateSet', 'AlternateSetCount', 0, 'Array of offsets to AlternateSet tables-from beginning of Substitution table-ordered by Coverage Index'),
+	]),
+
+	('AlternateSet', [
+		('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Alternate array'),
+		('GlyphID', 'Alternate', 'GlyphCount', 0, 'Array of alternate GlyphIDs-in arbitrary order'),
+	]),
+
+	('LigatureSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('uint16', 'LigSetCount', None, None, 'Number of LigatureSet tables'),
+		('Offset', 'LigatureSet', 'LigSetCount', 0, 'Array of offsets to LigatureSet tables-from beginning of Substitution table-ordered by Coverage Index'),
+	]),
+
+	('LigatureSet', [
+		('uint16', 'LigatureCount', None, None, 'Number of Ligature tables'),
+		('Offset', 'Ligature', 'LigatureCount', 0, 'Array of offsets to Ligature tables-from beginning of LigatureSet table-ordered by preference'),
+	]),
+
+	('Ligature', [
+		('GlyphID', 'LigGlyph', None, None, 'GlyphID of ligature to substitute'),
+		('uint16', 'CompCount', None, None, 'Number of components in the ligature'),
+		('GlyphID', 'Component', 'CompCount', -1, 'Array of component GlyphIDs-start with the second component-ordered in writing direction'),
+	]),
+
+	('SubstLookupRecord', [
+		('uint16', 'SequenceIndex', None, None, 'Index into current glyph sequence-first glyph = 0'),
+		('uint16', 'LookupListIndex', None, None, 'Lookup to apply to that position-zero-based'),
+	]),
+
+	('ContextSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('uint16', 'SubRuleSetCount', None, None, 'Number of SubRuleSet tables-must equal GlyphCount in Coverage table'),
+		('Offset', 'SubRuleSet', 'SubRuleSetCount', 0, 'Array of offsets to SubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'),
+	]),
+
+	('SubRuleSet', [
+		('uint16', 'SubRuleCount', None, None, 'Number of SubRule tables'),
+		('Offset', 'SubRule', 'SubRuleCount', 0, 'Array of offsets to SubRule tables-from beginning of SubRuleSet table-ordered by preference'),
+	]),
+
+	('SubRule', [
+		('uint16', 'GlyphCount', None, None, 'Total number of glyphs in input glyph sequence-includes the first glyph'),
+		('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
+		('GlyphID', 'Input', 'GlyphCount', -1, 'Array of input GlyphIDs-start with second glyph'),
+		('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'),
+	]),
+
+	('ContextSubstFormat2', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('Offset', 'ClassDef', None, None, 'Offset to glyph ClassDef table-from beginning of Substitution table'),
+		('uint16', 'SubClassSetCount', None, None, 'Number of SubClassSet tables'),
+		('Offset', 'SubClassSet', 'SubClassSetCount', 0, 'Array of offsets to SubClassSet tables-from beginning of Substitution table-ordered by class-may be NULL'),
+	]),
+
+	('SubClassSet', [
+		('uint16', 'SubClassRuleCount', None, None, 'Number of SubClassRule tables'),
+		('Offset', 'SubClassRule', 'SubClassRuleCount', 0, 'Array of offsets to SubClassRule tables-from beginning of SubClassSet-ordered by preference'),
+	]),
+
+	('SubClassRule', [
+		('uint16', 'GlyphCount', None, None, 'Total number of classes specified for the context in the rule-includes the first class'),
+		('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
+		('uint16', 'Class', 'GlyphCount', -1, 'Array of classes-beginning with the second class-to be matched to the input glyph class sequence'),
+		('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of Substitution lookups-in design order'),
+	]),
+
+	('ContextSubstFormat3', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'),
+		('uint16', 'GlyphCount', None, None, 'Number of glyphs in the input glyph sequence'),
+		('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
+		('Offset', 'Coverage', 'GlyphCount', 0, 'Array of offsets to Coverage table-from beginning of Substitution table-in glyph sequence order'),
+		('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords-in design order'),
+	]),
+
+	('ChainContextSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('uint16', 'ChainSubRuleSetCount', None, None, 'Number of ChainSubRuleSet tables-must equal GlyphCount in Coverage table'),
+		('Offset', 'ChainSubRuleSet', 'ChainSubRuleSetCount', 0, 'Array of offsets to ChainSubRuleSet tables-from beginning of Substitution table-ordered by Coverage Index'),
+	]),
+
+	('ChainSubRuleSet', [
+		('uint16', 'ChainSubRuleCount', None, None, 'Number of ChainSubRule tables'),
+		('Offset', 'ChainSubRule', 'ChainSubRuleCount', 0, 'Array of offsets to ChainSubRule tables-from beginning of ChainSubRuleSet table-ordered by preference'),
+	]),
+
+	('ChainSubRule', [
+		('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
+		('GlyphID', 'Backtrack', 'BacktrackGlyphCount', 0, "Array of backtracking GlyphID's (to be matched before the input sequence)"),
+		('uint16', 'InputGlyphCount', None, None, 'Total number of glyphs in the input sequence (includes the first glyph)'),
+		('GlyphID', 'Input', 'InputGlyphCount', -1, 'Array of input GlyphIDs (start with second glyph)'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Total number of glyphs in the look ahead sequence (number of glyphs to be matched after the input sequence)'),
+		('GlyphID', 'LookAhead', 'LookAheadGlyphCount', 0, "Array of lookahead GlyphID's (to be matched after the input sequence)"),
+		('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
+		('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'),
+	]),
+
+	('ChainContextSubstFormat2', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 2'),
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table-from beginning of Substitution table'),
+		('Offset', 'BacktrackClassDef', None, None, 'Offset to glyph ClassDef table containing backtrack sequence data-from beginning of Substitution table'),
+		('Offset', 'InputClassDef', None, None, 'Offset to glyph ClassDef table containing input sequence data-from beginning of Substitution table'),
+		('Offset', 'LookAheadClassDef', None, None, 'Offset to glyph ClassDef table containing lookahead sequence data-from beginning of Substitution table'),
+		('uint16', 'ChainSubClassSetCount', None, None, 'Number of ChainSubClassSet tables'),
+		('Offset', 'ChainSubClassSet', 'ChainSubClassSetCount', 0, 'Array of offsets to ChainSubClassSet tables-from beginning of Substitution table-ordered by input class-may be NULL'),
+	]),
+
+	('ChainSubClassSet', [
+		('uint16', 'ChainSubClassRuleCount', None, None, 'Number of ChainSubClassRule tables'),
+		('Offset', 'ChainSubClassRule', 'ChainSubClassRuleCount', 0, 'Array of offsets to ChainSubClassRule tables-from beginning of ChainSubClassSet-ordered by preference'),
+	]),
+
+	('ChainSubClassRule', [
+		('uint16', 'BacktrackGlyphCount', None, None, 'Total number of glyphs in the backtrack sequence (number of glyphs to be matched before the first glyph)'),
+		('uint16', 'Backtrack', 'BacktrackGlyphCount', 0, 'Array of backtracking classes(to be matched before the input sequence)'),
+		('uint16', 'InputGlyphCount', None, None, 'Total number of classes in the input sequence (includes the first class)'),
+		('uint16', 'Input', 'InputGlyphCount', -1, 'Array of input classes(start with second class; to be matched with the input glyph sequence)'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Total number of classes in the look ahead sequence (number of classes to be matched after the input sequence)'),
+		('uint16', 'LookAhead', 'LookAheadGlyphCount', 0, 'Array of lookahead classes(to be matched after the input sequence)'),
+		('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
+		('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords (in design order)'),
+	]),
+
+	('ChainContextSubstFormat3', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 3'),
+		('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
+		('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
+		('uint16', 'InputGlyphCount', None, None, 'Number of glyphs in input sequence'),
+		('Offset', 'InputCoverage', 'InputGlyphCount', 0, 'Array of offsets to coverage tables in input sequence, in glyph sequence order'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
+		('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
+		('uint16', 'SubstCount', None, None, 'Number of SubstLookupRecords'),
+		('struct', 'SubstLookupRecord', 'SubstCount', 0, 'Array of SubstLookupRecords, in design order'),
+	]),
+
+	('ExtensionSubstFormat1', [
+		('uint16', 'ExtFormat', None, None, 'Format identifier. Set to 1.'),
+		('uint16', 'ExtensionLookupType', None, None, 'Lookup type of subtable referenced by ExtensionOffset (i.e. the extension subtable).'),
+		('LOffset', 'ExtSubTable', None, None, 'Array of offsets to Lookup tables-from beginning of LookupList -zero based (first lookup is Lookup index = 0)'),
+	]),
+
+	('ReverseChainSingleSubstFormat1', [
+		('uint16', 'SubstFormat', None, None, 'Format identifier-format = 1'),
+		('Offset', 'Coverage', None, 0, 'Offset to Coverage table - from beginning of Substitution table'),
+		('uint16', 'BacktrackGlyphCount', None, None, 'Number of glyphs in the backtracking sequence'),
+		('Offset', 'BacktrackCoverage', 'BacktrackGlyphCount', 0, 'Array of offsets to coverage tables in backtracking sequence, in glyph sequence order'),
+		('uint16', 'LookAheadGlyphCount', None, None, 'Number of glyphs in lookahead sequence'),
+		('Offset', 'LookAheadCoverage', 'LookAheadGlyphCount', 0, 'Array of offsets to coverage tables in lookahead sequence, in glyph sequence order'),
+		('uint16', 'GlyphCount', None, None, 'Number of GlyphIDs in the Substitute array'),
+		('GlyphID', 'Substitute', 'GlyphCount', 0, 'Array of substitute GlyphIDs-ordered by Coverage index'),
+	]),
+
+	#
+	# gdef
+	#
+
+	('GDEF', [
+		('Version', 'Version', None, None, 'Version of the GDEF table-initially 0x00010000'),
+		('Offset', 'GlyphClassDef', None, None, 'Offset to class definition table for glyph type-from beginning of GDEF header (may be NULL)'),
+		('Offset', 'AttachList', None, None, 'Offset to list of glyphs with attachment points-from beginning of GDEF header (may be NULL)'),
+		('Offset', 'LigCaretList', None, None, 'Offset to list of positioning points for ligature carets-from beginning of GDEF header (may be NULL)'),
+		('Offset', 'MarkAttachClassDef', None, None, 'Offset to class definition table for mark attachment type-from beginning of GDEF header (may be NULL)'),
+		('Offset', 'MarkGlyphSetsDef', None, 'int(round(Version*0x10000)) >= 0x00010002', 'Offset to the table of mark set definitions-from beginning of GDEF header (may be NULL)'),
+	]),
+
+	('AttachList', [
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of AttachList table'),
+		('uint16', 'GlyphCount', None, None, 'Number of glyphs with attachment points'),
+		('Offset', 'AttachPoint', 'GlyphCount', 0, 'Array of offsets to AttachPoint tables-from beginning of AttachList table-in Coverage Index order'),
+	]),
+
+	('AttachPoint', [
+		('uint16', 'PointCount', None, None, 'Number of attachment points on this glyph'),
+		('uint16', 'PointIndex', 'PointCount', 0, 'Array of contour point indices -in increasing numerical order'),
+	]),
+
+	('LigCaretList', [
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table - from beginning of LigCaretList table'),
+		('uint16', 'LigGlyphCount', None, None, 'Number of ligature glyphs'),
+		('Offset', 'LigGlyph', 'LigGlyphCount', 0, 'Array of offsets to LigGlyph tables-from beginning of LigCaretList table-in Coverage Index order'),
+	]),
+
+	('LigGlyph', [
+		('uint16', 'CaretCount', None, None, 'Number of CaretValues for this ligature (components - 1)'),
+		('Offset', 'CaretValue', 'CaretCount', 0, 'Array of offsets to CaretValue tables-from beginning of LigGlyph table-in increasing coordinate order'),
+	]),
+
+	('CaretValueFormat1', [
+		('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 1'),
+		('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
+	]),
+
+	('CaretValueFormat2', [
+		('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 2'),
+		('uint16', 'CaretValuePoint', None, None, 'Contour point index on glyph'),
+	]),
+
+	('CaretValueFormat3', [
+		('uint16', 'CaretValueFormat', None, None, 'Format identifier-format = 3'),
+		('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
+		('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value-from beginning of CaretValue table'),
+	]),
+
+	('MarkGlyphSetsDef', [
+		('uint16', 'MarkSetTableFormat', None, None, 'Format identifier == 1'),
+		('uint16', 'MarkSetCount', None, None, 'Number of mark sets defined'),
+		('LOffset', 'Coverage', 'MarkSetCount', 0, 'Array of offsets to mark set coverage tables.'),
+	]),
+
+	#
+	# base
+	#
+
+	('BASE', [
+		('Version', 'Version', None, None, 'Version of the BASE table-initially 0x00010000'),
+		('Offset', 'HorizAxis', None, None, 'Offset to horizontal Axis table-from beginning of BASE table-may be NULL'),
+		('Offset', 'VertAxis', None, None, 'Offset to vertical Axis table-from beginning of BASE table-may be NULL'),
+	]),
+
+	('Axis', [
+		('Offset', 'BaseTagList', None, None, 'Offset to BaseTagList table-from beginning of Axis table-may be NULL'),
+		('Offset', 'BaseScriptList', None, None, 'Offset to BaseScriptList table-from beginning of Axis table'),
+	]),
+
+	('BaseTagList', [
+		('uint16', 'BaseTagCount', None, None, 'Number of baseline identification tags in this text direction-may be zero (0)'),
+		('Tag', 'BaselineTag', 'BaseTagCount', 0, 'Array of 4-byte baseline identification tags-must be in alphabetical order'),
+	]),
+
+	('BaseScriptList', [
+		('uint16', 'BaseScriptCount', None, None, 'Number of BaseScriptRecords defined'),
+		('struct', 'BaseScriptRecord', 'BaseScriptCount', 0, 'Array of BaseScriptRecords-in alphabetical order by BaseScriptTag'),
+	]),
+
+	('BaseScriptRecord', [
+		('Tag', 'BaseScriptTag', None, None, '4-byte script identification tag'),
+		('Offset', 'BaseScript', None, None, 'Offset to BaseScript table-from beginning of BaseScriptList'),
+	]),
+
+	('BaseScript', [
+		('Offset', 'BaseValues', None, None, 'Offset to BaseValues table-from beginning of BaseScript table-may be NULL'),
+		('Offset', 'DefaultMinMax', None, None, 'Offset to MinMax table- from beginning of BaseScript table-may be NULL'),
+		('uint16', 'BaseLangSysCount', None, None, 'Number of BaseLangSysRecords defined-may be zero (0)'),
+		('struct', 'BaseLangSysRecord', 'BaseLangSysCount', 0, 'Array of BaseLangSysRecords-in alphabetical order by BaseLangSysTag'),
+	]),
+
+	('BaseLangSysRecord', [
+		('Tag', 'BaseLangSysTag', None, None, '4-byte language system identification tag'),
+		('Offset', 'MinMax', None, None, 'Offset to MinMax table-from beginning of BaseScript table'),
+	]),
+
+	('BaseValues', [
+		('uint16', 'DefaultIndex', None, None, 'Index number of default baseline for this script-equals index position of baseline tag in BaselineArray of the BaseTagList'),
+		('uint16', 'BaseCoordCount', None, None, 'Number of BaseCoord tables defined-should equal BaseTagCount in the BaseTagList'),
+		('Offset', 'BaseCoord', 'BaseCoordCount', 0, 'Array of offsets to BaseCoord-from beginning of BaseValues table-order matches BaselineTag array in the BaseTagList'),
+	]),
+
+	('MinMax', [
+		('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from the beginning of MinMax table-may be NULL'),
+		('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from the beginning of MinMax table-may be NULL'),
+		('uint16', 'FeatMinMaxCount', None, None, 'Number of FeatMinMaxRecords-may be zero (0)'),
+		('struct', 'FeatMinMaxRecord', 'FeatMinMaxCount', 0, 'Array of FeatMinMaxRecords-in alphabetical order, by FeatureTableTag'),
+	]),
+
+	('FeatMinMaxRecord', [
+		('Tag', 'FeatureTableTag', None, None, '4-byte feature identification tag-must match FeatureTag in FeatureList'),
+		('Offset', 'MinCoord', None, None, 'Offset to BaseCoord table-defines minimum extent value-from beginning of MinMax table-may be NULL'),
+		('Offset', 'MaxCoord', None, None, 'Offset to BaseCoord table-defines maximum extent value-from beginning of MinMax table-may be NULL'),
+	]),
+
+	('BaseCoordFormat1', [
+		('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 1'),
+		('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
+	]),
+
+	('BaseCoordFormat2', [
+		('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 2'),
+		('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
+		('GlyphID', 'ReferenceGlyph', None, None, 'GlyphID of control glyph'),
+		('uint16', 'BaseCoordPoint', None, None, 'Index of contour point on the ReferenceGlyph'),
+	]),
+
+	('BaseCoordFormat3', [
+		('uint16', 'BaseCoordFormat', None, None, 'Format identifier-format = 3'),
+		('int16', 'Coordinate', None, None, 'X or Y value, in design units'),
+		('Offset', 'DeviceTable', None, None, 'Offset to Device table for X or Y value'),
+	]),
+
+
+	#
+	# jstf
+	#
+
+	('JSTF', [
+		('Version', 'Version', None, None, 'Version of the JSTF table-initially set to 0x00010000'),
+		('uint16', 'JstfScriptCount', None, None, 'Number of JstfScriptRecords in this table'),
+		('struct', 'JstfScriptRecord', 'JstfScriptCount', 0, 'Array of JstfScriptRecords-in alphabetical order, by JstfScriptTag'),
+	]),
+
+	('JstfScriptRecord', [
+		('Tag', 'JstfScriptTag', None, None, '4-byte JstfScript identification'),
+		('Offset', 'JstfScript', None, None, 'Offset to JstfScript table-from beginning of JSTF Header'),
+	]),
+
+	('JstfScript', [
+		('Offset', 'ExtenderGlyph', None, None, 'Offset to ExtenderGlyph table-from beginning of JstfScript table-may be NULL'),
+		('Offset', 'DefJstfLangSys', None, None, 'Offset to Default JstfLangSys table-from beginning of JstfScript table-may be NULL'),
+		('uint16', 'JstfLangSysCount', None, None, 'Number of JstfLangSysRecords in this table- may be zero (0)'),
+		('struct', 'JstfLangSysRecord', 'JstfLangSysCount', 0, 'Array of JstfLangSysRecords-in alphabetical order, by JstfLangSysTag'),
+	]),
+
+	('JstfLangSysRecord', [
+		('Tag', 'JstfLangSysTag', None, None, '4-byte JstfLangSys identifier'),
+		('Offset', 'JstfLangSys', None, None, 'Offset to JstfLangSys table-from beginning of JstfScript table'),
+	]),
+
+	('ExtenderGlyph', [
+		('uint16', 'GlyphCount', None, None, 'Number of Extender Glyphs in this script'),
+		('GlyphID', 'ExtenderGlyph', 'GlyphCount', 0, 'GlyphIDs-in increasing numerical order'),
+	]),
+
+	('JstfLangSys', [
+		('uint16', 'JstfPriorityCount', None, None, 'Number of JstfPriority tables'),
+		('Offset', 'JstfPriority', 'JstfPriorityCount', 0, 'Array of offsets to JstfPriority tables-from beginning of JstfLangSys table-in priority order'),
+	]),
+
+	('JstfPriority', [
+		('Offset', 'ShrinkageEnableGSUB', None, None, 'Offset to Shrinkage Enable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
+		('Offset', 'ShrinkageDisableGSUB', None, None, 'Offset to Shrinkage Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
+		('Offset', 'ShrinkageEnableGPOS', None, None, 'Offset to Shrinkage Enable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
+		('Offset', 'ShrinkageDisableGPOS', None, None, 'Offset to Shrinkage Disable JstfGPOSModList table-from beginning of JstfPriority table-may be NULL'),
+		('Offset', 'ShrinkageJstfMax', None, None, 'Offset to Shrinkage JstfMax table-from beginning of JstfPriority table -may be NULL'),
+		('Offset', 'ExtensionEnableGSUB', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'),
+		('Offset', 'ExtensionDisableGSUB', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
+		('Offset', 'ExtensionEnableGPOS', None, None, 'Offset to Extension Enable JstfGSUBModList table-may be NULL'),
+		('Offset', 'ExtensionDisableGPOS', None, None, 'Offset to Extension Disable JstfGSUBModList table-from beginning of JstfPriority table-may be NULL'),
+		('Offset', 'ExtensionJstfMax', None, None, 'Offset to Extension JstfMax table-from beginning of JstfPriority table -may be NULL'),
+	]),
+
+	('JstfGSUBModList', [
+		('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'),
+		('uint16', 'GSUBLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GSUB-in increasing numerical order'),
+	]),
+
+	('JstfGPOSModList', [
+		('uint16', 'LookupCount', None, None, 'Number of lookups for this modification'),
+		('uint16', 'GPOSLookupIndex', 'LookupCount', 0, 'Array of LookupIndex identifiers in GPOS-in increasing numerical order'),
+	]),
+
+	('JstfMax', [
+		('uint16', 'LookupCount', None, None, 'Number of lookup Indices for this modification'),
+		('Offset', 'Lookup', 'LookupCount', 0, 'Array of offsets to GPOS-type lookup tables-from beginning of JstfMax table-in design order'),
+	]),
+
+	#
+	# math
+	#
+
+	('MATH', [
+		('Version', 'Version', None, None, 'Version of the MATH table-initially set to 0x00010000.'),
+		('Offset', 'MathConstants', None, None, 'Offset to MathConstants table - from the beginning of MATH table.'),
+		('Offset', 'MathGlyphInfo', None, None, 'Offset to MathGlyphInfo table - from the beginning of MATH table.'),
+		('Offset', 'MathVariants', None, None, 'Offset to MathVariants table - from the beginning of MATH table.'),
+	]),
+
+	('MathValueRecord', [
+		('int16', 'Value', None, None, 'The X or Y value in design units.'),
+		('Offset', 'DeviceTable', None, None, 'Offset to the device table - from the beginning of parent table. May be NULL. Suggested format for device table is 1.'),
+	]),
+
+	('MathConstants', [
+		('int16', 'ScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 1. Suggested value: 80%.'),
+		('int16', 'ScriptScriptPercentScaleDown', None, None, 'Percentage of scaling down for script level 2 (ScriptScript). Suggested value: 60%.'),
+		('uint16', 'DelimitedSubFormulaMinHeight', None, None, 'Minimum height required for a delimited expression to be treated as a subformula. Suggested value: normal line height x1.5.'),
+		('uint16', 'DisplayOperatorMinHeight', None, None, 'Minimum height of n-ary operators (such as integral and summation) for formulas in display mode.'),
+		('MathValueRecord', 'MathLeading', None, None, 'White space to be left between math formulas to ensure proper line spacing. For example, for applications that treat line gap as a part of line ascender, formulas with ink  going above (os2.sTypoAscender + os2.sTypoLineGap - MathLeading) or with ink going below os2.sTypoDescender will result in increasing line height.'),
+		('MathValueRecord', 'AxisHeight', None, None, 'Axis height of the font.'),
+		('MathValueRecord', 'AccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require raising the accents. Suggested: x-height of the font (os2.sxHeight) plus any possible overshots.'),
+		('MathValueRecord', 'FlattenedAccentBaseHeight', None, None, 'Maximum (ink) height of accent base that does not require flattening the accents. Suggested: cap height of the font (os2.sCapHeight).'),
+		('MathValueRecord', 'SubscriptShiftDown', None, None, 'The standard shift down applied to subscript elements. Positive for moving in the downward direction. Suggested: os2.ySubscriptYOffset.'),
+		('MathValueRecord', 'SubscriptTopMax', None, None, 'Maximum allowed height of the (ink) top of subscripts that does not require moving subscripts further down. Suggested: 4/5 x-height.'),
+		('MathValueRecord', 'SubscriptBaselineDropMin', None, None, 'Minimum allowed drop of the baseline of subscripts relative to the (ink) bottom of the base. Checked for bases that are treated as a box or extended shape. Positive for subscript baseline dropped below the base bottom.'),
+		('MathValueRecord', 'SuperscriptShiftUp', None, None, 'Standard shift up applied to superscript elements. Suggested: os2.ySuperscriptYOffset.'),
+		('MathValueRecord', 'SuperscriptShiftUpCramped', None, None, 'Standard shift of superscripts relative to the base, in cramped style.'),
+		('MathValueRecord', 'SuperscriptBottomMin', None, None, 'Minimum allowed height of the (ink) bottom of superscripts that does not require moving subscripts further up. Suggested: 1/4 x-height.'),
+		('MathValueRecord', 'SuperscriptBaselineDropMax', None, None, 'Maximum allowed drop of the baseline of superscripts relative to the (ink) top of the base. Checked for bases that are treated as a box or extended shape. Positive for superscript baseline below the base top.'),
+		('MathValueRecord', 'SubSuperscriptGapMin', None, None, 'Minimum gap between the superscript and subscript ink. Suggested: 4x default rule thickness.'),
+		('MathValueRecord', 'SuperscriptBottomMaxWithSubscript', None, None, 'The maximum level to which the (ink) bottom of superscript can be pushed to increase the gap between superscript and subscript, before subscript starts being moved down. Suggested: 4/5 x-height.'),
+		('MathValueRecord', 'SpaceAfterScript', None, None, 'Extra white space to be added after each subscript and superscript. Suggested: 0.5pt for a 12 pt font.'),
+		('MathValueRecord', 'UpperLimitGapMin', None, None, 'Minimum gap between the (ink) bottom of the upper limit, and the (ink) top of the base operator.'),
+		('MathValueRecord', 'UpperLimitBaselineRiseMin', None, None, 'Minimum distance between baseline of upper limit and (ink) top of the base operator.'),
+		('MathValueRecord', 'LowerLimitGapMin', None, None, 'Minimum gap between (ink) top of the lower limit, and (ink) bottom of the base operator.'),
+		('MathValueRecord', 'LowerLimitBaselineDropMin', None, None, 'Minimum distance between baseline of the lower limit and (ink) bottom of the base operator.'),
+		('MathValueRecord', 'StackTopShiftUp', None, None, 'Standard shift up applied to the top element of a stack.'),
+		('MathValueRecord', 'StackTopDisplayStyleShiftUp', None, None, 'Standard shift up applied to the top element of a stack in display style.'),
+		('MathValueRecord', 'StackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack. Positive for moving in the downward direction.'),
+		('MathValueRecord', 'StackBottomDisplayStyleShiftDown', None, None, 'Standard shift down applied to the bottom element of a stack in display style. Positive for moving in the downward direction.'),
+		('MathValueRecord', 'StackGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element. Suggested: 3x default rule thickness.'),
+		('MathValueRecord', 'StackDisplayStyleGapMin', None, None, 'Minimum gap between (ink) bottom of the top element of a stack, and the (ink) top of the bottom element in display style. Suggested: 7x default rule thickness.'),
+		('MathValueRecord', 'StretchStackTopShiftUp', None, None, 'Standard shift up applied to the top element of the stretch stack.'),
+		('MathValueRecord', 'StretchStackBottomShiftDown', None, None, 'Standard shift down applied to the bottom element of the stretch stack. Positive for moving in the downward direction.'),
+		('MathValueRecord', 'StretchStackGapAboveMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) bottom of the element above. Suggested: UpperLimitGapMin'),
+		('MathValueRecord', 'StretchStackGapBelowMin', None, None, 'Minimum gap between the ink of the stretched element, and the (ink) top of the element below. Suggested: LowerLimitGapMin.'),
+		('MathValueRecord', 'FractionNumeratorShiftUp', None, None, 'Standard shift up applied to the numerator.'),
+		('MathValueRecord', 'FractionNumeratorDisplayStyleShiftUp', None, None, 'Standard shift up applied to the numerator in display style. Suggested: StackTopDisplayStyleShiftUp.'),
+		('MathValueRecord', 'FractionDenominatorShiftDown', None, None, 'Standard shift down applied to the denominator. Positive for moving in the downward direction.'),
+		('MathValueRecord', 'FractionDenominatorDisplayStyleShiftDown', None, None, 'Standard shift down applied to the denominator in display style. Positive for moving in the downward direction. Suggested: StackBottomDisplayStyleShiftDown.'),
+		('MathValueRecord', 'FractionNumeratorGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar. Suggested: default rule thickness'),
+		('MathValueRecord', 'FractionNumDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) bottom of the numerator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'),
+		('MathValueRecord', 'FractionRuleThickness', None, None, 'Thickness of the fraction bar. Suggested: default rule thickness.'),
+		('MathValueRecord', 'FractionDenominatorGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar. Suggested: default rule thickness'),
+		('MathValueRecord', 'FractionDenomDisplayStyleGapMin', None, None, 'Minimum tolerated gap between the (ink) top of the denominator and the ink of the fraction bar in display style. Suggested: 3x default rule thickness.'),
+		('MathValueRecord', 'SkewedFractionHorizontalGap', None, None, 'Horizontal distance between the top and bottom elements of a skewed fraction.'),
+		('MathValueRecord', 'SkewedFractionVerticalGap', None, None, 'Vertical distance between the ink of the top and bottom elements of a skewed fraction.'),
+		('MathValueRecord', 'OverbarVerticalGap', None, None, 'Distance between the overbar and the (ink) top of the base. Suggested: 3x default rule thickness.'),
+		('MathValueRecord', 'OverbarRuleThickness', None, None, 'Thickness of overbar. Suggested: default rule thickness.'),
+		('MathValueRecord', 'OverbarExtraAscender', None, None, 'Extra white space reserved above the overbar. Suggested: default rule thickness.'),
+		('MathValueRecord', 'UnderbarVerticalGap', None, None, 'Distance between underbar and (ink) bottom of the base. Suggested: 3x default rule thickness.'),
+		('MathValueRecord', 'UnderbarRuleThickness', None, None, 'Thickness of underbar. Suggested: default rule thickness.'),
+		('MathValueRecord', 'UnderbarExtraDescender', None, None, 'Extra white space reserved below the underbar. Always positive. Suggested: default rule thickness.'),
+		('MathValueRecord', 'RadicalVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: 1 1/4 default rule thickness.'),
+		('MathValueRecord', 'RadicalDisplayStyleVerticalGap', None, None, 'Space between the (ink) top of the expression and the bar over it. Suggested: default rule thickness + 1/4 x-height.'),
+		('MathValueRecord', 'RadicalRuleThickness', None, None, 'Thickness of the radical rule. This is the thickness of the rule in designed or constructed radical signs. Suggested: default rule thickness.'),
+		('MathValueRecord', 'RadicalExtraAscender', None, None, 'Extra white space reserved above the radical. Suggested: RadicalRuleThickness.'),
+		('MathValueRecord', 'RadicalKernBeforeDegree', None, None, 'Extra horizontal kern before the degree of a radical, if such is present. Suggested: 5/18 of em.'),
+		('MathValueRecord', 'RadicalKernAfterDegree', None, None, 'Negative kern after the degree of a radical, if such is present. Suggested: 10/18 of em.'),
+		('uint16', 'RadicalDegreeBottomRaisePercent', None, None, 'Height of the bottom of the radical degree, if such is present, in proportion to the ascender of the radical sign. Suggested: 60%.'),
+	]),
+
+	('MathGlyphInfo', [
+		('Offset', 'MathItalicsCorrectionInfo', None, None, 'Offset to MathItalicsCorrectionInfo table - from the beginning of MathGlyphInfo table.'),
+		('Offset', 'MathTopAccentAttachment', None, None, 'Offset to MathTopAccentAttachment table - from the beginning of MathGlyphInfo table.'),
+		('Offset', 'ExtendedShapeCoverage', None, None, 'Offset to coverage table for Extended Shape glyphs - from the  beginning of MathGlyphInfo table. When the left or right glyph of a box is an extended shape variant, the (ink) box (and not the default position defined by values in MathConstants table) should be used for vertical positioning purposes. May be NULL.'),
+		('Offset', 'MathKernInfo', None, None, 'Offset to MathKernInfo table - from the beginning of MathGlyphInfo table.'),
+	]),
+
+	('MathItalicsCorrectionInfo', [
+		('Offset', 'Coverage', None, None, 'Offset to Coverage table - from the beginning of MathItalicsCorrectionInfo table.'),
+		('uint16', 'ItalicsCorrectionCount', None, None, 'Number of italics correction values. Should coincide with the number of covered glyphs.'),
+		('MathValueRecord', 'ItalicsCorrection', 'ItalicsCorrectionCount', 0, 'Array of MathValueRecords defining italics correction values for each covered glyph.'),
+	]),
+
+	('MathTopAccentAttachment', [
+		('Offset', 'TopAccentCoverage', None, None, 'Offset to Coverage table - from the beginning of  MathTopAccentAttachment table.'),
+		('uint16', 'TopAccentAttachmentCount', None, None, 'Number of top accent attachment point values. Should coincide with the number of covered glyphs'),
+		('MathValueRecord', 'TopAccentAttachment', 'TopAccentAttachmentCount', 0, 'Array of MathValueRecords defining top accent attachment points for each covered glyph'),
+	]),
+
+	('MathKernInfo', [
+		('Offset', 'MathKernCoverage', None, None, 'Offset to Coverage table - from the beginning of the MathKernInfo table.'),
+		('uint16', 'MathKernCount', None, None, 'Number of MathKernInfoRecords.'),
+		('MathKernInfoRecord', 'MathKernInfoRecords', 'MathKernCount', 0, 'Array of MathKernInfoRecords, per-glyph information for mathematical positioning of subscripts and superscripts.'),
+	]),
+
+	('MathKernInfoRecord', [
+		('Offset', 'TopRightMathKern', None, None, 'Offset to MathKern table for top right corner - from the beginning of MathKernInfo table. May be NULL.'),
+		('Offset', 'TopLeftMathKern', None, None, 'Offset to MathKern table for the top left corner - from the beginning of MathKernInfo table. May be NULL.'),
+		('Offset', 'BottomRightMathKern', None, None, 'Offset to MathKern table for bottom right corner - from the beginning of MathKernInfo table. May be NULL.'),
+		('Offset', 'BottomLeftMathKern', None, None, 'Offset to MathKern table for bottom left corner - from the beginning of MathKernInfo table. May be NULL.'),
+	]),
+
+	('MathKern', [
+		('uint16', 'HeightCount', None, None, 'Number of heights on which the kern value changes.'),
+		('MathValueRecord', 'CorrectionHeight', 'HeightCount', 0, 'Array of correction heights at which the kern value changes. Sorted by the height value in design units.'),
+		('MathValueRecord', 'KernValue', 'HeightCount', 1, 'Array of kern values corresponding to heights. First value is the kern value for all heights less or equal than the first height in this table. Last value is the value to be applied for all heights greater than the last height in this table. Negative values are interpreted as move glyphs closer to each other.'),
+	]),
+
+	('MathVariants', [
+		('uint16', 'MinConnectorOverlap', None, None, 'Minimum overlap of connecting glyphs during glyph construction,  in design units.'),
+		('Offset', 'VertGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'),
+		('Offset', 'HorizGlyphCoverage', None, None, 'Offset to Coverage table - from the beginning of MathVariants table.'),
+		('uint16', 'VertGlyphCount', None, None, 'Number of glyphs for which information is provided for vertically growing variants.'),
+		('uint16', 'HorizGlyphCount', None, None, 'Number of glyphs for which information is provided for horizontally growing variants.'),
+		('Offset', 'VertGlyphConstruction', 'VertGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in vertical direction.'),
+		('Offset', 'HorizGlyphConstruction', 'HorizGlyphCount', 0, 'Array of offsets to MathGlyphConstruction tables - from the beginning of the MathVariants table, for shapes growing in horizontal direction.'),
+	]),
+
+	('MathGlyphConstruction', [
+		('Offset', 'GlyphAssembly', None, None, 'Offset to GlyphAssembly table for this shape - from the beginning of MathGlyphConstruction table. May be NULL'),
+		('uint16', 'VariantCount', None, None, 'Count of glyph growing variants for this glyph.'),
+		('MathGlyphVariantRecord', 'MathGlyphVariantRecord', 'VariantCount', 0, 'MathGlyphVariantRecords for alternative variants of the glyphs.'),
+	]),
+
+	('MathGlyphVariantRecord', [
+		('GlyphID', 'VariantGlyph', None, None, 'Glyph ID for the variant.'),
+		('uint16', 'AdvanceMeasurement', None, None, 'Advance width/height, in design units, of the variant, in the direction of requested glyph extension.'),
+	]),
+
+	('GlyphAssembly', [
+		('MathValueRecord', 'ItalicsCorrection', None, None, 'Italics correction of this GlyphAssembly. Should not depend on the assembly size.'),
+		('uint16', 'PartCount', None, None, 'Number of parts in this assembly.'),
+		('GlyphPartRecord', 'PartRecords', 'PartCount', 0, 'Array of part records, from left to right and bottom to top.'),
+	]),
+
+	('GlyphPartRecord', [
+		('GlyphID', 'glyph', None, None, 'Glyph ID for the part.'),
+		('uint16', 'StartConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the beginning of the glyph, in the direction of the extension.'),
+		('uint16', 'EndConnectorLength', None, None, 'Advance width/ height of the straight bar connector material, in design units, is at the end of the glyph, in the direction of the extension.'),
+		('uint16', 'FullAdvance', None, None, 'Full advance width/height for this part, in the direction of the extension. In design units.'),
+		('uint16', 'PartFlags', None, None, 'Part qualifiers. PartFlags enumeration currently uses only one bit: 0x0001 fExtender: If set, the part can be skipped or repeated. 0xFFFE Reserved'),
+	]),
+
+]
diff --git a/Lib/fontTools/ttLib/tables/otTables.py b/Lib/fontTools/ttLib/tables/otTables.py
new file mode 100644
index 0000000..2afc2cc
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/otTables.py
@@ -0,0 +1,785 @@
+"""fontTools.ttLib.tables.otTables -- A collection of classes representing the various
+OpenType subtables.
+
+Most are constructed upon import from data in otData.py, all are populated with
+converter objects from otConverters.py.
+"""
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from .otBase import BaseTable, FormatSwitchingBaseTable
+import operator
+import warnings
+
+
+class LookupOrder(BaseTable):
+	"""Dummy class; this table isn't defined, but is used, and is always NULL."""
+
+class FeatureParams(BaseTable):
+
+	def compile(self, writer, font):
+		assert featureParamTypes.get(writer['FeatureTag']) == self.__class__, "Wrong FeatureParams type for feature '%s': %s" % (writer['FeatureTag'], self.__class__.__name__)
+		BaseTable.compile(self, writer, font)
+
+	def toXML(self, xmlWriter, font, attrs=None, name=None):
+		BaseTable.toXML(self, xmlWriter, font, attrs, name=self.__class__.__name__)
+
+class FeatureParamsSize(FeatureParams):
+	pass
+
+class FeatureParamsStylisticSet(FeatureParams):
+	pass
+
+class FeatureParamsCharacterVariants(FeatureParams):
+	pass
+
+class Coverage(FormatSwitchingBaseTable):
+	
+	# manual implementation to get rid of glyphID dependencies
+	
+	def postRead(self, rawTable, font):
+		if self.Format == 1:
+			# TODO only allow glyphs that are valid?
+			self.glyphs = rawTable["GlyphArray"]
+		elif self.Format == 2:
+			glyphs = self.glyphs = []
+			ranges = rawTable["RangeRecord"]
+			glyphOrder = font.getGlyphOrder()
+			# Some SIL fonts have coverage entries that don't have sorted
+			# StartCoverageIndex.  If it is so, fixup and warn.  We undo
+			# this when writing font out.
+			sorted_ranges = sorted(ranges, key=lambda a: a.StartCoverageIndex)
+			if ranges != sorted_ranges:
+				warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.")
+				ranges = sorted_ranges
+			del sorted_ranges
+			for r in ranges:
+				assert r.StartCoverageIndex == len(glyphs), \
+					(r.StartCoverageIndex, len(glyphs))
+				start = r.Start
+				end = r.End
+				try:
+					startID = font.getGlyphID(start, requireReal=True)
+				except KeyError:
+					warnings.warn("Coverage table has start glyph ID out of range: %s." % start)
+					continue
+				try:
+					endID = font.getGlyphID(end, requireReal=True) + 1
+				except KeyError:
+					# Apparently some tools use 65535 to "match all" the range
+					if end != 'glyph65535':
+						warnings.warn("Coverage table has end glyph ID out of range: %s." % end)
+					# NOTE: We clobber out-of-range things here.  There are legit uses for those,
+					# but none that we have seen in the wild.
+					endID = len(glyphOrder)
+				glyphs.extend(glyphOrder[glyphID] for glyphID in range(startID, endID))
+		else:
+			assert 0, "unknown format: %s" % self.Format
+		del self.Format # Don't need this anymore
+	
+	def preWrite(self, font):
+		glyphs = getattr(self, "glyphs", None)
+		if glyphs is None:
+			glyphs = self.glyphs = []
+		format = 1
+		rawTable = {"GlyphArray": glyphs}
+		getGlyphID = font.getGlyphID
+		if glyphs:
+			# find out whether Format 2 is more compact or not
+			glyphIDs = [getGlyphID(glyphName) for glyphName in glyphs ]
+			brokenOrder = sorted(glyphIDs) != glyphIDs
+			
+			last = glyphIDs[0]
+			ranges = [[last]]
+			for glyphID in glyphIDs[1:]:
+				if glyphID != last + 1:
+					ranges[-1].append(last)
+					ranges.append([glyphID])
+				last = glyphID
+			ranges[-1].append(last)
+			
+			if brokenOrder or len(ranges) * 3 < len(glyphs):  # 3 words vs. 1 word
+				# Format 2 is more compact
+				index = 0
+				for i in range(len(ranges)):
+					start, end = ranges[i]
+					r = RangeRecord()
+					r.StartID = start
+					r.Start = font.getGlyphName(start)
+					r.End = font.getGlyphName(end)
+					r.StartCoverageIndex = index
+					ranges[i] = r
+					index = index + end - start + 1
+				if brokenOrder:
+					warnings.warn("GSUB/GPOS Coverage is not sorted by glyph ids.")
+					ranges.sort(key=lambda a: a.StartID)
+				for r in ranges:
+					del r.StartID
+				format = 2
+				rawTable = {"RangeRecord": ranges}
+			#else:
+			#	fallthrough; Format 1 is more compact
+		self.Format = format
+		return rawTable
+	
+	def toXML2(self, xmlWriter, font):
+		for glyphName in getattr(self, "glyphs", []):
+			xmlWriter.simpletag("Glyph", value=glyphName)
+			xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, font):
+		glyphs = getattr(self, "glyphs", None)
+		if glyphs is None:
+			glyphs = []
+			self.glyphs = glyphs
+		glyphs.append(attrs["value"])
+
+
+def doModulo(value):
+	if value < 0:
+		return value + 65536
+	return value
+
+class SingleSubst(FormatSwitchingBaseTable):
+
+	def postRead(self, rawTable, font):
+		mapping = {}
+		input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+		lenMapping = len(input)
+		if self.Format == 1:
+			delta = rawTable["DeltaGlyphID"]
+			inputGIDS =  [ font.getGlyphID(name) for name in input ]
+			outGIDS = [ glyphID + delta for glyphID in inputGIDS ]
+			outGIDS = map(doModulo, outGIDS)
+			outNames = [ font.getGlyphName(glyphID) for glyphID in outGIDS ]
+			list(map(operator.setitem, [mapping]*lenMapping, input, outNames))
+		elif self.Format == 2:
+			assert len(input) == rawTable["GlyphCount"], \
+					"invalid SingleSubstFormat2 table"
+			subst = rawTable["Substitute"]
+			list(map(operator.setitem, [mapping]*lenMapping, input, subst))
+		else:
+			assert 0, "unknown format: %s" % self.Format
+		self.mapping = mapping
+		del self.Format # Don't need this anymore
+	
+	def preWrite(self, font):
+		mapping = getattr(self, "mapping", None)
+		if mapping is None:
+			mapping = self.mapping = {}
+		items = list(mapping.items())
+		getGlyphID = font.getGlyphID
+		gidItems = [(getGlyphID(a), getGlyphID(b)) for a,b in items]
+		sortableItems = sorted(zip(gidItems, items))
+
+		# figure out format
+		format = 2
+		delta = None
+		for inID, outID in gidItems:
+			if delta is None:
+				delta = outID - inID
+				if delta < -32768:
+					delta += 65536
+				elif delta > 32767:
+					delta -= 65536
+			else:
+				if delta != outID - inID:
+					break
+		else:
+			format = 1
+
+		rawTable = {}
+		self.Format = format
+		cov = Coverage()
+		input =  [ item [1][0] for item in sortableItems]
+		subst =  [ item [1][1] for item in sortableItems]
+		cov.glyphs = input
+		rawTable["Coverage"] = cov
+		if format == 1:
+			assert delta is not None
+			rawTable["DeltaGlyphID"] = delta
+		else:
+			rawTable["Substitute"] = subst
+		return rawTable
+	
+	def toXML2(self, xmlWriter, font):
+		items = sorted(self.mapping.items())
+		for inGlyph, outGlyph in items:
+			xmlWriter.simpletag("Substitution",
+					[("in", inGlyph), ("out", outGlyph)])
+			xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, font):
+		mapping = getattr(self, "mapping", None)
+		if mapping is None:
+			mapping = {}
+			self.mapping = mapping
+		mapping[attrs["in"]] = attrs["out"]
+
+
+class ClassDef(FormatSwitchingBaseTable):
+	
+	def postRead(self, rawTable, font):
+		classDefs = {}
+		glyphOrder = font.getGlyphOrder()
+
+		if self.Format == 1:
+			start = rawTable["StartGlyph"]
+			classList = rawTable["ClassValueArray"]
+			try:
+				startID = font.getGlyphID(start, requireReal=True)
+			except KeyError:
+				warnings.warn("ClassDef table has start glyph ID out of range: %s." % start)
+				startID = len(glyphOrder)
+			endID = startID + len(classList)
+			if endID > len(glyphOrder):
+				warnings.warn("ClassDef table has entries for out of range glyph IDs: %s,%s." % (start, len(classList)))
+				# NOTE: We clobber out-of-range things here.  There are legit uses for those,
+				# but none that we have seen in the wild.
+				endID = len(glyphOrder)
+
+			for glyphID, cls in zip(range(startID, endID), classList):
+				classDefs[glyphOrder[glyphID]] = cls
+
+		elif self.Format == 2:
+			records = rawTable["ClassRangeRecord"]
+			for rec in records:
+				start = rec.Start
+				end = rec.End
+				cls = rec.Class
+				try:
+					startID = font.getGlyphID(start, requireReal=True)
+				except KeyError:
+					warnings.warn("ClassDef table has start glyph ID out of range: %s." % start)
+					continue
+				try:
+					endID = font.getGlyphID(end, requireReal=True) + 1
+				except KeyError:
+					# Apparently some tools use 65535 to "match all" the range
+					if end != 'glyph65535':
+						warnings.warn("ClassDef table has end glyph ID out of range: %s." % end)
+					# NOTE: We clobber out-of-range things here.  There are legit uses for those,
+					# but none that we have seen in the wild.
+					endID = len(glyphOrder)
+				for glyphID in range(startID, endID):
+					classDefs[glyphOrder[glyphID]] = cls
+		else:
+			assert 0, "unknown format: %s" % self.Format
+		self.classDefs = classDefs
+		del self.Format # Don't need this anymore
+	
+	def preWrite(self, font):
+		classDefs = getattr(self, "classDefs", None)
+		if classDefs is None:
+			classDefs = self.classDefs = {}
+		items = list(classDefs.items())
+		format = 2
+		rawTable = {"ClassRangeRecord": []}
+		getGlyphID = font.getGlyphID
+		for i in range(len(items)):
+			glyphName, cls = items[i]
+			items[i] = getGlyphID(glyphName), glyphName, cls
+		items.sort()
+		if items:
+			last, lastName, lastCls = items[0]
+			ranges = [[lastCls, last, lastName]]
+			for glyphID, glyphName, cls in items[1:]:
+				if glyphID != last + 1 or cls != lastCls:
+					ranges[-1].extend([last, lastName])
+					ranges.append([cls, glyphID, glyphName])
+				last = glyphID
+				lastName = glyphName
+				lastCls = cls
+			ranges[-1].extend([last, lastName])
+
+			startGlyph = ranges[0][1]
+			endGlyph = ranges[-1][3]
+			glyphCount = endGlyph - startGlyph + 1
+			if len(ranges) * 3 < glyphCount + 1:
+				# Format 2 is more compact
+				for i in range(len(ranges)):
+					cls, start, startName, end, endName = ranges[i]
+					rec = ClassRangeRecord()
+					rec.Start = startName
+					rec.End = endName
+					rec.Class = cls
+					ranges[i] = rec
+				format = 2
+				rawTable = {"ClassRangeRecord": ranges}
+			else:
+				# Format 1 is more compact
+				startGlyphName = ranges[0][2]
+				classes = [0] * glyphCount
+				for cls, start, startName, end, endName in ranges:
+					for g in range(start - startGlyph, end - startGlyph + 1):
+						classes[g] = cls
+				format = 1
+				rawTable = {"StartGlyph": startGlyphName, "ClassValueArray": classes}
+		self.Format = format
+		return rawTable
+	
+	def toXML2(self, xmlWriter, font):
+		items = sorted(self.classDefs.items())
+		for glyphName, cls in items:
+			xmlWriter.simpletag("ClassDef", [("glyph", glyphName), ("class", cls)])
+			xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, font):
+		classDefs = getattr(self, "classDefs", None)
+		if classDefs is None:
+			classDefs = {}
+			self.classDefs = classDefs
+		classDefs[attrs["glyph"]] = int(attrs["class"])
+
+
+class AlternateSubst(FormatSwitchingBaseTable):
+	
+	def postRead(self, rawTable, font):
+		alternates = {}
+		if self.Format == 1:
+			input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+			alts = rawTable["AlternateSet"]
+			if len(input) != len(alts):
+				assert len(input) == len(alts)
+			for i in range(len(input)):
+				alternates[input[i]] = alts[i].Alternate
+		else:
+			assert 0, "unknown format: %s" % self.Format
+		self.alternates = alternates
+		del self.Format # Don't need this anymore
+	
+	def preWrite(self, font):
+		self.Format = 1
+		alternates = getattr(self, "alternates", None)
+		if alternates is None:
+			alternates = self.alternates = {}
+		items = list(alternates.items())
+		for i in range(len(items)):
+			glyphName, set = items[i]
+			items[i] = font.getGlyphID(glyphName), glyphName, set
+		items.sort()
+		cov = Coverage()
+		cov.glyphs = [ item[1] for item in items]
+		alternates = []
+		setList = [ item[-1] for item in items]
+		for  set in setList:
+			alts = AlternateSet()
+			alts.Alternate = set
+			alternates.append(alts)
+		# a special case to deal with the fact that several hundred Adobe Japan1-5
+		# CJK fonts will overflow an offset if the coverage table isn't pushed to the end.
+		# Also useful in that when splitting a sub-table because of an offset overflow
+		# I don't need to calculate the change in the subtable offset due to the change in the coverage table size.
+		# Allows packing more rules in subtable.
+		self.sortCoverageLast = 1 
+		return {"Coverage": cov, "AlternateSet": alternates}
+	
+	def toXML2(self, xmlWriter, font):
+		items = sorted(self.alternates.items())
+		for glyphName, alternates in items:
+			xmlWriter.begintag("AlternateSet", glyph=glyphName)
+			xmlWriter.newline()
+			for alt in alternates:
+				xmlWriter.simpletag("Alternate", glyph=alt)
+				xmlWriter.newline()
+			xmlWriter.endtag("AlternateSet")
+			xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, font):
+		alternates = getattr(self, "alternates", None)
+		if alternates is None:
+			alternates = {}
+			self.alternates = alternates
+		glyphName = attrs["glyph"]
+		set = []
+		alternates[glyphName] = set
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attrs, content = element
+			set.append(attrs["glyph"])
+
+
+class LigatureSubst(FormatSwitchingBaseTable):
+	
+	def postRead(self, rawTable, font):
+		ligatures = {}
+		if self.Format == 1:
+			input = _getGlyphsFromCoverageTable(rawTable["Coverage"])
+			ligSets = rawTable["LigatureSet"]
+			assert len(input) == len(ligSets)
+			for i in range(len(input)):
+				ligatures[input[i]] = ligSets[i].Ligature
+		else:
+			assert 0, "unknown format: %s" % self.Format
+		self.ligatures = ligatures
+		del self.Format # Don't need this anymore
+	
+	def preWrite(self, font):
+		self.Format = 1
+		ligatures = getattr(self, "ligatures", None)
+		if ligatures is None:
+			ligatures = self.ligatures = {}
+		items = list(ligatures.items())
+		for i in range(len(items)):
+			glyphName, set = items[i]
+			items[i] = font.getGlyphID(glyphName), glyphName, set
+		items.sort()
+		cov = Coverage()
+		cov.glyphs = [ item[1] for item in items]
+
+		ligSets = []
+		setList = [ item[-1] for item in items ]
+		for set in setList:
+			ligSet = LigatureSet()
+			ligs = ligSet.Ligature = []
+			for lig in set:
+				ligs.append(lig)
+			ligSets.append(ligSet)
+		# Useful in that when splitting a sub-table because of an offset overflow
+		# I don't need to calculate the change in subtable offset due to the coverage table size.
+		# Allows packing more rules in subtable.
+		self.sortCoverageLast = 1 
+		return {"Coverage": cov, "LigatureSet": ligSets}
+	
+	def toXML2(self, xmlWriter, font):
+		items = sorted(self.ligatures.items())
+		for glyphName, ligSets in items:
+			xmlWriter.begintag("LigatureSet", glyph=glyphName)
+			xmlWriter.newline()
+			for lig in ligSets:
+				xmlWriter.simpletag("Ligature", glyph=lig.LigGlyph,
+					components=",".join(lig.Component))
+				xmlWriter.newline()
+			xmlWriter.endtag("LigatureSet")
+			xmlWriter.newline()
+	
+	def fromXML(self, name, attrs, content, font):
+		ligatures = getattr(self, "ligatures", None)
+		if ligatures is None:
+			ligatures = {}
+			self.ligatures = ligatures
+		glyphName = attrs["glyph"]
+		ligs = []
+		ligatures[glyphName] = ligs
+		for element in content:
+			if not isinstance(element, tuple):
+				continue
+			name, attrs, content = element
+			lig = Ligature()
+			lig.LigGlyph = attrs["glyph"]
+			lig.Component = attrs["components"].split(",")
+			ligs.append(lig)
+
+
+#
+# For each subtable format there is a class. However, we don't really distinguish
+# between "field name" and "format name": often these are the same. Yet there's
+# a whole bunch of fields with different names. The following dict is a mapping
+# from "format name" to "field name". _buildClasses() uses this to create a
+# subclass for each alternate field name.
+#
+_equivalents = {
+	# canonical class name -> alternate field names that share its structure;
+	# _buildClasses() creates one subclass per alias so converters can look
+	# the field name up as a class.
+	'MarkArray': ("Mark1Array",),
+	'LangSys': ('DefaultLangSys',),
+	'Coverage': ('MarkCoverage', 'BaseCoverage', 'LigatureCoverage', 'Mark1Coverage',
+			'Mark2Coverage', 'BacktrackCoverage', 'InputCoverage',
+			'LookAheadCoverage', 'VertGlyphCoverage', 'HorizGlyphCoverage',
+			'TopAccentCoverage', 'ExtendedShapeCoverage', 'MathKernCoverage'),
+	'ClassDef': ('ClassDef1', 'ClassDef2', 'BacktrackClassDef', 'InputClassDef',
+			'LookAheadClassDef', 'GlyphClassDef', 'MarkAttachClassDef'),
+	'Anchor': ('EntryAnchor', 'ExitAnchor', 'BaseAnchor', 'LigatureAnchor',
+			'Mark2Anchor', 'MarkAnchor'),
+	'Device': ('XPlaDevice', 'YPlaDevice', 'XAdvDevice', 'YAdvDevice',
+			'XDeviceTable', 'YDeviceTable', 'DeviceTable'),
+	'Axis': ('HorizAxis', 'VertAxis',),
+	'MinMax': ('DefaultMinMax',),
+	'BaseCoord': ('MinCoord', 'MaxCoord',),
+	'JstfLangSys': ('DefJstfLangSys',),
+	'JstfGSUBModList': ('ShrinkageEnableGSUB', 'ShrinkageDisableGSUB', 'ExtensionEnableGSUB',
+			'ExtensionDisableGSUB',),
+	'JstfGPOSModList': ('ShrinkageEnableGPOS', 'ShrinkageDisableGPOS', 'ExtensionEnableGPOS',
+			'ExtensionDisableGPOS',),
+	'JstfMax': ('ShrinkageJstfMax', 'ExtensionJstfMax',),
+	'MathKern': ('TopRightMathKern', 'TopLeftMathKern', 'BottomRightMathKern',
+			'BottomLeftMathKern'),
+	'MathGlyphConstruction': ('VertGlyphConstruction', 'HorizGlyphConstruction'),
+}
+
+#
+# OverFlow logic, to automatically create ExtensionLookups
+# XXX This should probably move to otBase.py
+#
+
+def fixLookupOverFlows(ttf, overflowRecord):
+	""" Either the offset from the LookupList to a lookup overflowed, or
+	an offset from a lookup to a subtable overflowed. 
+	The table layout is:
+	GPSO/GUSB
+		Script List
+		Feature List
+		LookUpList
+			Lookup[0] and contents
+				SubTable offset list
+					SubTable[0] and contents
+					...
+					SubTable[n] and contents
+			...
+			Lookup[n] and contents
+				SubTable offset list
+					SubTable[0] and contents
+					...
+					SubTable[n] and contents
+	If the offset to a lookup overflowed (SubTableIndex is None)
+		we must promote the *previous*	lookup to an Extension type.
+	If the offset from a lookup to subtable overflowed, then we must promote it 
+		to an Extension Lookup type.
+	"""
+	ok = 0
+	lookupIndex = overflowRecord.LookupListIndex
+	if (overflowRecord.SubTableIndex is None):
+		lookupIndex = lookupIndex - 1
+	if lookupIndex < 0:
+		return ok
+	if overflowRecord.tableType == 'GSUB':
+		extType = 7
+	elif overflowRecord.tableType == 'GPOS':
+		extType = 9
+
+	lookups = ttf[overflowRecord.tableType].table.LookupList.Lookup
+	lookup = lookups[lookupIndex]
+	# If the previous lookup is an extType, look further back. Very unlikely, but possible.
+	while lookup.SubTable[0].__class__.LookupType == extType:
+		lookupIndex = lookupIndex -1
+		if lookupIndex < 0:
+			return ok
+		lookup = lookups[lookupIndex]
+		
+	for si in range(len(lookup.SubTable)):
+		subTable = lookup.SubTable[si]
+		extSubTableClass = lookupTypes[overflowRecord.tableType][extType]
+		extSubTable = extSubTableClass()
+		extSubTable.Format = 1
+		extSubTable.ExtSubTable = subTable
+		lookup.SubTable[si] = extSubTable
+	ok = 1
+	return ok
+
+def splitAlternateSubst(oldSubTable, newSubTable, overflowRecord):
+	ok = 1
+	newSubTable.Format = oldSubTable.Format
+	if hasattr(oldSubTable, 'sortCoverageLast'):
+		newSubTable.sortCoverageLast = oldSubTable.sortCoverageLast
+	
+	oldAlts = sorted(oldSubTable.alternates.items())
+	oldLen = len(oldAlts)
+
+	if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']:
+		# Coverage table is written last. overflow is to or within the
+		# the coverage table. We will just cut the subtable in half.
+		newLen = oldLen//2
+
+	elif overflowRecord.itemName == 'AlternateSet':
+		# We just need to back up by two items 
+		# from the overflowed AlternateSet index to make sure the offset
+		# to the Coverage table doesn't overflow.
+		newLen  = overflowRecord.itemIndex - 1
+
+	newSubTable.alternates = {}
+	for i in range(newLen, oldLen):
+		item = oldAlts[i]
+		key = item[0]
+		newSubTable.alternates[key] = item[1]
+		del oldSubTable.alternates[key]
+
+
+	return ok
+
+
+def splitLigatureSubst(oldSubTable, newSubTable, overflowRecord):
+	ok = 1
+	newSubTable.Format = oldSubTable.Format
+	oldLigs = sorted(oldSubTable.ligatures.items())
+	oldLen = len(oldLigs)
+
+	if overflowRecord.itemName in [ 'Coverage', 'RangeRecord']:
+		# Coverage table is written last. overflow is to or within the
+		# the coverage table. We will just cut the subtable in half.
+		newLen = oldLen//2
+
+	elif overflowRecord.itemName == 'LigatureSet':
+		# We just need to back up by two items 
+		# from the overflowed AlternateSet index to make sure the offset
+		# to the Coverage table doesn't overflow.
+		newLen  = overflowRecord.itemIndex - 1
+
+	newSubTable.ligatures = {}
+	for i in range(newLen, oldLen):
+		item = oldLigs[i]
+		key = item[0]
+		newSubTable.ligatures[key] = item[1]
+		del oldSubTable.ligatures[key]
+
+	return ok
+
+
+splitTable = {	'GSUB': {
+					# table type -> lookup type -> split function.
+					# Commented-out entries have no split support yet;
+					# fixSubTableOverFlows returns 0 for those lookup types.
+#					1: splitSingleSubst,
+#					2: splitMultipleSubst,
+					3: splitAlternateSubst,
+					4: splitLigatureSubst,
+#					5: splitContextSubst,
+#					6: splitChainContextSubst,
+#					7: splitExtensionSubst,
+#					8: splitReverseChainSingleSubst,
+					},
+				'GPOS': {
+#					1: splitSinglePos,
+#					2: splitPairPos,
+#					3: splitCursivePos,
+#					4: splitMarkBasePos,
+#					5: splitMarkLigPos,
+#					6: splitMarkMarkPos,
+#					7: splitContextPos,
+#					8: splitChainContextPos,
+#					9: splitExtensionPos,
+					}
+
+			}
+
+def fixSubTableOverFlows(ttf, overflowRecord):
+	"""An offset has overflowed within a sub-table: divide that subtable into
+	two smaller parts so all internal offsets fit again.
+
+	An empty sibling subtable is inserted right after the overflowed one and
+	the registered split function moves part of the data into it.
+	Returns 1 on success, 0 when the lookup type has no split function
+	registered in splitTable.
+	"""
+	ok = 0
+	table = ttf[overflowRecord.tableType].table
+	lookup = table.LookupList.Lookup[overflowRecord.LookupListIndex]
+	subIndex = overflowRecord.SubTableIndex
+	subtable = lookup.SubTable[subIndex]
+
+	if hasattr(subtable, 'ExtSubTable'):
+		# We split the subtable of the Extension table, and add a new Extension table
+		# to contain the new subtable.
+
+		subTableType = subtable.ExtSubTable.__class__.LookupType
+		extSubTable = subtable
+		subtable = extSubTable.ExtSubTable
+		newExtSubTableClass = lookupTypes[overflowRecord.tableType][subtable.__class__.LookupType]
+		newExtSubTable = newExtSubTableClass()
+		newExtSubTable.Format = extSubTable.Format
+		lookup.SubTable.insert(subIndex + 1, newExtSubTable)
+
+		# The new Extension wrapper gets its own, empty subtable of the same type.
+		newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
+		newSubTable = newSubTableClass()
+		newExtSubTable.ExtSubTable = newSubTable
+	else:
+		subTableType = subtable.__class__.LookupType
+		newSubTableClass = lookupTypes[overflowRecord.tableType][subTableType]
+		newSubTable = newSubTableClass()
+		lookup.SubTable.insert(subIndex + 1, newSubTable)
+
+	if hasattr(lookup, 'SubTableCount'): # may not be defined yet.
+		lookup.SubTableCount = lookup.SubTableCount + 1
+
+	try:
+		splitFunc = splitTable[overflowRecord.tableType][subTableType]
+	except KeyError:
+		# No split support for this lookup type; caller must fix it another way.
+		return ok
+
+	ok = splitFunc(subtable, newSubTable, overflowRecord)
+	return ok
+
+# End of OverFlow logic
+
+
+def _buildClasses():
+	import re
+	from .otData import otData
+	
+	formatPat = re.compile("([A-Za-z0-9]+)Format(\d+)$")
+	namespace = globals()
+	
+	# populate module with classes
+	for name, table in otData:
+		baseClass = BaseTable
+		m = formatPat.match(name)
+		if m:
+			# XxxFormatN subtable, we only add the "base" table
+			name = m.group(1)
+			baseClass = FormatSwitchingBaseTable
+		if name not in namespace:
+			# the class doesn't exist yet, so the base implementation is used.
+			cls = type(name, (baseClass,), {})
+			namespace[name] = cls
+	
+	for base, alts in _equivalents.items():
+		base = namespace[base]
+		for alt in alts:
+			namespace[alt] = type(alt, (base,), {})
+	
+	global lookupTypes
+	lookupTypes = {
+		'GSUB': {
+			1: SingleSubst,
+			2: MultipleSubst,
+			3: AlternateSubst,
+			4: LigatureSubst,
+			5: ContextSubst,
+			6: ChainContextSubst,
+			7: ExtensionSubst,
+			8: ReverseChainSingleSubst,
+		},
+		'GPOS': {
+			1: SinglePos,
+			2: PairPos,
+			3: CursivePos,
+			4: MarkBasePos,
+			5: MarkLigPos,
+			6: MarkMarkPos,
+			7: ContextPos,
+			8: ChainContextPos,
+			9: ExtensionPos,
+		},
+	}
+	lookupTypes['JSTF'] = lookupTypes['GPOS']  # JSTF contains GPOS
+	for lookupEnum in lookupTypes.values():
+		for enum, cls in lookupEnum.items():
+			cls.LookupType = enum
+
+	global featureParamTypes
+	featureParamTypes = {
+		'size': FeatureParamsSize,
+	}
+	for i in range(1, 20+1):
+		featureParamTypes['ss%02d' % i] = FeatureParamsStylisticSet
+	for i in range(1, 99+1):
+		featureParamTypes['cv%02d' % i] = FeatureParamsCharacterVariants
+	
+	# add converters to classes
+	from .otConverters import buildConverters
+	for name, table in otData:
+		m = formatPat.match(name)
+		if m:
+			# XxxFormatN subtable, add converter to "base" table
+			name, format = m.groups()
+			format = int(format)
+			cls = namespace[name]
+			if not hasattr(cls, "converters"):
+				cls.converters = {}
+				cls.convertersByName = {}
+			converters, convertersByName = buildConverters(table[1:], namespace)
+			cls.converters[format] = converters
+			cls.convertersByName[format] = convertersByName
+		else:
+			cls = namespace[name]
+			cls.converters, cls.convertersByName = buildConverters(table, namespace)
+
+
+_buildClasses()
+
+
+def _getGlyphsFromCoverageTable(coverage):
+	if coverage is None:
+		# empty coverage table
+		return []
+	else:
+		return coverage.glyphs
diff --git a/Lib/fontTools/ttLib/tables/sbixBitmap.py b/Lib/fontTools/ttLib/tables/sbixBitmap.py
new file mode 100644
index 0000000..96da4e1
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/sbixBitmap.py
@@ -0,0 +1,104 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import readHex
+import struct
+
+
+sbixBitmapHeaderFormat = """
+	>
+	usReserved1:     H    # 00 00
+	usReserved2:     H    #       00 00
+	imageFormatTag:  4s   # e.g. "png "
+"""
+
+sbixBitmapHeaderFormatSize = sstruct.calcsize(sbixBitmapHeaderFormat)
+
+
+class Bitmap(object):
+	def __init__(self, glyphName=None, referenceGlyphName=None, usReserved1=0, usReserved2=0, imageFormatTag=None, imageData=None, rawdata=None, gid=0):
+		self.gid = gid
+		self.glyphName = glyphName
+		self.referenceGlyphName = referenceGlyphName
+		self.usReserved1 = usReserved1
+		self.usReserved2 = usReserved2
+		self.rawdata = rawdata
+		self.imageFormatTag = imageFormatTag
+		self.imageData = imageData
+
+	def decompile(self, ttFont):
+		self.glyphName = ttFont.getGlyphName(self.gid)
+		if self.rawdata is None:
+			from fontTools import ttLib
+			raise ttLib.TTLibError("No table data to decompile")
+		if len(self.rawdata) > 0:
+			if len(self.rawdata) < sbixBitmapHeaderFormatSize:
+				from fontTools import ttLib
+				#print "Bitmap %i header too short: Expected %x, got %x." % (self.gid, sbixBitmapHeaderFormatSize, len(self.rawdata))
+				raise ttLib.TTLibError("Bitmap header too short.")
+
+			sstruct.unpack(sbixBitmapHeaderFormat, self.rawdata[:sbixBitmapHeaderFormatSize], self)
+
+			if self.imageFormatTag == "dupe":
+				# bitmap is a reference to another glyph's bitmap
+				gid, = struct.unpack(">H", self.rawdata[sbixBitmapHeaderFormatSize:])
+				self.referenceGlyphName = ttFont.getGlyphName(gid)
+			else:
+				self.imageData = self.rawdata[sbixBitmapHeaderFormatSize:]
+				self.referenceGlyphName = None
+		# clean up
+		del self.rawdata
+		del self.gid
+
+	def compile(self, ttFont):
+		if self.glyphName is None:
+			from fontTools import ttLib
+			raise ttLib.TTLibError("Can't compile bitmap without glyph name")
+			# TODO: if ttFont has no maxp, cmap etc., ignore glyph names and compile by index?
+			# (needed if you just want to compile the sbix table on its own)
+		self.gid = struct.pack(">H", ttFont.getGlyphID(self.glyphName))
+		if self.imageFormatTag is None:
+			self.rawdata = ""
+		else:
+			self.rawdata = sstruct.pack(sbixBitmapHeaderFormat, self) + self.imageData
+
+	def toXML(self, xmlWriter, ttFont):
+		if self.imageFormatTag == None:
+			# TODO: ignore empty bitmaps?
+			# a bitmap entry is required for each glyph,
+			# but empty ones can be calculated at compile time
+			xmlWriter.simpletag("bitmap", glyphname=self.glyphName)
+			xmlWriter.newline()
+			return
+		xmlWriter.begintag("bitmap", format=self.imageFormatTag, glyphname=self.glyphName)
+		xmlWriter.newline()
+		#xmlWriter.simpletag("usReserved1", value=self.usReserved1)
+		#xmlWriter.newline()
+		#xmlWriter.simpletag("usReserved2", value=self.usReserved2)
+		#xmlWriter.newline()
+		if self.imageFormatTag == "dupe":
+			# format == "dupe" is apparently a reference to another glyph id.
+			xmlWriter.simpletag("ref", glyphname=self.referenceGlyphName)
+		else:
+			xmlWriter.begintag("hexdata")
+			xmlWriter.newline()
+			xmlWriter.dumphex(self.imageData)
+			xmlWriter.endtag("hexdata")
+		xmlWriter.newline()
+		xmlWriter.endtag("bitmap")
+		xmlWriter.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		#if name in ["usReserved1", "usReserved2"]:
+		#	setattr(self, name, int(attrs["value"]))
+		#elif
+		if name == "ref":
+			# bitmap is a "dupe", i.e. a reference to another bitmap.
+			# in this case imageData contains the glyph id of the reference glyph
+			# get glyph id from glyphname
+			self.imageData = struct.pack(">H", ttFont.getGlyphID(attrs["glyphname"]))
+		elif name == "hexdata":
+			self.imageData = readHex(content)
+		else:
+			from fontTools import ttLib
+			raise ttLib.TTLibError("can't handle '%s' element" % name)
diff --git a/Lib/fontTools/ttLib/tables/sbixBitmapSet.py b/Lib/fontTools/ttLib/tables/sbixBitmapSet.py
new file mode 100644
index 0000000..b5786ec
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/sbixBitmapSet.py
@@ -0,0 +1,138 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc import sstruct
+from fontTools.misc.textTools import readHex
+from .sbixBitmap import *
+import struct
+
+sbixBitmapSetHeaderFormat = """
+	>
+	size:            H    # 00 28
+	resolution:      H    #       00 48
+"""
+
+sbixBitmapOffsetEntryFormat = """
+	>
+	ulOffset:        L    # 00 00 07 E0 # Offset from start of first offset entry to each bitmap
+"""
+
+sbixBitmapSetHeaderFormatSize = sstruct.calcsize(sbixBitmapSetHeaderFormat)
+sbixBitmapOffsetEntryFormatSize = sstruct.calcsize(sbixBitmapOffsetEntryFormat)
+
+
+class BitmapSet(object):
+	def __init__(self, rawdata=None, size=0, resolution=72):
+		self.data = rawdata
+		self.size = size
+		self.resolution = resolution
+		self.bitmaps = {}
+
+	def decompile(self, ttFont):
+		if self.data is None:
+			from fontTools import ttLib
+			raise ttLib.TTLibError
+		if len(self.data) < sbixBitmapSetHeaderFormatSize:
+			from fontTools import ttLib
+			raise(ttLib.TTLibError, "BitmapSet header too short: Expected %x, got %x.") \
+				% (sbixBitmapSetHeaderFormatSize, len(self.data))
+
+		# read BitmapSet header from raw data
+		sstruct.unpack(sbixBitmapSetHeaderFormat, self.data[:sbixBitmapSetHeaderFormatSize], self)
+
+		# calculate number of bitmaps
+		firstBitmapOffset, = struct.unpack(">L", \
+			self.data[sbixBitmapSetHeaderFormatSize : sbixBitmapSetHeaderFormatSize + sbixBitmapOffsetEntryFormatSize])
+		self.numBitmaps = (firstBitmapOffset - sbixBitmapSetHeaderFormatSize) // sbixBitmapOffsetEntryFormatSize - 1
+		# ^ -1 because there's one more offset than bitmaps
+
+		# build offset list for single bitmap offsets
+		self.bitmapOffsets = []
+		for i in range(self.numBitmaps + 1): # + 1 because there's one more offset than bitmaps
+			start = i * sbixBitmapOffsetEntryFormatSize + sbixBitmapSetHeaderFormatSize
+			myOffset, = struct.unpack(">L", self.data[start : start + sbixBitmapOffsetEntryFormatSize])
+			self.bitmapOffsets.append(myOffset)
+
+		# iterate through offset list and slice raw data into bitmaps
+		for i in range(self.numBitmaps):
+			myBitmap = Bitmap(rawdata=self.data[self.bitmapOffsets[i] : self.bitmapOffsets[i+1]], gid=i)
+			myBitmap.decompile(ttFont)
+			self.bitmaps[myBitmap.glyphName] = myBitmap
+		del self.bitmapOffsets
+		del self.data
+
+	def compile(self, ttFont):
+		self.bitmapOffsets = ""
+		self.bitmapData = ""
+
+		glyphOrder = ttFont.getGlyphOrder()
+
+		# first bitmap starts right after the header
+		bitmapOffset = sbixBitmapSetHeaderFormatSize + sbixBitmapOffsetEntryFormatSize * (len(glyphOrder) + 1)
+		for glyphName in glyphOrder:
+			if glyphName in self.bitmaps:
+				# we have a bitmap for this glyph
+				myBitmap = self.bitmaps[glyphName]
+			else:
+				# must add empty bitmap for this glyph
+				myBitmap = Bitmap(glyphName=glyphName)
+			myBitmap.compile(ttFont)
+			myBitmap.ulOffset = bitmapOffset
+			self.bitmapData += myBitmap.rawdata
+			bitmapOffset += len(myBitmap.rawdata)
+			self.bitmapOffsets += sstruct.pack(sbixBitmapOffsetEntryFormat, myBitmap)
+
+		# add last "offset", really the end address of the last bitmap
+		dummy = Bitmap()
+		dummy.ulOffset = bitmapOffset
+		self.bitmapOffsets += sstruct.pack(sbixBitmapOffsetEntryFormat, dummy)
+
+		# bitmap sets are padded to 4 byte boundaries
+		dataLength = len(self.bitmapOffsets) + len(self.bitmapData)
+		if dataLength % 4 != 0:
+			padding = 4 - (dataLength % 4)
+		else:
+			padding = 0
+
+		# pack header
+		self.data = sstruct.pack(sbixBitmapSetHeaderFormat, self)
+		# add offset, image data and padding after header
+		self.data += self.bitmapOffsets + self.bitmapData + "\0" * padding
+
+	def toXML(self, xmlWriter, ttFont):
+		xmlWriter.begintag("bitmapSet")
+		xmlWriter.newline()
+		xmlWriter.simpletag("size", value=self.size)
+		xmlWriter.newline()
+		xmlWriter.simpletag("resolution", value=self.resolution)
+		xmlWriter.newline()
+		glyphOrder = ttFont.getGlyphOrder()
+		for i in range(len(glyphOrder)):
+			if glyphOrder[i] in self.bitmaps:
+				self.bitmaps[glyphOrder[i]].toXML(xmlWriter, ttFont)
+				# TODO: what if there are more bitmaps than glyphs?
+		xmlWriter.endtag("bitmapSet")
+		xmlWriter.newline()
+
+	def fromXML(self, name, attrs, content, ttFont):
+		if name in ["size", "resolution"]:
+			setattr(self, name, int(attrs["value"]))
+		elif name == "bitmap":
+			if "format" in attrs:
+				myFormat = attrs["format"]
+			else:
+				myFormat = None
+			if "glyphname" in attrs:
+				myGlyphName = attrs["glyphname"]
+			else:
+				from fontTools import ttLib
+				raise ttLib.TTLibError("Bitmap must have a glyph name.")
+			myBitmap = Bitmap(glyphName=myGlyphName, imageFormatTag=myFormat)
+			for element in content:
+				if isinstance(element, tuple):
+					name, attrs, content = element
+					myBitmap.fromXML(name, attrs, content, ttFont)
+					myBitmap.compile(ttFont)
+			self.bitmaps[myBitmap.glyphName] = myBitmap
+		else:
+			from fontTools import ttLib
+			raise ttLib.TTLibError("can't handle '%s' element" % name)
diff --git a/Lib/fontTools/ttLib/tables/table_API_readme.txt b/Lib/fontTools/ttLib/tables/table_API_readme.txt
new file mode 100644
index 0000000..7719201
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/table_API_readme.txt
@@ -0,0 +1,91 @@
+This folder is a subpackage of ttLib. Each module here is a 
+specialized TT/OT table converter: they can convert raw data 
+to Python objects and vice versa. Usually you don't need to 
+use the modules directly: they are imported and used 
+automatically when needed by ttLib.
+
+If you are writing your own table converter the following is
+important.
+
+The modules here have pretty strange names: this is due to the 
+fact that we need to map TT table tags (which are case sensitive) 
+to filenames (which on Mac and Win aren't case sensitive) as well 
+as to Python identifiers. The latter means it can only contain 
+[A-Za-z0-9_] and cannot start with a number. 
+
+ttLib provides functions to expand a tag into the format used here:
+
+>>> from fontTools import ttLib
+>>> ttLib.tagToIdentifier("FOO ")
+'F_O_O_'
+>>> ttLib.tagToIdentifier("cvt ")
+'_c_v_t'
+>>> ttLib.tagToIdentifier("OS/2")
+'O_S_2f_2'
+>>> ttLib.tagToIdentifier("glyf")
+'_g_l_y_f'
+>>> 
+
+And vice versa:
+
+>>> ttLib.identifierToTag("F_O_O_")
+'FOO '
+>>> ttLib.identifierToTag("_c_v_t")
+'cvt '
+>>> ttLib.identifierToTag("O_S_2f_2")
+'OS/2'
+>>> ttLib.identifierToTag("_g_l_y_f")
+'glyf'
+>>> 
+
+Eg. the 'glyf' table converter lives in a Python file called:
+
+	_g_l_y_f.py
+
+The converter itself is a class, named "table_" + expandedtag. Eg:
+
+	class table__g_l_y_f:
+		etc.
+
+Note that if you _do_ need to use such modules or classes manually, 
+there are two convenient API functions that let you find them by tag:
+
+>>> ttLib.getTableModule('glyf')
+<module 'ttLib.tables._g_l_y_f'>
+>>> ttLib.getTableClass('glyf')
+<class ttLib.tables._g_l_y_f.table__g_l_y_f at 645f400>
+>>> 
+
+You must subclass from DefaultTable.DefaultTable. It provides some default
+behavior, as well as a constructor method (__init__) that you don't need to 
+override.
+
+Your converter should minimally provide two methods:
+
+class table_F_O_O_(DefaultTable.DefaultTable): # converter for table 'FOO '
+	
+	def decompile(self, data, ttFont):
+		# 'data' is the raw table data. Unpack it into a
+		# Python data structure.
+		# 'ttFont' is a ttLib.TTFont instance, enabling you to
+		# refer to other tables. Do ***not*** keep a reference to
+		# it: it will cause a circular reference (ttFont saves 
+		# a reference to us), and that means we'll be leaking 
+		# memory. If you need to use it in other methods, just 
+		# pass it around as a method argument.
+	
+	def compile(self, ttFont):
+		# Return the raw data, as converted from the Python
+		# data structure. 
+		# Again, 'ttFont' is there so you can access other tables.
+		# Same warning applies.
+
+If you want to support TTX import/export as well, you need to provide two
+additional methods:
+
+	def toXML(self, writer, ttFont):
+		# XXX
+	
+	def fromXML(self, name, attrs, content, ttFont):
+		# XXX
+
diff --git a/Lib/fontTools/ttLib/tables/ttProgram.py b/Lib/fontTools/ttLib/tables/ttProgram.py
new file mode 100644
index 0000000..03f778e
--- /dev/null
+++ b/Lib/fontTools/ttLib/tables/ttProgram.py
@@ -0,0 +1,461 @@
+"""ttLib.tables.ttProgram.py -- Assembler/disassembler for TrueType bytecode programs."""
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.misc.textTools import num2binary, binary2num, readHex
+import array
+import re
+
+# first, the list of instructions that eat bytes or words from the instruction stream
+
# Push-family instructions: opcodes that consume extra bytes/words directly
# from the instruction stream.  Tuple layout: (opcode, mnemonic, argBits,
# descriptive name, pops, pushes); pushes == -1 means "variable count".
streamInstructions = [
#	------  -----------  -----  ------------------------ ---  ------  ----------------------------------  --------------
#	opcode     mnemonic argBits         descriptive name pops pushes        eats from instruction stream          pushes
#	------  -----------  -----  ------------------------ ---  ------  ----------------------------------  --------------
	(0x40,    'NPUSHB',     0,             'PushNBytes',  0, -1), #                      n, b1, b2,...bn      b1,b2...bn
	(0x41,    'NPUSHW',     0,             'PushNWords',  0, -1), #                      n, w1, w2,...wn      w1,w2...wn
	(0xb0,     'PUSHB',     3,              'PushBytes',  0, -1), #                          b0, b1,..bn  b0, b1, ...,bn
	(0xb8,     'PUSHW',     3,              'PushWords',  0, -1), #                           w0,w1,..wn   w0 ,w1, ...wn
#	------  -----------  -----  ------------------------ ---  ------  ----------------------------------  --------------
]
+
+
+# next, the list of "normal" instructions
+
# "Normal" (non-push) instructions.  Same tuple layout as streamInstructions:
# (opcode, mnemonic, argBits, descriptive name, pops, pushes); pops == -1
# means "variable count" (e.g. loop-controlled).  When argBits > 0 the low
# bits of the opcode encode a flag argument, so the instruction occupies the
# opcode range [opcode, opcode + 2**argBits).
# NOTE(review): 'LessThenOrEqual' below is a typo for 'LessThanOrEqual'; the
# descriptive-name column is unused by this module, so it is left as-is here.
instructions = [
#	------  -----------  -----  ------------------------ ---  ------  ----------------------------------  --------------
#	opcode     mnemonic  argBits        descriptive name pops pushes                                pops          pushes
#	------  -----------  -----  ------------------------ ---  ------  ----------------------------------  --------------
	(0x7f,        'AA',     0,            'AdjustAngle',  1,  0), #                                    p               -
	(0x64,       'ABS',     0,               'Absolute',  1,  1), #                                    n             |n|
	(0x60,       'ADD',     0,                    'Add',  2,  1), #                               n2, n1       (n1 + n2)
	(0x27,  'ALIGNPTS',     0,               'AlignPts',  2,  0), #                               p2, p1               -
	(0x3c,   'ALIGNRP',     0,        'AlignRelativePt', -1,  0), #             p1, p2, ... , ploopvalue               -
	(0x5a,       'AND',     0,             'LogicalAnd',  2,  1), #                               e2, e1               b
	(0x2b,      'CALL',     0,           'CallFunction',  1,  0), #                                    f               -
	(0x67,   'CEILING',     0,                'Ceiling',  1,  1), #                                    n         ceil(n)
	(0x25,    'CINDEX',     0,        'CopyXToTopStack',  1,  1), #                                    k              ek
	(0x22,     'CLEAR',     0,             'ClearStack', -1,  0), #               all items on the stack               -
	(0x4f,     'DEBUG',     0,              'DebugCall',  1,  0), #                                    n               -
	(0x73,   'DELTAC1',     0,       'DeltaExceptionC1', -1,  0), #    argn, cn, argn-1,cn-1, , arg1, c1               -
	(0x74,   'DELTAC2',     0,       'DeltaExceptionC2', -1,  0), #    argn, cn, argn-1,cn-1, , arg1, c1               -
	(0x75,   'DELTAC3',     0,       'DeltaExceptionC3', -1,  0), #    argn, cn, argn-1,cn-1, , arg1, c1               -
	(0x5d,   'DELTAP1',     0,       'DeltaExceptionP1', -1,  0), #   argn, pn, argn-1, pn-1, , arg1, p1               -
	(0x71,   'DELTAP2',     0,       'DeltaExceptionP2', -1,  0), #   argn, pn, argn-1, pn-1, , arg1, p1               -
	(0x72,   'DELTAP3',     0,       'DeltaExceptionP3', -1,  0), #   argn, pn, argn-1, pn-1, , arg1, p1               -
	(0x24,     'DEPTH',     0,          'GetDepthStack',  0,  1), #                                    -               n
	(0x62,       'DIV',     0,                 'Divide',  2,  1), #                               n2, n1   (n1 * 64)/ n2
	(0x20,       'DUP',     0,      'DuplicateTopStack',  1,  2), #                                    e            e, e
	(0x59,       'EIF',     0,                  'EndIf',  0,  0), #                                    -               -
	(0x1b,      'ELSE',     0,                   'Else',  0,  0), #                                    -               -
	(0x2d,      'ENDF',     0,  'EndFunctionDefinition',  0,  0), #                                    -               -
	(0x54,        'EQ',     0,                  'Equal',  2,  1), #                               e2, e1               b
	(0x57,      'EVEN',     0,                   'Even',  1,  1), #                                    e               b
	(0x2c,      'FDEF',     0,     'FunctionDefinition',  1,  0), #                                    f               -
	(0x4e,   'FLIPOFF',     0,         'SetAutoFlipOff',  0,  0), #                                    -               -
	(0x4d,    'FLIPON',     0,          'SetAutoFlipOn',  0,  0), #                                    -               -
	(0x80,    'FLIPPT',     0,              'FlipPoint', -1,  0), #              p1, p2, ..., ploopvalue               -
	(0x82, 'FLIPRGOFF',     0,           'FlipRangeOff',  2,  0), #                                 h, l               -
	(0x81,  'FLIPRGON',     0,            'FlipRangeOn',  2,  0), #                                 h, l               -
	(0x66,     'FLOOR',     0,                  'Floor',  1,  1), #                                    n        floor(n)
	(0x46,        'GC',     1,      'GetCoordOnPVector',  1,  1), #                                    p               c
	(0x88,   'GETINFO',     0,                'GetInfo',  1,  1), #                             selector          result
	(0x0d,       'GFV',     0,             'GetFVector',  0,  2), #                                    -          px, py
	(0x0c,       'GPV',     0,             'GetPVector',  0,  2), #                                    -          px, py
	(0x52,        'GT',     0,            'GreaterThan',  2,  1), #                               e2, e1               b
	(0x53,      'GTEQ',     0,     'GreaterThanOrEqual',  2,  1), #                               e2, e1               b
	(0x89,      'IDEF',     0,  'InstructionDefinition',  1,  0), #                                    f               -
	(0x58,        'IF',     0,                     'If',  1,  0), #                                    e               -
	(0x8e,  'INSTCTRL',     0,    'SetInstrExecControl',  2,  0), #                                 s, v               -
	(0x39,        'IP',     0,         'InterpolatePts', -1,  0), #             p1, p2, ... , ploopvalue               -
	(0x0f,     'ISECT',     0,      'MovePtToIntersect',  5,  0), #                    a1, a0, b1, b0, p               -
	(0x30,       'IUP',     1,      'InterpolateUntPts',  0,  0), #                                    -               -
	(0x1c,      'JMPR',     0,                   'Jump',  1,  0), #                               offset               -
	(0x79,      'JROF',     0,    'JumpRelativeOnFalse',  2,  0), #                            e, offset               -
	(0x78,      'JROT',     0,     'JumpRelativeOnTrue',  2,  0), #                            e, offset               -
	(0x2a,  'LOOPCALL',     0,    'LoopAndCallFunction',  2,  0), #                             f, count               -
	(0x50,        'LT',     0,               'LessThan',  2,  1), #                               e2, e1               b
	(0x51,      'LTEQ',     0,        'LessThenOrEqual',  2,  1), #                               e2, e1               b
	(0x8b,       'MAX',     0,                'Maximum',  2,  1), #                               e2, e1     max(e1, e2)
	(0x49,        'MD',     1,        'MeasureDistance',  2,  1), #                                p2,p1               d
	(0x2e,      'MDAP',     1,        'MoveDirectAbsPt',  1,  0), #                                    p               -
	(0xc0,      'MDRP',     5,        'MoveDirectRelPt',  1,  0), #                                    p               -
	(0x3e,      'MIAP',     1,      'MoveIndirectAbsPt',  2,  0), #                                 n, p               -
	(0x8c,       'MIN',     0,                'Minimum',  2,  1), #                               e2, e1     min(e1, e2)
	(0x26,    'MINDEX',     0,        'MoveXToTopStack',  1,  1), #                                    k              ek
	(0xe0,      'MIRP',     5,      'MoveIndirectRelPt',  2,  0), #                                 n, p               -
	(0x4b,     'MPPEM',     0,      'MeasurePixelPerEm',  0,  1), #                                    -            ppem
	(0x4c,       'MPS',     0,       'MeasurePointSize',  0,  1), #                                    -       pointSize
	(0x3a,     'MSIRP',     1,    'MoveStackIndirRelPt',  2,  0), #                                 d, p               -
	(0x63,       'MUL',     0,               'Multiply',  2,  1), #                               n2, n1    (n1 * n2)/64
	(0x65,       'NEG',     0,                 'Negate',  1,  1), #                                    n              -n
	(0x55,       'NEQ',     0,               'NotEqual',  2,  1), #                               e2, e1               b
	(0x5c,       'NOT',     0,             'LogicalNot',  1,  1), #                                    e       ( not e )
	(0x6c,    'NROUND',     2,                'NoRound',  1,  1), #                                   n1              n2
	(0x56,       'ODD',     0,                    'Odd',  1,  1), #                                    e               b
	(0x5b,        'OR',     0,              'LogicalOr',  2,  1), #                               e2, e1               b
	(0x21,       'POP',     0,            'PopTopStack',  1,  0), #                                    e               -
	(0x45,      'RCVT',     0,                'ReadCVT',  1,  1), #                             location           value
	(0x7d,      'RDTG',     0,        'RoundDownToGrid',  0,  0), #                                    -               -
	(0x7a,      'ROFF',     0,               'RoundOff',  0,  0), #                                    -               -
	(0x8a,      'ROLL',     0,      'RollTopThreeStack',  3,  3), #                                a,b,c           b,a,c
	(0x68,     'ROUND',     2,                  'Round',  1,  1), #                                   n1              n2
	(0x43,        'RS',     0,              'ReadStore',  1,  1), #                                    n               v
	(0x3d,      'RTDG',     0,      'RoundToDoubleGrid',  0,  0), #                                    -               -
	(0x18,       'RTG',     0,            'RoundToGrid',  0,  0), #                                    -               -
	(0x19,      'RTHG',     0,        'RoundToHalfGrid',  0,  0), #                                    -               -
	(0x7c,      'RUTG',     0,          'RoundUpToGrid',  0,  0), #                                    -               -
	(0x77,  'S45ROUND',     0,    'SuperRound45Degrees',  1,  0), #                                    n               -
	(0x7e,     'SANGW',     0,         'SetAngleWeight',  1,  0), #                               weight               -
	(0x85,  'SCANCTRL',     0,  'ScanConversionControl',  1,  0), #                                    n               -
	(0x8d,  'SCANTYPE',     0,               'ScanType',  1,  0), #                                    n               -
	(0x48,      'SCFS',     0,    'SetCoordFromStackFP',  2,  0), #                                 c, p               -
	(0x1d,    'SCVTCI',     0,            'SetCVTCutIn',  1,  0), #                                    n               -
	(0x5e,       'SDB',     0,   'SetDeltaBaseInGState',  1,  0), #                                    n               -
	(0x86,    'SDPVTL',     1,   'SetDualPVectorToLine',  2,  0), #                               p2, p1               -
	(0x5f,       'SDS',     0,  'SetDeltaShiftInGState',  1,  0), #                                    n               -
	(0x0b,     'SFVFS',     0,    'SetFVectorFromStack',  2,  0), #                                 y, x               -
	(0x04,    'SFVTCA',     1,       'SetFVectorToAxis',  0,  0), #                                    -               -
	(0x08,     'SFVTL',     1,       'SetFVectorToLine',  2,  0), #                               p2, p1               -
	(0x0e,    'SFVTPV',     0,    'SetFVectorToPVector',  0,  0), #                                    -               -
	(0x34,       'SHC',     1,   'ShiftContourByLastPt',  1,  0), #                                    c               -
	(0x32,       'SHP',     1,  'ShiftPointByLastPoint', -1,  0), #              p1, p2, ..., ploopvalue               -
	(0x38,     'SHPIX',     0,       'ShiftZoneByPixel', -1,  0), #           d, p1, p2, ..., ploopvalue               -
	(0x36,       'SHZ',     1,   'ShiftZoneByLastPoint',  1,  0), #                                    e               -
	(0x17,     'SLOOP',     0,        'SetLoopVariable',  1,  0), #                                    n               -
	(0x1a,       'SMD',     0,     'SetMinimumDistance',  1,  0), #                             distance               -
	(0x0a,     'SPVFS',     0,    'SetPVectorFromStack',  2,  0), #                                 y, x               -
	(0x02,    'SPVTCA',     1,       'SetPVectorToAxis',  0,  0), #                                    -               -
	(0x06,     'SPVTL',     1,       'SetPVectorToLine',  2,  0), #                               p2, p1               -
	(0x76,    'SROUND',     0,             'SuperRound',  1,  0), #                                    n               -
	(0x10,      'SRP0',     0,           'SetRefPoint0',  1,  0), #                                    p               -
	(0x11,      'SRP1',     0,           'SetRefPoint1',  1,  0), #                                    p               -
	(0x12,      'SRP2',     0,           'SetRefPoint2',  1,  0), #                                    p               -
	(0x1f,       'SSW',     0,         'SetSingleWidth',  1,  0), #                                    n               -
	(0x1e,     'SSWCI',     0,    'SetSingleWidthCutIn',  1,  0), #                                    n               -
	(0x61,       'SUB',     0,               'Subtract',  2,  1), #                               n2, n1       (n1 - n2)
	(0x00,     'SVTCA',     1,      'SetFPVectorToAxis',  0,  0), #                                    -               -
	(0x23,      'SWAP',     0,           'SwapTopStack',  2,  2), #                               e2, e1          e1, e2
	(0x13,      'SZP0',     0,        'SetZonePointer0',  1,  0), #                                    n               -
	(0x14,      'SZP1',     0,        'SetZonePointer1',  1,  0), #                                    n               -
	(0x15,      'SZP2',     0,        'SetZonePointer2',  1,  0), #                                    n               -
	(0x16,      'SZPS',     0,        'SetZonePointerS',  1,  0), #                                    n               -
	(0x29,       'UTP',     0,              'UnTouchPt',  1,  0), #                                    p               -
	(0x70,     'WCVTF',     0,       'WriteCVTInFUnits',  2,  0), #                                 n, l               -
	(0x44,     'WCVTP',     0,       'WriteCVTInPixels',  2,  0), #                                 v, l               -
	(0x42,        'WS',     0,             'WriteStore',  2,  0), #                                 v, l               -
#	------  -----------  -----  ------------------------ ---  ------  ----------------------------------  --------------
]
+
+
def bitRepr(value, bits):
	"""Return *value* as a binary string of exactly *bits* digits, MSB first.

	An empty string is returned when bits == 0; only the low *bits* bits
	of *value* are considered.
	"""
	digits = []
	for shift in range(bits - 1, -1, -1):
		digits.append("01"[(value >> shift) & 1])
	return "".join(digits)
+
+
+_mnemonicPat = re.compile("[A-Z][A-Z0-9]*$")
+
+def _makeDict(instructionList):
+	opcodeDict = {}
+	mnemonicDict = {}
+	for op, mnemonic, argBits, name, pops, pushes in instructionList:
+		assert _mnemonicPat.match(mnemonic)
+		mnemonicDict[mnemonic] = op, argBits
+		if argBits:
+			argoffset = op
+			for i in range(1 << argBits):
+				opcodeDict[op+i] = mnemonic, argBits, argoffset
+		else:
+				opcodeDict[op] = mnemonic, 0, 0
+	return opcodeDict, mnemonicDict
+
# Module-level lookup tables, built once at import time from the two
# instruction tables above.
streamOpcodeDict, streamMnemonicDict = _makeDict(streamInstructions)
opcodeDict, mnemonicDict = _makeDict(instructions)
+
class tt_instructions_error(Exception):
	"""Raised when a TrueType program cannot be parsed or assembled."""

	def __init__(self, error):
		# Keep the offending detail; it is interpolated into __str__.
		self.error = error

	def __str__(self):
		return "TT instructions error: %s" % repr(self.error)
+
+
+_comment = r"/\*.*?\*/"
+_instruction = r"([A-Z][A-Z0-9]*)\s*\[(.*?)\]"
+_number = r"-?[0-9]+"
+_token = "(%s)|(%s)|(%s)" % (_instruction, _number, _comment)
+
+_tokenRE = re.compile(_token)
+_whiteRE = re.compile(r"\s*")
+
+_pushCountPat = re.compile(r"[A-Z][A-Z0-9]*\s*\[.*?\]\s*/\* ([0-9]*).*?\*/")
+
+
+def _skipWhite(data, pos):
+	m = _whiteRE.match(data, pos)
+	newPos = m.regs[0][1]
+	assert newPos >= pos
+	return newPos
+
+
class Program(object):

	"""A TrueType bytecode program (glyph program, 'prep' or 'fpgm').

	The program is held in exactly one of two representations:
	``self.bytecode`` (an array.array of unsigned bytes) or
	``self.assembly`` (a string, or a list of token strings).  Conversion
	between the two happens lazily in getBytecode()/getAssembly().
	"""

	def __init__(self):
		pass

	def fromBytecode(self, bytecode):
		"""Initialize from raw bytecode (a byte string)."""
		self.bytecode = array.array("B", bytecode)
		# Any cached assembly no longer matches the new bytecode; drop it.
		if hasattr(self, "assembly"):
			del self.assembly

	def fromAssembly(self, assembly):
		"""Initialize from assembly (a string or list of token strings)."""
		self.assembly = assembly
		# Any cached bytecode no longer matches the new assembly; drop it.
		if hasattr(self, "bytecode"):
			del self.bytecode

	def getBytecode(self):
		"""Return the program as a byte string, assembling on demand."""
		if not hasattr(self, "bytecode"):
			self._assemble()
		return self.bytecode.tostring()

	def getAssembly(self):
		"""Return the program as a list of assembly token strings,
		disassembling on demand."""
		if not hasattr(self, "assembly"):
			self._disassemble()
		return self.assembly

	def toXML(self, writer, ttFont):
		"""Write the program to *writer*, either as an <assembly> element
		(disassembled text) or as a <bytecode> hex dump, depending on
		ttFont.disassembleInstructions (defaults to assembly)."""
		if not hasattr (ttFont, "disassembleInstructions") or ttFont.disassembleInstructions:
			assembly = self.getAssembly()
			writer.begintag("assembly")
			writer.newline()
			i = 0
			nInstr = len(assembly)
			while i < nInstr:
				instr = assembly[i]
				writer.write(instr)
				writer.newline()
				m = _pushCountPat.match(instr)
				i = i + 1
				if m:
					# A merged push: the next m.group(1) entries of
					# 'assembly' are its values; emit them 25 per line.
					nValues = int(m.group(1))
					line = []
					j = 0
					for j in range(nValues):
						if j and not (j % 25):
							writer.write(' '.join(line))
							writer.newline()
							line = []
						line.append(assembly[i+j])
					writer.write(' '.join(line))
					writer.newline()
					i = i + j + 1
			writer.endtag("assembly")
		else:
			writer.begintag("bytecode")
			writer.newline()
			writer.dumphex(self.getBytecode())
			writer.endtag("bytecode")

	def fromXML(self, name, attrs, content, ttFont):
		"""Load the program from a parsed <assembly> or <bytecode> element."""
		if name == "assembly":
			self.fromAssembly(strjoin(content))
			# Assemble immediately so syntax errors in the XML surface
			# here; keep only the bytecode representation afterwards.
			self._assemble()
			del self.assembly
		else:
			assert name == "bytecode"
			self.fromXML(readHex(content)) if False else self.fromBytecode(readHex(content))

	def _assemble(self):
		"""Translate self.assembly into self.bytecode."""
		assembly = self.assembly
		if isinstance(assembly, type([])):
			assembly = ' '.join(assembly)
		bytecode = []
		push = bytecode.append
		lenAssembly = len(assembly)
		pos = _skipWhite(assembly, 0)
		while pos < lenAssembly:
			m = _tokenRE.match(assembly, pos)
			if m is None:
				raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos-5:pos+15])
			dummy, mnemonic, arg, number, comment = m.groups()
			pos = m.regs[0][1]
			if comment:
				continue
			
			arg = arg.strip()
			if mnemonic.startswith("INSTR"):
				# Unknown instruction
				op = int(mnemonic[5:])
				push(op)
			elif mnemonic not in ("PUSH", "NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
				# Ordinary instruction: flag bits (if any) are encoded in
				# the low bits of the opcode.
				op, argBits = mnemonicDict[mnemonic]
				if len(arg) != argBits:
					raise tt_instructions_error("Incorrect number of argument bits (%s[%s])" % (mnemonic, arg))
				if arg:
					arg = binary2num(arg)
					push(op + arg)
				else:
					push(op)
			else:
				# Push instruction: collect the numbers that follow it.
				args = []
				pos = _skipWhite(assembly, pos)
				while pos < lenAssembly:
					m = _tokenRE.match(assembly, pos)
					if m is None:
						raise tt_instructions_error("Syntax error in TT program (%s)" % assembly[pos:pos+15])
					dummy, _mnemonic, arg, number, comment = m.groups()
					if number is None and comment is None:
						break
					pos = m.regs[0][1]
					pos = _skipWhite(assembly, pos)
					if comment is not None:
						continue
					args.append(int(number))
				nArgs = len(args)
				if mnemonic == "PUSH":
					# Automatically choose the most compact representation
					nWords = 0
					while nArgs:
						# Count leading values that need a full word...
						while nWords < nArgs and nWords < 255 and not (0 <= args[nWords] <= 255):
							nWords += 1
						# ...then the byte-sized run that follows them.
						nBytes = 0
						while nWords+nBytes < nArgs and nBytes < 255 and 0 <= args[nWords+nBytes] <= 255:
							nBytes += 1
						if nBytes < 2 and nWords + nBytes < 255 and nWords + nBytes != nArgs:
							# Will write bytes as words
							nWords += nBytes
							continue

						# Write words
						if nWords:
							if nWords <= 8:
								# PUSHW encodes (count - 1) in the opcode.
								op, argBits = streamMnemonicDict["PUSHW"]
								op = op + nWords - 1
								push(op)
							else:
								# NPUSHW takes the count as the next byte.
								op, argBits = streamMnemonicDict["NPUSHW"]
								push(op)
								push(nWords)
							for value in args[:nWords]:
								assert -32768 <= value < 32768, "PUSH value out of range %d" % value
								push((value >> 8) & 0xff)
								push(value & 0xff)

						# Write bytes
						if nBytes:
							pass  # NOTE(review): stray 'pass'; dead statement left as-is.
							if nBytes <= 8:
								op, argBits = streamMnemonicDict["PUSHB"]
								op = op + nBytes - 1
								push(op)
							else:
								op, argBits = streamMnemonicDict["NPUSHB"]
								push(op)
								push(nBytes)
							for value in args[nWords:nWords+nBytes]:
								push(value)

						nTotal = nWords + nBytes
						args = args[nTotal:]
						nArgs -= nTotal
						nWords = 0
				else:
					# Write exactly what we've been asked to
					words = mnemonic[-1] == "W"
					op, argBits = streamMnemonicDict[mnemonic]
					if mnemonic[0] != "N":
						# PUSHB/PUSHW encode (count - 1) in the opcode.
						assert nArgs <= 8, nArgs
						op = op + nArgs - 1
						push(op)
					else:
						# NPUSHB/NPUSHW take the count as the next byte.
						assert nArgs < 256
						push(op)
						push(nArgs)
					if words:
						for value in args:
							assert -32768 <= value < 32768, "PUSHW value out of range %d" % value
							push((value >> 8) & 0xff)
							push(value & 0xff)
					else:
						for value in args:
							assert 0 <= value < 256, "PUSHB value out of range %d" % value
							push(value)

			pos = _skipWhite(assembly, pos)
		
		if bytecode:
			assert max(bytecode) < 256 and min(bytecode) >= 0
		self.bytecode = array.array("B", bytecode)

	def _disassemble(self, preserve=False):
		"""Translate self.bytecode into self.assembly.

		With preserve=True each push instruction keeps its own mnemonic;
		otherwise consecutive pushes are merged into one synthetic
		PUSH[ ] token for readability.
		"""
		assembly = []
		i = 0
		bytecode = self.bytecode
		numBytecode = len(bytecode)
		while i < numBytecode:
			op = bytecode[i]
			try:
				mnemonic, argBits, argoffset = opcodeDict[op]
			except KeyError:
				if op in streamOpcodeDict:
					values = []

					# Merge consecutive PUSH operations
					while bytecode[i] in streamOpcodeDict:
						op = bytecode[i]
						mnemonic, argBits, argoffset = streamOpcodeDict[op]
						words = mnemonic[-1] == "W"
						if argBits:
							# PUSHB/PUSHW: count is encoded in the opcode.
							nValues = op - argoffset + 1
						else:
							# NPUSHB/NPUSHW: count is the next byte.
							i = i + 1
							nValues = bytecode[i]
						i = i + 1
						assert nValues > 0
						if not words:
							for j in range(nValues):
								value = bytecode[i]
								values.append(repr(value))
								i = i + 1
						else:
							for j in range(nValues):
								# cast to signed int16
								value = (bytecode[i] << 8) | bytecode[i+1]
								if value >= 0x8000:
									value = value - 0x10000
								values.append(repr(value))
								i = i + 2
						if preserve:
							break

					if not preserve:
						mnemonic = "PUSH"
					nValues = len(values)
					if nValues == 1:
						assembly.append("%s[ ]" % mnemonic)
					else:
						assembly.append("%s[ ]  /* %s values pushed */" % (mnemonic, nValues))
					assembly.extend(values)
				else:
					# Opcode in neither table: emit a raw INSTR<n> token
					# (re-assembled verbatim by _assemble).
					assembly.append("INSTR%d[ ]" % op)
					i = i + 1
			else:
				if argBits:
					# Render the flag bits back as a binary string.
					assembly.append(mnemonic + "[%s]" % num2binary(op - argoffset, argBits))
				else:
					assembly.append(mnemonic + "[ ]")
				i = i + 1
		self.assembly = assembly
+
+
+if __name__ == "__main__":
+	bc = """@;:9876543210/.-,+*)(\'&%$#"! \037\036\035\034\033\032\031\030\027\026\025\024\023\022\021\020\017\016\015\014\013\012\011\010\007\006\005\004\003\002\001\000,\001\260\030CXEj\260\031C`\260F#D#\020 \260FN\360M/\260\000\022\033!#\0213Y-,\001\260\030CX\260\005+\260\000\023K\260\024PX\261\000@8Y\260\006+\033!#\0213Y-,\001\260\030CXN\260\003%\020\362!\260\000\022M\033 E\260\004%\260\004%#Jad\260(RX!#\020\326\033\260\003%\020\362!\260\000\022YY-,\260\032CX!!\033\260\002%\260\002%I\260\003%\260\003%Ja d\260\020PX!!!\033\260\003%\260\003%I\260\000PX\260\000PX\270\377\3428!\033\260\0208!Y\033\260\000RX\260\0368!\033\270\377\3608!YYYY-,\001\260\030CX\260\005+\260\000\023K\260\024PX\271\000\000\377\3008Y\260\006+\033!#\0213Y-,N\001\212\020\261F\031CD\260\000\024\261\000F\342\260\000\025\271\000\000\377\3608\000\260\000<\260(+\260\002%\020\260\000<-,\001\030\260\000/\260\001\024\362\260\001\023\260\001\025M\260\000\022-,\001\260\030CX\260\005+\260\000\023\271\000\000\377\3408\260\006+\033!#\0213Y-,\001\260\030CXEdj#Edi\260\031Cd``\260F#D#\020 \260F\360/\260\000\022\033!! 
\212 \212RX\0213\033!!YY-,\001\261\013\012C#Ce\012-,\000\261\012\013C#C\013-,\000\260F#p\261\001F>\001\260F#p\261\002FE:\261\002\000\010\015-,\260\022+\260\002%E\260\002%Ej\260@\213`\260\002%#D!!!-,\260\023+\260\002%E\260\002%Ej\270\377\300\214`\260\002%#D!!!-,\260\000\260\022+!!!-,\260\000\260\023+!!!-,\001\260\006C\260\007Ce\012-, i\260@a\260\000\213 \261,\300\212\214\270\020\000b`+\014d#da\\X\260\003aY-,\261\000\003%EhT\260\034KPZX\260\003%E\260\003%E`h \260\004%#D\260\004%#D\033\260\003% Eh \212#D\260\003%Eh`\260\003%#DY-,\260\003% Eh \212#D\260\003%Edhe`\260\004%\260\001`#D-,\260\011CX\207!\300\033\260\022CX\207E\260\021+\260G#D\260Gz\344\033\003\212E\030i \260G#D\212\212\207 \260\240QX\260\021+\260G#D\260Gz\344\033!\260Gz\344YYY\030-, \212E#Eh`D-,EjB-,\001\030/-,\001\260\030CX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260\031C`\260F#D!\212\020\260F\366!\033!!!!Y-,\001\260\030CX\260\002%E\260\002%Ed`j\260\003%Eja \260\004%Ej \212\213e\260\004%#D\214\260\003%#D!!\033 EjD EjDY-,\001 E\260\000U\260\030CZXEh#Ei\260@\213a \260\200bj \212#a \260\003%\213e\260\004%#D\214\260\003%#D!!\033!!\260\031+Y-,\001\212\212Ed#EdadB-,\260\004%\260\004%\260\031+\260\030CX\260\004%\260\004%\260\003%\260\033+\001\260\002%C\260@T\260\002%C\260\000TZX\260\003% E\260@aDY\260\002%C\260\000T\260\002%C\260@TZX\260\004% E\260@`DYY!!!!-,\001KRXC\260\002%E#aD\033!!Y-,\001KRXC\260\002%E#`D\033!!Y-,KRXED\033!!Y-,\001 \260\003%#I\260@`\260 c \260\000RX#\260\002%8#\260\002%e8\000\212c8\033!!!!!Y\001-,KPXED\033!!Y-,\001\260\005%\020# \212\365\000\260\001`#\355\354-,\001\260\005%\020# \212\365\000\260\001a#\355\354-,\001\260\006%\020\365\000\355\354-,F#F`\212\212F# F\212`\212a\270\377\200b# \020#\212\261KK\212pE` \260\000PX\260\001a\270\377\272\213\033\260F\214Y\260\020`h\001:-, E\260\003%FRX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-, E\260\003%FPX\260\002%F ha\260\003%\260\003%?#!8\033!\021Y-,\000\260\007C\260\006C\013-,\212\020\354-,\260\014CX!\033 
F\260\000RX\270\377\3608\033\260\0208YY-, \260\000UX\270\020\000c\260\003%Ed\260\003%Eda\260\000SX\260\002\033\260@a\260\003Y%EiSXED\033!!Y\033!\260\002%E\260\002%Ead\260(QXED\033!!YY-,!!\014d#d\213\270@\000b-,!\260\200QX\014d#d\213\270 \000b\033\262\000@/+Y\260\002`-,!\260\300QX\014d#d\213\270\025Ub\033\262\000\200/+Y\260\002`-,\014d#d\213\270@\000b`#!-,KSX\260\004%\260\004%Id#Edi\260@\213a \260\200bj\260\002%\260\002%a\214\260F#D!\212\020\260F\366!\033!\212\021#\022 9/Y-,\260\002%\260\002%Id\260\300TX\270\377\3708\260\0108\033!!Y-,\260\023CX\003\033\002Y-,\260\023CX\002\033\003Y-,\260\012+#\020 <\260\027+-,\260\002%\270\377\3608\260(+\212\020# \320#\260\020+\260\005CX\300\033<Y \020\021\260\000\022\001-,KS#KQZX8\033!!Y-,\001\260\002%\020\320#\311\001\260\001\023\260\000\024\020\260\001<\260\001\026-,\001\260\000\023\260\001\260\003%I\260\003\0278\260\001\023-,KS#KQZX E\212`D\033!!Y-, 9/-"""
+	
+	p = Program()
+	p.fromBytecode(bc)
+	asm = p.getAssembly()
+	p.fromAssembly(asm)
+	print(bc == p.getBytecode())
+
diff --git a/Lib/fontTools/ttx.py b/Lib/fontTools/ttx.py
new file mode 100644
index 0000000..e0b5edd
--- /dev/null
+++ b/Lib/fontTools/ttx.py
@@ -0,0 +1,329 @@
+"""\
+usage: ttx [options] inputfile1 [... inputfileN]
+
+    TTX %s -- From OpenType To XML And Back
+
+    If an input file is a TrueType or OpenType font file, it will be
+       dumped to a TTX file (an XML-based text format).
+    If an input file is a TTX file, it will be compiled to a TrueType
+       or OpenType font file.
+
+    Output files are created so they are unique: an existing file is
+       never overwritten.
+
+    General options:
+    -h Help: print this message
+    -d <outputfolder> Specify a directory where the output files are
+       to be created.
+    -o <outputfile> Specify a file to write the output to.
+    -v Verbose: more messages will be written to stdout about what
+       is being done.
+    -q Quiet: No messages will be written to stdout about what
+       is being done.
+    -a allow virtual glyph IDs on compile or decompile.
+
+    Dump options:
+    -l List table info: instead of dumping to a TTX file, list some
+       minimal info about each table.
+    -t <table> Specify a table to dump. Multiple -t options
+       are allowed. When no -t option is specified, all tables
+       will be dumped.
+    -x <table> Specify a table to exclude from the dump. Multiple
+       -x options are allowed. -t and -x are mutually exclusive.
+    -s Split tables: save the TTX data into separate TTX files per
+       table and write one small TTX file that contains references
+       to the individual table dumps. This file can be used as
+       input to ttx, as long as the table files are in the
+       same directory.
+    -i Do NOT disassemble TT instructions: when this option is given,
+       all TrueType programs (glyph programs, the font program and the
+       pre-program) will be written to the TTX file as hex data
+       instead of assembly. This saves some time and makes the TTX
+       file smaller.
+    -z <format> Specify a bitmap data export option for EBDT:
+       {'raw', 'row', 'bitwise', 'extfile'} or for the CBDT:
+       {'raw', 'extfile'} Each option does one of the following:
+         -z raw
+            * export the bitmap data as a hex dump
+         -z row
+            * export each row as hex data
+         -z bitwise
+            * export each row as binary in an ASCII art style
+         -z extfile
+            * export the data as external files with XML references
+       If no export format is specified 'raw' format is used.
+    -e Don't ignore decompilation errors, but show a full traceback
+       and abort.
+    -y <number> Select font number for TrueType Collection,
+       starting from 0.
+
+    Compile options:
+    -m Merge with TrueType-input-file: specify a TrueType or OpenType
+       font file to be merged with the TTX file. This option is only
+       valid when at most one TTX file is specified.
+    -b Don't recalc glyph bounding boxes: use the values in the TTX
+       file as-is.
+"""
+
+
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+from fontTools.ttLib import TTFont, TTLibError
+from fontTools.misc.macCreatorType import getMacCreatorAndType
+import os
+import sys
+import getopt
+import re
+
+def usage():
+	from fontTools import version
+	print(__doc__ % version)
+	sys.exit(2)
+
+	
+numberAddedRE = re.compile("#\d+$")
+opentypeheaderRE = re.compile('''sfntVersion=['"]OTTO["']''')
+
+def makeOutputFileName(input, outputDir, extension):
+	dirName, fileName = os.path.split(input)
+	fileName, ext = os.path.splitext(fileName)
+	if outputDir:
+		dirName = outputDir
+	fileName = numberAddedRE.split(fileName)[0]
+	output = os.path.join(dirName, fileName + extension)
+	n = 1
+	while os.path.exists(output):
+		output = os.path.join(dirName, fileName + "#" + repr(n) + extension)
+		n = n + 1
+	return output
+
+
+class Options(object):
+
+	listTables = False
+	outputDir = None
+	outputFile = None
+	verbose = False
+	quiet = False
+	splitTables = False
+	disassembleInstructions = True
+	mergeFile = None
+	recalcBBoxes = True
+	allowVID = False
+	ignoreDecompileErrors = True
+	bitmapGlyphDataFormat = 'raw'
+
+	def __init__(self, rawOptions, numFiles):
+		self.onlyTables = []
+		self.skipTables = []
+		self.fontNumber = -1
+		for option, value in rawOptions:
+			# general options
+			if option == "-h":
+				from fontTools import version
+				print(__doc__ % version)
+				sys.exit(0)
+			elif option == "-d":
+				if not os.path.isdir(value):
+					print("The -d option value must be an existing directory")
+					sys.exit(2)
+				self.outputDir = value
+			elif option == "-o":
+				self.outputFile = value
+			elif option == "-v":
+				self.verbose = True
+			elif option == "-q":
+				self.quiet = True
+			# dump options
+			elif option == "-l":
+				self.listTables = True
+			elif option == "-t":
+				self.onlyTables.append(value)
+			elif option == "-x":
+				self.skipTables.append(value)
+			elif option == "-s":
+				self.splitTables = True
+			elif option == "-i":
+				self.disassembleInstructions = False
+			elif option == "-z":
+				validOptions = ('raw', 'row', 'bitwise', 'extfile')
+				if value not in validOptions:
+				print("-z does not allow %s as a format. Use %s" % (value, validOptions))
+					sys.exit(2)
+				self.bitmapGlyphDataFormat = value
+			elif option == "-y":
+				self.fontNumber = int(value)
+			# compile options
+			elif option == "-m":
+				self.mergeFile = value
+			elif option == "-b":
+				self.recalcBBoxes = False
+			elif option == "-a":
+				self.allowVID = True
+			elif option == "-e":
+				self.ignoreDecompileErrors = False
+		if self.onlyTables and self.skipTables:
+			print("-t and -x options are mutually exclusive")
+			sys.exit(2)
+		if self.mergeFile and numFiles > 1:
+			print("Must specify exactly one TTX source file when using -m")
+			sys.exit(2)
+
+
+def ttList(input, output, options):
+	ttf = TTFont(input, fontNumber=options.fontNumber, lazy=True)
+	reader = ttf.reader
+	tags = sorted(reader.keys())
+	print('Listing table info for "%s":' % input)
+	format = "    %4s  %10s  %7s  %7s"
+	print(format % ("tag ", "  checksum", " length", " offset"))
+	print(format % ("----", "----------", "-------", "-------"))
+	for tag in tags:
+		entry = reader.tables[tag]
+		checkSum = int(entry.checkSum)
+		if checkSum < 0:
+			checkSum = checkSum + 0x100000000
+		checksum = "0x%08X" % checkSum
+		print(format % (tag, checksum, entry.length, entry.offset))
+	print()
+	ttf.close()
+
+
+def ttDump(input, output, options):
+	if not options.quiet:
+		print('Dumping "%s" to "%s"...' % (input, output))
+	ttf = TTFont(input, 0, verbose=options.verbose, allowVID=options.allowVID,
+			quiet=options.quiet,
+			ignoreDecompileErrors=options.ignoreDecompileErrors,
+			fontNumber=options.fontNumber)
+	ttf.saveXML(output,
+			quiet=options.quiet,
+			tables=options.onlyTables,
+			skipTables=options.skipTables,
+			splitTables=options.splitTables,
+			disassembleInstructions=options.disassembleInstructions,
+			bitmapGlyphDataFormat=options.bitmapGlyphDataFormat)
+	ttf.close()
+
+
+def ttCompile(input, output, options):
+	if not options.quiet:
+		print('Compiling "%s" to "%s"...' % (input, output))
+	ttf = TTFont(options.mergeFile,
+			recalcBBoxes=options.recalcBBoxes,
+			verbose=options.verbose, allowVID=options.allowVID)
+	ttf.importXML(input, quiet=options.quiet)
+	ttf.save(output)
+
+	if options.verbose:
+		import time
+		print("finished at", time.strftime("%H:%M:%S", time.localtime(time.time())))
+
+
+def guessFileType(fileName):
+	base, ext = os.path.splitext(fileName)
+	try:
+		f = open(fileName, "rb")
+	except IOError:
+		return None
+	cr, tp = getMacCreatorAndType(fileName)
+	if tp in ("sfnt", "FFIL"):
+		return "TTF"
+	if ext == ".dfont":
+		return "TTF"
+	header = f.read(256)
+	head = Tag(header[:4])
+	if head == "OTTO":
+		return "OTF"
+	elif head == "ttcf":
+		return "TTC"
+	elif head in ("\0\1\0\0", "true"):
+		return "TTF"
+	elif head == "wOFF":
+		return "WOFF"
+	elif head.lower() == "<?xm":
+		# Use 'latin1' because that can't fail.
+		header = tostr(header, 'latin1')
+		if opentypeheaderRE.search(header):
+			return "OTX"
+		else:
+			return "TTX"
+	return None
+
+
+def parseOptions(args):
+	try:
+		rawOptions, files = getopt.getopt(args, "ld:o:vqht:x:sim:z:baey:")
+	except getopt.GetoptError:
+		usage()
+	
+	if not files:
+		usage()
+	
+	options = Options(rawOptions, len(files))
+	jobs = []
+	
+	for input in files:
+		tp = guessFileType(input)
+		if tp in ("OTF", "TTF", "TTC", "WOFF"):
+			extension = ".ttx"
+			if options.listTables:
+				action = ttList
+			else:
+				action = ttDump
+		elif tp == "TTX":
+			extension = ".ttf"
+			action = ttCompile
+		elif tp == "OTX":
+			extension = ".otf"
+			action = ttCompile
+		else:
+			print('Unknown file type: "%s"' % input)
+			continue
+		
+		if options.outputFile:
+			output = options.outputFile
+		else:
+			output = makeOutputFileName(input, options.outputDir, extension)
+		jobs.append((action, input, output))
+	return jobs, options
+
+
+def process(jobs, options):
+	for action, input, output in jobs:
+		action(input, output, options)
+
+
+def waitForKeyPress():
+	"""Force the DOS Prompt window to stay open so the user gets
+	a chance to see what's wrong."""
+	import msvcrt
+	print('(Hit any key to exit)')
+	while not msvcrt.kbhit():
+		pass
+
+
+def main(args):
+	jobs, options = parseOptions(args)
+	try:
+		process(jobs, options)
+	except KeyboardInterrupt:
+		print("(Cancelled.)")
+	except SystemExit:
+		if sys.platform == "win32":
+			waitForKeyPress()
+		else:
+			raise
+	except TTLibError as e:
+		print("Error:",e)
+	except:
+		if sys.platform == "win32":
+			import traceback
+			traceback.print_exc()
+			waitForKeyPress()
+		else:
+			raise
+	
+
+if __name__ == "__main__":
+	main(sys.argv[1:])
diff --git a/Lib/fontTools/unicode.py b/Lib/fontTools/unicode.py
new file mode 100644
index 0000000..b599051
--- /dev/null
+++ b/Lib/fontTools/unicode.py
@@ -0,0 +1,43 @@
+from __future__ import print_function, division, absolute_import
+from fontTools.misc.py23 import *
+
+def _makeunicodes(f):
+	import re
+	lines = iter(f.readlines())
+	unicodes = {}
+	for line in lines:
+		if not line: continue
+		num, name = line.split(';')[:2]
+		if name[0] == '<': continue # "<control>", etc.
+		num = int(num, 16)
+		unicodes[num] = name
+	return unicodes
+
+
+class _UnicodeCustom(object):
+
+	def __init__(self, f):
+		if isinstance(f, basestring):
+			f = open(f)
+		self.codes = _makeunicodes(f)
+
+	def __getitem__(self, charCode):
+		try:
+			return self.codes[charCode]
+		except KeyError:
+			return "????"
+
+class _UnicodeBuiltin(object):
+
+	def __getitem__(self, charCode):
+		import unicodedata
+		try:
+			return unicodedata.name(unichr(charCode))
+		except ValueError:
+			return "????"
+
+Unicode = _UnicodeBuiltin()
+
+def setUnicodeData(f):
+	global Unicode
+	Unicode = _UnicodeCustom(f)
diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..db16aa2
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1,13 @@
+include LICENSE.txt
+include MANIFEST.in
+include Doc/ttx.1
+include Doc/ChangeLog
+include Doc/*.txt
+include Doc/*.html
+include MetaTools/*.py
+include Windows/mcmillan.bat
+include Windows/ttx.ico
+include Windows/README.TXT
+include Windows/fonttools-win-setup.iss
+include Windows/fonttools-win-setup.txt
+include Lib/fontTools/ttLib/tables/table_API_readme.txt
diff --git a/MetaTools/buildChangeLog.py b/MetaTools/buildChangeLog.py
new file mode 100755
index 0000000..d29e379
--- /dev/null
+++ b/MetaTools/buildChangeLog.py
@@ -0,0 +1,10 @@
+#! /usr/bin/env python
+
+import os, sys
+
+fontToolsDir = os.path.dirname(os.path.dirname(os.path.normpath(
+		os.path.join(os.getcwd(), sys.argv[0]))))
+
+os.chdir(fontToolsDir)
+os.system("git2cl > Doc/ChangeLog")
+print("done.")
diff --git a/MetaTools/buildTableList.py b/MetaTools/buildTableList.py
new file mode 100755
index 0000000..1e77492
--- /dev/null
+++ b/MetaTools/buildTableList.py
@@ -0,0 +1,55 @@
+#! /usr/bin/env python
+
+import sys
+import os
+import glob
+from fontTools.ttLib import identifierToTag
+
+
+fontToolsDir = os.path.dirname(os.path.dirname(os.path.join(os.getcwd(), sys.argv[0])))
+fontToolsDir= os.path.normpath(fontToolsDir)
+tablesDir = os.path.join(fontToolsDir,
+		"Lib", "fontTools", "ttLib", "tables")
+docFile = os.path.join(fontToolsDir, "Doc", "documentation.html")
+
+names = glob.glob1(tablesDir, "*.py")
+
+modules = []
+tables = []
+for name in names:
+	try:
+		tag = identifierToTag(name[:-3])
+	except:
+		pass
+	else:
+		modules.append(name[:-3])
+		tables.append(tag.strip())
+
+modules.sort()
+tables.sort()
+
+
+file = open(os.path.join(tablesDir, "__init__.py"), "w")
+
+file.write("# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.\n")
+file.write("def _moduleFinderHint():\n")
+file.write('\t"""Dummy function to let modulefinder know what tables may be\n')
+file.write('\tdynamically imported. Generated by MetaTools/buildTableList.py.\n')
+file.write('\t"""\n')
+for module in modules:
+	file.write("\tfrom . import %s\n" % module)
+
+file.close()
+
+
+begin = "<!-- begin table list -->"
+end = "<!-- end table list -->"
+doc = open(docFile).read()
+beginPos = doc.find(begin)
+assert beginPos > 0
+beginPos = beginPos + len(begin) + 1
+endPos = doc.find(end)
+
+doc = doc[:beginPos] + ", ".join(tables[:-1]) + " and " + tables[-1] + "\n" + doc[endPos:]
+
+open(docFile, "w").write(doc)
diff --git a/MetaTools/roundTrip.py b/MetaTools/roundTrip.py
new file mode 100755
index 0000000..122b39b
--- /dev/null
+++ b/MetaTools/roundTrip.py
@@ -0,0 +1,96 @@
+#! /usr/bin/env python
+
+"""usage: ttroundtrip [options] font1 ... fontN
+
+    Dump each TT/OT font as a TTX file, compile again to TTF or OTF
+    and dump again. Then do a diff on the two TTX files. Append problems
+    and diffs to a file called "report.txt" in the current directory.
+    This is only for testing FontTools/TTX, the resulting files are
+    deleted afterwards.
+
+    This tool supports some of ttx's command line options (-i, -t
+    and -x). Specifying -t or -x implies ttx -m <originalfile> on
+    the way back.
+"""
+
+
+import sys
+import os
+import tempfile
+import getopt
+import traceback
+from fontTools import ttx
+
+class Error(Exception): pass
+
+
+def usage():
+	print(__doc__)
+	sys.exit(2)
+
+
+def roundTrip(ttFile1, options, report):
+	fn = os.path.basename(ttFile1)
+	xmlFile1 = tempfile.mktemp(".%s.ttx1" % fn)
+	ttFile2 = tempfile.mktemp(".%s" % fn)
+	xmlFile2 = tempfile.mktemp(".%s.ttx2" % fn)
+	
+	try:
+		ttx.ttDump(ttFile1, xmlFile1, options)
+		if options.onlyTables or options.skipTables:
+			options.mergeFile = ttFile1
+		ttx.ttCompile(xmlFile1, ttFile2, options)
+		options.mergeFile = None
+		ttx.ttDump(ttFile2, xmlFile2, options)
+		
+		diffcmd = 'diff -U2 -I ".*modified value\|checkSumAdjustment.*" "%s" "%s"' % (xmlFile1, xmlFile2)
+		output = os.popen(diffcmd, "r", 1)
+		lines = []
+		while True:
+			line = output.readline()
+			if not line:
+				break
+			sys.stdout.write(line)
+			lines.append(line)
+		if lines:
+			report.write("=============================================================\n")
+			report.write("  \"%s\" differs after round tripping\n" % ttFile1)
+			report.write("-------------------------------------------------------------\n")
+			report.writelines(lines)
+		else:
+			print("(TTX files are the same)")
+	finally:
+		for tmpFile in (xmlFile1, ttFile2, xmlFile2):
+			if os.path.exists(tmpFile):
+				os.remove(tmpFile)
+
+
+def main(args):
+	try:
+		rawOptions, files = getopt.getopt(args, "it:x:")
+	except getopt.GetoptError:
+		usage()
+	
+	if not files:
+		usage()
+	
+	report = open("report.txt", "a+")
+	options = ttx.Options(rawOptions, len(files))
+	for ttFile in files:
+		try:
+			roundTrip(ttFile, options, report)
+		except KeyboardInterrupt:
+			print("(Cancelled)")
+			break
+		except:
+			print("*** round tripping aborted ***")
+			traceback.print_exc()
+			report.write("=============================================================\n")
+			report.write("  An exception occurred while round tripping")
+			report.write("  \"%s\"\n" % ttFile)
+			traceback.print_exc(file=report)
+			report.write("-------------------------------------------------------------\n")
+	report.close()
+
+	
+main(sys.argv[1:])
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..c2f7d0c
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+### What is it?
+
+Quoting from [TTX/FontTools Sourceforge Project](http://sourceforge.net/projects/fonttools/) 
+> a tool to convert OpenType and TrueType fonts to and from XML. FontTools is a library for manipulating fonts, written in Python. It supports TrueType, OpenType, AFM and to an extent Type 1 and some Mac-specific formats.   
+
+### Quick start
+
+```python setup.py install```
+
+From your command line type the above command to get fonttools installed on your system.
+
+### Installation
+
+See [install.txt](https://github.com/behdad/fonttools/blob/master/Doc/install.txt) in the 'Doc' subdirectory for instructions on how to build and install TTX/FontTools from the sources.
+
+
+### Documentation
+
+#### What is TTX ?
+
+See [documentation.html](https://github.com/behdad/fonttools/blob/master/Doc/documentation.html) in the "Doc" subdirectory for TTX usage instructions and information about the TTX file format.
+
+### Community
+* https://groups.google.com/d/forum/fonttools
+
+### License
+
+See "LICENSE.txt" for licensing information.
+
+
+
+Have fun!
+
+Just van Rossum <just@letterror.com>
diff --git a/Tools/fontTools b/Tools/fontTools
new file mode 120000
index 0000000..9a21c02
--- /dev/null
+++ b/Tools/fontTools
@@ -0,0 +1 @@
+../Lib/fontTools
\ No newline at end of file
diff --git a/Tools/pyftinspect b/Tools/pyftinspect
new file mode 100755
index 0000000..f50f81b
--- /dev/null
+++ b/Tools/pyftinspect
@@ -0,0 +1,6 @@
+#! /usr/bin/env python
+
+import sys
+from fontTools import inspect
+
+inspect.main(sys.argv[1:])
diff --git a/Tools/pyftmerge b/Tools/pyftmerge
new file mode 100755
index 0000000..2479258
--- /dev/null
+++ b/Tools/pyftmerge
@@ -0,0 +1,6 @@
+#! /usr/bin/env python
+
+import sys
+from fontTools import merge
+
+merge.main(sys.argv[1:])
diff --git a/Tools/pyftsubset b/Tools/pyftsubset
new file mode 100755
index 0000000..4bc7c7c
--- /dev/null
+++ b/Tools/pyftsubset
@@ -0,0 +1,6 @@
+#! /usr/bin/env python
+
+import sys
+from fontTools import subset
+
+subset.main(sys.argv[1:])
diff --git a/Tools/ttx b/Tools/ttx
new file mode 100755
index 0000000..10b9ef0
--- /dev/null
+++ b/Tools/ttx
@@ -0,0 +1,6 @@
+#! /usr/bin/env python
+
+import sys
+from fontTools import ttx
+
+ttx.main(sys.argv[1:])
diff --git a/Windows/README.TXT b/Windows/README.TXT
new file mode 100644
index 0000000..13f1971
--- /dev/null
+++ b/Windows/README.TXT
@@ -0,0 +1,53 @@
+
+TTX 2.0 for Windows
+-------------------------
+
+Creating a Windows (9x/ME/NT/2000/XP) setup executable for TTX
+This file has been created by Adam Twardoch <list.adam@twardoch.com>
+December 14, 2004
+
+Pre-compiled versions are hosted at http://www.font.org/software/ttx/
+
+APPROACH I: Using py2exe and InnoSetup
+
+1. Install Python 2.3 for Windows: http://www.python.org/
+2. Install py2exe: http://starship.python.net/crew/theller/py2exe/
+3. Install InnoSetup 4: http://www.jrsoftware.org/
+4. Download the latest released source code of TTX/FontTools at
+   http://sourceforge.net/projects/fonttools/
+   Or alternatively grab the sources from the VCS:
+   http://fonttools.sourceforge.net/
+5. Unzip the source code of TTX/FontTools into a folder.
+6. In the folder where you unzipped TTX/FontTools, type:
+   python setup.py py2exe --icon Windows\ttx.ico --packages encodings
+7. Run Inno Setup and open Windows\fonttools-win-setup.iss
+8. In Inno Setup, select File/Compile, then Run/Run.
+
+APPROACH II: Using McMillan Installer and InnoSetup
+
+1. Install Python 2.3 for Windows: http://www.python.org/
+2. Download and unpack McMillan installer: 
+   http://py.vaults.ca/apyllo2.py/22208368
+   and put the Installer folder into your Python folder, 
+   e.g. C:\Python23\Installer
+3. Install InnoSetup 4: http://www.jrsoftware.org/
+4. Install Microsoft Visual C++ Toolkit 2003: 
+   http://msdn.microsoft.com/visualc/vctoolkit2003/
+5. Put UPX somewhere within your PATH: http://upx.sourceforge.net/
+6. Download the latest released source code of TTX/FontTools at
+   http://sourceforge.net/projects/fonttools/
+   Or alternatively grab the sources from the VCS:
+   http://fonttools.sourceforge.net/
+7. Unzip the source code of TTX/FontTools into a folder.
+8. In the folder where you unzipped TTX/FontTools, type:
+   python setup.py install -f 
+9. Edit mcmillan.bat so the paths in the file correspond to the paths in your system, 
+   and run it. 
+10.Run Inno Setup and open Windows\fonttools-win-setup.iss
+11.In Inno Setup, select File/Compile, then Run/Run.
+
+The distributable TTX Windows setup executable has been saved
+in the Output subfolder of the FontTools\Windows folder.
+
+For information on running TTX on Windows, see fonttools-win-setup.txt in this folder. 
+
diff --git a/Windows/fonttools-win-setup.iss b/Windows/fonttools-win-setup.iss
new file mode 100644
index 0000000..1227b9a
--- /dev/null
+++ b/Windows/fonttools-win-setup.iss
@@ -0,0 +1,355 @@
+;This file has been created by Adam Twardoch <adam@twardoch.com>
+;See README.TXT in this folder for instructions on building the setup
+
+[Setup]
+AppName=TTX
+AppVerName=TTX 2.0 r040926 for Windows
+AppPublisher=Just van Rossum
+AppPublisherURL=http://www.letterror.com/code/ttx/
+AppSupportURL=http://www.font.org/software/ttx/
+AppUpdatesURL=http://www.font.org/software/ttx/
+DefaultDirName={pf}\TTX
+DefaultGroupName=TTX
+AllowNoIcons=false
+LicenseFile=..\LICENSE.txt
+InfoBeforeFile=fonttools-win-setup.txt
+InfoAfterFile=..\Doc\changes.txt
+OutputBaseFilename=WinTTX2.0r040926
+AppCopyright=Copyright 1999-2004 by Just van Rossum, Letterror, The Netherlands.
+UninstallDisplayIcon={app}\ttx.ico
+
+[Tasks]
+Name: desktopicon; Description: Create a &desktop icon; GroupDescription: Additional icons:
+
+[Files]
+Source: ..\dist\ttx\*.*; DestDir: {app}; Flags: ignoreversion promptifolder
+Source: ..\LICENSE.txt; DestDir: {app}; Flags: ignoreversion promptifolder
+Source: ..\Doc\documentation.html; DestDir: {app}; Flags: ignoreversion promptifolder
+Source: ..\Doc\changes.txt; DestDir: {app}; Flags: ignoreversion promptifolder
+Source: ..\Doc\bugs.txt; DestDir: {app}; Flags: ignoreversion promptifolder
+Source: fonttools-win-setup.txt; DestDir: {app}; Flags: ignoreversion promptifolder
+Source: ttx.ico; DestDir: {app}; Flags: ignoreversion promptifolder; AfterInstall: AddFolderToPathVariable
+
+[Icons]
+Name: {userdesktop}\ttx.exe; Filename: {app}\ttx.exe; Tasks: desktopicon; IconFilename: {app}\ttx.ico; IconIndex: 0
+Name: {group}\TTX; Filename: {app}\ttx.exe; Tasks: desktopicon; IconFilename: {app}\ttx.ico; IconIndex: 0
+Name: {group}\TTX documentation; Filename: {app}\documentation.html; IconIndex: 0
+Name: {group}\Changes; Filename: {app}\changes.txt; IconIndex: 0
+Name: {group}\Bugs; Filename: {app}\bugs.txt; IconIndex: 0
+Name: {group}\License; Filename: {app}\LICENSE.txt; IconIndex: 0
+Name: {group}\Uninstall TTX; Filename: {uninstallexe}; IconIndex: 0
+Name: {reg:HKCU\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders,SendTo}\TTX; Filename: {app}\ttx.exe; WorkingDir: {reg:HKCU\Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders,SendTo}; IconFilename: {app}\ttx.ico; IconIndex: 0; MinVersion: 0,5.00.2195
+
+[_ISTool]
+EnableISX=true
+
+[Registry]
+Root: HKCR; Subkey: .ttx; ValueType: string; ValueData: {reg:HKCR\.xml,}; Flags: createvalueifdoesntexist uninsdeletekey
+
+[Code]
+
+//
+// InnoSetup Extensions Knowledge Base
+// Article 44 - Native ISX procedures for PATH modification
+// http://www13.brinkster.com/vincenzog/isxart.asp?idart=44
+// Author: Thomas Vedel
+//
+
+// Version log:
+// 03/31/2003: Initial release (thv@lr.dk)
+
+const
+  // Modification method
+  pmAddToBeginning = $1;      // Add dir to beginning of Path
+  pmAddToEnd = $2;            // Add dir to end of Path
+  pmAddAllways = $4;          // Add also if specified dir is already included in existing path
+  pmAddOnlyIfDirExists = $8;  // Add only if specified dir actually exists
+  pmRemove = $10;             // Remove dir from path
+  pmRemoveSubdirsAlso = $20;  // Remove dir and all subdirs from path
+
+  // Scope
+  psCurrentUser = 1;          // Modify path for current user
+  psAllUsers = 2;             // Modify path for all users
+
+  // Error results
+  mpOK = 0;                   // No errors
+  mpMissingRights = -1;       // User has insufficient rights
+  mpAutoexecNoWriteacc = -2;  // Autoexec can not be written (may be readonly)
+  mpBothAddAndRemove = -3;    // User has specified that dir should both be removed from and added to path
+
+
+{ Helper procedure: Split a path environment variable into individual dirnames }
+procedure SplitPath(Path: string; var Dirs: TStringList);
+var
+  pos: integer;
+  s: string;
+begin
+  Dirs.Clear;
+  s := '';
+  pos := 1;
+  while (pos<=Length(Path)) do
+  begin
+    if (Path[pos]<>';') then
+      s := s + Path[pos];
+    if ((Path[pos]=';') or (pos=Length(Path))) then
+    begin
+      s := Trim(s);
+      s := RemoveQuotes(s);
+      s := Trim(s);
+      if (s <> '') then
+        Dirs.Add(s);
+      s := '';
+    end;
+    Pos := Pos + 1;
+  end;
+end; // procedure SplitPath
+
+
+{ Helper procedure: Concatenate individual dirnames into a path environment variable }
+procedure ConcatPath(Dirs: TStringList; Quotes: boolean; var Path: string);
+var
+  Index, MaxIndex: integer;
+  s: string;
+begin
+  MaxIndex := Dirs.Count-1;
+  Path := '';
+  for Index := 0 to MaxIndex do
+  begin
+    s := Dirs.Strings[Index];
+    if ((Quotes) and (pos(' ',s) > 0)) then
+      s := AddQuotes(s);
+    Path := Path + s;
+    if (Index < MaxIndex) then
+      Path := Path + ';'
+  end;
+end; // procedure ConcatPath
+
+
+{ Helper function: Modifies path environment string }
+procedure ModifyPathString(OldPath, DirName: string; Method: integer; Quotes: boolean; var ResultPath: string);
+var
+  Dirs: TStringList;
+  DirNotInPath: Boolean;
+  i: integer;
+begin
+  // Create Dirs variable
+  Dirs := TStringList.Create;
+
+  // Remove quotes from DirName
+  DirName := Trim(DirName);
+  DirName := RemoveQuotes(DirName);
+  DirName := Trim(DirName);
+
+  // Split old path in individual directory names
+  SplitPath(OldPath, Dirs);
+
+  // Check if dir is already in path
+  DirNotInPath := True;
+  for i:=Dirs.Count-1 downto 0 do
+  begin
+    if (uppercase(Dirs.Strings[i]) = uppercase(DirName)) then
+      DirNotInPath := False;
+  end;
+
+  // Should dir be removed from existing Path?
+  if ((Method and (pmRemove or pmRemoveSubdirsAlso)) > 0) then
+  begin
+    for i:=Dirs.Count-1 downto 0 do
+    begin
+      if (((Method and pmRemoveSubdirsAlso) > 0) and (pos(uppercase(DirName)+'\', uppercase(Dirs.Strings[i])) = 1)) or
+         (((Method and (pmRemove) or (pmRemoveSubdirsAlso)) > 0) and (uppercase(DirName) = uppercase(Dirs.Strings[i])))
+      then
+        Dirs.Delete(i);
+    end;
+  end;
+
+  // Should dir be added to existing Path?
+  if ((Method and (pmAddToBeginning or pmAddToEnd)) > 0) then
+  begin
+    // Add dir to path
+    if (((Method and pmAddAllways) > 0) or DirNotInPath) then
+    begin
+      // Dir is not in path already or should be added anyway
+      if (((Method and pmAddOnlyIfDirExists) = 0) or (DirExists(DirName))) then
+      begin
+        // Dir actually exists or should be added anyway
+        if ((Method and pmAddToBeginning) > 0) then
+          Dirs.Insert(0, DirName)
+        else
+          Dirs.Append(DirName);
+      end;
+    end;
+  end;
+
+  // Concatenate directory names into one single path variable
+  ConcatPath(Dirs, Quotes, ResultPath);
+  // Finally free Dirs object
+  Dirs.Free;
+end; // ModifyPathString
+
+
+{ Helper function: Modify path on Windows 9x }
+function ModifyPath9x(DirName: string; Method: integer): integer;
+var
+  AutoexecLines: TStringList;
+  ActualLine: String;
+  PathLineNos: TStringList;
+  FirstPathLineNo: Integer;
+  OldPath, ResultPath: String;
+  LineNo, CharNo, Index: integer;
+
+  TempString: String;
+begin
+  // Expect everything to be OK
+  result := mpOK;
+
+  // Create stringslists
+  AutoexecLines := TStringList.Create;
+  PathLineNos := TStringList.Create;
+
+  // Read existing path
+  OldPath := '';
+  LoadStringFromFile('c:\Autoexec.bat', TempString);
+  AutoexecLines.Text := TempString;
+  PathLineNos.Clear;
+  // Read Autoexec line by line
+  for LineNo := 0 to AutoexecLines.Count - 1 do begin
+    ActualLine := AutoexecLines.Strings[LineNo];
+    // Check if line starts with "PATH=" after first stripping spaces and other "fill-chars"
+    if Pos('=', ActualLine) > 0 then
+    begin
+      for CharNo := Pos('=', ActualLine)-1 downto 1 do
+        if (ActualLine[CharNo]=' ') or (ActualLine[CharNo]=#9) then
+          Delete(ActualLine, CharNo, 1);
+      if Pos('@', ActualLine) = 1 then
+        Delete(ActualLine, 1, 1);
+      if (Pos('PATH=', uppercase(ActualLine))=1) or (Pos('SETPATH=', uppercase(ActualLine))=1) then
+      begin
+        // Remove 'PATH=' and add path to "OldPath" variable
+        Delete(ActualLine, 1, pos('=', ActualLine));
+        // Check if an earlier PATH variable is referenced, but there has been no previous PATH defined in Autoexec
+        if (pos('%PATH%',uppercase(ActualLine))>0) and (PathLineNos.Count=0) then
+          OldPath := ExpandConstant('{win}') + ';' + ExpandConstant('{win}')+'\COMMAND';
+        if (pos('%PATH%',uppercase(ActualLine))>0) then
+        begin
+          ActualLine := Copy(ActualLine, 1, pos('%PATH%',uppercase(ActualLine))-1) +
+                        OldPath +
+                        Copy(ActualLine, pos('%PATH%',uppercase(ActualLine))+6, Length(ActualLine));
+        end;
+        OldPath := ActualLine;
+
+        // Update list of line numbers holding path variables
+        PathLineNos.Add(IntToStr(LineNo));
+      end;
+    end;
+  end;
+
+  // Save first line number in Autoexec.bat which modifies path environment variable
+  if PathLineNos.Count > 0 then
+    FirstPathLineNo := StrToInt(PathLineNos.Strings[0])
+  else
+    FirstPathLineNo := 0;
+
+  // Modify path
+  ModifyPathString(OldPath, DirName, Method, True, ResultPath);
+
+  // Write Modified path back to Autoexec.bat
+  // First delete all existing path references from Autoexec.bat
+  Index := PathLineNos.Count-1;
+  while (Index>=0) do
+  begin
+    AutoexecLines.Delete(StrToInt(PathLineNos.Strings[Index]));
+    Index := Index-1;
+  end;
+  // Then insert new path variable into Autoexec.bat
+  AutoexecLines.Insert(FirstPathLineNo, '@PATH='+ResultPath);
+  // Delete old Autoexec.bat from disk
+  if not DeleteFile('c:\Autoexec.bat') then
+    result := mpAutoexecNoWriteAcc;
+  Sleep(500);
+  // And finally write Autoexec.bat back to disk
+  if not (result=mpAutoexecNoWriteAcc) then
+    SaveStringToFile('c:\Autoexec.bat', AutoexecLines.Text, false);
+
+  // Free stringlists
+  PathLineNos.Free;
+  AutoexecLines.Free;
+end; // ModifyPath9x
+
+
+{ Helper function: Modify path on Windows NT, 2000 and XP }
+function ModifyPathNT(DirName: string; Method, Scope: integer): integer;
+var
+  RegRootKey: integer;
+  RegSubKeyName: string;
+  RegValueName: string;
+  OldPath, ResultPath: string;
+  OK: boolean;
+begin
+  // Expect everything to be OK
+  result := mpOK;
+
+  // Initialize registry key and value names to reflect if changes should be global or local to current user only
+  case Scope of
+    psCurrentUser:
+      begin
+        RegRootKey := HKEY_CURRENT_USER;
+        RegSubKeyName := 'Environment';
+        RegValueName := 'Path';
+      end;
+    psAllUsers:
+      begin
+        RegRootKey := HKEY_LOCAL_MACHINE;
+        RegSubKeyName := 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment';
+        RegValueName := 'Path';
+      end;
+  end;
+
+  // Read current path value from registry
+  OK := RegQueryStringValue(RegRootKey, RegSubKeyName, RegValueName, OldPath);
+  if not OK then
+  begin
+    result := mpMissingRights;
+    Exit;
+  end;
+
+  // Modify path
+  ModifyPathString(OldPath, DirName, Method, False, ResultPath);
+
+  // Write new path value to registry
+  if not RegWriteStringValue(RegRootKey, RegSubKeyName, RegValueName, ResultPath) then
+  begin
+    result := mpMissingRights;
+    Exit;
+
+  end;
+end; // ModifyPathNT
+
+
+{ Main function: Modify path }
+function ModifyPath(Path: string; Method, Scope: integer): integer;
+begin
+  // Check if both add and remove has been specified (= error!)
+  if (Method and (pmAddToBeginning or pmAddToEnd) and (pmRemove or pmRemoveSubdirsAlso)) > 0 then
+  begin
+    result := mpBothAddAndRemove;
+    Exit;
+  end;
+
+  // Perform directory constant expansion
+  Path := ExpandConstantEx(Path, ' ', ' ');
+
+  // Test if Win9x
+  if InstallOnThisVersion('4,0','0,0') = irInstall then
+    ModifyPath9x(Path, Method);
+
+  // Test if WinNT, 2000 or XP
+  if InstallOnThisVersion('0,4','0,0') = irInstall then
+    ModifyPathNT(Path, Method, Scope);
+end; // ModifyPath
+
+procedure AddFolderToPathVariable();
+begin
+  ModifyPath('{app}', pmAddToBeginning, psAllUsers);
+  ModifyPath('{app}', pmAddToBeginning, psCurrentUser);
+end;
diff --git a/Windows/fonttools-win-setup.txt b/Windows/fonttools-win-setup.txt
new file mode 100644
index 0000000..721c858
--- /dev/null
+++ b/Windows/fonttools-win-setup.txt
@@ -0,0 +1,12 @@
+TTX is an application to convert OpenType and TrueType files to and from an
+XML-based text format, also called TTX.
+
+The TTX setup application can create an icon for TTX on your desktop. You will
+then be able to drop .TTF or .OTF files onto the ttx.exe icon to dump the font
+to a .TTX file. Dropping a .TTX file onto it builds a TTF or OTF font.
+
+Also, the setup puts a shortcut to TTX in your Send To context menu in Windows 
+Explorer. Click on any OTF, TTF or TTX file with the right mouse button, 
+choose Send To and then TTX. 
+
+For more information, see documentation.html
diff --git a/Windows/mcmillan.bat b/Windows/mcmillan.bat
new file mode 100755
index 0000000..c4f48c9
--- /dev/null
+++ b/Windows/mcmillan.bat
@@ -0,0 +1,12 @@
+@echo off
+rem Build a standalone ttx.exe with McMillan's Installer (PyInstaller's ancestor).
+rem Assumes Python 2.3 and FontTools are installed under C:\Python23.
+mkdir Build
+mkdir ..\dist
+mkdir ..\dist\ttx
+rem Configure the Installer, generate a one-file spec for ttx.py, build it, and stage the exe.
+C:\Python23\Installer\Configure.py
+C:\Python23\Installer\Makespec.py --upx --onefile --paths "C:\Python23\Lib\encodings;C:\Python23\Lib\site-packages\FontTools\fontTools\encodings;C:\Python23\Lib\site-packages\FontTools\fontTools\misc;C:\Python23\Lib\site-packages\FontTools\fontTools\pens;C:\Python23\Lib\site-packages\FontTools\fontTools\ttLib;" --icon ttx.ico --out Build C:\Python23\Lib\site-packages\FontTools\fontTools\ttx.py
+C:\Python23\Installer\Build.py Build\ttx.spec
+move Build\ttx.exe ..\dist\ttx
+
diff --git a/Windows/ttx.ico b/Windows/ttx.ico
new file mode 100644
index 0000000..f0482b0
--- /dev/null
+++ b/Windows/ttx.ico
Binary files differ
diff --git a/setup.py b/setup.py
new file mode 100755
index 0000000..d5b6436
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,87 @@
+#! /usr/bin/env python
+
+from __future__ import print_function
+import os, sys
+from distutils.core import setup, Extension
+from distutils.command.build_ext import build_ext
+
+try:
+	# load py2exe distutils extension, if available
+	import py2exe
+except ImportError:
+	pass
+
+try:
+	import xml.parsers.expat
+except ImportError:
+	print("*** Warning: FontTools needs PyXML, see:")
+	print("        http://sourceforge.net/projects/pyxml/")
+
+
+class build_ext_optional(build_ext):
+	"""build_ext command which doesn't abort when it fails."""
+	def build_extension(self, ext):
+		# Skip extensions which cannot be built
+		try:
+			build_ext.build_extension(self, ext)
+		except:
+			self.announce(
+				'*** WARNING: Building of extension "%s" '
+				'failed: %s' %
+				(ext.name, sys.exc_info()[1]))
+
+
+if sys.version_info > (2, 3, 0, 'alpha', 1):
+	# Trove classifiers for PyPI; presumably older distutils rejects the keyword -- TODO confirm
+	classifiers = {"classifiers": [
+		"Development Status :: 4 - Beta",
+		"Environment :: Console",
+		"Environment :: Other Environment",
+		"Intended Audience :: Developers",
+		"Intended Audience :: End Users/Desktop",
+		"License :: OSI Approved :: BSD License",
+		"Natural Language :: English",
+		"Operating System :: OS Independent",
+		"Programming Language :: Python",
+		"Topic :: Multimedia :: Graphics",
+		"Topic :: Multimedia :: Graphics :: Graphics Conversion",
+	]}
+else:
+	classifiers = {}  # older Python: pass no classifiers keyword to setup()
+
+long_description = """\
+FontTools/TTX is a library to manipulate font files from Python.
+It supports reading and writing of TrueType/OpenType fonts, reading
+and writing of AFM files, reading (and partially writing) of PS Type 1
+fonts. The package also contains a tool called "TTX" which converts
+TrueType/OpenType fonts to and from an XML-based format.
+"""
+
+setup(
+		name = "fonttools",
+		version = "2.4",
+		description = "Tools to manipulate font files",
+		author = "Just van Rossum",
+		author_email = "just@letterror.com",
+		maintainer = "Just van Rossum",
+		maintainer_email = "just@letterror.com",
+		url = "http://fonttools.sourceforge.net/",
+		license = "OpenSource, BSD-style",
+		platforms = ["Any"],
+		long_description = long_description,
+		
+		packages = [
+			"fontTools",
+			"fontTools.encodings",
+			"fontTools.misc",
+			"fontTools.pens",
+			"fontTools.ttLib",
+			"fontTools.ttLib.tables",
+		],
+		package_dir = {'': 'Lib'},  # package sources live under Lib/
+		extra_path = 'FontTools',  # NOTE(review): installs into a site-packages/FontTools/ subdir -- confirm still intended
+		scripts = ["Tools/ttx", "Tools/pyftsubset", "Tools/pyftinspect", "Tools/pyftmerge"],
+		cmdclass = {"build_ext": build_ext_optional},  # don't abort install when an extension fails to build
+		data_files = [('share/man/man1', ["Doc/ttx.1"])],  # install the ttx man page
+		**classifiers  # empty dict on older Pythons
+	)