jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 1 | from DefaultTable import DefaultTable |
| 2 | import otData |
| 3 | import struct |
| 4 | from types import TupleType |
| 5 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 6 | class OverflowErrorRecord: |
| 7 | def __init__(self, overflowTuple): |
| 8 | self.tableType = overflowTuple[0] |
| 9 | self.LookupListIndex = overflowTuple[1] |
| 10 | self.SubTableIndex = overflowTuple[2] |
| 11 | self.itemName = overflowTuple[3] |
| 12 | self.itemIndex = overflowTuple[4] |
| 13 | |
| 14 | def __repr__(self): |
| 15 | return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex)) |
| 16 | |
| 17 | class OTLOffsetOverflowError(Exception): |
| 18 | def __init__(self, overflowErrorRecord): |
| 19 | self.value = overflowErrorRecord |
| 20 | |
| 21 | def __str__(self): |
| 22 | return repr(self.value) |
| 23 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 24 | |
| 25 | class BaseTTXConverter(DefaultTable): |
| 26 | |
jvr | 3a6aa23 | 2003-09-02 19:23:13 +0000 | [diff] [blame] | 27 | """Generic base class for TTX table converters. It functions as an |
| 28 | adapter between the TTX (ttLib actually) table model and the model |
| 29 | we use for OpenType tables, which is necessarily subtly different. |
| 30 | """ |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 31 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 32 | def decompile(self, data, font): |
| 33 | import otTables |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 34 | cachingStats = None |
| 35 | reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 36 | tableClass = getattr(otTables, self.tableTag) |
| 37 | self.table = tableClass() |
| 38 | self.table.decompile(reader, font) |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 39 | if 0: |
| 40 | stats = [(v, k) for k, v in cachingStats.items()] |
| 41 | stats.sort() |
| 42 | stats.reverse() |
| 43 | print "cachingsstats for ", self.tableTag |
| 44 | for v, k in stats: |
| 45 | if v < 2: |
| 46 | break |
| 47 | print v, k |
| 48 | print "---", len(stats) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 49 | |
| 50 | def compile(self, font): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 51 | """ Create a top-level OTFWriter for the GPOS/GSUB table. |
| 52 | Call the compile method for the the table |
jvr | 1c73452 | 2008-03-09 20:13:16 +0000 | [diff] [blame] | 53 | for each 'converter' record in the table converter list |
| 54 | call converter's write method for each item in the value. |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 55 | - For simple items, the write method adds a string to the |
| 56 | writer's self.items list. |
jvr | 1c73452 | 2008-03-09 20:13:16 +0000 | [diff] [blame] | 57 | - For Struct/Table/Subtable items, it add first adds new writer to the |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 58 | to the writer's self.items, then calls the item's compile method. |
| 59 | This creates a tree of writers, rooted at the GUSB/GPOS writer, with |
| 60 | each writer representing a table, and the writer.items list containing |
| 61 | the child data strings and writers. |
jvr | 1c73452 | 2008-03-09 20:13:16 +0000 | [diff] [blame] | 62 | call the getAllData method |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 63 | call _doneWriting, which removes duplicates |
jvr | 1c73452 | 2008-03-09 20:13:16 +0000 | [diff] [blame] | 64 | call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables |
| 65 | Traverse the flat list of tables, calling getDataLength on each to update their position |
| 66 | Traverse the flat list of tables again, calling getData each get the data in the table, now that |
| 67 | pos's and offset are known. |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 68 | |
| 69 | If a lookup subtable overflows an offset, we have to start all over. |
| 70 | """ |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 71 | writer = OTTableWriter(self.tableTag) |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 72 | writer.parent = None |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 73 | self.table.compile(writer, font) |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 74 | return writer.getAllData() |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 75 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 76 | def toXML(self, writer, font): |
| 77 | self.table.toXML2(writer, font) |
| 78 | |
| 79 | def fromXML(self, (name, attrs, content), font): |
| 80 | import otTables |
| 81 | if not hasattr(self, "table"): |
| 82 | tableClass = getattr(otTables, self.tableTag) |
| 83 | self.table = tableClass() |
| 84 | self.table.fromXML((name, attrs, content), font) |
| 85 | |
| 86 | |
| 87 | class OTTableReader: |
| 88 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 89 | """Helper class to retrieve data from an OpenType table.""" |
| 90 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 91 | def __init__(self, data, tableType, offset=0, valueFormat=None, cachingStats=None): |
| 92 | self.data = data |
| 93 | self.offset = offset |
| 94 | self.pos = offset |
| 95 | self.tableType = tableType |
| 96 | if valueFormat is None: |
| 97 | valueFormat = (ValueRecordFactory(), ValueRecordFactory()) |
| 98 | self.valueFormat = valueFormat |
| 99 | self.cachingStats = cachingStats |
| 100 | |
| 101 | def getSubReader(self, offset): |
| 102 | offset = self.offset + offset |
| 103 | if self.cachingStats is not None: |
| 104 | try: |
| 105 | self.cachingStats[offset] = self.cachingStats[offset] + 1 |
| 106 | except KeyError: |
| 107 | self.cachingStats[offset] = 1 |
| 108 | |
| 109 | subReader = self.__class__(self.data, self.tableType, offset, |
| 110 | self.valueFormat, self.cachingStats) |
| 111 | return subReader |
| 112 | |
| 113 | def readUShort(self): |
| 114 | pos = self.pos |
| 115 | newpos = pos + 2 |
jvr | e69caf8 | 2002-05-13 18:08:19 +0000 | [diff] [blame] | 116 | value, = struct.unpack(">H", self.data[pos:newpos]) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 117 | self.pos = newpos |
| 118 | return value |
| 119 | |
| 120 | def readShort(self): |
| 121 | pos = self.pos |
| 122 | newpos = pos + 2 |
jvr | e69caf8 | 2002-05-13 18:08:19 +0000 | [diff] [blame] | 123 | value, = struct.unpack(">h", self.data[pos:newpos]) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 124 | self.pos = newpos |
| 125 | return value |
| 126 | |
| 127 | def readLong(self): |
| 128 | pos = self.pos |
| 129 | newpos = pos + 4 |
jvr | e69caf8 | 2002-05-13 18:08:19 +0000 | [diff] [blame] | 130 | value, = struct.unpack(">l", self.data[pos:newpos]) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 131 | self.pos = newpos |
| 132 | return value |
| 133 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 134 | def readULong(self): |
| 135 | pos = self.pos |
| 136 | newpos = pos + 4 |
| 137 | value, = struct.unpack(">L", self.data[pos:newpos]) |
| 138 | self.pos = newpos |
| 139 | return value |
| 140 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 141 | def readTag(self): |
| 142 | pos = self.pos |
| 143 | newpos = pos + 4 |
| 144 | value = self.data[pos:newpos] |
| 145 | assert len(value) == 4 |
| 146 | self.pos = newpos |
| 147 | return value |
| 148 | |
| 149 | def readStruct(self, format, size=None): |
| 150 | if size is None: |
| 151 | size = struct.calcsize(format) |
| 152 | else: |
| 153 | assert size == struct.calcsize(format) |
| 154 | pos = self.pos |
| 155 | newpos = pos + size |
| 156 | values = struct.unpack(format, self.data[pos:newpos]) |
| 157 | self.pos = newpos |
| 158 | return values |
| 159 | |
| 160 | def setValueFormat(self, format, which): |
| 161 | self.valueFormat[which].setFormat(format) |
| 162 | |
| 163 | def readValueRecord(self, font, which): |
| 164 | return self.valueFormat[which].readValueRecord(self, font) |
| 165 | |
| 166 | |
| 167 | class OTTableWriter: |
| 168 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 169 | """Helper class to gather and assemble data for OpenType tables.""" |
| 170 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 171 | def __init__(self, tableType, valueFormat=None): |
| 172 | self.items = [] |
| 173 | self.tableType = tableType |
| 174 | if valueFormat is None: |
| 175 | valueFormat = ValueRecordFactory(), ValueRecordFactory() |
| 176 | self.valueFormat = valueFormat |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 177 | self.pos = None |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 178 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 179 | # assembler interface |
| 180 | |
| 181 | def getAllData(self): |
| 182 | """Assemble all data, including all subtables.""" |
| 183 | self._doneWriting() |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 184 | tables, extTables = self._gatherTables() |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 185 | tables.reverse() |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 186 | extTables.reverse() |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 187 | # Gather all data in two passes: the absolute positions of all |
| 188 | # subtable are needed before the actual data can be assembled. |
| 189 | pos = 0 |
| 190 | for table in tables: |
| 191 | table.pos = pos |
| 192 | pos = pos + table.getDataLength() |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 193 | |
| 194 | for table in extTables: |
| 195 | table.pos = pos |
| 196 | pos = pos + table.getDataLength() |
| 197 | |
| 198 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 199 | data = [] |
| 200 | for table in tables: |
| 201 | tableData = table.getData() |
| 202 | data.append(tableData) |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 203 | |
| 204 | for table in extTables: |
| 205 | tableData = table.getData() |
| 206 | data.append(tableData) |
| 207 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 208 | return "".join(data) |
| 209 | |
| 210 | def getDataLength(self): |
| 211 | """Return the length of this table in bytes, without subtables.""" |
| 212 | l = 0 |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 213 | if hasattr(self, "Extension"): |
| 214 | longOffset = 1 |
| 215 | else: |
| 216 | longOffset = 0 |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 217 | for item in self.items: |
| 218 | if hasattr(item, "getData") or hasattr(item, "getCountData"): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 219 | if longOffset: |
| 220 | l = l + 4 # sizeof(ULong) |
| 221 | else: |
| 222 | l = l + 2 # sizeof(UShort) |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 223 | else: |
| 224 | l = l + len(item) |
| 225 | return l |
| 226 | |
| 227 | def getData(self): |
| 228 | """Assemble the data for this writer/table, without subtables.""" |
| 229 | items = list(self.items) # make a shallow copy |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 230 | if hasattr(self,"Extension"): |
| 231 | longOffset = 1 |
| 232 | else: |
| 233 | longOffset = 0 |
| 234 | pos = self.pos |
| 235 | numItems = len(items) |
| 236 | for i in range(numItems): |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 237 | item = items[i] |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 238 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 239 | if hasattr(item, "getData"): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 240 | if longOffset: |
| 241 | items[i] = packULong(item.pos - pos) |
| 242 | else: |
| 243 | try: |
| 244 | items[i] = packUShort(item.pos - pos) |
| 245 | except AssertionError: |
| 246 | # provide data to fix overflow problem. |
| 247 | # If the overflow is to a lookup, or from a lookup to a subtable, |
| 248 | # just report the current item. |
| 249 | if self.name in [ 'LookupList', 'Lookup']: |
| 250 | overflowErrorRecord = self.getOverflowErrorRecord(item) |
| 251 | else: |
| 252 | # overflow is within a subTable. Life is more complicated. |
| 253 | # If we split the sub-table just before the current item, we may still suffer overflow. |
| 254 | # This is because duplicate table merging is done only within an Extension subTable tree; |
| 255 | # when we split the subtable in two, some items may no longer be duplicates. |
| 256 | # Get worst case by adding up all the item lengths, depth first traversal. |
| 257 | # and then report the first item that overflows a short. |
| 258 | def getDeepItemLength(table): |
| 259 | if hasattr(table, "getDataLength"): |
| 260 | length = 0 |
| 261 | for item in table.items: |
| 262 | length = length + getDeepItemLength(item) |
| 263 | else: |
| 264 | length = len(table) |
| 265 | return length |
| 266 | |
| 267 | length = self.getDataLength() |
| 268 | if hasattr(self, "sortCoverageLast") and item.name == "Coverage": |
| 269 | # Coverage is first in the item list, but last in the table list, |
| 270 | # The original overflow is really in the item list. Skip the Coverage |
| 271 | # table in the following test. |
| 272 | items = items[i+1:] |
| 273 | |
| 274 | for j in range(len(items)): |
| 275 | item = items[j] |
| 276 | length = length + getDeepItemLength(item) |
| 277 | if length > 65535: |
| 278 | break |
| 279 | overflowErrorRecord = self.getOverflowErrorRecord(item) |
| 280 | |
| 281 | |
| 282 | raise OTLOffsetOverflowError, overflowErrorRecord |
| 283 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 284 | return "".join(items) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 285 | |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 286 | def __hash__(self): |
| 287 | # only works after self._doneWriting() has been called |
| 288 | return hash(self.items) |
| 289 | |
| 290 | def __cmp__(self, other): |
| 291 | if hasattr(other, "items"): |
| 292 | return cmp(self.items, other.items) |
| 293 | else: |
| 294 | return cmp(id(self), id(other)) |
| 295 | |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 296 | def _doneWriting(self, internedTables=None): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 297 | # Convert CountData references to data string items |
| 298 | # collapse duplicate table references to a unique entry |
| 299 | # "tables" are OTTableWriter objects. |
| 300 | |
| 301 | # For Extension Lookup types, we can |
| 302 | # eliminate duplicates only within the tree under the Extension Lookup, |
| 303 | # as offsets may exceed 64K even between Extension LookupTable subtables. |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 304 | if internedTables is None: |
| 305 | internedTables = {} |
| 306 | items = self.items |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 307 | iRange = range(len(items)) |
| 308 | |
| 309 | if hasattr(self, "Extension"): |
| 310 | newTree = 1 |
| 311 | else: |
| 312 | newTree = 0 |
| 313 | for i in iRange: |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 314 | item = items[i] |
| 315 | if hasattr(item, "getCountData"): |
| 316 | items[i] = item.getCountData() |
| 317 | elif hasattr(item, "getData"): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 318 | if newTree: |
| 319 | item._doneWriting() |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 320 | else: |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 321 | item._doneWriting(internedTables) |
| 322 | if internedTables.has_key(item): |
| 323 | items[i] = item = internedTables[item] |
| 324 | else: |
| 325 | internedTables[item] = item |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 326 | self.items = tuple(items) |
| 327 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 328 | def _gatherTables(self, tables=None, extTables=None, done=None): |
| 329 | # Convert table references in self.items tree to a flat |
| 330 | # list of tables in depth-first traversal order. |
| 331 | # "tables" are OTTableWriter objects. |
| 332 | # We do the traversal in reverse order at each level, in order to |
| 333 | # resolve duplicate references to be the last reference in the list of tables. |
| 334 | # For extension lookups, duplicate references can be merged only within the |
| 335 | # writer tree under the extension lookup. |
| 336 | if tables is None: # init call for first time. |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 337 | tables = [] |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 338 | extTables = [] |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 339 | done = {} |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 340 | |
| 341 | done[self] = 1 |
| 342 | |
| 343 | numItems = len(self.items) |
| 344 | iRange = range(numItems) |
| 345 | iRange.reverse() |
| 346 | |
| 347 | if hasattr(self, "Extension"): |
| 348 | appendExtensions = 1 |
| 349 | else: |
| 350 | appendExtensions = 0 |
| 351 | |
| 352 | # add Coverage table if it is sorted last. |
| 353 | sortCoverageLast = 0 |
| 354 | if hasattr(self, "sortCoverageLast"): |
| 355 | # Find coverage table |
| 356 | for i in range(numItems): |
| 357 | item = self.items[i] |
| 358 | if hasattr(item, "name") and (item.name == "Coverage"): |
| 359 | sortCoverageLast = 1 |
| 360 | break |
| 361 | if not done.has_key(item): |
| 362 | item._gatherTables(tables, extTables, done) |
| 363 | else: |
| 364 | index = max(item.parent.keys()) |
| 365 | item.parent[index + 1] = self |
| 366 | |
| 367 | saveItem = None |
| 368 | for i in iRange: |
| 369 | item = self.items[i] |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 370 | if not hasattr(item, "getData"): |
| 371 | continue |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 372 | |
| 373 | if sortCoverageLast and (i==1) and item.name == 'Coverage': |
| 374 | # we've already 'gathered' it above |
| 375 | continue |
| 376 | |
| 377 | if appendExtensions: |
| 378 | assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" |
| 379 | newDone = {} |
| 380 | item._gatherTables(extTables, None, newDone) |
| 381 | |
| 382 | elif not done.has_key(item): |
| 383 | item._gatherTables(tables, extTables, done) |
| 384 | else: |
| 385 | index = max(item.parent.keys()) |
| 386 | item.parent[index + 1] = self |
| 387 | |
| 388 | |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 389 | tables.append(self) |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 390 | return tables, extTables |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 391 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 392 | # interface for gathering data, as used by table.compile() |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 393 | |
jvr | 4105ca0 | 2002-07-23 08:43:03 +0000 | [diff] [blame] | 394 | def getSubWriter(self): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 395 | subwriter = self.__class__(self.tableType, self.valueFormat) |
| 396 | subwriter.parent = {0:self} # because some subtables have idential values, we discard |
| 397 | # the duplicates under the getAllData method. Hence some |
| 398 | # subtable writers can have more than one parent writer. |
| 399 | return subwriter |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 400 | |
| 401 | def writeUShort(self, value): |
| 402 | assert 0 <= value < 0x10000 |
| 403 | self.items.append(struct.pack(">H", value)) |
| 404 | |
| 405 | def writeShort(self, value): |
| 406 | self.items.append(struct.pack(">h", value)) |
| 407 | |
| 408 | def writeLong(self, value): |
| 409 | self.items.append(struct.pack(">l", value)) |
| 410 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 411 | def writeULong(self, value): |
| 412 | self.items.append(struct.pack(">L", value)) |
| 413 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 414 | def writeTag(self, tag): |
| 415 | assert len(tag) == 4 |
| 416 | self.items.append(tag) |
| 417 | |
| 418 | def writeSubTable(self, subWriter): |
| 419 | self.items.append(subWriter) |
| 420 | |
| 421 | def writeCountReference(self, table, name): |
| 422 | self.items.append(CountReference(table, name)) |
| 423 | |
| 424 | def writeStruct(self, format, values): |
| 425 | data = apply(struct.pack, (format,) + values) |
| 426 | self.items.append(data) |
| 427 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 428 | def writeData(self, data): |
| 429 | self.items.append(data) |
| 430 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 431 | def setValueFormat(self, format, which): |
| 432 | self.valueFormat[which].setFormat(format) |
| 433 | |
| 434 | def writeValueRecord(self, value, font, which): |
| 435 | return self.valueFormat[which].writeValueRecord(self, font, value) |
| 436 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 437 | def getOverflowErrorRecord(self, item): |
| 438 | LookupListIndex = SubTableIndex = itemName = itemIndex = None |
| 439 | if self.name == 'LookupList': |
| 440 | LookupListIndex = item.repeatIndex |
| 441 | elif self.name == 'Lookup': |
| 442 | LookupListIndex = self.repeatIndex |
| 443 | SubTableIndex = item.repeatIndex |
| 444 | else: |
| 445 | itemName = item.name |
| 446 | if hasattr(item, 'repeatIndex'): |
| 447 | itemIndex = item.repeatIndex |
| 448 | if self.name == 'SubTable': |
| 449 | LookupListIndex = self.parent[0].repeatIndex |
| 450 | SubTableIndex = self.repeatIndex |
| 451 | elif self.name == 'ExtSubTable': |
| 452 | LookupListIndex = self.parent[0].parent[0].repeatIndex |
| 453 | SubTableIndex = self.parent[0].repeatIndex |
| 454 | else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. |
| 455 | itemName = ".".join(self.name, item.name) |
| 456 | p1 = self.parent[0] |
| 457 | while p1 and p1.name not in ['ExtSubTable', 'SubTable']: |
| 458 | itemName = ".".join(p1.name, item.name) |
| 459 | p1 = p1.parent[0] |
| 460 | if p1: |
| 461 | if p1.name == 'ExtSubTable': |
| 462 | LookupListIndex = self.parent[0].parent[0].repeatIndex |
| 463 | SubTableIndex = self.parent[0].repeatIndex |
| 464 | else: |
| 465 | LookupListIndex = self.parent[0].repeatIndex |
| 466 | SubTableIndex = self.repeatIndex |
| 467 | |
| 468 | return OverflowErrorRecord( (self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) |
| 469 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 470 | |
| 471 | class CountReference: |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 472 | """A reference to a Count value, not a count of references.""" |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 473 | def __init__(self, table, name): |
| 474 | self.table = table |
| 475 | self.name = name |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 476 | def getCountData(self): |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 477 | return packUShort(self.table[self.name]) |
| 478 | |
| 479 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 480 | def packUShort(value): |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 481 | assert 0 <= value < 0x10000, value |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 482 | return struct.pack(">H", value) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 483 | |
| 484 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 485 | def packULong(value): |
jvr | ce47e0d | 2008-03-09 20:48:45 +0000 | [diff] [blame] | 486 | assert 0 <= value < 0x100000000, value |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 487 | return struct.pack(">L", value) |
| 488 | |
| 489 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 490 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 491 | class TableStack: |
| 492 | """A stack of table dicts, working as a stack of namespaces so we can |
| 493 | retrieve values from (and store values to) tables higher up the stack.""" |
| 494 | def __init__(self): |
| 495 | self.stack = [] |
| 496 | def push(self, table): |
Matt Fontaine | 65499c1 | 2013-08-05 14:35:12 -0700 | [diff] [blame^] | 497 | self.stack.append(table) |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 498 | def pop(self): |
Matt Fontaine | 65499c1 | 2013-08-05 14:35:12 -0700 | [diff] [blame^] | 499 | self.stack.pop() |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 500 | def getTop(self): |
Matt Fontaine | 65499c1 | 2013-08-05 14:35:12 -0700 | [diff] [blame^] | 501 | return self.stack[-1] |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 502 | def getValue(self, name): |
| 503 | return self.__findTable(name)[name] |
| 504 | def storeValue(self, name, value): |
| 505 | table = self.__findTable(name) |
| 506 | if table[name] is None: |
| 507 | table[name] = value |
| 508 | else: |
| 509 | assert table[name] == value, (table[name], value) |
| 510 | def __findTable(self, name): |
Matt Fontaine | 65499c1 | 2013-08-05 14:35:12 -0700 | [diff] [blame^] | 511 | for table in reversed(self.stack): |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 512 | if table.has_key(name): |
| 513 | return table |
| 514 | raise KeyError, name |
| 515 | |
| 516 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 517 | class BaseTable: |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 518 | def __init__(self): |
| 519 | self.compileStatus = 0 # 0 means table was created |
| 520 | # 1 means the table.read() function was called by a table which is subject |
| 521 | # to delayed compilation |
| 522 | # 2 means that it was subject to delayed compilation, and |
| 523 | # has been decompiled |
| 524 | # 3 means that the start and end fields have been filled out, and that we |
| 525 | # can use the data string rather than compiling from the table data. |
| 526 | |
| 527 | self.recurse = 0 |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 528 | |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 529 | def __getattr__(self, attr): |
| 530 | # we get here only when the table does not have the attribute. |
| 531 | # This method ovveride exists so that we can try to de-compile |
| 532 | # a table which is subject to delayed decompilation, and then try |
| 533 | # to get the value again after decompilation. |
| 534 | self.recurse +=1 |
| 535 | if self.recurse > 2: |
| 536 | # shouldn't ever get here - we should only get to two levels of recursion. |
| 537 | # this guards against self.decompile NOT setting compileStatus to other than 1. |
| 538 | raise AttributeError, attr |
| 539 | if self.compileStatus == 1: |
| 540 | # table.read() has been called, but table has not yet been decompiled |
| 541 | # This happens only for extension tables. |
| 542 | self.decompile(self.reader, self.font) |
| 543 | val = getattr(self, attr) |
| 544 | self.recurse -=1 |
| 545 | return val |
| 546 | |
| 547 | raise AttributeError, attr |
| 548 | |
| 549 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 550 | """Generic base class for all OpenType (sub)tables.""" |
| 551 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 552 | def getConverters(self): |
| 553 | return self.converters |
| 554 | |
| 555 | def getConverterByName(self, name): |
| 556 | return self.convertersByName[name] |
| 557 | |
| 558 | def decompile(self, reader, font, tableStack=None): |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 559 | self.compileStatus = 2 # table has been decompiled. |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 560 | if tableStack is None: |
| 561 | tableStack = TableStack() |
jvr | f7ef96c | 2002-09-10 19:26:38 +0000 | [diff] [blame] | 562 | self.readFormat(reader) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 563 | table = {} |
| 564 | self.__rawTable = table # for debugging |
| 565 | tableStack.push(table) |
| 566 | for conv in self.getConverters(): |
| 567 | if conv.name == "SubTable": |
| 568 | conv = conv.getConverter(reader.tableType, |
| 569 | table["LookupType"]) |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 570 | if conv.name == "ExtSubTable": |
| 571 | conv = conv.getConverter(reader.tableType, |
| 572 | table["ExtensionLookupType"]) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 573 | if conv.repeat: |
| 574 | l = [] |
| 575 | for i in range(tableStack.getValue(conv.repeat) + conv.repeatOffset): |
| 576 | l.append(conv.read(reader, font, tableStack)) |
| 577 | table[conv.name] = l |
| 578 | else: |
| 579 | table[conv.name] = conv.read(reader, font, tableStack) |
| 580 | tableStack.pop() |
| 581 | self.postRead(table, font) |
| 582 | del self.__rawTable # succeeded, get rid of debugging info |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 583 | |
| 584 | def preCompile(self): |
| 585 | pass # used only by the LookupList class |
| 586 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 587 | def compile(self, writer, font, tableStack=None): |
| 588 | if tableStack is None: |
| 589 | tableStack = TableStack() |
| 590 | table = self.preWrite(font) |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 591 | |
| 592 | if hasattr(self, 'sortCoverageLast'): |
| 593 | writer.sortCoverageLast = 1 |
| 594 | |
jvr | f7ef96c | 2002-09-10 19:26:38 +0000 | [diff] [blame] | 595 | self.writeFormat(writer) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 596 | tableStack.push(table) |
| 597 | for conv in self.getConverters(): |
| 598 | value = table.get(conv.name) |
| 599 | if conv.repeat: |
| 600 | if value is None: |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 601 | value = [] |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 602 | tableStack.storeValue(conv.repeat, len(value) - conv.repeatOffset) |
jvr | 823f8cd | 2006-10-21 14:12:38 +0000 | [diff] [blame] | 603 | for i in range(len(value)): |
| 604 | conv.write(writer, font, tableStack, value[i], i) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 605 | elif conv.isCount: |
| 606 | # Special-case Count values. |
| 607 | # Assumption: a Count field will *always* precede |
| 608 | # the actual array. |
| 609 | # We need a default value, as it may be set later by a nested |
| 610 | # table. TableStack.storeValue() will then find it here. |
| 611 | table[conv.name] = None |
| 612 | # We add a reference: by the time the data is assembled |
| 613 | # the Count value will be filled in. |
| 614 | writer.writeCountReference(table, conv.name) |
| 615 | else: |
| 616 | conv.write(writer, font, tableStack, value) |
| 617 | tableStack.pop() |
| 618 | |
jvr | f7ef96c | 2002-09-10 19:26:38 +0000 | [diff] [blame] | 619 | def readFormat(self, reader): |
| 620 | pass |
| 621 | |
| 622 | def writeFormat(self, writer): |
| 623 | pass |
| 624 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 625 | def postRead(self, table, font): |
| 626 | self.__dict__.update(table) |
| 627 | |
| 628 | def preWrite(self, font): |
| 629 | return self.__dict__.copy() |
| 630 | |
| 631 | def toXML(self, xmlWriter, font, attrs=None): |
| 632 | tableName = self.__class__.__name__ |
| 633 | if attrs is None: |
| 634 | attrs = [] |
| 635 | if hasattr(self, "Format"): |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 636 | attrs = attrs + [("Format", self.Format)] |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 637 | xmlWriter.begintag(tableName, attrs) |
| 638 | xmlWriter.newline() |
| 639 | self.toXML2(xmlWriter, font) |
| 640 | xmlWriter.endtag(tableName) |
| 641 | xmlWriter.newline() |
| 642 | |
| 643 | def toXML2(self, xmlWriter, font): |
| 644 | # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). |
| 645 | # This is because in TTX our parent writes our main tag, and in otBase.py we |
| 646 | # do it ourselves. I think I'm getting schizophrenic... |
| 647 | for conv in self.getConverters(): |
| 648 | value = getattr(self, conv.name) |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 649 | if conv.repeat: |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 650 | for i in range(len(value)): |
| 651 | item = value[i] |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 652 | conv.xmlWrite(xmlWriter, font, item, conv.name, |
| 653 | [("index", i)]) |
| 654 | else: |
| 655 | conv.xmlWrite(xmlWriter, font, value, conv.name, []) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 656 | |
| 657 | def fromXML(self, (name, attrs, content), font): |
| 658 | try: |
| 659 | conv = self.getConverterByName(name) |
| 660 | except KeyError: |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 661 | raise # XXX on KeyError, raise nice error |
| 662 | value = conv.xmlRead(attrs, content, font) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 663 | if conv.repeat: |
jvr | 52966bb | 2002-09-12 16:45:48 +0000 | [diff] [blame] | 664 | seq = getattr(self, conv.name, None) |
| 665 | if seq is None: |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 666 | seq = [] |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 667 | setattr(self, conv.name, seq) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 668 | seq.append(value) |
| 669 | else: |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 670 | setattr(self, conv.name, value) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 671 | |
| 672 | def __cmp__(self, other): |
| 673 | # this is only for debugging, so it's ok to barf |
| 674 | # when 'other' has no __dict__ or __class__ |
| 675 | rv = cmp(self.__class__, other.__class__) |
| 676 | if not rv: |
| 677 | rv = cmp(self.__dict__, other.__dict__) |
| 678 | return rv |
| 679 | else: |
| 680 | return rv |
| 681 | |
| 682 | |
| 683 | class FormatSwitchingBaseTable(BaseTable): |
| 684 | |
jvr | cfadfd0 | 2002-07-22 22:13:57 +0000 | [diff] [blame] | 685 | """Minor specialization of BaseTable, for tables that have multiple |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 686 | formats, eg. CoverageFormat1 vs. CoverageFormat2.""" |
| 687 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 688 | def getConverters(self): |
| 689 | return self.converters[self.Format] |
| 690 | |
| 691 | def getConverterByName(self, name): |
| 692 | return self.convertersByName[self.Format][name] |
| 693 | |
jvr | f7ef96c | 2002-09-10 19:26:38 +0000 | [diff] [blame] | 694 | def readFormat(self, reader): |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 695 | self.Format = reader.readUShort() |
| 696 | assert self.Format <> 0, (self, reader.pos, len(reader.data)) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 697 | |
jvr | f7ef96c | 2002-09-10 19:26:38 +0000 | [diff] [blame] | 698 | def writeFormat(self, writer): |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 699 | writer.writeUShort(self.Format) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 700 | |
| 701 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 702 | # |
| 703 | # Support for ValueRecords |
| 704 | # |
| 705 | # This data type is so different from all other OpenType data types that |
| 706 | # it requires quite a bit of code for itself. It even has special support |
| 707 | # in OTTableReader and OTTableWriter... |
| 708 | # |
| 709 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 710 | valueRecordFormat = [ |
| 711 | # Mask Name isDevice signed |
| 712 | (0x0001, "XPlacement", 0, 1), |
| 713 | (0x0002, "YPlacement", 0, 1), |
| 714 | (0x0004, "XAdvance", 0, 1), |
| 715 | (0x0008, "YAdvance", 0, 1), |
| 716 | (0x0010, "XPlaDevice", 1, 0), |
| 717 | (0x0020, "YPlaDevice", 1, 0), |
| 718 | (0x0040, "XAdvDevice", 1, 0), |
| 719 | (0x0080, "YAdvDevice", 1, 0), |
| 720 | # reserved: |
| 721 | (0x0100, "Reserved1", 0, 0), |
| 722 | (0x0200, "Reserved2", 0, 0), |
| 723 | (0x0400, "Reserved3", 0, 0), |
| 724 | (0x0800, "Reserved4", 0, 0), |
| 725 | (0x1000, "Reserved5", 0, 0), |
| 726 | (0x2000, "Reserved6", 0, 0), |
| 727 | (0x4000, "Reserved7", 0, 0), |
| 728 | (0x8000, "Reserved8", 0, 0), |
| 729 | ] |
| 730 | |
| 731 | def _buildDict(): |
| 732 | d = {} |
| 733 | for mask, name, isDevice, signed in valueRecordFormat: |
| 734 | d[name] = mask, isDevice, signed |
| 735 | return d |
| 736 | |
| 737 | valueRecordFormatDict = _buildDict() |
| 738 | |
| 739 | |
| 740 | class ValueRecordFactory: |
| 741 | |
jvr | 64b5c80 | 2002-05-11 10:21:36 +0000 | [diff] [blame] | 742 | """Given a format code, this object convert ValueRecords.""" |
| 743 | |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 744 | def setFormat(self, valueFormat): |
| 745 | format = [] |
| 746 | for mask, name, isDevice, signed in valueRecordFormat: |
| 747 | if valueFormat & mask: |
| 748 | format.append((name, isDevice, signed)) |
| 749 | self.format = format |
| 750 | |
| 751 | def readValueRecord(self, reader, font): |
| 752 | format = self.format |
| 753 | if not format: |
| 754 | return None |
| 755 | valueRecord = ValueRecord() |
| 756 | for name, isDevice, signed in format: |
| 757 | if signed: |
| 758 | value = reader.readShort() |
| 759 | else: |
| 760 | value = reader.readUShort() |
| 761 | if isDevice: |
| 762 | if value: |
| 763 | import otTables |
| 764 | subReader = reader.getSubReader(value) |
| 765 | value = getattr(otTables, name)() |
| 766 | value.decompile(subReader, font) |
| 767 | else: |
| 768 | value = None |
| 769 | setattr(valueRecord, name, value) |
| 770 | return valueRecord |
| 771 | |
| 772 | def writeValueRecord(self, writer, font, valueRecord): |
| 773 | for name, isDevice, signed in self.format: |
| 774 | value = getattr(valueRecord, name, 0) |
| 775 | if isDevice: |
| 776 | if value: |
| 777 | subWriter = writer.getSubWriter() |
| 778 | writer.writeSubTable(subWriter) |
| 779 | value.compile(subWriter, font) |
| 780 | else: |
| 781 | writer.writeUShort(0) |
| 782 | elif signed: |
| 783 | writer.writeShort(value) |
| 784 | else: |
| 785 | writer.writeUShort(value) |
| 786 | |
| 787 | |
| 788 | class ValueRecord: |
| 789 | |
| 790 | # see ValueRecordFactory |
| 791 | |
| 792 | def getFormat(self): |
| 793 | format = 0 |
| 794 | for name in self.__dict__.keys(): |
| 795 | format = format | valueRecordFormatDict[name][0] |
| 796 | return format |
| 797 | |
| 798 | def toXML(self, xmlWriter, font, valueName, attrs=None): |
| 799 | if attrs is None: |
| 800 | simpleItems = [] |
| 801 | else: |
| 802 | simpleItems = list(attrs) |
| 803 | for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values |
| 804 | if hasattr(self, name): |
| 805 | simpleItems.append((name, getattr(self, name))) |
| 806 | deviceItems = [] |
| 807 | for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records |
| 808 | if hasattr(self, name): |
| 809 | device = getattr(self, name) |
| 810 | if device is not None: |
| 811 | deviceItems.append((name, device)) |
| 812 | if deviceItems: |
| 813 | xmlWriter.begintag(valueName, simpleItems) |
| 814 | xmlWriter.newline() |
| 815 | for name, deviceRecord in deviceItems: |
| 816 | if deviceRecord is not None: |
| 817 | deviceRecord.toXML(xmlWriter, font) |
| 818 | xmlWriter.endtag(valueName) |
| 819 | xmlWriter.newline() |
| 820 | else: |
| 821 | xmlWriter.simpletag(valueName, simpleItems) |
| 822 | xmlWriter.newline() |
| 823 | |
| 824 | def fromXML(self, (name, attrs, content), font): |
| 825 | import otTables |
| 826 | for k, v in attrs.items(): |
| 827 | setattr(self, k, int(v)) |
| 828 | for element in content: |
| 829 | if type(element) <> TupleType: |
| 830 | continue |
| 831 | name, attrs, content = element |
| 832 | value = getattr(otTables, name)() |
| 833 | for elem2 in content: |
| 834 | if type(elem2) <> TupleType: |
| 835 | continue |
| 836 | value.fromXML(elem2, font) |
| 837 | setattr(self, name, value) |
| 838 | |
| 839 | def __cmp__(self, other): |
| 840 | # this is only for debugging, so it's ok to barf |
| 841 | # when 'other' has no __dict__ or __class__ |
| 842 | rv = cmp(self.__class__, other.__class__) |
| 843 | if not rv: |
| 844 | rv = cmp(self.__dict__, other.__dict__) |
| 845 | return rv |
| 846 | else: |
| 847 | return rv |
| 848 | |