from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType

class OverflowErrorRecord:
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)


class BaseTTXConverter(DefaultTable):
	
	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""
	
	def decompile(self, data, font):
		import otTables
		cachingStats = None if True else {}  # debug toggle: change to {} to collect subtable offset reuse statistics
		reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = [(v, k) for k, v in cachingStats.items()]
			stats.sort()
			stats.reverse()
			print "cachingStats for ", self.tableTag
			for v, k in stats:
				if v < 2:
					break
				print v, k
			print "---", len(stats)
	
	def compile(self, font):
		""" Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method for the table
				for each 'converter' record in the table converter list
					call the converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						  writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer
						  to the writer's self.items, then calls the item's compile method.
			This creates a tree of writers, rooted at the GSUB/GPOS writer, with
			each writer representing a table, and the writer.items list containing
			the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables, which traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData to get the data in each table, now that
				positions and offsets are known.

			If a lookup subtable overflows an offset, we have to start all over;
			see the illustrative retry sketch after this class.
		"""
		writer = OTTableWriter(self.tableTag)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()
	
	def toXML(self, writer, font):
		self.table.toXML2(writer, font)
	
	def fromXML(self, (name, attrs, content), font):
		import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML((name, attrs, content), font)


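# Illustrative sketch, not called by the library: one way a caller could drive
# compile() when a lookup subtable overflows a 16-bit offset.  The
# "fixOverflow" callback is hypothetical (it is not part of this module); it
# would restructure the font's lookups based on the OverflowErrorRecord and
# return a true value if it changed anything, so that compile can be retried.
def _exampleCompileWithOverflowRetry(converter, font, fixOverflow):
	while 1:
		try:
			return converter.compile(font)
		except OTLOffsetOverflowError, e:
			overflowRecord = e.value  # an OverflowErrorRecord instance
			if not fixOverflow(font, overflowRecord):
				raise

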
class OTTableReader:
	
	"""Helper class to retrieve data from an OpenType table.
	See the illustrative sketch after this class for basic usage."""
	
	def __init__(self, data, tableType, offset=0, valueFormat=None, cachingStats=None):
		self.data = data
		self.offset = offset
		self.pos = offset
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = (ValueRecordFactory(), ValueRecordFactory())
		self.valueFormat = valueFormat
		self.cachingStats = cachingStats
	
	def getSubReader(self, offset, persistent=False):
		offset = self.offset + offset
		if self.cachingStats is not None:
			try:
				self.cachingStats[offset] = self.cachingStats[offset] + 1
			except KeyError:
				self.cachingStats[offset] = 1
		valueFormat = self.valueFormat
		if persistent:
			valueFormat = tuple(ValueRecordFactory(v) for v in valueFormat)
		
		subReader = self.__class__(self.data, self.tableType, offset,
				valueFormat, self.cachingStats)
		return subReader
	
	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value
	
	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value
	
	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value
	
	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value
	
	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = self.data[pos:newpos]
		assert len(value) == 4
		self.pos = newpos
		return value
	
	def readStruct(self, format, size=None):
		if size is None:
			size = struct.calcsize(format)
		else:
			assert size == struct.calcsize(format)
		pos = self.pos
		newpos = pos + size
		values = struct.unpack(format, self.data[pos:newpos])
		self.pos = newpos
		return values
	
	def setValueFormat(self, format, which):
		self.valueFormat[which].setFormat(format)
	
	def readValueRecord(self, font, which):
		return self.valueFormat[which].readValueRecord(self, font)


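# Illustrative sketch, not called by the library: OTTableReader walks the raw
# table data, reading big-endian values and advancing its position.
def _exampleOTTableReader():
	reader = OTTableReader("\x00\x02GSUB", "GSUB")
	assert reader.readUShort() == 2    # two bytes, big-endian
	assert reader.readTag() == "GSUB"  # four raw bytes
	assert reader.pos == 6

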
class OTTableWriter:
	
	"""Helper class to gather and assemble data for OpenType tables.
	See the illustrative sketch after this class for how subtable offsets
	are resolved."""
	
	def __init__(self, tableType, valueFormat=None):
		self.items = []
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = ValueRecordFactory(), ValueRecordFactory()
		self.valueFormat = valueFormat
		self.pos = None
	
	# assembler interface
	
	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()
		
		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()
		
		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)
		
		for table in extTables:
			tableData = table.getData()
			data.append(tableData)
		
		return "".join(data)
	
	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l
	
	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]
			
			if hasattr(item, "getData"):
				if longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# provide data to fix the overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in ['LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# overflow is within a subtable. Life is more complicated.
							# If we split the subtable just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subtable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths in a depth-first traversal,
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length
							
							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list,
								# so the original overflow is really in the item list. Skip the
								# Coverage table in the following test.
								items = items[i+1:]
							
							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						
						raise OTLOffsetOverflowError, overflowErrorRecord
		
		return "".join(items)
	
	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)
	
	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)
		
		return cmp(self.items, other.items)
	
	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items,
		# collapse duplicate table references to a unique entry.
		# "tables" are OTTableWriter objects.
		
		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = range(len(items))
		
		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if internedTables.has_key(item):
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)
	
	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in the self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None:  # init call for first time.
			tables = []
			extTables = []
			done = {}
		
		done[self] = 1
		
		numItems = len(self.items)
		iRange = range(numItems)
		iRange.reverse()
		
		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0
		
		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self
		
		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue
			
			if sortCoverageLast and (i == 1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue
			
			if appendExtensions:
				assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)
			
			elif not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self
		
		tables.append(self)
		return tables, extTables
	
	# interface for gathering data, as used by table.compile()
	
	def getSubWriter(self):
		subwriter = self.__class__(self.tableType, self.valueFormat)
		subwriter.parent = {0: self}  # because some subtables have identical values, we discard
						# the duplicates under the getAllData method. Hence some
						# subtable writers can have more than one parent writer.
		return subwriter
	
	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))
	
	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))
	
	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))
	
	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))
	
	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)
	
	def writeSubTable(self, subWriter):
		self.items.append(subWriter)
	
	def writeCountReference(self, table, name):
		self.items.append(CountReference(table, name))
	
	def writeStruct(self, format, values):
		data = apply(struct.pack, (format,) + values)
		self.items.append(data)
	
	def writeData(self, data):
		self.items.append(data)
	
	def setValueFormat(self, format, which):
		self.valueFormat[which].setFormat(format)
	
	def writeValueRecord(self, value, font, which):
		return self.valueFormat[which].writeValueRecord(self, font, value)
	
	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else:  # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, item.name])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex
		
		return OverflowErrorRecord((self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex))


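# Illustrative sketch, not called by the library: a parent writer stores a
# subwriter in its items list; getAllData() later replaces that entry with the
# offset from the parent's start to the subtable's position.
def _exampleOTTableWriter():
	writer = OTTableWriter("GSUB")
	writer.parent = None
	subWriter = writer.getSubWriter()
	writer.writeUShort(1)            # a simple 2-byte value
	writer.writeSubTable(subWriter)  # placeholder for a 2-byte offset
	subWriter.writeUShort(0xCAFE)    # the subtable's own data
	# parent data (value 1, offset 4), followed by the subtable data
	assert writer.getAllData() == "\x00\x01\x00\x04\xca\xfe"

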
class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def getCountData(self):
		return packUShort(self.table[self.name])


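# Illustrative sketch, not called by the library: the Count value may still be
# unknown when the reference is written; packing is deferred until
# getCountData() is called during assembly.
def _exampleCountReference():
	table = {"GlyphCount": None}
	countRef = CountReference(table, "GlyphCount")
	table["GlyphCount"] = 2  # filled in later, e.g. via TableStack.storeValue()
	assert countRef.getCountData() == "\x00\x02"

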
def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class TableStack:
	"""A stack of table dicts, working as a stack of namespaces so we can
	retrieve values from (and store values to) tables higher up the stack."""
	def __init__(self, other=None):
		self.stack = other.stack[:] if other else []
	def push(self, table):
		self.stack.append(table)
	def pop(self):
		self.stack.pop()
	def getTop(self):
		return self.stack[-1]
	def getValue(self, name):
		return self.__findTable(name)[name]
	def storeValue(self, name, value):
		table = self.__findTable(name)
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (table[name], value)
	def __findTable(self, name):
		for table in reversed(self.stack):
			if table.has_key(name):
				return table
		raise KeyError, name


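# Illustrative sketch, not called by the library: TableStack resolves a name in
# the innermost table that defines it, so nested tables can read and fill in
# Count values of enclosing tables.
def _exampleTableStack():
	stack = TableStack()
	stack.push({"LookupCount": 2})
	stack.push({"SubTableCount": None})
	assert stack.getValue("LookupCount") == 2  # found one level up
	stack.storeValue("SubTableCount", 1)       # stored where the name is defined
	assert stack.getTop()["SubTableCount"] == 1

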
class BaseTable(object):
	
	"""Generic base class for all OpenType (sub)tables."""
	
	def __init__(self):
		self.compileStatus = 0
		# compileStatus values:
		#   0 means the table was created
		#   1 means the table.read() function was called by a table which is
		#     subject to delayed compilation
		#   2 means that it was subject to delayed compilation, and has been
		#     decompiled
		#   3 means that the start and end fields have been filled out, and that
		#     we can use the data string rather than compiling from the table data.
		
		self.recurse = 0
	
	def __getattr__(self, attr):
		# we get here only when the table does not have the attribute.
		# This method override exists so that we can try to decompile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# shouldn't ever get here - we should only get to two levels of recursion.
			# this guards against self.decompile NOT setting compileStatus to other than 1.
			raise AttributeError, attr
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -= 1
			return val
		
		raise AttributeError, attr
	
	def getConverters(self):
		return self.converters
	
	def getConverterByName(self, name):
		return self.convertersByName[name]
	
	def decompile(self, reader, font, tableStack=None):
		self.compileStatus = 2  # table has been decompiled.
		if tableStack is None:
			tableStack = TableStack()
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		tableStack.push(table)
		for conv in self.getConverters():
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.tableType,
						table["ExtensionLookupType"])
			if conv.repeat:
				l = []
				for i in range(tableStack.getValue(conv.repeat) + conv.repeatOffset):
					l.append(conv.read(reader, font, tableStack))
				table[conv.name] = l
			else:
				table[conv.name] = conv.read(reader, font, tableStack)
		tableStack.pop()
		self.postRead(table, font)
		del self.__rawTable  # succeeded, get rid of debugging info
	
	def ensureDecompiled(self):
		if self.compileStatus != 1:
			return
		self.decompile(self.reader, self.font, self.tableStack)
		del self.reader, self.font, self.tableStack
	
	def preCompile(self):
		pass  # used only by the LookupList class
	
	def compile(self, writer, font, tableStack=None):
		if tableStack is None:
			tableStack = TableStack()
		table = self.preWrite(font)
		
		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1
		
		self.writeFormat(writer)
		tableStack.push(table)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				tableStack.storeValue(conv.repeat, len(value) - conv.repeatOffset)
				for i in range(len(value)):
					conv.write(writer, font, tableStack, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array.
				# We need a default value, as it may be set later by a nested
				# table. TableStack.storeValue() will then find it here.
				table[conv.name] = None
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				writer.writeCountReference(table, conv.name)
			else:
				conv.write(writer, font, tableStack, value)
		tableStack.pop()
	
	def readFormat(self, reader):
		pass
	
	def writeFormat(self, writer):
		pass
	
	def postRead(self, table, font):
		self.__dict__.update(table)
	
	def preWrite(self, font):
		self.ensureDecompiled()
		return self.__dict__.copy()
	
	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()
	
	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			value = getattr(self, conv.name)
			if conv.repeat:
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])
	
	def fromXML(self, (name, attrs, content), font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise  # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)
	
	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)
		
		self.ensureDecompiled()
		
		return cmp(self.__dict__, other.__dict__)


class FormatSwitchingBaseTable(BaseTable):
	
	"""Minor specialization of BaseTable, for tables that have multiple
	formats, eg. CoverageFormat1 vs. CoverageFormat2."""
	
	def getConverters(self):
		return self.converters[self.Format]
	
	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]
	
	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format <> 0, (self, reader.pos, len(reader.data))
	
	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name		isDevice  signed
	(0x0001, "XPlacement",	0,	1),
	(0x0002, "YPlacement",	0,	1),
	(0x0004, "XAdvance",	0,	1),
	(0x0008, "YAdvance",	0,	1),
	(0x0010, "XPlaDevice",	1,	0),
	(0x0020, "YPlaDevice",	1,	0),
	(0x0040, "XAdvDevice",	1,	0),
	(0x0080, "YAdvDevice",	1,	0),
	# reserved:
	(0x0100, "Reserved1",	0,	0),
	(0x0200, "Reserved2",	0,	0),
	(0x0400, "Reserved3",	0,	0),
	(0x0800, "Reserved4",	0,	0),
	(0x1000, "Reserved5",	0,	0),
	(0x2000, "Reserved6",	0,	0),
	(0x4000, "Reserved7",	0,	0),
	(0x8000, "Reserved8",	0,	0),
]

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory:
	
	"""Given a format code, this object converts ValueRecords."""
	
	def __init__(self, other=None):
		self.format = other.format if other else None
	
	def setFormat(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format
	
	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord
	
	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


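# Illustrative sketch, not called by the library: a ValueFormat of 0x0005
# (0x0001 | 0x0004) selects the XPlacement and XAdvance fields, in that order.
def _exampleValueRecordFactory():
	factory = ValueRecordFactory()
	factory.setFormat(0x0005)
	assert [name for name, isDevice, signed in factory.format] == \
			["XPlacement", "XAdvance"]

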
class ValueRecord:
	
	# see ValueRecordFactory
	
	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format
	
	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()
	
| 832 | |
| 833 | def fromXML(self, (name, attrs, content), font): |
| 834 | import otTables |
| 835 | for k, v in attrs.items(): |
| 836 | setattr(self, k, int(v)) |
| 837 | for element in content: |
| 838 | if type(element) <> TupleType: |
| 839 | continue |
| 840 | name, attrs, content = element |
| 841 | value = getattr(otTables, name)() |
| 842 | for elem2 in content: |
| 843 | if type(elem2) <> TupleType: |
| 844 | continue |
| 845 | value.fromXML(elem2, font) |
| 846 | setattr(self, name, value) |
| 847 | |
| 848 | def __cmp__(self, other): |
Behdad Esfahbod | 0ba7aa7 | 2013-10-28 12:07:15 +0100 | [diff] [blame] | 849 | if type(self) != type(other): return cmp(type(self), type(other)) |
| 850 | if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__) |
jvr | d4d1513 | 2002-05-11 00:59:27 +0000 | [diff] [blame] | 851 | |
Behdad Esfahbod | 96b321c | 2013-08-17 11:11:22 -0400 | [diff] [blame] | 852 | return cmp(self.__dict__, other.__dict__) |