#!/usr/bin/env python

import collections
import copy
import glob
import itertools
from os import path
import sys
from xml.etree import ElementTree

from fontTools import ttLib

EMOJI_VS = 0xFE0F

LANG_TO_SCRIPT = {
    'as': 'Beng',
    'bn': 'Beng',
    'cy': 'Latn',
    'da': 'Latn',
    'de': 'Latn',
    'en': 'Latn',
    'es': 'Latn',
    'et': 'Latn',
    'eu': 'Latn',
    'fr': 'Latn',
    'ga': 'Latn',
    'gu': 'Gujr',
    'hi': 'Deva',
    'hr': 'Latn',
    'hu': 'Latn',
    'hy': 'Armn',
    'ja': 'Jpan',
    'kn': 'Knda',
    'ko': 'Kore',
    'ml': 'Mlym',
    'mn': 'Cyrl',
    'mr': 'Deva',
    'nb': 'Latn',
    'nn': 'Latn',
    'or': 'Orya',
    'pa': 'Guru',
    'pt': 'Latn',
    'sl': 'Latn',
    'ta': 'Taml',
    'te': 'Telu',
    'tk': 'Latn',
}


def lang_to_script(lang_code):
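    """Return the most likely script for a BCP 47 language tag.

    Falls back through hyphen-separated subtags: an explicit four-letter
    script subtag is returned directly; otherwise the language is looked up
    in LANG_TO_SCRIPT, and an assertion fails if it is unknown.
    """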
    lang = lang_code.lower()
    while lang not in LANG_TO_SCRIPT:
        hyphen_idx = lang.rfind('-')
        assert hyphen_idx != -1, (
            'We do not know what script the "%s" language is written in.'
            % lang_code)
        assumed_script = lang[hyphen_idx+1:]
        if len(assumed_script) == 4 and assumed_script.isalpha():
            # This is actually the script
            return assumed_script.title()
        lang = lang[:hyphen_idx]
    return LANG_TO_SCRIPT[lang]


def printable(inp):
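    """Format a character, sequence, or set of sequences in U+XXXX notation."""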
    if type(inp) is set:  # set of character sequences
        return '{' + ', '.join([printable(seq) for seq in inp]) + '}'
    if type(inp) is tuple:  # character sequence
        return '<' + (', '.join([printable(ch) for ch in inp])) + '>'
    else:  # single character
        return 'U+%04X' % inp


def open_font(font):
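    """Open a (font_file, collection_index) pair from _fonts_dir as a fontTools TTFont."""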
    font_file, index = font
    font_path = path.join(_fonts_dir, font_file)
    if index is not None:
        return ttLib.TTFont(font_path, fontNumber=index)
    else:
        return ttLib.TTFont(font_path)


def get_best_cmap(font):
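    """Return the font's cmap dict, preferring the format 12 (UCS-4) subtable
    over the format 4 (BMP-only) one."""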
    ttfont = open_font(font)
    all_unicode_cmap = None
    bmp_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (4, 3, 1):
            assert bmp_cmap is None, 'More than one BMP cmap in %s' % (font, )
            bmp_cmap = cmap
        elif specifier == (12, 3, 10):
            assert all_unicode_cmap is None, (
                'More than one UCS-4 cmap in %s' % (font, ))
            all_unicode_cmap = cmap

    return all_unicode_cmap.cmap if all_unicode_cmap else bmp_cmap.cmap


def get_variation_sequences_cmap(font):
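    """Return the format 14 cmap subtable listing the font's variation sequences."""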
    ttfont = open_font(font)
    vs_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (14, 0, 5):
            assert vs_cmap is None, 'More than one VS cmap in %s' % (font, )
            vs_cmap = cmap
    return vs_cmap


def get_emoji_map(font):
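    """Return a map from supported characters and character sequences to glyphs.

    Combines the regular cmap, the variation sequences cmap, and the type 4
    (ligature) GSUB lookups that encode multi-character emoji sequences.
    """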
    # Add normal characters
    emoji_map = copy.copy(get_best_cmap(font))
    reverse_cmap = {glyph: code for code, glyph in emoji_map.items()}

    # Add variation sequences
    vs_dict = get_variation_sequences_cmap(font).uvsDict
    for vs in vs_dict:
        for base, glyph in vs_dict[vs]:
            if glyph is None:
                emoji_map[(base, vs)] = emoji_map[base]
            else:
                emoji_map[(base, vs)] = glyph

    # Add GSUB rules
    ttfont = open_font(font)
    for lookup in ttfont['GSUB'].table.LookupList.Lookup:
        assert lookup.LookupType == 4, 'We only understand type 4 lookups'
        for subtable in lookup.SubTable:
            ligatures = subtable.ligatures
            for first_glyph in ligatures:
                for ligature in ligatures[first_glyph]:
                    sequence = [first_glyph] + ligature.Component
                    sequence = [reverse_cmap[glyph] for glyph in sequence]
                    sequence = tuple(sequence)
                    # Make sure no starting subsequence of 'sequence' has been
                    # seen before.
                    for sub_len in range(2, len(sequence)+1):
                        subsequence = sequence[:sub_len]
                        assert subsequence not in emoji_map
                    emoji_map[sequence] = ligature.LigGlyph

    return emoji_map


def assert_font_supports_any_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        if char in best_cmap:
            return
    sys.exit('None of the characters in %s were found in %s' % (chars, font))


def assert_font_supports_all_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char in best_cmap, (
            'U+%04X was not found in %s' % (char, font))


def assert_font_supports_none_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char not in best_cmap, (
            'U+%04X was found in %s' % (char, font))


def assert_font_supports_all_sequences(font, sequences):
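    """Assert that every (base, variation selector) pair in 'sequences' is
    listed as a default UVS entry in the font's variation sequences cmap."""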
    vs_dict = get_variation_sequences_cmap(font).uvsDict
    for base, vs in sorted(sequences):
        assert vs in vs_dict and (base, None) in vs_dict[vs], (
            '<U+%04X, U+%04X> was not found in %s' % (base, vs, font))


def check_hyphens(hyphens_dir):
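    """Check that the fonts for every script with hyphenation patterns support
    either HYPHEN-MINUS (U+002D) or HYPHEN (U+2010)."""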
    # Find all the scripts that need automatic hyphenation
    scripts = set()
    for hyb_file in glob.iglob(path.join(hyphens_dir, '*.hyb')):
        hyb_file = path.basename(hyb_file)
        assert hyb_file.startswith('hyph-'), (
            'Unknown hyphenation file %s' % hyb_file)
        lang_code = hyb_file[hyb_file.index('-')+1:hyb_file.index('.')]
        scripts.add(lang_to_script(lang_code))

    HYPHENS = {0x002D, 0x2010}
    for script in scripts:
        fonts = _script_to_font_map[script]
        assert fonts, 'No fonts found for the "%s" script' % script
        for font in fonts:
            assert_font_supports_any_of_chars(font, HYPHENS)


class FontRecord(object):
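    """Record of a single <font> element parsed from fonts.xml."""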
    def __init__(self, name, scripts, variant, weight, style, font):
        self.name = name
        self.scripts = scripts
        self.variant = variant
        self.weight = weight
        self.style = style
        self.font = font


def parse_fonts_xml(fonts_xml_path):
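    """Parse fonts.xml into the global _fallback_chain (list of FontRecord)
    and _script_to_font_map (script code to set of fonts)."""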
    global _script_to_font_map, _fallback_chain
    _script_to_font_map = collections.defaultdict(set)
    _fallback_chain = []
    tree = ElementTree.parse(fonts_xml_path)
    for family in tree.findall('family'):
        name = family.get('name')
        variant = family.get('variant')
        langs = family.get('lang')
        if name:
            assert variant is None, (
                'No variant expected for LGC font %s.' % name)
            assert langs is None, (
                'No language expected for LGC font %s.' % name)
        else:
            assert variant in {None, 'elegant', 'compact'}, (
                'Unexpected value for variant: %s' % variant)

        if langs:
            langs = langs.split()
            scripts = {lang_to_script(lang) for lang in langs}
        else:
            scripts = set()

        for child in family:
            assert child.tag == 'font', (
                'Unknown tag <%s>' % child.tag)
            font_file = child.text
            weight = int(child.get('weight'))
            assert weight % 100 == 0, (
                'Font weight "%d" is not a multiple of 100.' % weight)

            style = child.get('style')
            assert style in {'normal', 'italic'}, (
                'Unknown style "%s"' % style)

            index = child.get('index')
            if index:
                index = int(index)

            _fallback_chain.append(FontRecord(
                name,
                frozenset(scripts),
                variant,
                weight,
                style,
                (font_file, index)))

            if name:  # non-empty names are used for default LGC fonts
                map_scripts = {'Latn', 'Grek', 'Cyrl'}
            else:
                map_scripts = scripts
            for script in map_scripts:
                _script_to_font_map[script].add((font_file, index))


def check_emoji_coverage(all_emoji, equivalent_emoji):
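    """Check that the emoji font covers exactly the expected set of emoji.

    Every sequence in all_emoji must map to a glyph, nothing outside all_emoji
    (plus a few control characters) may be covered, and sequences listed in
    equivalent_emoji must share a glyph.
    """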
    emoji_fonts = [
        record.font for record in _fallback_chain
        if 'Zsye' in record.scripts]
    assert len(emoji_fonts) == 1, 'There are %d emoji fonts.' % len(emoji_fonts)
    emoji_font = emoji_fonts[0]
    coverage = get_emoji_map(emoji_font)

    for sequence in all_emoji:
        assert sequence in coverage, (
            '%s is not supported in the emoji font.' % printable(sequence))

    for sequence in coverage:
        if sequence in {0x0000, 0x000D, 0x0020}:
            # The font needs to support a few extra characters, which is OK
            continue
        assert sequence in all_emoji, (
            'Emoji font should not support %s.' % printable(sequence))

    for first, second in sorted(equivalent_emoji.items()):
        assert coverage[first] == coverage[second], (
            '%s and %s should map to the same glyph.' % (
                printable(first),
                printable(second)))

    for glyph in set(coverage.values()):
        maps_to_glyph = [seq for seq in coverage if coverage[seq] == glyph]
        if len(maps_to_glyph) > 1:
            # There is more than one sequence mapping to the same glyph. We
            # need to make sure they were expected to be equivalent.
            equivalent_seqs = set()
            for seq in maps_to_glyph:
                equivalent_seq = seq
                while equivalent_seq in equivalent_emoji:
                    equivalent_seq = equivalent_emoji[equivalent_seq]
                equivalent_seqs.add(equivalent_seq)
            assert len(equivalent_seqs) == 1, (
                'The sequences %s should not result in the same glyph %s' % (
                    printable(equivalent_seqs),
                    glyph))


def check_emoji_defaults(default_emoji):
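    """Check that default-emoji characters are not covered by non-emoji fonts,
    and that default-text emoji characters get monochrome coverage from fonts
    earlier in the fallback chain (with a few known exceptions below)."""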
    missing_text_chars = _emoji_properties['Emoji'] - default_emoji
    emoji_font_seen = False
    for record in _fallback_chain:
        if 'Zsye' in record.scripts:
            emoji_font_seen = True
            # No need to check the emoji font
            continue
        # For later fonts, we only check them if they have a script
        # defined, since the defined script may get them to a higher
        # score even if they appear after the emoji font.
        if emoji_font_seen and not record.scripts:
            continue

        # Check default emoji-style characters
        assert_font_supports_none_of_chars(record.font, sorted(default_emoji))

        # Mark default text-style characters appearing in fonts above the emoji
        # font as seen
        if not emoji_font_seen:
            missing_text_chars -= set(get_best_cmap(record.font))

    # Noto does not have monochrome glyphs for Unicode 7.0 wingdings and
    # webdings yet.
    missing_text_chars -= _chars_by_age['7.0']
    # TODO: Remove these after b/26113320 is fixed
    missing_text_chars -= {
        0x263A,  # WHITE SMILING FACE
        0x270C,  # VICTORY HAND
        0x2744,  # SNOWFLAKE
        0x2764,  # HEAVY BLACK HEART
    }
    assert missing_text_chars == set(), (
        'Text-style versions of some emoji characters are missing: ' +
        repr(missing_text_chars))


# Setting reverse to true returns a dictionary that maps the values to sets of
# characters, useful for some binary properties. Otherwise, we get a
# dictionary that maps characters to the property values, assuming there's
# only one property in the file.
def parse_unicode_datafile(file_path, reverse=False):
    if reverse:
        output_dict = collections.defaultdict(set)
    else:
        output_dict = {}
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue

            chars, prop = line.split(';')
            chars = chars.strip()
            prop = prop.strip()

            if ' ' in chars:  # character sequence
                sequence = [int(ch, 16) for ch in chars.split(' ')]
                additions = [tuple(sequence)]
            elif '..' in chars:  # character range
                char_start, char_end = chars.split('..')
                char_start = int(char_start, 16)
                char_end = int(char_end, 16)
                additions = xrange(char_start, char_end+1)
            else:  # single character
                additions = [int(chars, 16)]
            if reverse:
                output_dict[prop].update(additions)
            else:
                for addition in additions:
                    assert addition not in output_dict
                    output_dict[addition] = prop
    return output_dict


def parse_standardized_variants(file_path):
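    """Parse StandardizedVariants.txt, returning the (base, variation selector)
    pairs marked as 'text style' and 'emoji style', as two sets."""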
    emoji_set = set()
    text_set = set()
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue
            sequence, description, _ = line.split(';')
            sequence = sequence.strip().split(' ')
            base = int(sequence[0], 16)
            vs = int(sequence[1], 16)
            description = description.strip()
            if description == 'text style':
                text_set.add((base, vs))
            elif description == 'emoji style':
                emoji_set.add((base, vs))
    return text_set, emoji_set


def parse_ucd(ucd_path):
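    """Load the Unicode and emoji data files into module-level globals,
    filtering out the code points excluded below."""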
    global _emoji_properties, _chars_by_age
    global _text_variation_sequences, _emoji_variation_sequences
    global _emoji_sequences, _emoji_zwj_sequences
    _emoji_properties = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-data.txt'), reverse=True)
    _chars_by_age = parse_unicode_datafile(
        path.join(ucd_path, 'DerivedAge.txt'), reverse=True)
    sequences = parse_standardized_variants(
        path.join(ucd_path, 'StandardizedVariants.txt'))
    _text_variation_sequences, _emoji_variation_sequences = sequences
    _emoji_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-sequences.txt'))
    _emoji_zwj_sequences = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-zwj-sequences.txt'))

    # filter modern pentathlon, as it seems likely to be removed from final spec
    # also filter rifle
    def is_excluded(n):
        return n in [0x1f93b, 0x1f946]

    def contains_excluded(t):
        if type(t) == int:
            return is_excluded(t)
        return any(is_excluded(cp) for cp in t)

    # filter modern pentathlon, as it seems likely to be removed from final spec
    _emoji_properties['Emoji'] = set(
        t for t in _emoji_properties['Emoji'] if not contains_excluded(t))
    _emoji_sequences = dict(
        (t, v) for (t, v) in _emoji_sequences.items() if not contains_excluded(t))


def flag_sequence(territory_code):
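    """Return the regional indicator symbol sequence for a two-letter code."""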
    return tuple(0x1F1E6 + ord(ch) - ord('A') for ch in territory_code)


UNSUPPORTED_FLAGS = frozenset({
    flag_sequence('BL'), flag_sequence('BQ'), flag_sequence('DG'),
    flag_sequence('EA'), flag_sequence('EH'), flag_sequence('FK'),
    flag_sequence('GF'), flag_sequence('GP'), flag_sequence('GS'),
    flag_sequence('MF'), flag_sequence('MQ'), flag_sequence('NC'),
    flag_sequence('PM'), flag_sequence('RE'), flag_sequence('TF'),
    flag_sequence('WF'), flag_sequence('XK'), flag_sequence('YT'),
})

EQUIVALENT_FLAGS = {
    flag_sequence('BV'): flag_sequence('NO'),
    flag_sequence('CP'): flag_sequence('FR'),
    flag_sequence('HM'): flag_sequence('AU'),
    flag_sequence('SJ'): flag_sequence('NO'),
    flag_sequence('UM'): flag_sequence('US'),
}

COMBINING_KEYCAP = 0x20E3

LEGACY_ANDROID_EMOJI = {
    0xFE4E5: flag_sequence('JP'),
    0xFE4E6: flag_sequence('US'),
    0xFE4E7: flag_sequence('FR'),
    0xFE4E8: flag_sequence('DE'),
    0xFE4E9: flag_sequence('IT'),
    0xFE4EA: flag_sequence('GB'),
    0xFE4EB: flag_sequence('ES'),
    0xFE4EC: flag_sequence('RU'),
    0xFE4ED: flag_sequence('CN'),
    0xFE4EE: flag_sequence('KR'),
    0xFE82C: (ord('#'), COMBINING_KEYCAP),
    0xFE82E: (ord('1'), COMBINING_KEYCAP),
    0xFE82F: (ord('2'), COMBINING_KEYCAP),
    0xFE830: (ord('3'), COMBINING_KEYCAP),
    0xFE831: (ord('4'), COMBINING_KEYCAP),
    0xFE832: (ord('5'), COMBINING_KEYCAP),
    0xFE833: (ord('6'), COMBINING_KEYCAP),
    0xFE834: (ord('7'), COMBINING_KEYCAP),
    0xFE835: (ord('8'), COMBINING_KEYCAP),
    0xFE836: (ord('9'), COMBINING_KEYCAP),
    0xFE837: (ord('0'), COMBINING_KEYCAP),
}

ZWJ_IDENTICALS = {
    # KISS
    (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F48B, 0x200D, 0x1F468): 0x1F48F,
    # COUPLE WITH HEART
    (0x1F469, 0x200D, 0x2764, 0x200D, 0x1F468): 0x1F491,
    # FAMILY
    (0x1F468, 0x200D, 0x1F469, 0x200D, 0x1F466): 0x1F46A,
}


def compute_expected_emoji():
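    """Compute the emoji sets the emoji font is expected to cover.

    Returns (all_emoji, default_emoji, equivalent_emoji): everything that must
    be supported, the subset that must default to emoji presentation, and a
    map of sequences expected to share a glyph.
    """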
    equivalent_emoji = {}
    sequence_pieces = set()
    all_sequences = set()
    all_sequences.update(_emoji_variation_sequences)

    for sequence in _emoji_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)

    for sequence in _emoji_zwj_sequences.keys():
        sequence = tuple(ch for ch in sequence if ch != EMOJI_VS)
        all_sequences.add(sequence)
        sequence_pieces.update(sequence)
        # Add the reverse of all emoji ZWJ sequences, which are added to the
        # fonts as a workaround to get the sequences to work in RTL text.
        reversed_seq = tuple(reversed(sequence))
        all_sequences.add(reversed_seq)
        equivalent_emoji[reversed_seq] = sequence

    # Add all two-letter flag sequences, as even the unsupported ones should
    # resolve to a flag tofu.
    all_letters = [chr(code) for code in range(ord('A'), ord('Z')+1)]
    all_two_letter_codes = itertools.product(all_letters, repeat=2)
    all_flags = {flag_sequence(code) for code in all_two_letter_codes}
    all_sequences.update(all_flags)
    tofu_flags = UNSUPPORTED_FLAGS | (all_flags - set(_emoji_sequences.keys()))

    all_emoji = (
        _emoji_properties['Emoji'] |
        all_sequences |
        sequence_pieces |
        set(LEGACY_ANDROID_EMOJI.keys()))
    default_emoji = (
        _emoji_properties['Emoji_Presentation'] |
        all_sequences |
        set(LEGACY_ANDROID_EMOJI.keys()))

    first_tofu_flag = sorted(tofu_flags)[0]
    for flag in tofu_flags:
        if flag != first_tofu_flag:
            equivalent_emoji[flag] = first_tofu_flag
    equivalent_emoji.update(EQUIVALENT_FLAGS)
    equivalent_emoji.update(LEGACY_ANDROID_EMOJI)
    equivalent_emoji.update(ZWJ_IDENTICALS)
    for seq in _emoji_variation_sequences:
        equivalent_emoji[seq] = seq[0]

    return all_emoji, default_emoji, equivalent_emoji


def main():
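    """Entry point: argv[1] is the target output directory; argv[2] is 'true'
    to enable the emoji checks, in which case argv[3] is the UCD data path."""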
    target_out = sys.argv[1]
    global _fonts_dir
    _fonts_dir = path.join(target_out, 'fonts')

    fonts_xml_path = path.join(target_out, 'etc', 'fonts.xml')
    parse_fonts_xml(fonts_xml_path)

    hyphens_dir = path.join(target_out, 'usr', 'hyphen-data')
    check_hyphens(hyphens_dir)

    check_emoji = sys.argv[2]
    if check_emoji == 'true':
        ucd_path = sys.argv[3]
        parse_ucd(ucd_path)
        all_emoji, default_emoji, equivalent_emoji = compute_expected_emoji()
        check_emoji_coverage(all_emoji, equivalent_emoji)
        check_emoji_defaults(default_emoji)


if __name__ == '__main__':
    main()