#!/usr/bin/env python3
""" Utility to make the FONTCHARACTER set binary (.set) files. """

import os
import shutil
from argparse import ArgumentParser

from fontcharacter import Reference

# ---
# Functions for encoding.
# ---

def tobytes(num, length):
    """ Encode an unsigned integer as big-endian bytes of the given length. """
    return num.to_bytes(length, byteorder='big', signed=False)

def charbytes(char):
    """ Encode a character code on one byte, or two if it does not fit. """
    return tobytes(char, 2) if char > 0xFF else tobytes(char, 1)

def make_pool(pool):
    """ Concatenate the pool entries, returning the data and each entry's
        offset within it. """
    # TODO: make this function optimize space, some day?
    bdata = b''
    offsets = []
    for data in pool:
        offsets += [len(bdata)]
        bdata += bytes(data)
    return bdata, offsets

def encode_set(ref, fset):
    """ Encode a character set as the content of a .set binary file. """
    global args
    bheader = bytes()
    blead = bytes()
    bchars = bytes()
    bdata = bytes()

    # Prepare the characters data.
    leading = {}
    char_count = 0
    dchars = []
    pool = []
    for char_id, (code, char) in enumerate(fset['characters'].items()):
        char_count = char_id + 1

        # Check if we should inherit.
        if char['_pr'] > 0:
            char = ref.get(char['inherit'])['characters'][code]

        # Check the leader.
        lead = code >> 8
        if lead not in leading:
            leading[lead] = char_id

        # Get the multi size, feed the pool if needed.
        mul_sz = 0
        if char.get('multi') and char['multi']:
            mul = b''.join(map(charbytes, char['multi']))
            pool += [mul]
            mul_sz = len(mul)

        # Get the unicode size, feed the pool if needed.
        uni_sz = 0
        if not args.no_unicode and char.get('unicode') and char['unicode']:
            uni = ''.join(map(chr, char['unicode'])).encode('utf-8')
            pool += [uni]
            uni_sz = len(uni)

        # Get the CAT token size, feed the pool if needed.
        cat_sz = 0

        # Get the Newcat token size.
        newcat_sz = 0

        # Get the CTF token size.
        ctf_sz = 0

        # Get the Casemul token size.
        casemul_sz = 0

        # Add all of these elements to the chars data table.
        dchars += [{
            'code': code, 'mul_sz': mul_sz, 'uni_sz': uni_sz,
            'cat_sz': cat_sz, 'newcat_sz': newcat_sz,
            'ctf_sz': ctf_sz, 'casemul_sz': casemul_sz}]

    # Make the pool.
    bdata, offsets = make_pool(pool)

    # Make the character entries, consuming the pool offsets in the same
    # order the pool was fed (multi first, then unicode, per character).
    offsets = iter(offsets)
    for char in dchars:
        ent = tobytes(char['code'], 2) + tobytes(char['mul_sz'], 1) \
            + tobytes(char['uni_sz'], 1) + tobytes(char['cat_sz'], 1) \
            + tobytes(char['newcat_sz'], 1) + tobytes(char['ctf_sz'], 1) \
            + tobytes(char['casemul_sz'], 1)

        # Offsets.
        ent += tobytes(next(offsets) if char['mul_sz'] > 0 else 0, 4)
        if not args.no_unicode:
            ent += tobytes(next(offsets) if char['uni_sz'] > 0 else 0, 4)
        if not args.no_cat:
            ent += tobytes(next(offsets) if char['cat_sz'] > 0 else 0, 4)
        if not args.no_newcat:
            ent += tobytes(next(offsets) if char['newcat_sz'] > 0 else 0, 4)
        if not args.no_ctf:
            ent += tobytes(next(offsets) if char['ctf_sz'] > 0 else 0, 4)
        if not args.no_casemul:
            ent += tobytes(next(offsets) if char['casemul_sz'] > 0 else 0, 4)

        bchars += ent

    # Complete the leading table with leaders that have no characters.
    for lead in fset['leading']:
        if lead not in leading:
            leading[lead] = char_count

    # Make the leading table.
    for lead, off in leading.items():
        ent = bytes([lead, 0]) + tobytes(off, 2)
        blead += ent

    # Make the flags.
    flags = 0x1F
    if args.no_unicode:
        flags &= ~0x01
    if args.no_cat:
        flags &= ~0x02
    if args.no_newcat:
        flags &= ~0x04
    if args.no_ctf:
        flags &= ~0x08
    if args.no_casemul:
        flags &= ~0x10

    # Make the checksum.
    csum = (sum(blead) + sum(bchars) + sum(bdata)) & 0xFFFFFFFF

    # Make the lengths.
    datalen = len(bdata)
    filesize = 32 + len(blead) + len(bchars) + datalen

    # Finish making the main header.
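    # Layout of the 32-byte header built below, as derived from this code
    # (the meaning of the zero-filled fields is not specified here and is
    # presumably reserved space):
    #    8 bytes  magic ("CASIOFC\x7F")
    #    1 byte   format version (0x01)
    #    1 byte   number of leading table entries
    #    2 bytes  number of characters
    #    1 byte   flags (which equivalent/token tables are present)
    #    7 bytes  zeroes
    #    4 bytes  checksum
    #    4 bytes  file size
    #    4 bytes  data zone length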
    bheader = bytes(list(map(ord, "CASIOFC\x7F")) + [0x01]) \
        + tobytes(len(leading), 1) + tobytes(char_count, 2) \
        + tobytes(flags, 1) + tobytes(0, 1) + tobytes(0, 2) + tobytes(0, 4) \
        + tobytes(csum, 4) + tobytes(filesize, 4) + tobytes(datalen, 4)

    return bheader + blead + bchars + bdata

# ---
# Main function.
# ---

if __name__ == '__main__':
    ap = ArgumentParser(description="FONTCHARACTER reference binary generator")
    ap.add_argument('--no-unicode', help='No Unicode equivalents?',
        action="store_true")
    ap.add_argument('--no-cat', help='No CAT tokens?',
        action="store_true")
    ap.add_argument('--no-newcat', help='No Newcat tokens?',
        action="store_true")
    ap.add_argument('--no-ctf', help='No CTF tokens?',
        action="store_true")
    ap.add_argument('--no-casemul', help="No Casemul tokens?",
        action="store_true")
    ap.add_argument('--output', '-o', help='The output directory path.',
        default=os.path.join(os.getcwd(), 'generated_sets'))
    ap.add_argument('--refpath', help='The reference path.',
        default=os.getcwd())
    args = ap.parse_args()

    # Obtain the reference.
    ref = Reference(args.refpath)

    # Make the directory.
    if os.path.isdir(args.output):
        shutil.rmtree(args.output)
    elif os.path.exists(args.output):
        os.remove(args.output)
    os.makedirs(args.output)

    # For each set, make the file.
    for set_name, set_val in map(lambda x: (x, ref.get(x)), ref.list()):
        with open(os.path.join(args.output, set_name + '.set'), 'wb') as f:
            f.write(encode_set(ref, set_val))

# End of file.