add missing files
venv/Lib/site-packages/fontTools/ttLib/tables/C_F_F__2.py (new file, 14 lines)
@@ -0,0 +1,14 @@
from io import BytesIO
from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_


class table_C_F_F__2(table_C_F_F_):

    def decompile(self, data, otFont):
        self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        f = BytesIO()
        self.cff.compile(f, otFont, isCFF2=True)
        return f.getvalue()
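As a rough usage sketch (not part of this commit), the class above is what TTFont dispatches to when it opens a CFF2-flavoured OpenType font; the file name "MyVariableFont.otf" is only a placeholder:

    from fontTools.ttLib import TTFont

    font = TTFont("MyVariableFont.otf")      # assumes a CFF2-flavoured font on disk
    cff2 = font["CFF2"]                      # runs decompile(..., isCFF2=True)
    topDict = cff2.cff.topDictIndex[0]       # a single font per table, as the assert above requires
    print(sorted(topDict.CharStrings.keys())[:5])
    raw = cff2.compile(font)                 # round-trips the table back to bytes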
@@ -0,0 +1,5 @@
from .otBase import BaseTTXConverter


class table_G_D_E_F_(BaseTTXConverter):
    pass
@@ -0,0 +1,5 @@
from .otBase import BaseTTXConverter


class table_M_V_A_R_(BaseTTXConverter):
    pass
venv/Lib/site-packages/fontTools/ttLib/tables/S_I_N_G_.py (new file, 93 lines)
@@ -0,0 +1,93 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
from . import DefaultTable

SINGFormat = """
    > # big endian
    tableVersionMajor: H
    tableVersionMinor: H
    glyphletVersion: H
    permissions: h
    mainGID: H
    unitsPerEm: H
    vertAdvance: h
    vertOrigin: h
    uniqueName: 28s
    METAMD5: 16s
    nameLength: 1s
"""
# baseGlyphName is a byte string which follows the record above.


class table_S_I_N_G_(DefaultTable.DefaultTable):

    dependencies = []

    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(SINGFormat, data, self)
        self.uniqueName = self.decompileUniqueName(self.uniqueName)
        self.nameLength = byteord(self.nameLength)
        assert len(rest) == self.nameLength
        self.baseGlyphName = tostr(rest)

        rawMETAMD5 = self.METAMD5
        self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
        for char in rawMETAMD5[1:]:
            self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
        self.METAMD5 = self.METAMD5 + "]"

    def decompileUniqueName(self, data):
        name = ""
        for char in data:
            val = byteord(char)
            if val == 0:
                break
            if (val > 31) or (val < 128):
                name += chr(val)
            else:
                octString = oct(val)
                if len(octString) > 3:
                    octString = octString[1:]  # chop off that leading zero.
                elif len(octString) < 3:
                    octString.zfill(3)
                name += "\\" + octString
        return name

    def compile(self, ttFont):
        d = self.__dict__.copy()
        d["nameLength"] = bytechr(len(self.baseGlyphName))
        d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
        METAMD5List = eval(self.METAMD5)
        d["METAMD5"] = b""
        for val in METAMD5List:
            d["METAMD5"] += bytechr(val)
        assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
        data = sstruct.pack(SINGFormat, d)
        data = data + tobytes(self.baseGlyphName)
        return data

    def compilecompileUniqueName(self, name, length):
        nameLen = len(name)
        if length <= nameLen:
            name = name[:length-1] + "\000"
        else:
            name += (nameLen - length) * "\000"
        return name

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(SINGFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("baseGlyphName", value=self.baseGlyphName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
            setattr(self, name, value)
        else:
            setattr(self, name, safeEval(value))
venv/Lib/site-packages/fontTools/ttLib/tables/S__i_l_f.py (new file, 885 lines)
@@ -0,0 +1,885 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
# from itertools import *
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys

Silf_hdr_format = '''
    >
    version: 16.16F
'''

Silf_hdr_format_3 = '''
    >
    version: 16.16F
    compilerVersion: L
    numSilf: H
    x
    x
'''

Silf_part1_format_v3 = '''
    >
    ruleVersion: 16.16F
    passOffset: H
    pseudosOffset: H
'''

Silf_part1_format = '''
    >
    maxGlyphID: H
    extraAscent: h
    extraDescent: h
    numPasses: B
    iSubst: B
    iPos: B
    iJust: B
    iBidi: B
    flags: B
    maxPreContext: B
    maxPostContext: B
    attrPseudo: B
    attrBreakWeight: B
    attrDirectionality: B
    attrMirroring: B
    attrSkipPasses: B
    numJLevels: B
'''

Silf_justify_format = '''
    >
    attrStretch: B
    attrShrink: B
    attrStep: B
    attrWeight: B
    runto: B
    x
    x
    x
'''

Silf_part2_format = '''
    >
    numLigComp: H
    numUserDefn: B
    maxCompPerLig: B
    direction: B
    attCollisions: B
    x
    x
    x
    numCritFeatures: B
'''

Silf_pseudomap_format = '''
    >
    unicode: L
    nPseudo: H
'''

Silf_pseudomap_format_h = '''
    >
    unicode: H
    nPseudo: H
'''

Silf_classmap_format = '''
    >
    numClass: H
    numLinear: H
'''

Silf_lookupclass_format = '''
    >
    numIDs: H
    searchRange: H
    entrySelector: H
    rangeShift: H
'''

Silf_lookuppair_format = '''
    >
    glyphId: H
    index: H
'''

Silf_pass_format = '''
    >
    flags: B
    maxRuleLoop: B
    maxRuleContext: B
    maxBackup: B
    numRules: H
    fsmOffset: H
    pcCode: L
    rcCode: L
    aCode: L
    oDebug: L
    numRows: H
    numTransitional: H
    numSuccess: H
    numColumns: H
'''

aCode_info = (
    ("NOP", 0),
    ("PUSH_BYTE", "b"),
    ("PUSH_BYTE_U", "B"),
    ("PUSH_SHORT", ">h"),
    ("PUSH_SHORT_U", ">H"),
    ("PUSH_LONG", ">L"),
    ("ADD", 0),
    ("SUB", 0),
    ("MUL", 0),
    ("DIV", 0),
    ("MIN", 0),
    ("MAX", 0),
    ("NEG", 0),
    ("TRUNC8", 0),
    ("TRUNC16", 0),
    ("COND", 0),
    ("AND", 0), # x10
    ("OR", 0),
    ("NOT", 0),
    ("EQUAL", 0),
    ("NOT_EQ", 0),
    ("LESS", 0),
    ("GTR", 0),
    ("LESS_EQ", 0),
    ("GTR_EQ", 0),
    ("NEXT", 0),
    ("NEXT_N", "b"),
    ("COPY_NEXT", 0),
    ("PUT_GLYPH_8BIT_OBS", "B"),
    ("PUT_SUBS_8BIT_OBS", "bBB"),
    ("PUT_COPY", "b"),
    ("INSERT", 0),
    ("DELETE", 0), # x20
    ("ASSOC", -1),
    ("CNTXT_ITEM", "bB"),
    ("ATTR_SET", "B"),
    ("ATTR_ADD", "B"),
    ("ATTR_SUB", "B"),
    ("ATTR_SET_SLOT", "B"),
    ("IATTR_SET_SLOT", "BB"),
    ("PUSH_SLOT_ATTR", "Bb"),
    ("PUSH_GLYPH_ATTR_OBS", "Bb"),
    ("PUSH_GLYPH_METRIC", "Bbb"),
    ("PUSH_FEAT", "Bb"),
    ("PUSH_ATT_TO_GATTR_OBS", "Bb"),
    ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
    ("PUSH_ISLOT_ATTR", "Bbb"),
    ("PUSH_IGLYPH_ATTR", "Bbb"),
    ("POP_RET", 0), # x30
    ("RET_ZERO", 0),
    ("RET_TRUE", 0),
    ("IATTR_SET", "BB"),
    ("IATTR_ADD", "BB"),
    ("IATTR_SUB", "BB"),
    ("PUSH_PROC_STATE", "B"),
    ("PUSH_VERSION", 0),
    ("PUT_SUBS", ">bHH"),
    ("PUT_SUBS2", 0),
    ("PUT_SUBS3", 0),
    ("PUT_GLYPH", ">H"),
    ("PUSH_GLYPH_ATTR", ">Hb"),
    ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
    ("BITOR", 0),
    ("BITAND", 0),
    ("BITNOT", 0), # x40
    ("BITSET", ">HH"),
    ("SET_FEAT", "Bb")
)
aCode_map = dict([(x[0], (i, x[1])) for i,x in enumerate(aCode_info)])

def disassemble(aCode):
    codelen = len(aCode)
    pc = 0
    res = []
    while pc < codelen:
        opcode = byteord(aCode[pc:pc+1])
        if opcode > len(aCode_info):
            instr = aCode_info[0]
        else:
            instr = aCode_info[opcode]
        pc += 1
        if instr[1] != 0 and pc >= codelen : return res
        if instr[1] == -1:
            count = byteord(aCode[pc])
            fmt = "%dB" % count
            pc += 1
        elif instr[1] == 0:
            fmt = ""
        else :
            fmt = instr[1]
        if fmt == "":
            res.append(instr[0])
            continue
        parms = struct.unpack_from(fmt, aCode[pc:])
        res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")")
        pc += struct.calcsize(fmt)
    return res

instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
def assemble(instrs):
    res = b""
    for inst in instrs:
        m = instre.match(inst)
        if not m or not m.group(1) in aCode_map:
            continue
        opcode, parmfmt = aCode_map[m.group(1)]
        res += struct.pack("B", opcode)
        if m.group(2):
            if parmfmt == 0:
                continue
            parms = [int(x) for x in re.split(r",\s*", m.group(2))]
            if parmfmt == -1:
                l = len(parms)
                res += struct.pack(("%dB" % (l+1)), l, *parms)
            else:
                res += struct.pack(parmfmt, *parms)
    return res

def writecode(tag, writer, instrs):
    writer.begintag(tag)
    writer.newline()
    for l in disassemble(instrs):
        writer.write(l)
        writer.newline()
    writer.endtag(tag)
    writer.newline()

def readcode(content):
    res = []
    for e in content_string(content).split('\n'):
        e = e.strip()
        if not len(e): continue
        res.append(e)
    return assemble(res)

attrs_info=('flags', 'extraAscent', 'extraDescent', 'maxGlyphID',
    'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID')
attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi')
attrs_contexts = ('maxPreContext', 'maxPostContext')
attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality',
    'attrMirroring', 'attrSkipPasses', 'attCollisions')
pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup',
    'minRulePreContext', 'maxRulePreContext', 'collisionThreshold')
pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns')

def writesimple(tag, self, writer, *attrkeys):
    attrs = dict([(k, getattr(self, k)) for k in attrkeys])
    writer.simpletag(tag, **attrs)
    writer.newline()

def getSimple(self, attrs, *attr_list):
    for k in attr_list:
        if k in attrs:
            setattr(self, k, int(safeEval(attrs[k])))

def content_string(contents):
    res = ""
    for element in contents:
        if isinstance(element, tuple): continue
        res += element
    return res.strip()

def wrapline(writer, dat, length=80):
    currline = ""
    for d in dat:
        if len(currline) > length:
            writer.write(currline[:-1])
            writer.newline()
            currline = ""
        currline += d + " "
    if len(currline):
        writer.write(currline[:-1])
        writer.newline()

class _Object() :
    pass

class table_S__i_l_f(DefaultTable.DefaultTable):
    '''Silf table support'''

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.silfs = []

    def decompile(self, data, ttFont):
        sstruct.unpack2(Silf_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version >= 5.0:
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)
        elif self.version < 3.0:
            self.numSilf = struct.unpack('>H', data[4:6])
            self.scheme = 0
            self.compilerVersion = 0
            base = 8
        else:
            self.scheme = 0
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)

        silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:])
        for offset in silfoffsets:
            s = Silf()
            self.silfs.append(s)
            s.decompile(data[offset:], ttFont, self.version)

    def compile(self, ttFont):
        self.numSilf = len(self.silfs)
        if self.version < 3.0:
            hdr = sstruct.pack(Silf_hdr_format, self)
            hdr += struct.pack(">HH", self.numSilf, 0)
        else:
            hdr = sstruct.pack(Silf_hdr_format_3, self)
        offset = len(hdr) + 4 * self.numSilf
        data = b""
        for s in self.silfs:
            hdr += struct.pack(">L", offset)
            subdata = s.compile(ttFont, self.version)
            offset += len(subdata)
            data += subdata
        if self.version >= 5.0:
            return grUtils.compress(self.scheme, hdr+data)
        return hdr+data

    def toXML(self, writer, ttFont):
        writer.comment('Attributes starting with _ are informative only')
        writer.newline()
        writer.simpletag('version', version=self.version,
            compilerVersion=self.compilerVersion, compressionScheme=self.scheme)
        writer.newline()
        for s in self.silfs:
            writer.begintag('silf')
            writer.newline()
            s.toXML(writer, ttFont, self.version)
            writer.endtag('silf')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'version':
            self.scheme=int(safeEval(attrs['compressionScheme']))
            self.version = float(safeEval(attrs['version']))
            self.compilerVersion = int(safeEval(attrs['compilerVersion']))
            return
        if name == 'silf':
            s = Silf()
            self.silfs.append(s)
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                s.fromXML(tag, attrs, subcontent, ttFont, self.version)

class Silf(object):
    '''A particular Silf subtable'''

    def __init__(self):
        self.passes = []
        self.scriptTags = []
        self.critFeatures = []
        self.jLevels = []
        self.pMap = {}

    def decompile(self, data, ttFont, version=2.0):
        if version >= 3.0 :
            _, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
            self.ruleVersion = float(floatToFixedToStr(self.ruleVersion, precisionBits=16))
        _, data = sstruct.unpack2(Silf_part1_format, data, self)
        for jlevel in range(self.numJLevels):
            j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
            self.jLevels.append(j)
        _, data = sstruct.unpack2(Silf_part2_format, data, self)
        if self.numCritFeatures:
            self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data)
        data = data[self.numCritFeatures * 2 + 1:]
        (numScriptTag,) = struct.unpack_from('B', data)
        if numScriptTag:
            self.scriptTags = [struct.unpack("4s", data[x:x+4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4)]
        data = data[1 + 4 * numScriptTag:]
        (self.lbGID,) = struct.unpack('>H', data[:2])
        if self.numPasses:
            self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses])
        data = data[6 + 4 * self.numPasses:]
        (numPseudo,) = struct.unpack(">H", data[:2])
        for i in range(numPseudo):
            if version >= 3.0:
                pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
            else:
                pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
            self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
        data = data[8 + 6 * numPseudo:]
        currpos = (sstruct.calcsize(Silf_part1_format)
                   + sstruct.calcsize(Silf_justify_format) * self.numJLevels
                   + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
                   + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo)
        if version >= 3.0:
            currpos += sstruct.calcsize(Silf_part1_format_v3)
        self.classes = Classes()
        self.classes.decompile(data, ttFont, version)
        for i in range(self.numPasses):
            p = Pass()
            self.passes.append(p)
            p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
                        ttFont, version)

    def compile(self, ttFont, version=2.0):
        self.numPasses = len(self.passes)
        self.numJLevels = len(self.jLevels)
        self.numCritFeatures = len(self.critFeatures)
        numPseudo = len(self.pMap)
        data = b""
        if version >= 3.0:
            hdroffset = sstruct.calcsize(Silf_part1_format_v3)
        else:
            hdroffset = 0
        data += sstruct.pack(Silf_part1_format, self)
        for j in self.jLevels:
            data += sstruct.pack(Silf_justify_format, j)
        data += sstruct.pack(Silf_part2_format, self)
        if self.numCritFeatures:
            data += struct.pack((">%dH" % self.numCritFeaturs), *self.critFeatures)
        data += struct.pack("BB", 0, len(self.scriptTags))
        if len(self.scriptTags):
            tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
            data += b"".join(tdata)
        data += struct.pack(">H", self.lbGID)
        self.passOffset = len(data)

        data1 = grUtils.bininfo(numPseudo, 6)
        currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
        self.pseudosOffset = currpos + len(data1)
        for u, p in sorted(self.pMap.items()):
            data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
                                 u, ttFont.getGlyphID(p))
        data1 += self.classes.compile(ttFont, version)
        currpos += len(data1)
        data2 = b""
        datao = b""
        for i, p in enumerate(self.passes):
            base = currpos + len(data2)
            datao += struct.pack(">L", base)
            data2 += p.compile(ttFont, base, version)
        datao += struct.pack(">L", currpos + len(data2))

        if version >= 3.0:
            data3 = sstruct.pack(Silf_part1_format_v3, self)
        else:
            data3 = b""
        return data3 + data + datao + data1 + data2


    def toXML(self, writer, ttFont, version=2.0):
        if version >= 3.0:
            writer.simpletag('version', ruleVersion=self.ruleVersion)
            writer.newline()
        writesimple('info', self, writer, *attrs_info)
        writesimple('passindexes', self, writer, *attrs_passindexes)
        writesimple('contexts', self, writer, *attrs_contexts)
        writesimple('attributes', self, writer, *attrs_attributes)
        if len(self.jLevels):
            writer.begintag('justifications')
            writer.newline()
            jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
            for i, j in enumerate(self.jLevels):
                attrs = dict([(k, getattr(j, k)) for k in jnames])
                writer.simpletag('justify', **attrs)
                writer.newline()
            writer.endtag('justifications')
            writer.newline()
        if len(self.critFeatures):
            writer.begintag('critFeatures')
            writer.newline()
            writer.write(" ".join(map(str, self.critFeatures)))
            writer.newline()
            writer.endtag('critFeatures')
            writer.newline()
        if len(self.scriptTags):
            writer.begintag('scriptTags')
            writer.newline()
            writer.write(" ".join(self.scriptTags))
            writer.newline()
            writer.endtag('scriptTags')
            writer.newline()
        if self.pMap:
            writer.begintag('pseudoMap')
            writer.newline()
            for k, v in sorted(self.pMap.items()):
                writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
                writer.newline()
            writer.endtag('pseudoMap')
            writer.newline()
        self.classes.toXML(writer, ttFont, version)
        if len(self.passes):
            writer.begintag('passes')
            writer.newline()
            for i, p in enumerate(self.passes):
                writer.begintag('pass', _index=i)
                writer.newline()
                p.toXML(writer, ttFont, version)
                writer.endtag('pass')
                writer.newline()
            writer.endtag('passes')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'version':
            self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
        if name == 'info':
            getSimple(self, attrs, *attrs_info)
        elif name == 'passindexes':
            getSimple(self, attrs, *attrs_passindexes)
        elif name == 'contexts':
            getSimple(self, attrs, *attrs_contexts)
        elif name == 'attributes':
            getSimple(self, attrs, *attrs_attributes)
        elif name == 'justifications':
            for element in content:
                if not isinstance(element, tuple): continue
                (tag, attrs, subcontent) = element
                if tag == 'justify':
                    j = _Object()
                    for k, v in attrs.items():
                        setattr(j, k, int(v))
                    self.jLevels.append(j)
        elif name == 'critFeatures':
            self.critFeatures = []
            element = content_string(content)
            self.critFeatures.extend(map(int, element.split()))
        elif name == 'scriptTags':
            self.scriptTags = []
            element = content_string(content)
            for n in element.split():
                self.scriptTags.append(n)
        elif name == 'pseudoMap':
            self.pMap = {}
            for element in content:
                if not isinstance(element, tuple): continue
                (tag, attrs, subcontent) = element
                if tag == 'pseudo':
                    k = int(attrs['unicode'], 16)
                    v = attrs['pseudo']
                    self.pMap[k] = v
        elif name == 'classes':
            self.classes = Classes()
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
        elif name == 'passes':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'pass':
                    p = Pass()
                    for e in subcontent:
                        if not isinstance(e, tuple): continue
                        p.fromXML(e[0], e[1], e[2], ttFont, version)
                    self.passes.append(p)


class Classes(object):

    def __init__(self):
        self.linear = []
        self.nonLinear = []

    def decompile(self, data, ttFont, version=2.0):
        sstruct.unpack2(Silf_classmap_format, data, self)
        if version >= 4.0 :
            oClasses = struct.unpack((">%dL" % (self.numClass+1)),
                                     data[4:8+4*self.numClass])
        else:
            oClasses = struct.unpack((">%dH" % (self.numClass+1)),
                                     data[4:6+2*self.numClass])
        for s,e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
            self.linear.append(ttFont.getGlyphName(x) for x in
                               struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))
        for s,e in zip(oClasses[self.numLinear:self.numClass],
                       oClasses[self.numLinear+1:self.numClass+1]):
            nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)]
            nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
            self.nonLinear.append(nonLin)

    def compile(self, ttFont, version=2.0):
        data = b""
        oClasses = []
        if version >= 4.0:
            offset = 8 + 4 * (len(self.linear) + len(self.nonLinear))
        else:
            offset = 6 + 2 * (len(self.linear) + len(self.nonLinear))
        for l in self.linear:
            oClasses.append(len(data) + offset)
            gs = [ttFont.getGlyphID(x) for x in l]
            data += struct.pack((">%dH" % len(l)), *gs)
        for l in self.nonLinear:
            oClasses.append(len(data) + offset)
            gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()]
            data += grUtils.bininfo(len(gs))
            data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)])
        oClasses.append(len(data) + offset)
        self.numClass = len(oClasses) - 1
        self.numLinear = len(self.linear)
        return sstruct.pack(Silf_classmap_format, self) + \
               struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)),
                           *oClasses) + data

    def toXML(self, writer, ttFont, version=2.0):
        writer.begintag('classes')
        writer.newline()
        writer.begintag('linearClasses')
        writer.newline()
        for i,l in enumerate(self.linear):
            writer.begintag('linear', _index=i)
            writer.newline()
            wrapline(writer, l)
            writer.endtag('linear')
            writer.newline()
        writer.endtag('linearClasses')
        writer.newline()
        writer.begintag('nonLinearClasses')
        writer.newline()
        for i, l in enumerate(self.nonLinear):
            writer.begintag('nonLinear', _index=i + self.numLinear)
            writer.newline()
            for inp, ind in l.items():
                writer.simpletag('map', glyph=inp, index=ind)
                writer.newline()
            writer.endtag('nonLinear')
            writer.newline()
        writer.endtag('nonLinearClasses')
        writer.newline()
        writer.endtag('classes')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'linearClasses':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'linear':
                    l = content_string(subcontent).split()
                    self.linear.append(l)
        elif name == 'nonLinearClasses':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag =='nonLinear':
                    l = {}
                    for e in subcontent:
                        if not isinstance(e, tuple): continue
                        tag, attrs, subsubcontent = e
                        if tag == 'map':
                            l[attrs['glyph']] = int(safeEval(attrs['index']))
                    self.nonLinear.append(l)

class Pass(object):

    def __init__(self):
        self.colMap = {}
        self.rules = []
        self.rulePreContexts = []
        self.ruleSortKeys = []
        self.ruleConstraints = []
        self.passConstraints = b""
        self.actions = []
        self.stateTrans = []
        self.startStates = []

    def decompile(self, data, ttFont, version=2.0):
        _, data = sstruct.unpack2(Silf_pass_format, data, self)
        (numRange, _, _, _) = struct.unpack(">4H", data[:8])
        data = data[8:]
        for i in range(numRange):
            (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6])
            for g in range(first, last+1):
                self.colMap[ttFont.getGlyphName(g)] = col
        data = data[6*numRange:]
        oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
        data = data[2+2*self.numSuccess:]
        rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
        self.rules = [rules[s:e] for (s,e) in zip(oRuleMap, oRuleMap[1:])]
        data = data[2*oRuleMap[-1]:]
        (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2])
        numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
        self.startStates = struct.unpack((">%dH" % numStartStates),
                                         data[2:2 + numStartStates * 2])
        data = data[2+numStartStates*2:]
        self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules])
        data = data[2*self.numRules:]
        self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules])
        data = data[self.numRules:]
        (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
        oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)),
                                          data[3:5 + self.numRules * 2]))
        data = data[5 + self.numRules * 2:]
        oActions = list(struct.unpack((">%dH" % (self.numRules + 1)),
                                      data[:2 + self.numRules * 2]))
        data = data[2 * self.numRules + 2:]
        for i in range(self.numTransitional):
            a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2])
            if sys.byteorder != "big": a.byteswap()
            self.stateTrans.append(a)
        data = data[self.numTransitional * self.numColumns * 2 + 1:]
        self.passConstraints = data[:pConstraint]
        data = data[pConstraint:]
        for i in range(len(oConstraints)-2,-1,-1):
            if oConstraints[i] == 0 :
                oConstraints[i] = oConstraints[i+1]
        self.ruleConstraints = [(data[s:e] if (e-s > 1) else b"") for (s,e) in zip(oConstraints, oConstraints[1:])]
        data = data[oConstraints[-1]:]
        self.actions = [(data[s:e] if (e-s > 1) else "") for (s,e) in zip(oActions, oActions[1:])]
        data = data[oActions[-1]:]
        # not using debug

    def compile(self, ttFont, base, version=2.0):
        # build it all up backwards
        oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [b""], (0, []))[1]
        oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [b""], (1, []))[1]
        constraintCode = b"\000" + b"".join(self.ruleConstraints)
        transes = []
        for t in self.stateTrans:
            if sys.byteorder != "big": t.byteswap()
            transes.append(t.tobytes())
            if sys.byteorder != "big": t.byteswap()
        if not len(transes):
            self.startStates = [0]
        oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1]
        passRanges = []
        gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
        for e in grUtils.entries(gidcolmap, sameval = True):
            if e[1]:
                passRanges.append((e[0], e[0]+e[1]-1, e[2][0]))
        self.numRules = len(self.actions)
        self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6
                          + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2
                          + 2 * len(self.startStates) + 3 * self.numRules + 3
                          + 4 * self.numRules + 4)
        self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base
        self.rcCode = self.pcCode + len(self.passConstraints)
        self.aCode = self.rcCode + len(constraintCode)
        self.oDebug = 0
        # now generate output
        data = sstruct.pack(Silf_pass_format, self)
        data += grUtils.bininfo(len(passRanges), 6)
        data += b"".join(struct.pack(">3H", *p) for p in passRanges)
        data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
        flatrules = reduce(lambda a,x: a+x, self.rules, [])
        data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
        data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
        data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
        data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
        data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
        data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
        data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints)
        data += struct.pack((">%dH" % (self.numRules+1)), *oActions)
        return data + b"".join(transes) + struct.pack("B", 0) + \
               self.passConstraints + constraintCode + b"".join(self.actions)

    def toXML(self, writer, ttFont, version=2.0):
        writesimple('info', self, writer, *pass_attrs_info)
        writesimple('fsminfo', self, writer, *pass_attrs_fsm)
        writer.begintag('colmap')
        writer.newline()
        wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(),
                 key=lambda x:ttFont.getGlyphID(x[0]))])
        writer.endtag('colmap')
        writer.newline()
        writer.begintag('staterulemap')
        writer.newline()
        for i, r in enumerate(self.rules):
            writer.simpletag('state', number = self.numRows - self.numSuccess + i,
                             rules = " ".join(map(str, r)))
            writer.newline()
        writer.endtag('staterulemap')
        writer.newline()
        writer.begintag('rules')
        writer.newline()
        for i in range(len(self.actions)):
            writer.begintag('rule', index=i, precontext=self.rulePreContexts[i],
                            sortkey=self.ruleSortKeys[i])
            writer.newline()
            if len(self.ruleConstraints[i]):
                writecode('constraint', writer, self.ruleConstraints[i])
            writecode('action', writer, self.actions[i])
            writer.endtag('rule')
            writer.newline()
        writer.endtag('rules')
        writer.newline()
        if len(self.passConstraints):
            writecode('passConstraint', writer, self.passConstraints)
        if len(self.stateTrans):
            writer.begintag('fsm')
            writer.newline()
            writer.begintag('starts')
            writer.write(" ".join(map(str, self.startStates)))
            writer.endtag('starts')
            writer.newline()
            for i, s in enumerate(self.stateTrans):
                writer.begintag('row', _i=i)
                # no newlines here
                writer.write(" ".join(map(str, s)))
                writer.endtag('row')
                writer.newline()
            writer.endtag('fsm')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'info':
            getSimple(self, attrs, *pass_attrs_info)
        elif name == 'fsminfo':
            getSimple(self, attrs, *pass_attrs_fsm)
        elif name == 'colmap':
            e = content_string(content)
            for w in e.split():
                x = w.split('=')
                if len(x) != 2 or x[0] == '' or x[1] == '': continue
                self.colMap[x[0]] = int(x[1])
        elif name == 'staterulemap':
            for e in content:
                if not isinstance(e, tuple): continue
                tag, a, c = e
                if tag == 'state':
                    self.rules.append([int(x) for x in a['rules'].split(" ")])
        elif name == 'rules':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, a, c = element
                if tag != 'rule': continue
                self.rulePreContexts.append(int(a['precontext']))
                self.ruleSortKeys.append(int(a['sortkey']))
                con = b""
                act = b""
                for e in c:
                    if not isinstance(e, tuple): continue
                    tag, a, subc = e
                    if tag == 'constraint':
                        con = readcode(subc)
                    elif tag == 'action':
                        act = readcode(subc)
                self.actions.append(act)
                self.ruleConstraints.append(con)
        elif name == 'passConstraint':
            self.passConstraints = readcode(content)
        elif name == 'fsm':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, a, c = element
                if tag == 'row':
                    s = array('H')
                    e = content_string(c)
                    s.extend(map(int, e.split()))
                    self.stateTrans.append(s)
                elif tag == 'starts':
                    s = []
                    e = content_string(c)
                    s.extend(map(int, e.split()))
                    self.startStates = s
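A rough round-trip sketch for the assembler/disassembler pair defined above (not part of this commit; the opcode mnemonics come from aCode_info):

    from fontTools.ttLib.tables.S__i_l_f import assemble, disassemble

    code = assemble(["PUSH_BYTE(3)", "PUSH_BYTE(4)", "ADD", "POP_RET"])
    print(disassemble(code))   # ['PUSH_BYTE(3)', 'PUSH_BYTE(4)', 'ADD', 'POP_RET']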
@@ -0,0 +1,4 @@
from .T_S_I_V_ import table_T_S_I_V_

class table_T_S_I_S_(table_T_S_I_V_):
    pass
venv/Lib/site-packages/fontTools/ttLib/tables/T_S_I__0.py (new file, 54 lines)
@@ -0,0 +1,54 @@
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
tool to store its hinting source data.

TSI0 is the index table containing the lengths and offsets for the glyph
programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained
in the TSI1 table.
"""
from . import DefaultTable
import struct

tsi0Format = '>HHL'

def fixlongs(glyphID, textLength, textOffset):
    return int(glyphID), int(textLength), textOffset


class table_T_S_I__0(DefaultTable.DefaultTable):

    dependencies = ["TSI1"]

    def decompile(self, data, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        indices = []
        size = struct.calcsize(tsi0Format)
        for i in range(numGlyphs + 5):
            glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size]))
            indices.append((glyphID, textLength, textOffset))
            data = data[size:]
        assert len(data) == 0
        assert indices[-5] == (0XFFFE, 0, 0xABFC1F34), "bad magic number"
        self.indices = indices[:-5]
        self.extra_indices = indices[-4:]

    def compile(self, ttFont):
        if not hasattr(self, "indices"):
            # We have no corresponding table (TSI1 or TSI3); let's return
            # no data, which effectively means "ignore us".
            return b""
        data = b""
        for index, textLength, textOffset in self.indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        data = data + struct.pack(tsi0Format, 0XFFFE, 0, 0xABFC1F34)
        for index, textLength, textOffset in self.extra_indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        return data

    def set(self, indices, extra_indices):
        # gets called by 'TSI1' or 'TSI3'
        self.indices = indices
        self.extra_indices = extra_indices

    def toXML(self, writer, ttFont):
        writer.comment("This table will be calculated by the compiler")
        writer.newline()
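As an illustrative sketch (not part of this commit), the index records decompiled above can be read directly once a VTT-hinted font is loaded; "hinted.ttf" is only a placeholder and must actually carry TSI0/TSI1 tables:

    from fontTools.ttLib import TTFont

    font = TTFont("hinted.ttf")
    tsi0 = font["TSI0"]
    for glyphID, textLength, textOffset in tsi0.indices[:5]:
        print(glyphID, textLength, textOffset)   # one record per glyph program in TSI1
    print(tsi0.extra_indices)                    # the four 'extra' program records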
venv/Lib/site-packages/fontTools/ttLib/tables/_g_l_y_f.py (new file, 1845 lines; diff suppressed because it is too large)
venv/Lib/site-packages/fontTools/ttLib/tables/_p_o_s_t.py (new file, 292 lines)
@@ -0,0 +1,292 @@
from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
import logging

log = logging.getLogger(__name__)

postFormat = """
    >
    formatType: 16.16F
    italicAngle: 16.16F # italic angle in degrees
    underlinePosition: h
    underlineThickness: h
    isFixedPitch: L
    minMemType42: L # minimum memory if TrueType font is downloaded
    maxMemType42: L # maximum memory if TrueType font is downloaded
    minMemType1: L # minimum memory if Type1 font is downloaded
    maxMemType1: L # maximum memory if Type1 font is downloaded
"""

postFormatSize = sstruct.calcsize(postFormat)


class table__p_o_s_t(DefaultTable.DefaultTable):

    def decompile(self, data, ttFont):
        sstruct.unpack(postFormat, data[:postFormatSize], self)
        data = data[postFormatSize:]
        if self.formatType == 1.0:
            self.decode_format_1_0(data, ttFont)
        elif self.formatType == 2.0:
            self.decode_format_2_0(data, ttFont)
        elif self.formatType == 3.0:
            self.decode_format_3_0(data, ttFont)
        elif self.formatType == 4.0:
            self.decode_format_4_0(data, ttFont)
        else:
            # supported format
            raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)

    def compile(self, ttFont):
        data = sstruct.pack(postFormat, self)
        if self.formatType == 1.0:
            pass # we're done
        elif self.formatType == 2.0:
            data = data + self.encode_format_2_0(ttFont)
        elif self.formatType == 3.0:
            pass # we're done
        elif self.formatType == 4.0:
            data = data + self.encode_format_4_0(ttFont)
        else:
            # supported format
            raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
        return data

    def getGlyphOrder(self):
        """This function will get called by a ttLib.TTFont instance.
        Do not call this function yourself, use TTFont().getGlyphOrder()
        or its relatives instead!
        """
        if not hasattr(self, "glyphOrder"):
            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        glyphOrder = self.glyphOrder
        del self.glyphOrder
        return glyphOrder

    def decode_format_1_0(self, data, ttFont):
        self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs]

    def decode_format_2_0(self, data, ttFont):
        numGlyphs, = struct.unpack(">H", data[:2])
        numGlyphs = int(numGlyphs)
        if numGlyphs > ttFont['maxp'].numGlyphs:
            # Assume the numGlyphs field is bogus, so sync with maxp.
            # I've seen this in one font, and if the assumption is
            # wrong elsewhere, well, so be it: it's hard enough to
            # work around _one_ non-conforming post format...
            numGlyphs = ttFont['maxp'].numGlyphs
        data = data[2:]
        indices = array.array("H")
        indices.frombytes(data[:2*numGlyphs])
        if sys.byteorder != "big": indices.byteswap()
        data = data[2*numGlyphs:]
        maxIndex = max(indices)
        self.extraNames = extraNames = unpackPStrings(data, maxIndex-257)
        self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs)
        for glyphID in range(numGlyphs):
            index = indices[glyphID]
            if index > 257:
                try:
                    name = extraNames[index-258]
                except IndexError:
                    name = ""
            else:
                # fetch names from standard list
                name = standardGlyphOrder[index]
            glyphOrder[glyphID] = name
        self.build_psNameMapping(ttFont)

    def build_psNameMapping(self, ttFont):
        mapping = {}
        allNames = {}
        for i in range(ttFont['maxp'].numGlyphs):
            glyphName = psName = self.glyphOrder[i]
            if glyphName == "":
                glyphName = "glyph%.5d" % i
            if glyphName in allNames:
                # make up a new glyphName that's unique
                n = allNames[glyphName]
                while (glyphName + "#" + str(n)) in allNames:
                    n += 1
                allNames[glyphName] = n + 1
                glyphName = glyphName + "#" + str(n)

            self.glyphOrder[i] = glyphName
            allNames[glyphName] = 1
            if glyphName != psName:
                mapping[glyphName] = psName

        self.mapping = mapping

    def decode_format_3_0(self, data, ttFont):
        # Setting self.glyphOrder to None will cause the TTFont object
        # try and construct glyph names from a Unicode cmap table.
        self.glyphOrder = None

    def decode_format_4_0(self, data, ttFont):
        from fontTools import agl
        numGlyphs = ttFont['maxp'].numGlyphs
        indices = array.array("H")
        indices.frombytes(data)
        if sys.byteorder != "big": indices.byteswap()
        # In some older fonts, the size of the post table doesn't match
        # the number of glyphs. Sometimes it's bigger, sometimes smaller.
        self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
        for i in range(min(len(indices),numGlyphs)):
            if indices[i] == 0xFFFF:
                self.glyphOrder[i] = ''
            elif indices[i] in agl.UV2AGL:
                self.glyphOrder[i] = agl.UV2AGL[indices[i]]
            else:
                self.glyphOrder[i] = "uni%04X" % indices[i]
        self.build_psNameMapping(ttFont)

    def encode_format_2_0(self, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        extraDict = {}
        extraNames = self.extraNames = [
            n for n in self.extraNames if n not in standardGlyphOrder]
        for i in range(len(extraNames)):
            extraDict[extraNames[i]] = i
        for glyphID in range(numGlyphs):
            glyphName = glyphOrder[glyphID]
            if glyphName in self.mapping:
                psName = self.mapping[glyphName]
            else:
                psName = glyphName
            if psName in extraDict:
                index = 258 + extraDict[psName]
            elif psName in standardGlyphOrder:
                index = standardGlyphOrder.index(psName)
            else:
                index = 258 + len(extraNames)
                extraDict[psName] = len(extraNames)
                extraNames.append(psName)
            indices.append(index)
        if sys.byteorder != "big": indices.byteswap()
        return struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)

    def encode_format_4_0(self, ttFont):
        from fontTools import agl
        numGlyphs = ttFont['maxp'].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        for glyphID in glyphOrder:
            glyphID = glyphID.split('#')[0]
            if glyphID in agl.AGL2UV:
                indices.append(agl.AGL2UV[glyphID])
            elif len(glyphID) == 7 and glyphID[:3] == 'uni':
                indices.append(int(glyphID[3:],16))
            else:
                indices.append(0xFFFF)
        if sys.byteorder != "big": indices.byteswap()
        return indices.tobytes()

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(postFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        if hasattr(self, "mapping"):
            writer.begintag("psNames")
            writer.newline()
            writer.comment("This file uses unique glyph names based on the information\n"
                           "found in the 'post' table. Since these names might not be unique,\n"
                           "we have to invent artificial names in case of clashes. In order to\n"
                           "be able to retain the original information, we need a name to\n"
                           "ps name mapping for those cases where they differ. That's what\n"
                           "you see below.\n")
            writer.newline()
            items = sorted(self.mapping.items())
            for name, psName in items:
                writer.simpletag("psName", name=name, psName=psName)
                writer.newline()
            writer.endtag("psNames")
            writer.newline()
        if hasattr(self, "extraNames"):
            writer.begintag("extraNames")
            writer.newline()
            writer.comment("following are the name that are not taken from the standard Mac glyph order")
            writer.newline()
            for name in self.extraNames:
                writer.simpletag("psName", name=name)
                writer.newline()
            writer.endtag("extraNames")
            writer.newline()
        if hasattr(self, "data"):
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(self.data)
            writer.endtag("hexdata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name not in ("psNames", "extraNames", "hexdata"):
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "psNames":
            self.mapping = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.mapping[attrs["name"]] = attrs["psName"]
        elif name == "extraNames":
            self.extraNames = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.extraNames.append(attrs["name"])
        else:
            self.data = readHex(content)


def unpackPStrings(data, n):
    # extract n Pascal strings from data.
    # if there is not enough data, use ""

    strings = []
    index = 0
    dataLen = len(data)

    for _ in range(n):
        if dataLen <= index:
            length = 0
        else:
            length = byteord(data[index])
        index += 1

        if dataLen <= index + length - 1:
            name = ""
        else:
            name = tostr(data[index:index+length], encoding="latin1")
        strings.append (name)
        index += length

    if index < dataLen:
        log.warning("%d extra bytes in post.stringData array", dataLen - index)

    elif dataLen < index:
        log.warning("not enough data in post.stringData array")

    return strings


def packPStrings(strings):
    data = b""
    for s in strings:
        data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
    return data
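A minimal reading sketch (not part of this commit), assuming "Example.ttf" is a font whose 'post' table uses format 2.0:

    from fontTools.ttLib import TTFont

    font = TTFont("Example.ttf")
    post = font["post"]
    print(post.formatType)              # 2.0 for a format 2.0 table
    print(font.getGlyphOrder()[:10])    # names resolved by decode_format_2_0()
    print(post.mapping)                 # glyph name -> original ps name where they differ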
@@ -0,0 +1,6 @@
from .otBase import BaseTTXConverter


# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html
class table__p_r_o_p(BaseTTXConverter):
    pass
venv/Lib/site-packages/fontTools/ttLib/tables/_v_h_e_a.py (new file, 118 lines)
@@ -0,0 +1,118 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
    ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
from . import DefaultTable
import math


vheaFormat = """
    > # big endian
    tableVersion: L
    ascent: h
    descent: h
    lineGap: h
    advanceHeightMax: H
    minTopSideBearing: h
    minBottomSideBearing: h
    yMaxExtent: h
    caretSlopeRise: h
    caretSlopeRun: h
    caretOffset: h
    reserved1: h
    reserved2: h
    reserved3: h
    reserved4: h
    metricDataFormat: h
    numberOfVMetrics: H
"""

class table__v_h_e_a(DefaultTable.DefaultTable):

    # Note: Keep in sync with table__h_h_e_a

    dependencies = ['vmtx', 'glyf', 'CFF ', 'CFF2']

    def decompile(self, data, ttFont):
        sstruct.unpack(vheaFormat, data, self)

    def compile(self, ttFont):
        if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
            self.recalc(ttFont)
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(vheaFormat, self)

    def recalc(self, ttFont):
        if 'vmtx' in ttFont:
            vmtxTable = ttFont['vmtx']
            self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())

        boundsHeightDict = {}
        if 'glyf' in ttFont:
            glyfTable = ttFont['glyf']
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "yMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsHeightDict[name] = g.yMax - g.yMin
        elif 'CFF ' in ttFont or 'CFF2' in ttFont:
            if 'CFF ' in ttFont:
                topDict = ttFont['CFF '].cff.topDictIndex[0]
            else:
                topDict = ttFont['CFF2'].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    boundsHeightDict[name] = int(
                        math.ceil(bounds[3]) - math.floor(bounds[1]))

        if boundsHeightDict:
            minTopSideBearing = float('inf')
            minBottomSideBearing = float('inf')
            yMaxExtent = -float('inf')
            for name, boundsHeight in boundsHeightDict.items():
                advanceHeight, tsb = vmtxTable[name]
                bsb = advanceHeight - tsb - boundsHeight
                extent = tsb + boundsHeight
                minTopSideBearing = min(minTopSideBearing, tsb)
                minBottomSideBearing = min(minBottomSideBearing, bsb)
                yMaxExtent = max(yMaxExtent, extent)
            self.minTopSideBearing = minTopSideBearing
            self.minBottomSideBearing = minBottomSideBearing
            self.yMaxExtent = yMaxExtent

        else:  # No glyph has outlines.
            self.minTopSideBearing = 0
            self.minBottomSideBearing = 0
            self.yMaxExtent = 0

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(vheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))

    # reserved0 is caretOffset for legacy reasons
    @property
    def reserved0(self):
        return self.caretOffset

    @reserved0.setter
    def reserved0(self, value):
        self.caretOffset = value
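A minimal recalculation sketch (not part of this commit), assuming "Vertical.ttf" carries 'vhea', 'vmtx' and glyph outlines:

    from fontTools.ttLib import TTFont

    font = TTFont("Vertical.ttf", recalcBBoxes=True)
    vhea = font["vhea"]
    vhea.recalc(font)                   # refreshes advanceHeightMax, side bearings, yMaxExtent
    print(vhea.advanceHeightMax, vhea.minTopSideBearing, vhea.yMaxExtent)
    data = vhea.compile(font)           # compile() reruns recalc() when outlines are loaded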