Add missing files
137  venv/Lib/site-packages/fontTools/varLib/builder.py  Normal file
@@ -0,0 +1,137 @@
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot

# VariationStore

def buildVarRegionAxis(axisSupport):
    self = ot.VarRegionAxis()
    self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
    return self

def buildVarRegion(support, axisTags):
    assert all(tag in axisTags for tag in support.keys()), ("Unknown axis tag found.", support, axisTags)
    self = ot.VarRegion()
    self.VarRegionAxis = []
    for tag in axisTags:
        self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0, 0, 0))))
    return self

def buildVarRegionList(supports, axisTags):
    self = ot.VarRegionList()
    self.RegionAxisCount = len(axisTags)
    self.Region = []
    for support in supports:
        self.Region.append(buildVarRegion(support, axisTags))
    self.RegionCount = len(self.Region)
    return self


def _reorderItem(lst, mapping):
    return [lst[i] for i in mapping]

def VarData_calculateNumShorts(self, optimize=False):
    count = self.VarRegionCount
    items = self.Item
    bit_lengths = [0] * count
    for item in items:
        # The "+ (i < -1)" magic is to handle two's-complement.
        # That is, we want to get back 7 for -128, whereas
        # bit_length() returns 8. Similarly for -65536.
        # The reason "i < -1" is used instead of "i < 0" is that
        # the latter would make it return 0 for "-1" instead of 1.
        bl = [(i + (i < -1)).bit_length() for i in item]
        bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
    # The addition of 8, instead of 7, is to account for the sign bit.
    # This "((b + 8) >> 3) if b else 0", when combined with the above
    # "(i + (i < -1)).bit_length()", is a faster way to compute byte-lengths
    # conforming to:
    #
    # byte_length = (0 if i == 0 else
    #                1 if -128 <= i < 128 else
    #                2 if -32768 <= i < 32768 else
    #                ...)
    byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]

    # https://github.com/fonttools/fonttools/issues/2279
    longWords = any(b > 2 for b in byte_lengths)

    if optimize:
        # Reorder columns such that wider columns come before narrower columns
        mapping = []
        mapping.extend(i for i, b in enumerate(byte_lengths) if b > 2)
        mapping.extend(i for i, b in enumerate(byte_lengths) if b == 2)
        mapping.extend(i for i, b in enumerate(byte_lengths) if b == 1)

        byte_lengths = _reorderItem(byte_lengths, mapping)
        self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
        self.VarRegionCount = len(self.VarRegionIndex)
        for i in range(len(items)):
            items[i] = _reorderItem(items[i], mapping)

    if longWords:
        self.NumShorts = max((i for i, b in enumerate(byte_lengths) if b > 2), default=-1) + 1
        self.NumShorts |= 0x8000
    else:
        self.NumShorts = max((i for i, b in enumerate(byte_lengths) if b > 1), default=-1) + 1

    self.VarRegionCount = len(self.VarRegionIndex)
    return self
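# A quick sanity check of the byte-length trick above (delta values are
# illustrative): for i in (0, -1, 127, -128, 128, -32768, 32768),
# b = (i + (i < -1)).bit_length() gives (0, 1, 7, 7, 8, 15, 16), and
# ((b + 8) >> 3) if b else 0 yields (0, 1, 1, 1, 2, 2, 3) bytes.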

ot.VarData.calculateNumShorts = VarData_calculateNumShorts

def VarData_CalculateNumShorts(self, optimize=True):
    """Deprecated name for VarData_calculateNumShorts() which
    defaults to optimize=True. Use varData.calculateNumShorts()
    or varData.optimize()."""
    return VarData_calculateNumShorts(self, optimize=optimize)

def VarData_optimize(self):
    return VarData_calculateNumShorts(self, optimize=True)

ot.VarData.optimize = VarData_optimize


def buildVarData(varRegionIndices, items, optimize=True):
    self = ot.VarData()
    self.VarRegionIndex = list(varRegionIndices)
    regionCount = self.VarRegionCount = len(self.VarRegionIndex)
    records = self.Item = []
    if items:
        for item in items:
            assert len(item) == regionCount
            records.append(list(item))
    self.ItemCount = len(self.Item)
    self.calculateNumShorts(optimize=optimize)
    return self


def buildVarStore(varRegionList, varDataList):
    self = ot.VarStore()
    self.Format = 1
    self.VarRegionList = varRegionList
    self.VarData = list(varDataList)
    self.VarDataCount = len(self.VarData)
    return self


# Variation helpers

def buildVarIdxMap(varIdxes, glyphOrder):
    self = ot.VarIdxMap()
    self.mapping = {g: v for g, v in zip(glyphOrder, varIdxes)}
    return self


def buildDeltaSetIndexMap(varIdxes):
    self = ot.DeltaSetIndexMap()
    self.mapping = list(varIdxes)
    self.Format = 1 if len(varIdxes) > 0xFFFF else 0
    return self


def buildVarDevTable(varIdx):
    self = ot.Device()
    self.DeltaFormat = 0x8000
    self.StartSize = varIdx >> 16
    self.EndSize = varIdx & 0xFFFF
    return self

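For orientation, the helpers above compose as follows when assembling an ItemVariationStore by hand; the axis tag, region coordinates and deltas below are illustrative, not taken from the commit:

    from fontTools.varLib.builder import (
        buildVarRegionList, buildVarData, buildVarStore, buildVarDevTable)

    axisTags = ["wght"]
    # One region peaking at normalized wght=1.0
    regionList = buildVarRegionList([{"wght": (0.0, 1.0, 1.0)}], axisTags)
    # Two delta-set items, each with one delta (one per region)
    varData = buildVarData([0], [[50], [120]])
    store = buildVarStore(regionList, [varData])
    # A Device table pointing at outer=0 / inner=1, i.e. the [120] item
    device = buildVarDevTable((0 << 16) + 1)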
190  venv/Lib/site-packages/fontTools/varLib/errors.py  Normal file
@@ -0,0 +1,190 @@
import textwrap


class VarLibError(Exception):
    """Base exception for the varLib module."""


class VarLibValidationError(VarLibError):
    """Raised when input data is invalid from varLib's point of view."""


class VarLibMergeError(VarLibError):
    """Raised when input data cannot be merged into a variable font."""

    def __init__(self, merger=None, **kwargs):
        self.merger = merger
        if not kwargs:
            kwargs = {}
        if "stack" in kwargs:
            self.stack = kwargs["stack"]
            del kwargs["stack"]
        else:
            self.stack = []
        self.cause = kwargs

    @property
    def reason(self):
        return self.__doc__

    def _master_name(self, ix):
        if self.merger is not None:
            ttf = self.merger.ttfs[ix]
            if (
                "name" in ttf
                and ttf["name"].getDebugName(1)
                and ttf["name"].getDebugName(2)
            ):
                return ttf["name"].getDebugName(1) + " " + ttf["name"].getDebugName(2)
            elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"):
                return ttf.reader.file.name
        return f"master number {ix}"

    @property
    def offender(self):
        if "expected" in self.cause and "got" in self.cause:
            index = [x == self.cause["expected"] for x in self.cause["got"]].index(False)
            return index, self._master_name(index)
        return None, None

    @property
    def details(self):
        if "expected" in self.cause and "got" in self.cause:
            offender_index, offender = self.offender
            got = self.cause["got"][offender_index]
            return f"Expected to see {self.stack[0]}=={self.cause['expected']}, instead saw {got}\n"
        return ""

    def __str__(self):
        offender_index, offender = self.offender
        location = ""
        if offender:
            location = f"\n\nThe problem is likely to be in {offender}:\n"
        context = "".join(reversed(self.stack))
        basic = textwrap.fill(
            f"Couldn't merge the fonts, because {self.reason}. "
            f"This happened while performing the following operation: {context}",
            width=78,
        )
        return "\n\n" + basic + location + self.details


class ShouldBeConstant(VarLibMergeError):
    """some values were different, but should have been the same"""

    @property
    def details(self):
        if self.stack[0] != ".FeatureCount" or self.merger is None:
            return super().details
        offender_index, offender = self.offender
        bad_ttf = self.merger.ttfs[offender_index]
        good_ttf = self.merger.ttfs[offender_index - 1]

        good_features = [
            x.FeatureTag
            for x in good_ttf[self.stack[-1]].table.FeatureList.FeatureRecord
        ]
        bad_features = [
            x.FeatureTag
            for x in bad_ttf[self.stack[-1]].table.FeatureList.FeatureRecord
        ]
        return (
            "\nIncompatible features between masters.\n"
            f"Expected: {', '.join(good_features)}.\n"
            f"Got: {', '.join(bad_features)}.\n"
        )


class FoundANone(VarLibMergeError):
    """one of the values in a list was empty when it shouldn't have been"""

    @property
    def offender(self):
        # The cause and stack are stored on the instance by
        # VarLibMergeError.__init__; `self.argv`/`self.args` hold nothing here.
        index = [x is None for x in self.cause["got"]].index(True)
        return index, self._master_name(index)

    @property
    def details(self):
        return f"{self.stack[0]}=={self.cause['got']}\n"


class MismatchedTypes(VarLibMergeError):
    """data had inconsistent types"""


class LengthsDiffer(VarLibMergeError):
    """a list of objects had inconsistent lengths"""


class KeysDiffer(VarLibMergeError):
    """a list of objects had different keys"""


class InconsistentGlyphOrder(VarLibMergeError):
    """the glyph order was inconsistent between masters"""


class InconsistentExtensions(VarLibMergeError):
    """the masters use extension lookups in inconsistent ways"""


class UnsupportedFormat(VarLibMergeError):
    """an OpenType subtable (%s) had a format I didn't expect"""

    @property
    def reason(self):
        return self.__doc__ % self.cause["subtable"]


class InconsistentFormats(UnsupportedFormat):
    """an OpenType subtable (%s) had inconsistent formats between masters"""


class VarLibCFFMergeError(VarLibError):
    pass


class VarLibCFFDictMergeError(VarLibCFFMergeError):
    """Raised when a CFF PrivateDict cannot be merged."""

    def __init__(self, key, value, values):
        error_msg = (
            f"For the Private Dict key '{key}', the default font value list:"
            f"\n\t{value}\nhad a different number of values than a region font:"
        )
        for region_value in values:
            error_msg += f"\n\t{region_value}"
        self.args = (error_msg,)


class VarLibCFFPointTypeMergeError(VarLibCFFMergeError):
    """Raised when a CFF glyph cannot be merged because of point type differences."""

    def __init__(self, point_type, pt_index, m_index, default_type, glyph_name):
        error_msg = (
            f"Glyph '{glyph_name}': '{point_type}' at point index {pt_index} in "
            f"master index {m_index} differs from the default font point type "
            f"'{default_type}'"
        )
        self.args = (error_msg,)


class VarLibCFFHintTypeMergeError(VarLibCFFMergeError):
    """Raised when a CFF glyph cannot be merged because of hint type differences."""

    def __init__(self, hint_type, cmd_index, m_index, default_type, glyph_name):
        error_msg = (
            f"Glyph '{glyph_name}': '{hint_type}' at index {cmd_index} in "
            f"master index {m_index} differs from the default font hint type "
            f"'{default_type}'"
        )
        self.args = (error_msg,)


class VariationModelError(VarLibError):
    """Raised when a variation model is faulty."""
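As a sketch of how these exceptions render (the expected/got values below are made up), VarLibMergeError.__str__ combines the docstring reason, the reversed operation stack and the offending master:

    from fontTools.varLib.errors import ShouldBeConstant

    try:
        raise ShouldBeConstant(
            expected=2, got=[2, 3],
            stack=[".FeatureCount", ".table", "GPOS"])
    except ShouldBeConstant as e:
        # Without a merger, the offender is reported as "master number 1",
        # the first master whose value differs from the expected one.
        print(e)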
1097  venv/Lib/site-packages/fontTools/varLib/merger.py  Normal file
File diff suppressed because it is too large
530  venv/Lib/site-packages/fontTools/varLib/models.py  Normal file
@@ -0,0 +1,530 @@
"""Variation fonts interpolation models."""

__all__ = [
    "nonNone",
    "allNone",
    "allEqual",
    "allEqualTo",
    "subList",
    "normalizeValue",
    "normalizeLocation",
    "supportScalar",
    "VariationModel",
]

from fontTools.misc.roundTools import noRound
from .errors import VariationModelError


def nonNone(lst):
    return [l for l in lst if l is not None]


def allNone(lst):
    return all(l is None for l in lst)


def allEqualTo(ref, lst, mapper=None):
    if mapper is None:
        return all(ref == item for item in lst)

    mapped = mapper(ref)
    return all(mapped == mapper(item) for item in lst)


def allEqual(lst, mapper=None):
    if not lst:
        return True
    it = iter(lst)
    try:
        first = next(it)
    except StopIteration:
        return True
    return allEqualTo(first, it, mapper=mapper)


def subList(truth, lst):
    assert len(truth) == len(lst)
    return [l for l, t in zip(lst, truth) if t]


def normalizeValue(v, triple):
    """Normalizes value based on a min/default/max triple.

    >>> normalizeValue(400, (100, 400, 900))
    0.0
    >>> normalizeValue(100, (100, 400, 900))
    -1.0
    >>> normalizeValue(650, (100, 400, 900))
    0.5
    """
    lower, default, upper = triple
    if not (lower <= default <= upper):
        raise ValueError(
            f"Invalid axis values, must be minimum, default, maximum: "
            f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
        )
    v = max(min(v, upper), lower)
    if v == default:
        v = 0.0
    elif v < default:
        v = (v - default) / (default - lower)
    else:
        v = (v - default) / (upper - default)
    return v


def normalizeLocation(location, axes):
    """Normalizes location based on axis min/default/max values from axes.

    >>> axes = {"wght": (100, 400, 900)}
    >>> normalizeLocation({"wght": 400}, axes)
    {'wght': 0.0}
    >>> normalizeLocation({"wght": 100}, axes)
    {'wght': -1.0}
    >>> normalizeLocation({"wght": 900}, axes)
    {'wght': 1.0}
    >>> normalizeLocation({"wght": 650}, axes)
    {'wght': 0.5}
    >>> normalizeLocation({"wght": 1000}, axes)
    {'wght': 1.0}
    >>> normalizeLocation({"wght": 0}, axes)
    {'wght': -1.0}
    >>> axes = {"wght": (0, 0, 1000)}
    >>> normalizeLocation({"wght": 0}, axes)
    {'wght': 0.0}
    >>> normalizeLocation({"wght": -1}, axes)
    {'wght': 0.0}
    >>> normalizeLocation({"wght": 1000}, axes)
    {'wght': 1.0}
    >>> normalizeLocation({"wght": 500}, axes)
    {'wght': 0.5}
    >>> normalizeLocation({"wght": 1001}, axes)
    {'wght': 1.0}
    >>> axes = {"wght": (0, 1000, 1000)}
    >>> normalizeLocation({"wght": 0}, axes)
    {'wght': -1.0}
    >>> normalizeLocation({"wght": -1}, axes)
    {'wght': -1.0}
    >>> normalizeLocation({"wght": 500}, axes)
    {'wght': -0.5}
    >>> normalizeLocation({"wght": 1000}, axes)
    {'wght': 0.0}
    >>> normalizeLocation({"wght": 1001}, axes)
    {'wght': 0.0}
    """
    out = {}
    for tag, triple in axes.items():
        v = location.get(tag, triple[1])
        out[tag] = normalizeValue(v, triple)
    return out


def supportScalar(location, support, ot=True):
    """Returns the scalar multiplier at location, for a master
    with support.  If ot is True, then a peak value of zero
    for support of an axis means "axis does not participate".  That
    is how OpenType Variation Font technology works.

    >>> supportScalar({}, {})
    1.0
    >>> supportScalar({'wght':.2}, {})
    1.0
    >>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
    0.1
    >>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
    0.75
    >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
    0.75
    >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
    0.375
    >>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
    0.75
    >>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
    0.75
    """
    scalar = 1.0
    for axis, (lower, peak, upper) in support.items():
        if ot:
            # OpenType-specific case handling
            if peak == 0.0:
                continue
            if lower > peak or peak > upper:
                continue
            if lower < 0.0 and upper > 0.0:
                continue
            v = location.get(axis, 0.0)
        else:
            assert axis in location
            v = location[axis]
        if v == peak:
            continue
        if v <= lower or upper <= v:
            scalar = 0.0
            break
        if v < peak:
            scalar *= (v - lower) / (peak - lower)
        else:  # v > peak
            scalar *= (v - upper) / (peak - upper)
    return scalar
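# Worked example of the tent function above: for location {'wght': 2.5} and
# support {'wght': (0, 2, 4)}, v=2.5 lies between peak and upper, so
# scalar = (v - upper) / (peak - upper) = (2.5 - 4) / (2 - 4) = 0.75,
# matching the doctest.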


class VariationModel(object):

    """
    Locations must be in normalized space.  I.e. the base master
    is at the origin (0)::

    >>> from pprint import pprint
    >>> locations = [ \
    {'wght':100}, \
    {'wght':-100}, \
    {'wght':-180}, \
    {'wdth':+.3}, \
    {'wght':+120,'wdth':.3}, \
    {'wght':+120,'wdth':.2}, \
    {}, \
    {'wght':+180,'wdth':.3}, \
    {'wght':+180}, \
    ]
    >>> model = VariationModel(locations, axisOrder=['wght'])
    >>> pprint(model.locations)
    [{},
     {'wght': -100},
     {'wght': -180},
     {'wght': 100},
     {'wght': 180},
     {'wdth': 0.3},
     {'wdth': 0.3, 'wght': 180},
     {'wdth': 0.3, 'wght': 120},
     {'wdth': 0.2, 'wght': 120}]
    >>> pprint(model.deltaWeights)
    [{},
     {0: 1.0},
     {0: 1.0},
     {0: 1.0},
     {0: 1.0},
     {0: 1.0},
     {0: 1.0, 4: 1.0, 5: 1.0},
     {0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},
     {0: 1.0,
      3: 0.75,
      4: 0.25,
      5: 0.6666666666666667,
      6: 0.4444444444444445,
      7: 0.6666666666666667}]
    """

    def __init__(self, locations, axisOrder=None):
        if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
            raise VariationModelError("Locations must be unique.")

        self.origLocations = locations
        self.axisOrder = axisOrder if axisOrder is not None else []

        locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]
        keyFunc = self.getMasterLocationsSortKeyFunc(
            locations, axisOrder=self.axisOrder
        )
        self.locations = sorted(locations, key=keyFunc)

        # Mapping from user's master order to our master order
        self.mapping = [self.locations.index(l) for l in locations]
        self.reverseMapping = [locations.index(l) for l in self.locations]

        self._computeMasterSupports()
        self._subModels = {}

    def getSubModel(self, items):
        if None not in items:
            return self, items
        key = tuple(v is not None for v in items)
        subModel = self._subModels.get(key)
        if subModel is None:
            subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
            self._subModels[key] = subModel
        return subModel, subList(key, items)

    @staticmethod
    def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
        if {} not in locations:
            raise VariationModelError("Base master not found.")
        axisPoints = {}
        for loc in locations:
            if len(loc) != 1:
                continue
            axis = next(iter(loc))
            value = loc[axis]
            if axis not in axisPoints:
                axisPoints[axis] = {0.0}
            assert (
                value not in axisPoints[axis]
            ), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)
            axisPoints[axis].add(value)

        def getKey(axisPoints, axisOrder):
            def sign(v):
                return -1 if v < 0 else +1 if v > 0 else 0

            def key(loc):
                rank = len(loc)
                onPointAxes = [
                    axis
                    for axis, value in loc.items()
                    if axis in axisPoints and value in axisPoints[axis]
                ]
                orderedAxes = [axis for axis in axisOrder if axis in loc]
                orderedAxes.extend(
                    [axis for axis in sorted(loc.keys()) if axis not in axisOrder]
                )
                return (
                    rank,  # First, order by increasing rank
                    -len(onPointAxes),  # Next, by decreasing number of onPoint axes
                    tuple(
                        axisOrder.index(axis) if axis in axisOrder else 0x10000
                        for axis in orderedAxes
                    ),  # Next, by known axes
                    tuple(orderedAxes),  # Next, by all axes
                    tuple(
                        sign(loc[axis]) for axis in orderedAxes
                    ),  # Next, by signs of axis values
                    tuple(
                        abs(loc[axis]) for axis in orderedAxes
                    ),  # Next, by absolute value of axis values
                )

            return key

        ret = getKey(axisPoints, axisOrder)
        return ret

    def reorderMasters(self, master_list, mapping):
        # For changing the master data order without
        # recomputing supports and deltaWeights.
        new_list = [master_list[idx] for idx in mapping]
        self.origLocations = [self.origLocations[idx] for idx in mapping]
        locations = [
            {k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations
        ]
        self.mapping = [self.locations.index(l) for l in locations]
        self.reverseMapping = [locations.index(l) for l in self.locations]
        self._subModels = {}
        return new_list

    def _computeMasterSupports(self):
        self.supports = []
        regions = self._locationsToRegions()
        for i, region in enumerate(regions):
            locAxes = set(region.keys())
            # Walk over previous masters now
            for prev_region in regions[:i]:
                # Masters with extra axes do not participate
                if not set(prev_region.keys()).issubset(locAxes):
                    continue
                # If it's NOT in the current box, it does not participate
                relevant = True
                for axis, (lower, peak, upper) in region.items():
                    if axis not in prev_region or not (
                        prev_region[axis][1] == peak
                        or lower < prev_region[axis][1] < upper
                    ):
                        relevant = False
                        break
                if not relevant:
                    continue

                # Split the box for the new master; split in whatever direction
                # has the largest range ratio.
                #
                # For symmetry, we actually cut across multiple axes
                # if they have the largest, equal, ratio.
                # https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804

                bestAxes = {}
                bestRatio = -1
                for axis in prev_region.keys():
                    val = prev_region[axis][1]
                    assert axis in region
                    lower, locV, upper = region[axis]
                    newLower, newUpper = lower, upper
                    if val < locV:
                        newLower = val
                        ratio = (val - locV) / (lower - locV)
                    elif locV < val:
                        newUpper = val
                        ratio = (val - locV) / (upper - locV)
                    else:  # val == locV
                        # Can't split the box in this direction.
                        continue
                    if ratio > bestRatio:
                        bestAxes = {}
                        bestRatio = ratio
                    if ratio == bestRatio:
                        bestAxes[axis] = (newLower, locV, newUpper)

                for axis, triple in bestAxes.items():
                    region[axis] = triple
            self.supports.append(region)
        self._computeDeltaWeights()

    def _locationsToRegions(self):
        locations = self.locations
        # Compute min/max across each axis, use it as total range.
        # TODO Take this as input from outside?
        minV = {}
        maxV = {}
        for l in locations:
            for k, v in l.items():
                minV[k] = min(v, minV.get(k, v))
                maxV[k] = max(v, maxV.get(k, v))

        regions = []
        for loc in locations:
            region = {}
            for axis, locV in loc.items():
                if locV > 0:
                    region[axis] = (0, locV, maxV[axis])
                else:
                    region[axis] = (minV[axis], locV, 0)
            regions.append(region)
        return regions

    def _computeDeltaWeights(self):
        self.deltaWeights = []
        for i, loc in enumerate(self.locations):
            deltaWeight = {}
            # Walk over previous masters now, populate deltaWeight
            for j, support in enumerate(self.supports[:i]):
                scalar = supportScalar(loc, support)
                if scalar:
                    deltaWeight[j] = scalar
            self.deltaWeights.append(deltaWeight)

    def getDeltas(self, masterValues, *, round=noRound):
        assert len(masterValues) == len(self.deltaWeights)
        mapping = self.reverseMapping
        out = []
        for i, weights in enumerate(self.deltaWeights):
            delta = masterValues[mapping[i]]
            for j, weight in weights.items():
                if weight == 1:
                    delta -= out[j]
                else:
                    delta -= out[j] * weight
            out.append(round(delta))
        return out

    def getDeltasAndSupports(self, items, *, round=noRound):
        model, items = self.getSubModel(items)
        return model.getDeltas(items, round=round), model.supports

    def getScalars(self, loc):
        return [supportScalar(loc, support) for support in self.supports]

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        v = None
        assert len(deltas) == len(scalars)
        for delta, scalar in zip(deltas, scalars):
            if not scalar:
                continue
            contribution = delta * scalar
            if v is None:
                v = contribution
            else:
                v += contribution
        return v

    def interpolateFromDeltas(self, loc, deltas):
        scalars = self.getScalars(loc)
        return self.interpolateFromDeltasAndScalars(deltas, scalars)

    def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
        deltas = self.getDeltas(masterValues, round=round)
        return self.interpolateFromDeltas(loc, deltas)

    def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
        deltas = self.getDeltas(masterValues, round=round)
        return self.interpolateFromDeltasAndScalars(deltas, scalars)


def piecewiseLinearMap(v, mapping):
    keys = mapping.keys()
    if not keys:
        return v
    if v in keys:
        return mapping[v]
    k = min(keys)
    if v < k:
        return v + mapping[k] - k
    k = max(keys)
    if v > k:
        return v + mapping[k] - k
    # Interpolate
    a = max(k for k in keys if k < v)
    b = min(k for k in keys if k > v)
    va = mapping[a]
    vb = mapping[b]
    return va + (vb - va) * (v - a) / (b - a)
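# For example, given an avar-style mapping {0: 0, 0.4: 0.3, 1.0: 1.0},
# piecewiseLinearMap(0.5, mapping) interpolates between the 0.4 and 1.0
# keys: 0.3 + (1.0 - 0.3) * (0.5 - 0.4) / (1.0 - 0.4) = 0.41666...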


def main(args=None):
    """Normalize locations on a given designspace"""
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.models",
        description=main.__doc__,
    )
    parser.add_argument(
        "--loglevel",
        metavar="LEVEL",
        default="INFO",
        help="Logging level (defaults to INFO)",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str)
    group.add_argument(
        "-l",
        "--locations",
        metavar="LOCATION",
        nargs="+",
        help="Master locations as comma-separated coordinates. One must be all zeros.",
    )

    args = parser.parse_args(args)

    configLogger(level=args.loglevel)
    from pprint import pprint

    if args.designspace:
        from fontTools.designspaceLib import DesignSpaceDocument

        doc = DesignSpaceDocument()
        doc.read(args.designspace)
        locs = [s.location for s in doc.sources]
        print("Original locations:")
        pprint(locs)
        doc.normalize()
        print("Normalized locations:")
        locs = [s.location for s in doc.sources]
        pprint(locs)
    else:
        axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
        locs = [
            dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations
        ]

    model = VariationModel(locs)
    print("Sorted locations:")
    pprint(model.locations)
    print("Supports:")
    pprint(model.supports)


if __name__ == "__main__":
    import doctest, sys

    if len(sys.argv) > 1:
        sys.exit(main())

    sys.exit(doctest.testmod().failed)
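A minimal end-to-end sketch of the model (two masters on a single normalized axis; the values are illustrative, not from the commit):

    from fontTools.varLib.models import VariationModel

    model = VariationModel([{}, {"wght": 1.0}])
    # Base value plus one delta: [400, 300]
    deltas = model.getDeltas([400, 700])
    # Halfway along the axis the delta contributes 50%: 550.0
    value = model.interpolateFromMasters({"wght": 0.5}, [400, 700])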
461  venv/Lib/site-packages/fontTools/varLib/mutator.py  Normal file
@@ -0,0 +1,461 @@
"""
Instantiate a variation font.  Run, e.g.:

$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
"""
from fontTools.misc.fixedTools import floatToFixedToFloat, floatToFixed
from fontTools.misc.roundTools import otRound
from fontTools.pens.boundsPen import BoundsPen
from fontTools.ttLib import TTFont, newTable
from fontTools.ttLib.tables import ttProgram
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates, flagOverlapSimple, OVERLAP_COMPOUND
from fontTools.varLib.models import (
    supportScalar,
    normalizeLocation,
    piecewiseLinearMap,
)
from fontTools.varLib.merger import MutatorMerger
from fontTools.varLib.varStore import VarStoreInstancer
from fontTools.varLib.mvar import MVAR_ENTRIES
from fontTools.varLib.iup import iup_delta
import fontTools.subset.cff
import os.path
import logging
from io import BytesIO


log = logging.getLogger("fontTools.varlib.mutator")

# map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest
OS2_WIDTH_CLASS_VALUES = {}
percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
    half = (prev + curr) / 2
    OS2_WIDTH_CLASS_VALUES[half] = i
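# The resulting keys are the midpoints between adjacent percents, e.g.
# {56.25: 1, 68.75: 2, ..., 175.0: 8}; further down, usWidthClass becomes the
# class of the first midpoint above the requested 'wdth', falling back to 9.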


def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
    pd_blend_lists = ("BlueValues", "OtherBlues", "FamilyBlues",
                      "FamilyOtherBlues", "StemSnapH",
                      "StemSnapV")
    pd_blend_values = ("BlueScale", "BlueShift",
                       "BlueFuzz", "StdHW", "StdVW")
    for fontDict in topDict.FDArray:
        pd = fontDict.Private
        vsindex = pd.vsindex if (hasattr(pd, 'vsindex')) else 0
        for key, value in pd.rawDict.items():
            if (key in pd_blend_values) and isinstance(value, list):
                delta = interpolateFromDeltas(vsindex, value[1:])
                pd.rawDict[key] = otRound(value[0] + delta)
            elif (key in pd_blend_lists) and isinstance(value[0], list):
                """If any argument in a BlueValues list is a blend list,
                then they all are. The first value of each list is an
                absolute value. The delta tuples are calculated from
                relative master values, hence we need to append all the
                deltas to date to each successive absolute value."""
                delta = 0
                for i, val_list in enumerate(value):
                    delta += otRound(interpolateFromDeltas(vsindex,
                                                           val_list[1:]))
                    value[i] = val_list[0] + delta


def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
    charstrings = topDict.CharStrings
    for gname in glyphOrder:
        # Interpolate charstring
        # e.g. replace blend op args with regular args,
        # and use and discard the vsindex op.
        charstring = charstrings[gname]
        new_program = []
        vsindex = 0
        last_i = 0
        for i, token in enumerate(charstring.program):
            if token == 'vsindex':
                vsindex = charstring.program[i - 1]
                if last_i != 0:
                    new_program.extend(charstring.program[last_i:i - 1])
                last_i = i + 1
            elif token == 'blend':
                num_regions = charstring.getNumRegions(vsindex)
                numMasters = 1 + num_regions
                num_args = charstring.program[i - 1]
                # The program list starting at program[i] is now:
                # ..args for following operations
                # num_args values from the default font
                # num_args tuples, each with numMasters-1 delta values
                # num_blend_args
                # 'blend'
                argi = i - (num_args * numMasters + 1)
                end_args = tuplei = argi + num_args
                while argi < end_args:
                    next_ti = tuplei + num_regions
                    deltas = charstring.program[tuplei:next_ti]
                    delta = interpolateFromDeltas(vsindex, deltas)
                    charstring.program[argi] += otRound(delta)
                    tuplei = next_ti
                    argi += 1
                new_program.extend(charstring.program[last_i:end_args])
                last_i = i + 1
        if last_i != 0:
            new_program.extend(charstring.program[last_i:])
            charstring.program = new_program


def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
    """Unlike TrueType glyphs, neither advance width nor bounding box
    info is stored in a CFF2 charstring. The width data exists only in
    the hmtx and HVAR tables. Since LSB data cannot be interpolated
    reliably from the master LSB values in the hmtx table, we traverse
    the charstring to determine the actual bounding box."""

    charstrings = topDict.CharStrings
    boundsPen = BoundsPen(glyphOrder)
    hmtx = varfont['hmtx']
    hvar_table = None
    if 'HVAR' in varfont:
        hvar_table = varfont['HVAR'].table
        fvar = varfont['fvar']
        varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)

    for gid, gname in enumerate(glyphOrder):
        entry = list(hmtx[gname])
        # get width delta.
        if hvar_table:
            if hvar_table.AdvWidthMap:
                width_idx = hvar_table.AdvWidthMap.mapping[gname]
            else:
                width_idx = gid
            width_delta = otRound(varStoreInstancer[width_idx])
        else:
            width_delta = 0

        # get LSB.
        boundsPen.init()
        charstring = charstrings[gname]
        charstring.draw(boundsPen)
        if boundsPen.bounds is None:
            # Happens with non-marking glyphs
            lsb_delta = 0
        else:
            lsb = otRound(boundsPen.bounds[0])
            lsb_delta = entry[1] - lsb

        if lsb_delta or width_delta:
            if width_delta:
                entry[0] += width_delta
            if lsb_delta:
                entry[1] = lsb
            hmtx[gname] = tuple(entry)


def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
    """Generate a static instance from a variable TTFont and a dictionary
    defining the desired location along the variable font's axes.
    The location values must be specified as user-space coordinates, e.g.:

        {'wght': 400, 'wdth': 100}

    By default, a new TTFont object is returned. If ``inplace`` is True, the
    input varfont is modified and reduced to a static font.

    When the overlap parameter is defined as True,
    OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1.  See
    https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
    """
    if not inplace:
        # make a copy to leave input varfont unmodified
        stream = BytesIO()
        varfont.save(stream)
        stream.seek(0)
        varfont = TTFont(stream)

    fvar = varfont['fvar']
    axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes}
    loc = normalizeLocation(location, axes)
    if 'avar' in varfont:
        maps = varfont['avar'].segments
        loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()}
    # Quantize to F2Dot14, to avoid surprise interpolations.
    loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()}
    # Location is normalized now
    log.info("Normalized location: %s", loc)

    if 'gvar' in varfont:
        log.info("Mutating glyf/gvar tables")
        gvar = varfont['gvar']
        glyf = varfont['glyf']
        hMetrics = varfont['hmtx'].metrics
        vMetrics = getattr(varfont.get('vmtx'), 'metrics', None)
        # get list of glyph names in gvar sorted by component depth
        glyphnames = sorted(
            gvar.variations.keys(),
            key=lambda name: (
                glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
                if glyf[name].isComposite() else 0,
                name))
        for glyphname in glyphnames:
            variations = gvar.variations[glyphname]
            coordinates, _ = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
            origCoords, endPts = None, None
            for var in variations:
                scalar = supportScalar(loc, var.axes)
                if not scalar:
                    continue
                delta = var.coordinates
                if None in delta:
                    if origCoords is None:
                        origCoords, g = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
                    delta = iup_delta(delta, origCoords, g.endPts)
                coordinates += GlyphCoordinates(delta) * scalar
            glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
    else:
        glyf = None

    if 'cvar' in varfont:
        log.info("Mutating cvt/cvar tables")
        cvar = varfont['cvar']
        cvt = varfont['cvt ']
        deltas = {}
        for var in cvar.variations:
            scalar = supportScalar(loc, var.axes)
            if not scalar:
                continue
            for i, c in enumerate(var.coordinates):
                if c is not None:
                    deltas[i] = deltas.get(i, 0) + scalar * c
        for i, delta in deltas.items():
            cvt[i] += otRound(delta)

    if 'CFF2' in varfont:
        log.info("Mutating CFF2 table")
        glyphOrder = varfont.getGlyphOrder()
        CFF2 = varfont['CFF2']
        topDict = CFF2.cff.topDictIndex[0]
        vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
        interpolateFromDeltas = vsInstancer.interpolateFromDeltas
        interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
        CFF2.desubroutinize()
        interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
        interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
        del topDict.rawDict['VarStore']
        del topDict.VarStore

    if 'MVAR' in varfont:
        log.info("Mutating MVAR table")
        mvar = varfont['MVAR'].table
        varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
        records = mvar.ValueRecord
        for rec in records:
            mvarTag = rec.ValueTag
            if mvarTag not in MVAR_ENTRIES:
                continue
            tableTag, itemName = MVAR_ENTRIES[mvarTag]
            delta = otRound(varStoreInstancer[rec.VarIdx])
            if not delta:
                continue
            setattr(varfont[tableTag], itemName,
                    getattr(varfont[tableTag], itemName) + delta)

    log.info("Mutating FeatureVariations")
    for tableTag in ('GSUB', 'GPOS'):
        if tableTag not in varfont:
            continue
        table = varfont[tableTag].table
        if not getattr(table, 'FeatureVariations', None):
            continue
        variations = table.FeatureVariations
        for record in variations.FeatureVariationRecord:
            applies = True
            for condition in record.ConditionSet.ConditionTable:
                if condition.Format == 1:
                    axisIdx = condition.AxisIndex
                    axisTag = fvar.axes[axisIdx].axisTag
                    Min = condition.FilterRangeMinValue
                    Max = condition.FilterRangeMaxValue
                    v = loc[axisTag]
                    if not (Min <= v <= Max):
                        applies = False
                else:
                    applies = False
                if not applies:
                    break

            if applies:
                assert record.FeatureTableSubstitution.Version == 0x00010000
                for rec in record.FeatureTableSubstitution.SubstitutionRecord:
                    table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = rec.Feature
                break
        del table.FeatureVariations

    if 'GDEF' in varfont and varfont['GDEF'].table.Version >= 0x00010003:
        log.info("Mutating GDEF/GPOS/GSUB tables")
        gdef = varfont['GDEF'].table
        instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)

        merger = MutatorMerger(varfont, instancer)
        merger.mergeTables(varfont, [varfont], ['GDEF', 'GPOS'])

        # Downgrade GDEF.
        del gdef.VarStore
        gdef.Version = 0x00010002
        if gdef.MarkGlyphSetsDef is None:
            del gdef.MarkGlyphSetsDef
            gdef.Version = 0x00010000

        if not (gdef.LigCaretList or
                gdef.MarkAttachClassDef or
                gdef.GlyphClassDef or
                gdef.AttachList or
                (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)):
            del varfont['GDEF']

    addidef = False
    if glyf:
        for glyph in glyf.glyphs.values():
            if hasattr(glyph, "program"):
                instructions = glyph.program.getAssembly()
                # If the GETVARIATION opcode is used in the bytecode of any glyph, add an IDEF
                addidef = any(op.startswith("GETVARIATION") for op in instructions)
                if addidef:
                    break
        if overlap:
            for glyph_name in glyf.keys():
                glyph = glyf[glyph_name]
                # Set OVERLAP_COMPOUND bit for compound glyphs
                if glyph.isComposite():
                    glyph.components[0].flags |= OVERLAP_COMPOUND
                # Set OVERLAP_SIMPLE bit for simple glyphs
                elif glyph.numberOfContours > 0:
                    glyph.flags[0] |= flagOverlapSimple
    if addidef:
        log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
        asm = []
        if 'fpgm' in varfont:
            fpgm = varfont['fpgm']
            asm = fpgm.program.getAssembly()
        else:
            fpgm = newTable('fpgm')
            fpgm.program = ttProgram.Program()
            varfont['fpgm'] = fpgm
        asm.append("PUSHB[000] 145")
        asm.append("IDEF[ ]")
        args = [str(len(loc))]
        for a in fvar.axes:
            args.append(str(floatToFixed(loc[a.axisTag], 14)))
        asm.append("NPUSHW[ ] " + ' '.join(args))
        asm.append("ENDF[ ]")
        fpgm.program.fromAssembly(asm)

        # Change maxp attributes as IDEF is added
        if 'maxp' in varfont:
            maxp = varfont['maxp']
            setattr(maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0))
            setattr(maxp, "maxStackElements", max(len(loc), getattr(maxp, "maxStackElements", 0)))

    if 'name' in varfont:
        log.info("Pruning name table")
        exclude = {a.axisNameID for a in fvar.axes}
        for i in fvar.instances:
            exclude.add(i.subfamilyNameID)
            exclude.add(i.postscriptNameID)
        if 'ltag' in varfont:
            # Drop the whole 'ltag' table if all its language tags are referenced by
            # name records to be pruned.
            # TODO: prune unused ltag tags and re-enumerate langIDs accordingly
            excludedUnicodeLangIDs = [
                n.langID for n in varfont['name'].names
                if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
            ]
            if set(excludedUnicodeLangIDs) == set(range(len(varfont['ltag'].tags))):
                del varfont['ltag']
        varfont['name'].names[:] = [
            n for n in varfont['name'].names
            if n.nameID not in exclude
        ]

    if "wght" in location and "OS/2" in varfont:
        varfont["OS/2"].usWeightClass = otRound(
            max(1, min(location["wght"], 1000))
        )
    if "wdth" in location:
        wdth = location["wdth"]
        for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
            if wdth < percent:
                varfont["OS/2"].usWidthClass = widthClass
                break
        else:
            varfont["OS/2"].usWidthClass = 9
    if "slnt" in location and "post" in varfont:
        varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))

    log.info("Removing variable tables")
    for tag in ('avar', 'cvar', 'fvar', 'gvar', 'HVAR', 'MVAR', 'VVAR', 'STAT'):
        if tag in varfont:
            del varfont[tag]

    return varfont


def main(args=None):
    """Instantiate a variation font"""
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.mutator", description="Instantiate a variable font")
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input variable TTF file.")
    parser.add_argument(
        "locargs", metavar="AXIS=LOC", nargs="*",
        help="List of space separated locations. A location consists of "
        "the name of a variation axis, followed by '=' and a number. E.g.: "
        " wght=700 wdth=80. The default is the location of the base master.")
    parser.add_argument(
        "-o", "--output", metavar="OUTPUT.ttf", default=None,
        help="Output instance TTF file (default: INPUT-instance.ttf).")
    logging_group = parser.add_mutually_exclusive_group(required=False)
    logging_group.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely.")
    logging_group.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off.")
    parser.add_argument(
        "--no-overlap",
        dest="overlap",
        action="store_false",
        help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags."
    )
    options = parser.parse_args(args)

    varfilename = options.input
    outfile = (
        os.path.splitext(varfilename)[0] + '-instance.ttf'
        if not options.output else options.output)
    configLogger(level=(
        "DEBUG" if options.verbose else
        "ERROR" if options.quiet else
        "INFO"))

    loc = {}
    for arg in options.locargs:
        try:
            tag, val = arg.split('=')
            assert len(tag) <= 4
            loc[tag.ljust(4)] = float(val)
        except (ValueError, AssertionError):
            parser.error("invalid location argument format: %r" % arg)
    log.info("Location: %s", loc)

    log.info("Loading variable font")
    varfont = TTFont(varfilename)

    instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap)

    log.info("Saving instance font %s", outfile)
    varfont.save(outfile)


if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest
    sys.exit(doctest.testmod().failed)
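The module can also be driven from Python rather than the command line; a minimal sketch mirroring the docstring's example (the output filename is an assumption):

    from fontTools.ttLib import TTFont
    from fontTools.varLib.mutator import instantiateVariableFont

    varfont = TTFont("NotoSansArabic-VF.ttf")
    # Returns a new, static TTFont by default (inplace=False)
    static = instantiateVariableFont(varfont, {"wght": 140, "wdth": 85})
    static.save("NotoSansArabic-Instance.ttf")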
606  venv/Lib/site-packages/fontTools/varLib/varStore.py  Normal file
@@ -0,0 +1,606 @@
from fontTools.misc.roundTools import noRound, otRound
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib.models import supportScalar
from fontTools.varLib.builder import (buildVarRegionList, buildVarStore,
                                      buildVarRegion, buildVarData)
from functools import partial
from collections import defaultdict


def _getLocationKey(loc):
    return tuple(sorted(loc.items(), key=lambda kv: kv[0]))


class OnlineVarStoreBuilder(object):

    def __init__(self, axisTags):
        self._axisTags = axisTags
        self._regionMap = {}
        self._regionList = buildVarRegionList([], axisTags)
        self._store = buildVarStore(self._regionList, [])
        self._data = None
        self._model = None
        self._supports = None
        self._varDataIndices = {}
        self._varDataCaches = {}
        self._cache = {}

    def setModel(self, model):
        self.setSupports(model.supports)
        self._model = model

    def setSupports(self, supports):
        self._model = None
        self._supports = list(supports)
        if not self._supports[0]:
            del self._supports[0]  # Drop base master support
        self._cache = {}
        self._data = None

    def finish(self, optimize=True):
        self._regionList.RegionCount = len(self._regionList.Region)
        self._store.VarDataCount = len(self._store.VarData)
        for data in self._store.VarData:
            data.ItemCount = len(data.Item)
            data.calculateNumShorts(optimize=optimize)
        return self._store

    def _add_VarData(self):
        regionMap = self._regionMap
        regionList = self._regionList

        regions = self._supports
        regionIndices = []
        for region in regions:
            key = _getLocationKey(region)
            idx = regionMap.get(key)
            if idx is None:
                varRegion = buildVarRegion(region, self._axisTags)
                idx = regionMap[key] = len(regionList.Region)
                regionList.Region.append(varRegion)
            regionIndices.append(idx)

        # Check if we have one already...
        key = tuple(regionIndices)
        varDataIdx = self._varDataIndices.get(key)
        if varDataIdx is not None:
            self._outer = varDataIdx
            self._data = self._store.VarData[varDataIdx]
            self._cache = self._varDataCaches[key]
            if len(self._data.Item) == 0xFFFF:
                # This one is full.  Need a new one.
                varDataIdx = None

        if varDataIdx is None:
            self._data = buildVarData(regionIndices, [], optimize=False)
            self._outer = len(self._store.VarData)
            self._store.VarData.append(self._data)
            self._varDataIndices[key] = self._outer
            if key not in self._varDataCaches:
                self._varDataCaches[key] = {}
            self._cache = self._varDataCaches[key]

    def storeMasters(self, master_values):
        deltas = self._model.getDeltas(master_values, round=round)
        base = deltas.pop(0)
        return base, self.storeDeltas(deltas, round=noRound)

    def storeDeltas(self, deltas, *, round=round):
        deltas = [round(d) for d in deltas]
        if len(deltas) == len(self._supports) + 1:
            deltas = tuple(deltas[1:])
        else:
            assert len(deltas) == len(self._supports)
            deltas = tuple(deltas)

        varIdx = self._cache.get(deltas)
        if varIdx is not None:
            return varIdx

        if not self._data:
            self._add_VarData()
        inner = len(self._data.Item)
        if inner == 0xFFFF:
            # Full array.  Start a new one.
            self._add_VarData()
            return self.storeDeltas(deltas)
        self._data.addItem(deltas, round=noRound)

        varIdx = (self._outer << 16) + inner
        self._cache[deltas] = varIdx
        return varIdx
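    # Typical use (a sketch; the axis tag and master values are illustrative):
    # pair the builder with a VariationModel, then store per-master values:
    #
    #   builder = OnlineVarStoreBuilder(["wght"])
    #   builder.setModel(VariationModel([{}, {"wght": 1.0}]))
    #   base, varIdx = builder.storeMasters([400, 700])  # base == 400
    #   store = builder.finish()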

def VarData_addItem(self, deltas, *, round=round):
    deltas = [round(d) for d in deltas]

    countUs = self.VarRegionCount
    countThem = len(deltas)
    if countUs + 1 == countThem:
        deltas = tuple(deltas[1:])
    else:
        assert countUs == countThem, (countUs, countThem)
        deltas = tuple(deltas)
    self.Item.append(list(deltas))
    self.ItemCount = len(self.Item)

ot.VarData.addItem = VarData_addItem

def VarRegion_get_support(self, fvar_axes):
    return {
        fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)
        for i, reg in enumerate(self.VarRegionAxis)
        if reg.PeakCoord != 0
    }

ot.VarRegion.get_support = VarRegion_get_support

class VarStoreInstancer(object):

    def __init__(self, varstore, fvar_axes, location={}):
        self.fvar_axes = fvar_axes
        assert varstore is None or varstore.Format == 1
        self._varData = varstore.VarData if varstore else []
        self._regions = varstore.VarRegionList.Region if varstore else []
        self.setLocation(location)

    def setLocation(self, location):
        self.location = dict(location)
        self._clearCaches()

    def _clearCaches(self):
        self._scalars = {}

    def _getScalar(self, regionIdx):
        scalar = self._scalars.get(regionIdx)
        if scalar is None:
            support = self._regions[regionIdx].get_support(self.fvar_axes)
            scalar = supportScalar(self.location, support)
            self._scalars[regionIdx] = scalar
        return scalar

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        delta = 0.
        for d, s in zip(deltas, scalars):
            if not s:
                continue
            delta += d * s
        return delta

    def __getitem__(self, varidx):
        major, minor = varidx >> 16, varidx & 0xFFFF
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]
        deltas = varData[major].Item[minor]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)
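    # A VarIdx packs the (outer, inner) pair built by OnlineVarStoreBuilder:
    # the high 16 bits select the VarData subtable (major) and the low 16
    # bits select the delta-set row within it (minor), i.e.
    # varidx == (major << 16) + minor.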

    def interpolateFromDeltas(self, varDataIndex, deltas):
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in
                   varData[varDataIndex].VarRegionIndex]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)


#
# Optimizations
#
# retainFirstMap - If true, major 0 mappings are retained.  Deltas for unused indices are zeroed.
# advIdxes - Set of major 0 indices for advance deltas, to be listed first.  Other major 0 indices follow.

def VarStore_subset_varidxes(self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set()):

    # Sort out used varIdxes by major/minor.
    used = {}
    for varIdx in varIdxes:
        major = varIdx >> 16
        minor = varIdx & 0xFFFF
        d = used.get(major)
        if d is None:
            d = used[major] = set()
        d.add(minor)
    del varIdxes

    #
    # Subset VarData
    #

    varData = self.VarData
    newVarData = []
    varDataMap = {}
    for major, data in enumerate(varData):
        usedMinors = used.get(major)
        if usedMinors is None:
            continue
        newMajor = len(newVarData)
        newVarData.append(data)

        items = data.Item
        newItems = []
        if major == 0 and retainFirstMap:
            for minor in range(len(items)):
                newItems.append(items[minor] if minor in usedMinors else [0] * len(items[minor]))
                varDataMap[minor] = minor
        else:
            if major == 0:
                minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
            else:
                minors = sorted(usedMinors)
            for minor in minors:
                newMinor = len(newItems)
                newItems.append(items[minor])
                varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor

        data.Item = newItems
        data.ItemCount = len(data.Item)

        data.calculateNumShorts(optimize=optimize)

    self.VarData = newVarData
    self.VarDataCount = len(self.VarData)

    self.prune_regions()

    return varDataMap

ot.VarStore.subset_varidxes = VarStore_subset_varidxes
|
||||
|
||||
def VarStore_prune_regions(self):
|
||||
"""Remove unused VarRegions."""
|
||||
#
|
||||
# Subset VarRegionList
|
||||
#
|
||||
|
||||
# Collect.
|
||||
usedRegions = set()
|
||||
for data in self.VarData:
|
||||
usedRegions.update(data.VarRegionIndex)
|
||||
# Subset.
|
||||
regionList = self.VarRegionList
|
||||
regions = regionList.Region
|
||||
newRegions = []
|
||||
regionMap = {}
|
||||
for i in sorted(usedRegions):
|
||||
regionMap[i] = len(newRegions)
|
||||
newRegions.append(regions[i])
|
||||
regionList.Region = newRegions
|
||||
regionList.RegionCount = len(regionList.Region)
|
||||
# Map.
|
||||
for data in self.VarData:
|
||||
data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]
|
||||
|
||||
ot.VarStore.prune_regions = VarStore_prune_regions
|
||||
|
||||
|
||||
def _visit(self, func):
|
||||
"""Recurse down from self, if type of an object is ot.Device,
|
||||
call func() on it. Works on otData-style classes."""
|
||||
|
||||
if type(self) == ot.Device:
|
||||
func(self)
|
||||
|
||||
elif isinstance(self, list):
|
||||
for that in self:
|
||||
_visit(that, func)
|
||||
|
||||
elif hasattr(self, 'getConverters') and not hasattr(self, 'postRead'):
|
||||
for conv in self.getConverters():
|
||||
that = getattr(self, conv.name, None)
|
||||
if that is not None:
|
||||
_visit(that, func)
|
||||
|
||||
elif isinstance(self, ot.ValueRecord):
|
||||
for that in self.__dict__.values():
|
||||
_visit(that, func)
|
||||
|
||||
def _Device_recordVarIdx(self, s):
|
||||
"""Add VarIdx in this Device table (if any) to the set s."""
|
||||
if self.DeltaFormat == 0x8000:
|
||||
s.add((self.StartSize<<16)+self.EndSize)

def Object_collect_device_varidxes(self, varidxes):
    adder = partial(_Device_recordVarIdx, s=varidxes)
    _visit(self, adder)

ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes
ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes

def _Device_mapVarIdx(self, mapping, done):
    """Map VarIdx in this Device table (if any) through mapping."""
    if id(self) in done:
        return
    done.add(id(self))
    if self.DeltaFormat == 0x8000:
        varIdx = mapping[(self.StartSize<<16)+self.EndSize]
        self.StartSize = varIdx >> 16
        self.EndSize = varIdx & 0xFFFF

def Object_remap_device_varidxes(self, varidxes_map):
    mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())
    _visit(self, mapper)

ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes
ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes


class _Encoding(object):

    def __init__(self, chars):
        self.chars = chars
        self.width = self._popcount(chars)
        self.overhead = self._characteristic_overhead(chars)
        self.items = set()

    def append(self, row):
        self.items.add(row)

    def extend(self, lst):
        self.items.update(lst)

    def get_room(self):
        """Maximum number of bytes that can be added to the characteristic
        while merging it into another one is still beneficial."""
        count = len(self.items)
        return max(0, (self.overhead - 1) // count - self.width)
    room = property(get_room)
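
    # Worked example (hypothetical numbers): with overhead=10, width=2 and
    # 3 items, room = max(0, (10 - 1) // 3 - 2) = 1, i.e. each row could grow
    # by one byte and merging this characteristic away could still break even.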

    @property
    def gain(self):
        """Maximum possible byte gain from merging this into another
        characteristic."""
        count = len(self.items)
        return max(0, self.overhead - count * (self.width + 1))
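
    # Worked example (same hypothetical numbers): with overhead=10, width=2
    # and 3 items, gain = max(0, 10 - 3 * (2 + 1)) = 1 byte, the most that
    # merging these rows into another characteristic could save.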

    def sort_key(self):
        return self.width, self.chars

    def __len__(self):
        return len(self.items)

    def can_encode(self, chars):
        return not (chars & ~self.chars)

    def __sub__(self, other):
        return self._popcount(self.chars & ~other.chars)

    @staticmethod
    def _popcount(n):
        # Apparently this is the fastest native way to do it...
        # https://stackoverflow.com/a/9831671
        return bin(n).count('1')

    @staticmethod
    def _characteristic_overhead(chars):
        """Returns overhead in bytes of encoding this characteristic
        as a VarData."""
        c = 6
        while chars:
            if chars & 0b1111:
                c += 2
            chars >>= 4
        return c
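
    # Worked example: chars == 0b00110001 (column 0 one byte, column 1 two
    # bytes) has two nonzero nibbles, so the overhead is 6 + 2 + 2 = 10 bytes
    # for encoding this characteristic as its own VarData.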

    def _find_yourself_best_new_encoding(self, done_by_width):
        self.best_new_encoding = None
        for new_width in range(self.width+1, self.width+self.room+1):
            for new_encoding in done_by_width[new_width]:
                if new_encoding.can_encode(self.chars):
                    break
            else:
                new_encoding = None
            self.best_new_encoding = new_encoding


class _EncodingDict(dict):

    def __missing__(self, chars):
        r = self[chars] = _Encoding(chars)
        return r

    def add_row(self, row):
        chars = self._row_characteristics(row)
        self[chars].append(row)

    @staticmethod
    def _row_characteristics(row):
        """Returns encoding characteristics for a row."""
        longWords = False

        chars = 0
        i = 1
        for v in row:
            if v:
                chars += i
                if not (-128 <= v <= 127):
                    chars += i * 0b0010
                if not (-32768 <= v <= 32767):
                    longWords = True
                    break
            i <<= 4

        if longWords:
            # Redo; only allow 2-byte/4-byte encodings.
            chars = 0
            i = 1
            for v in row:
                if v:
                    chars += i * 0b0011
                    if not (-32768 <= v <= 32767):
                        chars += i * 0b1100
                i <<= 4

        return chars
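
# Worked example: for row (1, 200, 0), column 0 fits in one byte
# (nibble 0b0001), column 1 needs two bytes (nibble 0b0011), and column 2 is
# zero, so _row_characteristics returns 0b00110001 == 0x31.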


def VarStore_optimize(self):
    """Optimize storage. Returns mapping from old VarIdxes to new ones."""

    # TODO
    # Check that no two VarRegions are the same; if they are, fold them.

    n = len(self.VarRegionList.Region)  # Number of columns
    zeroes = [0] * n

    front_mapping = {}  # Map from old VarIdxes to full row tuples

    encodings = _EncodingDict()

    # Collect all items into a set of full rows (with lots of zeroes).
    for major,data in enumerate(self.VarData):
        regionIndices = data.VarRegionIndex

        for minor,item in enumerate(data.Item):

            row = list(zeroes)
            for regionIdx,v in zip(regionIndices, item):
                row[regionIdx] += v
            row = tuple(row)

            encodings.add_row(row)
            front_mapping[(major<<16)+minor] = row

    # Separate encodings that have no gain (they are decided) from those with
    # possible gain (they may be merged into others).
    encodings = sorted(encodings.values(), key=_Encoding.__len__, reverse=True)
    done_by_width = defaultdict(list)
    todo = []
    for encoding in encodings:
        if not encoding.gain:
            done_by_width[encoding.width].append(encoding)
        else:
            todo.append(encoding)

    # For each encoding that may be merged, find the best match among the
    # decided encodings, and record it.
    todo.sort(key=_Encoding.get_room)
    for encoding in todo:
        encoding._find_yourself_best_new_encoding(done_by_width)

    # Walk through the todo encodings; for each, see if merging it with
    # another todo encoding gains more than each of them merging with
    # their best decided encoding. If yes, merge them and add the resulting
    # encoding back to the todo queue. If not, move the encoding to the
    # decided list. Repeat till done.
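    #
    # A small worked example with hypothetical encodings: two one-column
    # characteristics 0b0001 and 0b00010000 (width 1, overhead 8 each)
    # combine into 0b00010001 (width 2, overhead 10). With 1 and 2 rows
    # respectively and no decided candidates (separate_gain == 0),
    # combined_gain = 8 + 8 - 10 - (2-1)*1 - (2-1)*2 = 3 > 0, so merging
    # wins; with 3 and 5 rows it would be 16 - 10 - 3 - 5 = -2 and both
    # encodings would be decided as-is.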
    while todo:
        encoding = todo.pop()
        best_idx = None
        best_gain = 0
        for i,other_encoding in enumerate(todo):
            combined_chars = other_encoding.chars | encoding.chars
            combined_width = _Encoding._popcount(combined_chars)
            combined_overhead = _Encoding._characteristic_overhead(combined_chars)
            combined_gain = (
                + encoding.overhead
                + other_encoding.overhead
                - combined_overhead
                - (combined_width - encoding.width) * len(encoding)
                - (combined_width - other_encoding.width) * len(other_encoding)
            )
            this_gain = 0 if encoding.best_new_encoding is None else (
                + encoding.overhead
                - (encoding.best_new_encoding.width - encoding.width) * len(encoding)
            )
            other_gain = 0 if other_encoding.best_new_encoding is None else (
                + other_encoding.overhead
                - (other_encoding.best_new_encoding.width - other_encoding.width) * len(other_encoding)
            )
            separate_gain = this_gain + other_gain

            if combined_gain > separate_gain:
                best_idx = i
                best_gain = combined_gain - separate_gain

        if best_idx is None:
            # Encoding is decided as-is.
            done_by_width[encoding.width].append(encoding)
        else:
            other_encoding = todo[best_idx]
            combined_chars = other_encoding.chars | encoding.chars
            combined_encoding = _Encoding(combined_chars)
            combined_encoding.extend(encoding.items)
            combined_encoding.extend(other_encoding.items)
            combined_encoding._find_yourself_best_new_encoding(done_by_width)
            del todo[best_idx]
            todo.append(combined_encoding)

    # Assemble final store.
    back_mapping = {}  # Mapping from full rows to new VarIdxes
    encodings = sum(done_by_width.values(), [])
    encodings.sort(key=_Encoding.sort_key)
    self.VarData = []
    for major,encoding in enumerate(encodings):
        data = ot.VarData()
        self.VarData.append(data)
        data.VarRegionIndex = range(n)
        data.VarRegionCount = len(data.VarRegionIndex)
        data.Item = sorted(encoding.items)
        for minor,item in enumerate(data.Item):
            back_mapping[item] = (major<<16)+minor

    # Compile final mapping.
    varidx_map = {}
    for k,v in front_mapping.items():
        varidx_map[k] = back_mapping[v]

    # Remove unused regions.
    self.prune_regions()

    # Recalculate things and go home.
    self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
    self.VarDataCount = len(self.VarData)
    for data in self.VarData:
        data.ItemCount = len(data.Item)
        data.optimize()

    return varidx_map

ot.VarStore.optimize = VarStore_optimize
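
# Example invocation (a sketch; assumes this module is runnable as a script
# and the font paths are hypothetical):
#
#     python -m fontTools.varLib.varStore MyFont-VF.ttf MyFont-VF.opt.ttf
#
# main() below prints the compiled GDEF VarStore size before and after
# optimization and saves the result if an output path is given.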


def main(args=None):
    """Optimize a font's GDEF variation store"""
    from argparse import ArgumentParser
    from fontTools import configLogger
    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.otBase import OTTableWriter

    parser = ArgumentParser(prog='varLib.varStore', description=main.__doc__)
    parser.add_argument('fontfile')
    parser.add_argument('outfile', nargs='?')
    options = parser.parse_args(args)

    # TODO: allow user to configure logging via command-line options
    configLogger(level="INFO")

    fontfile = options.fontfile
    outfile = options.outfile

    font = TTFont(fontfile)
    gdef = font['GDEF']
    store = gdef.table.VarStore

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("Before: %7d bytes" % size)

    varidx_map = store.optimize()

    gdef.table.remap_device_varidxes(varidx_map)
    if 'GPOS' in font:
        font['GPOS'].table.remap_device_varidxes(varidx_map)

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("After: %7d bytes" % size)

    if outfile is not None:
        font.save(outfile)


if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest
    sys.exit(doctest.testmod().failed)