Add missing files
4 .gitignore (vendored)
@@ -360,4 +360,6 @@ MigrationBackup/
 .ionide/

 # Fody - auto-generated XML schema
 FodyWeavers.xsd
+
+/venv/

3 .idea/.gitignore (generated, vendored, new file)
@@ -0,0 +1,3 @@
# Default ignored files
/shelf/
/workspace.xml

14 .idea/Reskreen.iml (generated, new file)
@@ -0,0 +1,14 @@
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="PyDocumentationSettings">
    <option name="format" value="PLAIN" />
    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>

6 .idea/inspectionProfiles/profiles_settings.xml (generated, new file)
@@ -0,0 +1,6 @@
<component name="InspectionProjectProfileManager">
  <settings>
    <option name="USE_PROJECT_PROFILE" value="false" />
    <version value="1.0" />
  </settings>
</component>

7 .idea/misc.xml (generated, new file)
@@ -0,0 +1,7 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (Reskreen)" project-jdk-type="Python SDK" />
  <component name="PyCharmProfessionalAdvertiser">
    <option name="shown" value="true" />
  </component>
</project>

8 .idea/modules.xml (generated, new file)
@@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ProjectModuleManager">
    <modules>
      <module fileurl="file://$PROJECT_DIR$/.idea/Reskreen.iml" filepath="$PROJECT_DIR$/.idea/Reskreen.iml" />
    </modules>
  </component>
</project>

6 .idea/vcs.xml (generated, new file)
@@ -0,0 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="VcsDirectoryMappings">
    <mapping directory="$PROJECT_DIR$" vcs="Git" />
  </component>
</project>

collabs/migrations/… (new file; file name not captured)
@@ -0,0 +1,18 @@
# Generated by Django 4.0 on 2022-01-31 17:19

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('collabs', '0007_rename_user_collabs_hour_collaborateur_and_more'),
    ]

    operations = [
        migrations.RenameField(
            model_name='collabs_hour',
            old_name='Collaborateur',
            new_name='user',
        ),
    ]

23 collabs/migrations/0012_collabs_hour_dtdate_alter_collabs_hour_bnoticed.py (new file)
@@ -0,0 +1,23 @@
# Generated by Django 4.0 on 2022-02-16 12:27

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('collabs', '0011_alter_collabs_hour_options_alter_collabs_hour_nhour'),
    ]

    operations = [
        migrations.AddField(
            model_name='collabs_hour',
            name='dtDate',
            field=models.DateTimeField(auto_now=True, verbose_name='Date'),
        ),
        migrations.AlterField(
            model_name='collabs_hour',
            name='bNoticed',
            field=models.BooleanField(blank=True, default=False, verbose_name='Vérifiée'),
        ),
    ]

18 collabs/migrations/0013_alter_collabs_hour_dtdate.py (new file)
@@ -0,0 +1,18 @@
# Generated by Django 4.0 on 2022-02-16 12:31

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('collabs', '0012_collabs_hour_dtdate_alter_collabs_hour_bnoticed'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collabs_hour',
            name='dtDate',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Date'),
        ),
    ]

18 collabs/migrations/0014_alter_collabs_hour_dtdate.py (new file)
@@ -0,0 +1,18 @@
# Generated by Django 4.0 on 2022-02-16 12:39

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('collabs', '0013_alter_collabs_hour_dtdate'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collabs_hour',
            name='dtDate',
            field=models.DateTimeField(verbose_name='Date'),
        ),
    ]

23 collabs/migrations/0015_alter_collabs_hour_dtdate_and_more.py (new file)
@@ -0,0 +1,23 @@
# Generated by Django 4.0 on 2022-02-16 12:41

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('collabs', '0014_alter_collabs_hour_dtdate'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collabs_hour',
            name='dtDate',
            field=models.DateField(verbose_name='Date'),
        ),
        migrations.AlterField(
            model_name='collabs_hour',
            name='sRemarques',
            field=models.TextField(blank=True, verbose_name='Remarques'),
        ),
    ]

18 collabs/migrations/0016_collabs_hour_sbases.py (new file)
@@ -0,0 +1,18 @@
# Generated by Django 4.0 on 2022-02-16 12:54

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('collabs', '0015_alter_collabs_hour_dtdate_and_more'),
    ]

    operations = [
        migrations.AddField(
            model_name='collabs_hour',
            name='sBases',
            field=models.CharField(choices=[('1', 'Monthey'), ('2', 'Uvrier')], default=1, max_length=1, verbose_name='Employé de la base de'),
        ),
    ]
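
Read together, migrations 0012 through 0016 trace the evolution of the Collabs_hour model. A hypothetical back-translation of the model as of 0016 follows; the definitions of fields this diff never touches (user, nHour, nMinutes) are guesses, and this file is not part of the commit:

    # Sketch of collabs/models.py implied by the migrations above (assumptions marked).
    from django.conf import settings
    from django.db import models

    class Collabs_hour(models.Model):
        # renamed from 'Collaborateur'; target model is an assumption
        user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
        # added as DateTimeField(auto_now=True) in 0012, relaxed to a plain DateField by 0015
        dtDate = models.DateField(verbose_name='Date')
        bNoticed = models.BooleanField(blank=True, default=False, verbose_name='Vérifiée')
        sRemarques = models.TextField(blank=True, verbose_name='Remarques')
        # note: the migration records default=1 (an int); '1' would match the choices
        sBases = models.CharField(choices=[('1', 'Monthey'), ('2', 'Uvrier')],
                                  default=1, max_length=1,
                                  verbose_name='Employé de la base de')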

BIN collabs/static/collabs/images/background.gif (new file; binary not shown; 2.7 MiB)

12 collabs/static/collabs/style.css (new file)
@@ -0,0 +1,12 @@
li a {
    color: green;
}
body {
    color: red;
}

table, th, td {
    border: 1px solid black;
    border-collapse: collapse;
    padding: 5px;
}

47 collabs/templates/collabs_hour/detail.html (new file)
@@ -0,0 +1,47 @@
{% load static %}
<link rel="stylesheet" type="text/css" href="{% static 'collabs/style.css' %}">

{% if latest_hour_list %}
    <h1>Liste des heures supplémentaires:</h1>
    <table>
        <tr>
            <th>Date</th>
            <th>Qui</th>
            <th>Combien</th>
            <th>Total du mois</th>
            <th>Traité</th>
        </tr>
        {% for collabs_hour in latest_hour_list %}
        <tr>
            <td>
                <a href="/admin/collabs/collabs_hour/{{ collabs_hour.id }}/change/">{{ collabs_hour.dtCreated }}</a>
            </td>
            <td>
                {{ collabs_hour.user }}
            </td>
            <td>
                {{ collabs_hour.nHour }}:{{ collabs_hour.nMinutes }}
            </td>
            <td>
                {{ collabs_hour.total }}
            </td>
            <td>
                {{ collabs_hour.bNoticed }}
            </td>
        </tr>
        {% endfor %}
    </table>
    <ul>
        {% for collabs_hour in latest_hour_list %}
        <li><a href="/admin/collabs/collabs_hour/{{ collabs_hour.id }}/change/">{{ collabs_hour.dtCreated }} {{ collabs_hour.user }} ({{ collabs_hour.total }}) => {{ collabs_hour.nHour }}:{{ collabs_hour.nMinutes }}</a></li>
        {% endfor %}
    </ul>
{% else %}
    <p>No Collabs_hour.</p>
{% endif %}

11 collabs/urls.py (new file)
@@ -0,0 +1,11 @@
from django.urls import path

from . import views

app_name = "collabs"
urlpatterns = [
    path('export/<year>/<month>', views.ExportView.as_view(), name='detail'),
]
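
views.ExportView is referenced here, but collabs/views.py is not part of this diff. A minimal hypothetical sketch of a class-based view that would satisfy the route above (the real implementation presumably renders the month's export):

    from django.http import HttpResponse
    from django.views import View

    class ExportView(View):
        # <year>/<month> use the default string path converter,
        # so both arguments arrive as strings.
        def get(self, request, year, month):
            return HttpResponse(f"Export for {year}-{month}")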

32 polls/migrations/0001_initial.py (new file)
@@ -0,0 +1,32 @@
# Generated by Django 4.0 on 2022-02-16 12:27

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_txt', models.CharField(max_length=250, verbose_name='Description')),
                ('pub_date', models.DateTimeField(verbose_name='Date de publication')),
            ],
        ),
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_txt', models.CharField(max_length=200, verbose_name='Nom')),
                ('votes', models.IntegerField(default=0)),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.question')),
            ],
        ),
    ]
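
This initial migration back-translates to roughly the following polls/models.py (a reconstruction from the CreateModel operations; __str__ and helper methods are omitted, and this file is not part of the commit):

    from django.db import models

    class Question(models.Model):
        question_txt = models.CharField(max_length=250, verbose_name='Description')
        pub_date = models.DateTimeField(verbose_name='Date de publication')

    class Choice(models.Model):
        question = models.ForeignKey(Question, on_delete=models.CASCADE)
        choice_txt = models.CharField(max_length=200, verbose_name='Nom')
        votes = models.IntegerField(default=0)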

BIN polls/static/polls/images/background.gif (new file; binary not shown; 2.7 MiB)

6 polls/static/polls/style.css (new file)
@@ -0,0 +1,6 @@
li a {
    color: green;
}
body {
    background: white url("images/background.gif") no-repeat;
}

9 polls/templates/polls/results.html (new file)
@@ -0,0 +1,9 @@
<h1>{{ question.question_txt }}</h1>

<ul>
{% for choice in question.choice_set.all %}
    <li>{{ choice.choice_txt }} -- {{ choice.votes }} vote{{ choice.votes|pluralize }}</li>
{% endfor %}
</ul>

<a href="{% url 'polls:detail' question.id %}">Vote again?</a>

9 templates/admin/base_site.html (new file)
@@ -0,0 +1,9 @@
{% extends "admin/base.html" %}

{% block title %}{% if subtitle %}{{ subtitle }} | {% endif %}{{ title }} | Admin Reskreen Clerc{% endblock %}

{% block branding %}
<h1 id="site-name"><a href="{% url 'admin:index' %}">Intranet Clerc V3.x</a></h1>
{% endblock %}

{% block nav-global %}{% endblock %}

89 templates/admin/collabs/Collabs_hour/change_list.html (new file)
@@ -0,0 +1,89 @@
{% extends "admin/base_site.html" %}
{% load i18n admin_urls static admin_list %}

{% block extrastyle %}
  {{ block.super }}
  <link rel="stylesheet" type="text/css" href="{% static "admin/css/changelists.css" %}">
  {% if cl.formset %}
    <link rel="stylesheet" type="text/css" href="{% static "admin/css/forms.css" %}">
  {% endif %}
  {% if cl.formset or action_form %}
    <script src="{% url 'admin:jsi18n' %}"></script>
  {% endif %}
  {{ media.css }}
  {% if not actions_on_top and not actions_on_bottom %}
    <style>
      #changelist table thead th:first-child {width: inherit}
    </style>
  {% endif %}
{% endblock %}

{% block extrahead %}
  {{ block.super }}
  {{ media.js }}
{% endblock %}

{% block bodyclass %}{{ block.super }} app-{{ opts.app_label }} model-{{ opts.model_name }} change-list{% endblock %}

{% if not is_popup %}
  {% block breadcrumbs %}
    <div class="breadcrumbs">
      <a href="{% url 'admin:index' %}">{% translate 'Home' %}</a>
      › <a href="{% url 'admin:app_list' app_label=cl.opts.app_label %}">{{ cl.opts.app_config.verbose_name }}</a>
      › {{ cl.opts.verbose_name_plural|capfirst }}
    </div>
  {% endblock %}
{% endif %}

{% block coltype %}{% endblock %}

{% block content %}
  <div id="content-main">
    {% block object-tools %}
      <ul class="object-tools">
        {% block object-tools-items %}
          {% change_list_object_tools %}
        {% endblock %}
        <li>
          <a href="export/" class="historylink">Export</a>
        </li>
      </ul>
    {% endblock %}
    {% if cl.formset and cl.formset.errors %}
      <p class="errornote">
        {% if cl.formset.total_error_count == 1 %}{% translate "Please correct the error below." %}{% else %}{% translate "Please correct the errors below." %}{% endif %}
      </p>
      {{ cl.formset.non_form_errors }}
    {% endif %}
    <div class="module{% if cl.has_filters %} filtered{% endif %}" id="changelist">
      <div class="changelist-form-container">
        {% block search %}{% search_form cl %}{% endblock %}
        {% block date_hierarchy %}{% if cl.date_hierarchy %}{% date_hierarchy cl %}{% endif %}{% endblock %}

        <form id="changelist-form" method="post"{% if cl.formset and cl.formset.is_multipart %} enctype="multipart/form-data"{% endif %} novalidate>{% csrf_token %}
          {% if cl.formset %}
            <div>{{ cl.formset.management_form }}</div>
          {% endif %}

          {% block result_list %}
            {% if action_form and actions_on_top and cl.show_admin_actions %}{% admin_actions %}{% endif %}
            {% result_list cl %}
            {% if action_form and actions_on_bottom and cl.show_admin_actions %}{% admin_actions %}{% endif %}
          {% endblock %}
          {% block pagination %}{% pagination cl %}{% endblock %}
        </form>
      </div>
      {% block filters %}
        {% if cl.has_filters %}
          <div id="changelist-filter">
            <h2>{% translate 'Filter' %}</h2>
            {% if cl.has_active_filters %}<h3 id="changelist-filter-clear">
              <a href="{{ cl.clear_all_filters_qs }}">✖ {% translate "Clear all filters" %}</a>
            </h3>{% endif %}
            {% for spec in cl.filter_specs %}{% admin_list_filter cl spec %}{% endfor %}
          </div>
        {% endif %}
      {% endblock %}
    </div>
  </div>
{% endblock %}

50 templates/admin/index.html (new file)
@@ -0,0 +1,50 @@
{% extends "admin/base_site.html" %}
{% load i18n static %}

{% block extrastyle %}{{ block.super }}<link rel="stylesheet" type="text/css" href="{% static "admin/css/dashboard.css" %}">{% endblock %}

{% block coltype %}colMS{% endblock %}

{% block bodyclass %}{{ block.super }} dashboard{% endblock %}

{% block breadcrumbs %}{% endblock %}

{% block nav-sidebar %}{% endblock %}

{% block content %}
<div id="content-main">
  {% include "admin/app_list.html" with app_list=app_list show_changelinks=True %}
</div>
{% endblock %}

{% block sidebar %}
<div id="content-related">
    <div class="module" id="recent-actions-module">
        <h2>{% translate 'Recent actions' %}</h2>
        <h3>{% translate 'My actions' %}</h3>
        {% load log %}
        {% get_admin_log 10 as admin_log for_user user %}
        {% if not admin_log %}
            <p>{% translate 'None available' %}</p>
        {% else %}
            <ul class="actionlist">
                {% for entry in admin_log %}
                    <li class="{% if entry.is_addition %}addlink{% endif %}{% if entry.is_change %}changelink{% endif %}{% if entry.is_deletion %}deletelink{% endif %}">
                        {% if entry.is_deletion or not entry.get_admin_url %}
                            {{ entry.object_repr }}
                        {% else %}
                            <a href="{{ entry.get_admin_url }}">{{ entry.object_repr }}</a>
                        {% endif %}
                        <br>
                        {% if entry.content_type %}
                            <span class="mini quiet">{% filter capfirst %}{{ entry.content_type.name }}{% endfilter %}</span>
                        {% else %}
                            <span class="mini quiet">{% translate 'Unknown content' %}</span>
                        {% endif %}
                    </li>
                {% endfor %}
            </ul>
        {% endif %}
    </div>
</div>
{% endblock %}

39 test.py (new file)
@@ -0,0 +1,39 @@
import django
from datetime import date
from django.conf import settings
from pathlib import Path

BASE_DIR = Path(__file__).resolve()
print(BASE_DIR)

INSTALLED_APPS = [
    'collabs.apps.PollsConfig',
]

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': './db.sqlite3',
    }
}

# Settings must be configured before django.setup() and before any model import.
settings.configure(
    INSTALLED_APPS=INSTALLED_APPS,
    DATABASES=DATABASES,
)
django.setup()

from django.apps import apps
from django.apps import AppConfig

from polls.apps import PollsConfig
from polls.models import *

print(Question.objects.order_by('-pub_date')[:5])

new_question = Question(question_txt="test ?", pub_date=date.today())
new_question.save()
print(Question.objects.order_by('-pub_date')[:5])

5 venv/Lib/site-packages/cffi-1.15.0.dist-info/WHEEL (new file)
@@ -0,0 +1,5 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.37.0)
Root-Is-Purelib: false
Tag: cp310-cp310-win_amd64

13 venv/Lib/site-packages/cssselect2-0.4.1.dist-info/RECORD (new file)
@@ -0,0 +1,13 @@
cssselect2-0.4.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
cssselect2-0.4.1.dist-info/LICENSE,sha256=b9lyKaHRsPaotB4Qn0E0JtvAh0seA3RtZswzKCYBwsI,1548
cssselect2-0.4.1.dist-info/METADATA,sha256=xnwvtm3c2LNrPMwRCPAn-kMYlW38HXkYJ4bh__W8J3Q,2922
cssselect2-0.4.1.dist-info/RECORD,,
cssselect2-0.4.1.dist-info/WHEEL,sha256=CqyTrkghQBNsEzLD3HbCSEIJ_fY58-XpoU29dUzwHSk,81
cssselect2/__init__.py,sha256=-erycTkInM63GLXVMejKMM0tCOVYWoLeApJr8DuDA_0,3860
cssselect2/__pycache__/__init__.cpython-310.pyc,,
cssselect2/__pycache__/compiler.cpython-310.pyc,,
cssselect2/__pycache__/parser.cpython-310.pyc,,
cssselect2/__pycache__/tree.cpython-310.pyc,,
cssselect2/compiler.py,sha256=s3QWX3xMFHQ7kZFOGfIMooSa7LtguUX5kCCL70xTxxc,14076
cssselect2/parser.py,sha256=RhzwMYF7iIFiWrFWrw4VsLvNwlTY_u4SpatxdQ3VG8M,13204
cssselect2/tree.py,sha256=arI5KdIPoLRxzGSzHK2FnONm7ug7mpiXQqQBQScC4bo,12307

183 venv/Lib/site-packages/fontTools/cffLib/width.py (new file)
@@ -0,0 +1,183 @@
# -*- coding: utf-8 -*-

"""T2CharString glyph width optimizer.

CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
value do not need to specify their width in their charstring, saving bytes.
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
values for a font, when provided with a list of glyph widths."""

from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce


class missingdict(dict):
    def __init__(self, missing_func):
        self.missing_func = missing_func
    def __missing__(self, v):
        return self.missing_func(v)

def cumSum(f, op=add, start=0, decreasing=False):

    keys = sorted(f.keys())
    minx, maxx = keys[0], keys[-1]

    total = reduce(op, f.values(), start)

    if decreasing:
        missing = lambda x: start if x > maxx else total
        domain = range(maxx, minx - 1, -1)
    else:
        missing = lambda x: start if x < minx else total
        domain = range(minx, maxx + 1)

    out = missingdict(missing)

    v = start
    for x in domain:
        v = op(v, f[x])
        out[x] = v

    return out

def byteCost(widths, default, nominal):

    if not hasattr(widths, 'items'):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    cost = 0
    for w, freq in widths.items():
        if w == default: continue
        diff = abs(w - nominal)
        if diff <= 107:
            cost += freq
        elif diff <= 1131:
            cost += freq * 2
        else:
            cost += freq * 5
    return cost


def optimizeWidthsBruteforce(widths):
    """Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""

    d = defaultdict(int)
    for w in widths:
        d[w] += 1

    # Maximum number of bytes using default can possibly save
    maxDefaultAdvantage = 5 * max(d.values())

    minw, maxw = min(widths), max(widths)
    domain = list(range(minw, maxw + 1))

    bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)

    bestCost = len(widths) * 5 + 1
    for nominal in domain:
        if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
            continue
        for default in domain:
            cost = byteCost(widths, default, nominal)
            if cost < bestCost:
                bestCost = cost
                bestDefault = default
                bestNominal = nominal

    return bestDefault, bestNominal


def optimizeWidths(widths):
    """Given a list of glyph widths, or dictionary mapping glyph width to number of
    glyphs having that, returns a tuple of best CFF default and nominal glyph widths.

    This algorithm is linear in UPEM+numGlyphs."""

    if not hasattr(widths, 'items'):
        d = defaultdict(int)
        for w in widths:
            d[w] += 1
        widths = d

    keys = sorted(widths.keys())
    minw, maxw = keys[0], keys[-1]
    domain = list(range(minw, maxw + 1))

    # Cumulative sum/max forward/backward.
    cumFrqU = cumSum(widths, op=add)
    cumMaxU = cumSum(widths, op=max)
    cumFrqD = cumSum(widths, op=add, decreasing=True)
    cumMaxD = cumSum(widths, op=max, decreasing=True)

    # Cost per nominal choice, without default consideration.
    nomnCostU = missingdict(lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3)
    nomnCostD = missingdict(lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3)
    nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])

    # Cost-saving per nominal choice, by best default choice.
    dfltCostU = missingdict(lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5))
    dfltCostD = missingdict(lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5))
    dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))

    # Combined cost per nominal choice.
    bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])

    # Best nominal.
    nominal = min(domain, key=lambda x: bestCost[x])

    # Work back the best default.
    bestC = bestCost[nominal]
    dfltC = nomnCost[nominal] - bestCost[nominal]
    ends = []
    if dfltC == dfltCostU[nominal]:
        starts = [nominal, nominal - 108, nominal - 1131]
        for start in starts:
            while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
                start -= 1
            ends.append(start)
    else:
        starts = [nominal, nominal + 108, nominal + 1131]
        for start in starts:
            while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
                start += 1
            ends.append(start)
    default = min(ends, key=lambda default: byteCost(widths, default, nominal))

    return default, nominal


def main(args=None):
    """Calculate optimum defaultWidthX/nominalWidthX values"""

    import argparse
    parser = argparse.ArgumentParser(
        "fonttools cffLib.width",
        description=main.__doc__,
    )
    parser.add_argument('inputs', metavar='FILE', type=str, nargs='+',
                        help="Input TTF files")
    parser.add_argument('-b', '--brute-force', dest="brute", action="store_true",
                        help="Use brute-force approach (VERY slow)")

    args = parser.parse_args(args)

    for fontfile in args.inputs:
        font = TTFont(fontfile)
        hmtx = font['hmtx']
        widths = [m[0] for m in hmtx.metrics.values()]
        if args.brute:
            default, nominal = optimizeWidthsBruteforce(widths)
        else:
            default, nominal = optimizeWidths(widths)
        print("glyphs=%d default=%d nominal=%d byteCost=%d" % (len(widths), default, nominal, byteCost(widths, default, nominal)))


if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        import doctest
        sys.exit(doctest.testmod().failed)
    main()
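
A minimal usage sketch for this module, mirroring its own main(); the font path is a placeholder, and the functions also accept a width-to-frequency dict instead of a list:

    from fontTools.ttLib import TTFont
    from fontTools.cffLib.width import optimizeWidths, byteCost

    font = TTFont("font.otf")  # hypothetical CFF-flavoured font
    widths = [m[0] for m in font["hmtx"].metrics.values()]
    default, nominal = optimizeWidths(widths)
    print(default, nominal, byteCost(widths, default, nominal))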

15 venv/Lib/site-packages/fontTools/cu2qu/__init__.py (new file)
@@ -0,0 +1,15 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .cu2qu import *

6 venv/Lib/site-packages/fontTools/cu2qu/__main__.py (new file)
@@ -0,0 +1,6 @@
import sys
from .cli import main


if __name__ == "__main__":
    sys.exit(main())

76 venv/Lib/site-packages/fontTools/cu2qu/errors.py (new file)
@@ -0,0 +1,76 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class Error(Exception):
    """Base Cu2Qu exception class for all other errors."""


class ApproxNotFoundError(Error):
    def __init__(self, curve):
        message = "no approximation found: %s" % curve
        super().__init__(message)
        self.curve = curve


class UnequalZipLengthsError(Error):
    pass


class IncompatibleGlyphsError(Error):
    def __init__(self, glyphs):
        assert len(glyphs) > 1
        self.glyphs = glyphs
        names = set(repr(g.name) for g in glyphs)
        if len(names) > 1:
            self.combined_name = "{%s}" % ", ".join(sorted(names))
        else:
            self.combined_name = names.pop()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, self.combined_name)


class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
    def __str__(self):
        return "Glyphs named %s have different number of segments" % (
            self.combined_name
        )


class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
    def __init__(self, glyphs, segments):
        IncompatibleGlyphsError.__init__(self, glyphs)
        self.segments = segments

    def __str__(self):
        lines = []
        ndigits = len(str(max(self.segments)))
        for i, tags in sorted(self.segments.items()):
            lines.append(
                "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
            )
        return "Glyphs named %s have incompatible segment types:\n  %s" % (
            self.combined_name,
            "\n  ".join(lines),
        )


class IncompatibleFontsError(Error):
    def __init__(self, glyph_errors):
        self.glyph_errors = glyph_errors

    def __str__(self):
        return "fonts contains incompatible glyphs: %s" % (
            ", ".join(repr(g) for g in sorted(self.glyph_errors.keys()))
        )

1654 venv/Lib/site-packages/fontTools/feaLib/builder.py (new file)
File diff suppressed because it is too large.

22 venv/Lib/site-packages/fontTools/feaLib/error.py (new file)
@@ -0,0 +1,22 @@
class FeatureLibError(Exception):
    def __init__(self, message, location):
        Exception.__init__(self, message)
        self.location = location

    def __str__(self):
        message = Exception.__str__(self)
        if self.location:
            return f"{self.location}: {message}"
        else:
            return message


class IncludedFeaNotFound(FeatureLibError):
    def __str__(self):
        assert self.location is not None

        message = (
            "The following feature file should be included but cannot be found: "
            f"{Exception.__str__(self)}"
        )
        return f"{self.location}: {message}"

285 venv/Lib/site-packages/fontTools/feaLib/lexer.py (new file)
@@ -0,0 +1,285 @@
from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
from fontTools.feaLib.location import FeatureLibLocation
import re
import os


class Lexer(object):
    NUMBER = "NUMBER"
    HEXADECIMAL = "HEXADECIMAL"
    OCTAL = "OCTAL"
    NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
    FLOAT = "FLOAT"
    STRING = "STRING"
    NAME = "NAME"
    FILENAME = "FILENAME"
    GLYPHCLASS = "GLYPHCLASS"
    CID = "CID"
    SYMBOL = "SYMBOL"
    COMMENT = "COMMENT"
    NEWLINE = "NEWLINE"
    ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK"

    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
    CHAR_DIGIT_ = "0123456789"
    CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
    CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
    CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-"

    RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$")

    MODE_NORMAL_ = "NORMAL"
    MODE_FILENAME_ = "FILENAME"

    def __init__(self, text, filename):
        self.filename_ = filename
        self.line_ = 1
        self.pos_ = 0
        self.line_start_ = 0
        self.text_ = text
        self.text_length_ = len(text)
        self.mode_ = Lexer.MODE_NORMAL_

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        while True:
            token_type, token, location = self.next_()
            if token_type != Lexer.NEWLINE:
                return (token_type, token, location)

    def location_(self):
        column = self.pos_ - self.line_start_ + 1
        return FeatureLibLocation(self.filename_ or "<features>", self.line_, column)

    def next_(self):
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        next_char = text[start + 1] if start + 1 < limit else None

        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            self.pos_ += 2 if next_char == "\n" else 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "#":
            self.scan_until_(Lexer.CHAR_NEWLINE_)
            return (Lexer.COMMENT, text[start : self.pos_], location)

        if self.mode_ is Lexer.MODE_FILENAME_:
            if cur_char != "(":
                raise FeatureLibError("Expected '(' before file name", location)
            self.scan_until_(")")
            cur_char = text[self.pos_] if self.pos_ < limit else None
            if cur_char != ")":
                raise FeatureLibError("Expected ')' after file name", location)
            self.pos_ += 1
            self.mode_ = Lexer.MODE_NORMAL_
            return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)

        if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
        if cur_char == "@":
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            glyphclass = text[start + 1 : self.pos_]
            if len(glyphclass) < 1:
                raise FeatureLibError("Expected glyph class name", location)
            if len(glyphclass) > 63:
                raise FeatureLibError(
                    "Glyph class names must not be longer than 63 characters", location
                )
            if not Lexer.RE_GLYPHCLASS.match(glyphclass):
                raise FeatureLibError(
                    "Glyph class names must consist of letters, digits, "
                    "underscore, period or hyphen",
                    location,
                )
            return (Lexer.GLYPHCLASS, glyphclass, location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start : self.pos_]
            if token == "include":
                self.mode_ = Lexer.MODE_FILENAME_
            return (Lexer.NAME, token, location)
        if cur_char == "0" and next_char in "xX":
            self.pos_ += 2
            self.scan_over_(Lexer.CHAR_HEXDIGIT_)
            return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location)
        if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.OCTAL, int(text[start : self.pos_], 8), location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char in Lexer.CHAR_SYMBOL_:
            self.pos_ += 1
            return (Lexer.SYMBOL, cur_char, location)
        if cur_char == '"':
            self.pos_ += 1
            self.scan_until_('"')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                # strip newlines embedded within a string
                string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1])
                return (Lexer.STRING, string, location)
            else:
                raise FeatureLibError("Expected '\"' to terminate string", location)
        raise FeatureLibError("Unexpected character: %r" % cur_char, location)

    def scan_over_(self, valid):
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p

    def scan_anonymous_block(self, tag):
        location = self.location_()
        tag = tag.strip()
        self.scan_until_(Lexer.CHAR_NEWLINE_)
        self.scan_over_(Lexer.CHAR_NEWLINE_)
        regexp = r"}\s*" + tag + r"\s*;"
        split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1)
        if len(split) != 2:
            raise FeatureLibError(
                "Expected '} %s;' to terminate anonymous block" % tag, location
            )
        self.pos_ += len(split[0])
        return (Lexer.ANONYMOUS_BLOCK, split[0], location)


class IncludingLexer(object):
    """A Lexer that follows include statements.

    The OpenType feature file specification states that due to
    historical reasons, relative imports should be resolved in this
    order:

    1. If the source font is UFO format, then relative to the UFO's
       font directory
    2. relative to the top-level include file
    3. relative to the parent include file

    We only support 1 (via includeDir) and 2.
    """

    def __init__(self, featurefile, *, includeDir=None):
        """Initializes an IncludingLexer.

        Behavior:
            If includeDir is passed, it will be used to determine the top-level
            include directory to use for all encountered include statements. If it is
            not passed, ``os.path.dirname(featurefile)`` will be considered the
            include directory.
        """

        self.lexers_ = [self.make_lexer_(featurefile)]
        self.featurefilepath = self.lexers_[0].filename_
        self.includeDir = includeDir

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        while self.lexers_:
            lexer = self.lexers_[-1]
            try:
                token_type, token, location = next(lexer)
            except StopIteration:
                self.lexers_.pop()
                continue
            if token_type is Lexer.NAME and token == "include":
                fname_type, fname_token, fname_location = lexer.next()
                if fname_type is not Lexer.FILENAME:
                    raise FeatureLibError("Expected file name", fname_location)
                # semi_type, semi_token, semi_location = lexer.next()
                # if semi_type is not Lexer.SYMBOL or semi_token != ";":
                #     raise FeatureLibError("Expected ';'", semi_location)
                if os.path.isabs(fname_token):
                    path = fname_token
                else:
                    if self.includeDir is not None:
                        curpath = self.includeDir
                    elif self.featurefilepath is not None:
                        curpath = os.path.dirname(self.featurefilepath)
                    else:
                        # if the IncludingLexer was initialized from an in-memory
                        # file-like stream, it doesn't have a 'name' pointing to
                        # its filesystem path, therefore we fall back to using the
                        # current working directory to resolve relative includes
                        curpath = os.getcwd()
                    path = os.path.join(curpath, fname_token)
                if len(self.lexers_) >= 5:
                    raise FeatureLibError("Too many recursive includes", fname_location)
                try:
                    self.lexers_.append(self.make_lexer_(path))
                except FileNotFoundError as err:
                    raise IncludedFeaNotFound(fname_token, fname_location) from err
            else:
                return (token_type, token, location)
        raise StopIteration()

    @staticmethod
    def make_lexer_(file_or_path):
        if hasattr(file_or_path, "read"):
            fileobj, closing = file_or_path, False
        else:
            filename, closing = file_or_path, True
            fileobj = open(filename, "r", encoding="utf-8")
        data = fileobj.read()
        filename = getattr(fileobj, "name", None)
        if closing:
            fileobj.close()
        return Lexer(data, filename)

    def scan_anonymous_block(self, tag):
        return self.lexers_[-1].scan_anonymous_block(tag)


class NonIncludingLexer(IncludingLexer):
    """Lexer that does not follow `include` statements, emits them as-is."""

    def __next__(self):  # Python 3
        return next(self.lexers_[0])
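
A small sketch of driving the lexer directly; it yields (token_type, token, location) tuples and silently skips NEWLINE tokens:

    from fontTools.feaLib.lexer import Lexer

    fea = "feature liga { sub f i by f_i; } liga;"
    for token_type, token, location in Lexer(fea, "<features>"):
        print(token_type, repr(token), location)
    # NAME 'feature', NAME 'liga', SYMBOL '{', NAME 'sub', ..., SYMBOL ';'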

12 venv/Lib/site-packages/fontTools/feaLib/location.py (new file)
@@ -0,0 +1,12 @@
from typing import NamedTuple


class FeatureLibLocation(NamedTuple):
    """A location in a feature file"""

    file: str
    line: int
    column: int

    def __str__(self):
        return f"{self.file}:{self.line}:{self.column}"

2356 venv/Lib/site-packages/fontTools/feaLib/parser.py (new file)
File diff suppressed because it is too large.

97 venv/Lib/site-packages/fontTools/feaLib/variableScalar.py (new file)
@@ -0,0 +1,97 @@
from fontTools.varLib.models import VariationModel, normalizeValue


def Location(loc):
    return tuple(sorted(loc.items()))


class VariableScalar:
    """A scalar with different values at different points in the designspace."""

    def __init__(self, location_value={}):
        self.values = {}
        self.axes = {}
        for location, value in location_value.items():
            self.add_value(location, value)

    def __repr__(self):
        items = []
        for location, value in self.values.items():
            loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location])
            items.append("%s:%i" % (loc, value))
        return "(" + (" ".join(items)) + ")"

    @property
    def does_vary(self):
        values = list(self.values.values())
        return any(v != values[0] for v in values[1:])

    @property
    def axes_dict(self):
        if not self.axes:
            raise ValueError(
                ".axes must be defined on variable scalar before interpolating"
            )
        return {ax.axisTag: ax for ax in self.axes}

    def _normalized_location(self, location):
        location = self.fix_location(location)
        normalized_location = {}
        for axtag in location.keys():
            if axtag not in self.axes_dict:
                raise ValueError("Unknown axis %s in %s" % (axtag, location))
            axis = self.axes_dict[axtag]
            normalized_location[axtag] = normalizeValue(
                location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
            )

        return Location(normalized_location)

    def fix_location(self, location):
        location = dict(location)
        for tag, axis in self.axes_dict.items():
            if tag not in location:
                location[tag] = axis.defaultValue
        return location

    def add_value(self, location, value):
        if self.axes:
            location = self.fix_location(location)

        self.values[Location(location)] = value

    def fix_all_locations(self):
        self.values = {
            Location(self.fix_location(l)): v for l, v in self.values.items()
        }

    @property
    def default(self):
        self.fix_all_locations()
        key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
        if key not in self.values:
            raise ValueError("Default value could not be found")
        # I *guess* we could interpolate one, but I don't know how.
        return self.values[key]

    def value_at_location(self, location):
        loc = location
        if loc in self.values.keys():
            return self.values[loc]
        values = list(self.values.values())
        return self.model.interpolateFromMasters(loc, values)

    @property
    def model(self):
        locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
        return VariationModel(locations)

    def get_deltas_and_supports(self):
        values = list(self.values.values())
        return self.model.getDeltasAndSupports(values)

    def add_to_variation_store(self, store_builder):
        deltas, supports = self.get_deltas_and_supports()
        store_builder.setSupports(supports)
        index = store_builder.storeDeltas(deltas)
        return int(self.default), index
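
A quick sketch of the intended use. Master values can be added without axes; interpolation via value_at_location additionally requires .axes to be set to fvar-style axis objects:

    from fontTools.feaLib.variableScalar import VariableScalar

    vs = VariableScalar()
    vs.add_value({"wght": 400}, 10)
    vs.add_value({"wght": 700}, 14)
    print(vs.does_vary)  # True: the two masters disagree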

200 venv/Lib/site-packages/fontTools/merge/__init__.py (new file)
@@ -0,0 +1,200 @@
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools import ttLib
import fontTools.merge.base
from fontTools.merge.cmap import computeMegaGlyphOrder, computeMegaCmap, renameCFFCharStrings
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
from fontTools.merge.options import Options
import fontTools.merge.tables
from fontTools.misc.loggingTools import Timer
from functools import reduce
import sys
import logging


log = logging.getLogger("fontTools.merge")
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)


class Merger(object):
    """Font merger.

    This class merges multiple files into a single OpenType font, taking into
    account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
    cross-font metrics (e.g. ``hhea.ascent`` is set to the maximum value across
    all the fonts).

    If multiple glyphs map to the same Unicode value, and the glyphs are considered
    sufficiently different (that is, they differ in any of paths, widths, or
    height), then subsequent glyphs are renamed and a lookup in the ``locl``
    feature will be created to disambiguate them. For example, if the arguments
    are an Arabic font and a Latin font and both contain a set of parentheses,
    the Latin glyphs will be renamed to ``parenleft#1`` and ``parenright#1``,
    and a lookup will be inserted into the to ``locl`` feature (creating it if
    necessary) under the ``latn`` script to substitute ``parenleft`` with
    ``parenleft#1`` etc.

    Restrictions:

    - All fonts must have the same units per em.
    - If duplicate glyph disambiguation takes place as described above then the
      fonts must have a ``GSUB`` table.

    Attributes:
        options: Currently unused.
    """

    def __init__(self, options=None):

        if not options:
            options = Options()

        self.options = options

    def _openFonts(self, fontfiles):
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        for font, fontfile in zip(fonts, fontfiles):
            font._merger__fontfile = fontfile
            font._merger__name = font['name'].getDebugName(4)
        return fonts

    def merge(self, fontfiles):
        """Merges fonts together.

        Args:
            fontfiles: A list of file names to be merged

        Returns:
            A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
            this to write it out to an OTF file.
        """
        #
        # Settle on a mega glyph order.
        #
        fonts = self._openFonts(fontfiles)
        glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
        computeMegaGlyphOrder(self, glyphOrders)

        # Take first input file sfntVersion
        sfntVersion = fonts[0].sfntVersion

        # Reload fonts and set new glyph names on them.
        fonts = self._openFonts(fontfiles)
        for font, glyphOrder in zip(fonts, glyphOrders):
            font.setGlyphOrder(glyphOrder)
            if 'CFF ' in font:
                renameCFFCharStrings(self, glyphOrder, font['CFF '])

        cmaps = [font['cmap'] for font in fonts]
        self.duplicateGlyphsPerFont = [{} for _ in fonts]
        computeMegaCmap(self, cmaps)

        mega = ttLib.TTFont(sfntVersion=sfntVersion)
        mega.setGlyphOrder(self.glyphOrder)

        for font in fonts:
            self._preMerge(font)

        self.fonts = fonts

        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove('GlyphOrder')

        for tag in allTags:
            if tag in self.options.drop_tables:
                continue

            with timer("merge '%s'" % tag):
                tables = [font.get(tag, NotImplemented) for font in fonts]

                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use:  table = mergeObjects(tables)

                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)

        del self.duplicateGlyphsPerFont
        del self.fonts

        self._postMerge(mega)

        return mega

    def mergeObjects(self, returnTable, logic, tables):
        # Right now we don't use self at all.  Will use in the future
        # for options and logging.

        allKeys = set.union(set(), *(vars(table).keys() for table in tables if table is not NotImplemented))
        for key in allKeys:
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    mergeLogic = logic['*']
                except KeyError:
                    raise Exception("Don't know how to merge key %s of class %s" %
                                    (key, returnTable.__class__.__name__))
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)

        return returnTable

    def _preMerge(self, font):
        layoutPreMerge(font)

    def _postMerge(self, font):
        layoutPostMerge(font)


__all__ = [
    'Options',
    'Merger',
    'main'
]


@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    """Merge multiple fonts into one"""
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    options = Options()
    args = options.parse_opts(args, ignore_unknown=['output-file'])
    outfile = 'merged.ttf'
    fontfiles = []
    for g in args:
        if g.startswith('--output-file='):
            outfile = g[14:]
            continue
        fontfiles.append(g)

    if len(args) < 1:
        print("usage: pyftmerge font...", file=sys.stderr)
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    merger = Merger(options=options)
    font = merger.merge(fontfiles)
    with timer("compile and save font"):
        font.save(outfile)


if __name__ == "__main__":
    sys.exit(main())
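
Typical use of this module, per its own main(); the input file names here are placeholders:

    from fontTools.merge import Merger, Options

    merger = Merger(options=Options())
    merged = merger.merge(["Regular-A.ttf", "Regular-B.ttf"])  # hypothetical inputs
    merged.save("merged.ttf")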

6 venv/Lib/site-packages/fontTools/merge/__main__.py (new file)
@@ -0,0 +1,6 @@
import sys
from fontTools.merge import main


if __name__ == '__main__':
    sys.exit(main())
466
venv/Lib/site-packages/fontTools/merge/layout.py
Normal file
466
venv/Lib/site-packages/fontTools/merge/layout.py
Normal file
@@ -0,0 +1,466 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools import ttLib
|
||||
from fontTools.ttLib.tables.DefaultTable import DefaultTable
|
||||
from fontTools.ttLib.tables import otTables
|
||||
from fontTools.merge.base import add_method, mergeObjects
|
||||
from fontTools.merge.util import *
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.merge")
|
||||
|
||||
|
||||
def mergeLookupLists(lst):
|
||||
# TODO Do smarter merge.
|
||||
return sumLists(lst)
|
||||
|
||||
def mergeFeatures(lst):
|
||||
assert lst
|
||||
self = otTables.Feature()
|
||||
self.FeatureParams = None
|
||||
self.LookupListIndex = mergeLookupLists([l.LookupListIndex for l in lst if l.LookupListIndex])
|
||||
self.LookupCount = len(self.LookupListIndex)
|
||||
return self
|
||||
|
||||
def mergeFeatureLists(lst):
|
||||
d = {}
|
||||
for l in lst:
|
||||
for f in l:
|
||||
tag = f.FeatureTag
|
||||
if tag not in d:
|
||||
d[tag] = []
|
||||
d[tag].append(f.Feature)
|
||||
ret = []
|
||||
for tag in sorted(d.keys()):
|
||||
rec = otTables.FeatureRecord()
|
||||
rec.FeatureTag = tag
|
||||
rec.Feature = mergeFeatures(d[tag])
|
||||
ret.append(rec)
|
||||
return ret
|
||||
|
||||
def mergeLangSyses(lst):
|
||||
assert lst
|
||||
|
||||
# TODO Support merging ReqFeatureIndex
|
||||
assert all(l.ReqFeatureIndex == 0xFFFF for l in lst)
|
||||
|
||||
self = otTables.LangSys()
|
||||
self.LookupOrder = None
|
||||
self.ReqFeatureIndex = 0xFFFF
|
||||
self.FeatureIndex = mergeFeatureLists([l.FeatureIndex for l in lst if l.FeatureIndex])
|
||||
self.FeatureCount = len(self.FeatureIndex)
|
||||
return self
|
||||
|
||||
def mergeScripts(lst):
|
||||
assert lst
|
||||
|
||||
if len(lst) == 1:
|
||||
return lst[0]
|
||||
langSyses = {}
|
||||
for sr in lst:
|
||||
for lsr in sr.LangSysRecord:
|
||||
if lsr.LangSysTag not in langSyses:
|
||||
langSyses[lsr.LangSysTag] = []
|
||||
langSyses[lsr.LangSysTag].append(lsr.LangSys)
|
||||
lsrecords = []
|
||||
for tag, langSys_list in sorted(langSyses.items()):
|
||||
lsr = otTables.LangSysRecord()
|
||||
lsr.LangSys = mergeLangSyses(langSys_list)
|
||||
lsr.LangSysTag = tag
|
||||
lsrecords.append(lsr)
|
||||
|
||||
self = otTables.Script()
|
||||
self.LangSysRecord = lsrecords
|
||||
self.LangSysCount = len(lsrecords)
|
||||
dfltLangSyses = [s.DefaultLangSys for s in lst if s.DefaultLangSys]
|
||||
if dfltLangSyses:
|
||||
self.DefaultLangSys = mergeLangSyses(dfltLangSyses)
|
||||
else:
|
||||
self.DefaultLangSys = None
|
||||
return self
|
||||
|
||||
def mergeScriptRecords(lst):
|
||||
d = {}
|
||||
for l in lst:
|
||||
for s in l:
|
||||
tag = s.ScriptTag
|
||||
if tag not in d:
|
||||
d[tag] = []
|
||||
d[tag].append(s.Script)
|
||||
ret = []
|
||||
for tag in sorted(d.keys()):
|
||||
rec = otTables.ScriptRecord()
|
||||
rec.ScriptTag = tag
|
||||
rec.Script = mergeScripts(d[tag])
|
||||
ret.append(rec)
|
||||
return ret

otTables.ScriptList.mergeMap = {
    'ScriptCount': lambda lst: None, # TODO
    'ScriptRecord': mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    'BaseScriptCount': lambda lst: None, # TODO
    # TODO: Merge duplicate entries
    'BaseScriptRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.BaseScriptTag),
}

otTables.FeatureList.mergeMap = {
    'FeatureCount': sum,
    'FeatureRecord': lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    'LookupCount': sum,
    'Lookup': sumLists,
}

otTables.Coverage.mergeMap = {
    'Format': min,
    'glyphs': sumLists,
}

otTables.ClassDef.mergeMap = {
    'Format': min,
    'classDefs': sumDicts,
}

otTables.LigCaretList.mergeMap = {
    'Coverage': mergeObjects,
    'LigGlyphCount': sum,
    'LigGlyph': sumLists,
}

otTables.AttachList.mergeMap = {
    'Coverage': mergeObjects,
    'GlyphCount': sum,
    'AttachPoint': sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    'MarkSetTableFormat': equal,
    'MarkSetCount': sum,
    'Coverage': sumLists,
}

otTables.Axis.mergeMap = {
    '*': mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    'BaseTagCount': sum,
    'BaselineTag': sumLists,
}

otTables.GDEF.mergeMap = \
otTables.GSUB.mergeMap = \
otTables.GPOS.mergeMap = \
otTables.BASE.mergeMap = \
otTables.JSTF.mergeMap = \
otTables.MATH.mergeMap = \
{
    '*': mergeObjects,
    'Version': max,
}

ttLib.getTableClass('GDEF').mergeMap = \
ttLib.getTableClass('GSUB').mergeMap = \
ttLib.getTableClass('GPOS').mergeMap = \
ttLib.getTableClass('BASE').mergeMap = \
ttLib.getTableClass('JSTF').mergeMap = \
ttLib.getTableClass('MATH').mergeMap = \
{
    'tableTag': onlyExisting(equal), # XXX clean me up
    'table': mergeObjects,
}
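# The mergeMap dicts assigned above drive attribute-by-attribute merging:
# each key names an attribute, each value is a reducer applied to the list
# of per-font values, and '*' is the fallback for attributes not named
# explicitly. The real driver is mergeObjects in fontTools.merge.base; the
# following simplified sketch (merge_by_map is a hypothetical name, and it
# assumes the map covers every attribute) only illustrates the convention.
def merge_by_map(merge_map, objects):
    default = merge_map.get('*')
    attrs = set().union(*(vars(o) for o in objects))
    return {a: merge_map.get(a, default)([getattr(o, a) for o in objects])
            for a in attrs}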

@add_method(ttLib.getTableClass('GSUB'))
def merge(self, m, tables):

    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i,(table,dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups: continue
        if table is None or table is NotImplemented:
            log.warning("Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s", m.fonts[i]._merger__name, dups)
            continue

        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == 'DFLT': continue # XXX
            for langsys in [script.Script.DefaultLangSys] + [l.LangSys for l in script.Script.LangSysRecord]:
                if langsys is None: continue # XXX Create!
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == 'locl']
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = 'locl'
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                        table.table.FeatureList.FeatureRecord.append(synthFeature)
                        table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature
                    langsys.FeatureIndex.append(feature)
                    langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)

                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1

                if feature.Feature.LookupListIndex[:1] != [synthLookup]:
                    feature.Feature.LookupListIndex[:0] = [synthLookup]
                    feature.Feature.LookupCount += 1

    DefaultTable.merge(self, m, tables)
    return self
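# The synthesized lookup above is an ordinary GSUB type-1 (single
# substitution) lookup built from the duplicate-glyph map. A standalone
# sketch of the same construction, using the real otTables API with a
# hypothetical duplicate map:
from fontTools.ttLib.tables import otTables

dups = {'a.dup1': 'a'}  # hypothetical: merged font kept 'a', renamed clone 'a.dup1'

subtable = otTables.SingleSubst()
subtable.mapping = dups
lookup = otTables.Lookup()
lookup.LookupFlag = 0
lookup.LookupType = 1   # GSUB LookupType 1: single substitution
lookup.SubTableCount = 1
lookup.SubTable = [subtable]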

@add_method(otTables.SingleSubst,
            otTables.MultipleSubst,
            otTables.AlternateSubst,
            otTables.LigatureSubst,
            otTables.ReverseChainSingleSubst,
            otTables.SinglePos,
            otTables.PairPos,
            otTables.CursivePos,
            otTables.MarkBasePos,
            otTables.MarkLigPos,
            otTables.MarkMarkPos)
def mapLookups(self, lookupMap):
    pass

# Copied and trimmed down from subset.py
@add_method(otTables.ContextSubst,
            otTables.ChainContextSubst,
            otTables.ContextPos,
            otTables.ChainContextPos)
def __merge_classify_context(self):

    class ContextHelper(object):
        def __init__(self, klass, Format):
            if klass.__name__.endswith('Subst'):
                Typ = 'Sub'
                Type = 'Subst'
            else:
                Typ = 'Pos'
                Type = 'Pos'
            if klass.__name__.startswith('Chain'):
                Chain = 'Chain'
            else:
                Chain = ''
            ChainTyp = Chain+Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type+'LookupRecord'

            if Format == 1:
                self.Rule = ChainTyp+'Rule'
                self.RuleSet = ChainTyp+'RuleSet'
            elif Format == 2:
                self.Rule = ChainTyp+'ClassRule'
                self.RuleSet = ChainTyp+'ClassSet'

    if self.Format not in [1, 2, 3]:
        return None # Don't shoot the messenger; let it go
    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]
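# For concreteness, the string arithmetic above resolves, for
# otTables.ChainContextSubst with Format == 1, to (derived from the
# concatenations in ContextHelper, stated here as a sketch):
#   Typ='Sub', Type='Subst', Chain='Chain', ChainTyp='ChainSub'
#   Rule='ChainSubRule', RuleSet='ChainSubRuleSet'
#   LookupRecord='SubstLookupRecord'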


@add_method(otTables.ContextSubst,
            otTables.ChainContextSubst,
            otTables.ContextPos,
            otTables.ChainContextPos)
def mapLookups(self, lookupMap):
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        for rs in getattr(self, c.RuleSet):
            if not rs: continue
            for r in getattr(rs, c.Rule):
                if not r: continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll: continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        for ll in getattr(self, c.LookupRecord):
            if not ll: continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format

@add_method(otTables.ExtensionSubst,
            otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    if self.Format == 1:
        self.ExtSubTable.mapLookups(lookupMap)
    else:
        assert 0, "unknown format: %s" % self.Format

@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    for st in self.SubTable:
        if not st: continue
        st.mapLookups(lookupMap)

@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    for l in self.Lookup:
        if not l: continue
        l.mapLookups(lookupMap)

@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
    if self.LookupFlag & 0x0010:
        self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]

@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
    for l in self.Lookup:
        if not l: continue
        l.mapMarkFilteringSets(markFilteringSetMap)

@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]

@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    for f in self.FeatureRecord:
        if not f or not f.Feature: continue
        f.Feature.mapLookups(lookupMap)

@add_method(otTables.DefaultLangSys,
            otTables.LangSys)
def mapFeatures(self, featureMap):
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]

@add_method(otTables.Script)
def mapFeatures(self, featureMap):
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for l in self.LangSysRecord:
        if not l or not l.LangSys: continue
        l.LangSys.mapFeatures(featureMap)

@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    for s in self.ScriptRecord:
        if not s or not s.Script: continue
        s.Script.mapFeatures(featureMap)

def layoutPreMerge(font):
    # Map indices to references

    GDEF = font.get('GDEF')
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')

    for t in [GSUB, GPOS]:
        if not t: continue

        if t.table.LookupList:
            lookupMap = {i:v for i,v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)

            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = {i:v for i,v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)}
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

        if t.table.FeatureList and t.table.ScriptList:
            featureMap = {i:v for i,v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)

    # TODO FeatureParams nameIDs

def layoutPostMerge(font):
    # Map references back to indices

    GDEF = font.get('GDEF')
    GSUB = font.get('GSUB')
    GPOS = font.get('GPOS')

    for t in [GSUB, GPOS]:
        if not t: continue

        if t.table.FeatureList and t.table.ScriptList:

            # Collect unregistered (new) features.
            featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            # Record used features.
            featureMap = AttendanceRecordingIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s

            # Remove unused features
            t.table.FeatureList.FeatureRecord = [f for i,f in enumerate(t.table.FeatureList.FeatureRecord) if i in usedIndices]

            # Map back to indices.
            featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

        if t.table.LookupList:

            # Collect unregistered (new) lookups.
            lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            # Record used lookups.
            lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s

            # Remove unused lookups
            t.table.LookupList.Lookup = [l for i,l in enumerate(t.table.LookupList.Lookup) if i in usedIndices]

            # Map back to indices.
            lookupMap = NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = NonhashableDict(GDEF.table.MarkGlyphSetsDef.Coverage)
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

    # TODO FeatureParams nameIDs
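# layoutPreMerge and layoutPostMerge bracket the table merge: before merging,
# numeric lookup/feature indices are replaced by object references so records
# survive list concatenation; afterwards the references are resolved back
# into fresh indices and unused features/lookups are dropped. A sketch of the
# round trip on a single font (the input path is hypothetical):
from fontTools.ttLib import TTFont

font = TTFont('input.ttf')  # hypothetical input
layoutPreMerge(font)        # lookup/feature indices -> object references
# ... GSUB/GPOS/GDEF tables from several fonts are merged here ...
layoutPostMerge(font)       # references -> fresh indices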
85
venv/Lib/site-packages/fontTools/merge/options.py
Normal file
@@ -0,0 +1,85 @@
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader


class Options(object):

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):

        self.verbose = False
        self.timing = False
        self.drop_tables = []

        self.set(**kwargs)

    def set(self, **kwargs):
        for k,v in kwargs.items():
            if not hasattr(self, k):
                raise self.UnknownOptionError("Unknown option '%s'" % k)
            setattr(self, k, v)

    def parse_opts(self, argv, ignore_unknown=[]):
        ret = []
        opts = {}
        for a in argv:
            orig_a = a
            if not a.startswith('--'):
                ret.append(a)
                continue
            a = a[2:]
            i = a.find('=')
            op = '='
            if i == -1:
                if a.startswith("no-"):
                    k = a[3:]
                    v = False
                else:
                    k = a
                    v = True
            else:
                k = a[:i]
                if k[-1] in "-+":
                    op = k[-1]+'='  # Op is '-=' or '+=' now.
                    k = k[:-1]
                v = a[i+1:]
            ok = k
            k = k.replace('-', '_')
            if not hasattr(self, k):
                if ignore_unknown is True or ok in ignore_unknown:
                    ret.append(orig_a)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % a)

            ov = getattr(self, k)
            if isinstance(ov, bool):
                v = bool(v)
            elif isinstance(ov, int):
                v = int(v)
            elif isinstance(ov, list):
                vv = v.split(',')
                if vv == ['']:
                    vv = []
                vv = [int(x, 0) if len(x) and x[0] in "0123456789" else x for x in vv]
                if op == '=':
                    v = vv
                elif op == '+=':
                    v = ov
                    v.extend(vv)
                elif op == '-=':
                    v = ov
                    for x in vv:
                        if x in v:
                            v.remove(x)
                else:
                    assert 0

            opts[k] = v
        self.set(**opts)

        return ret
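# A usage sketch of the option parser above: a bare '--flag' sets a boolean,
# the '+=' spelling extends a list-valued option, and non-option arguments
# come back as leftovers.
opts = Options()
rest = opts.parse_opts(['--verbose', '--drop-tables+=DSIG,EBDT', 'in.ttf'])
assert rest == ['in.ttf']
assert opts.verbose is True
assert opts.drop_tables == ['DSIG', 'EBDT']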
311
venv/Lib/site-packages/fontTools/merge/tables.py
Normal file
@@ -0,0 +1,311 @@
# Copyright 2013 Google, Inc. All Rights Reserved.
#
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader

from fontTools import ttLib, cffLib
from fontTools.ttLib.tables.DefaultTable import DefaultTable
from fontTools.merge.base import add_method, mergeObjects
from fontTools.merge.cmap import computeMegaCmap
from fontTools.merge.util import *
import logging


log = logging.getLogger("fontTools.merge")


ttLib.getTableClass('maxp').mergeMap = {
    '*': max,
    'tableTag': equal,
    'tableVersion': equal,
    'numGlyphs': sum,
    'maxStorage': first,
    'maxFunctionDefs': first,
    'maxInstructionDefs': first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

headFlagsMergeBitMap = {
    'size': 16,
    '*': bitwise_or,
    1: bitwise_and, # Baseline at y = 0
    2: bitwise_and, # lsb at x = 0
    3: bitwise_and, # Force ppem to integer values. FIXME?
    5: bitwise_and, # Font is vertical
    6: lambda bit: 0, # Always set to zero
    11: bitwise_and, # Font data is 'lossless'
    13: bitwise_and, # Optimized for ClearType
    14: bitwise_and, # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0, # Always set to zero
}

ttLib.getTableClass('head').mergeMap = {
    'tableTag': equal,
    'tableVersion': max,
    'fontRevision': max,
    'checkSumAdjustment': lambda lst: 0, # We need *something* here
    'magicNumber': equal,
    'flags': mergeBits(headFlagsMergeBitMap),
    'unitsPerEm': equal,
    'created': current_time,
    'modified': current_time,
    'xMin': min,
    'yMin': min,
    'xMax': max,
    'yMax': max,
    'macStyle': first,
    'lowestRecPPEM': max,
    'fontDirectionHint': lambda lst: 2,
    'indexToLocFormat': first,
    'glyphDataFormat': equal,
}

ttLib.getTableClass('hhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceWidthMax': max,
    'minLeftSideBearing': min,
    'minRightSideBearing': min,
    'xMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfHMetrics': recalculate,
}

ttLib.getTableClass('vhea').mergeMap = {
    '*': equal,
    'tableTag': equal,
    'tableVersion': max,
    'ascent': max,
    'descent': min,
    'lineGap': max,
    'advanceHeightMax': max,
    'minTopSideBearing': min,
    'minBottomSideBearing': min,
    'yMaxExtent': max,
    'caretSlopeRise': first,
    'caretSlopeRun': first,
    'caretOffset': first,
    'numberOfVMetrics': recalculate,
}

os2FsTypeMergeBitMap = {
    'size': 16,
    '*': lambda bit: 0,
    1: bitwise_or, # no embedding permitted
    2: bitwise_and, # allow previewing and printing documents
    3: bitwise_and, # allow editing documents
    8: bitwise_or, # no subsetting permitted
    9: bitwise_or, # no embedding of outlines permitted
}

def mergeOs2FsType(lst):
    lst = list(lst)
    if all(item == 0 for item in lst):
        return 0

    # Compute least restrictive logic for each fsType value
    for i in range(len(lst)):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if lst[i] & 0x000C:
            lst[i] &= ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif lst[i] & 0x0008:
            lst[i] |= 0x0004
        # set bits 2 and 3 if everything is allowed
        elif lst[i] == 0:
            lst[i] = 0x000C

    fsType = mergeBits(os2FsTypeMergeBitMap)(lst)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if fsType & 0x0002:
        fsType &= ~0x000C
    return fsType
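# Worked example (a sketch): merging an installable font (fsType 0x0000)
# with a preview-and-print font (0x0004) first rewrites 0x0000 as 0x000C
# (bits 2 and 3 set), then ANDs the permission bits, so the merged font
# keeps only the most restrictive common permission.
assert mergeOs2FsType([0x0000, 0x0004]) == 0x0004  # preview & print only
assert mergeOs2FsType([0x0000, 0x0000]) == 0x0000  # all-installable stays 0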


ttLib.getTableClass('OS/2').mergeMap = {
    '*': first,
    'tableTag': equal,
    'version': max,
    'xAvgCharWidth': avg_int, # Apparently fontTools doesn't recalc this
    'fsType': mergeOs2FsType, # Will be overwritten
    'panose': first, # FIXME: should really be the first Latin font
    'ulUnicodeRange1': bitwise_or,
    'ulUnicodeRange2': bitwise_or,
    'ulUnicodeRange3': bitwise_or,
    'ulUnicodeRange4': bitwise_or,
    'fsFirstCharIndex': min,
    'fsLastCharIndex': max,
    'sTypoAscender': max,
    'sTypoDescender': min,
    'sTypoLineGap': max,
    'usWinAscent': max,
    'usWinDescent': max,
    # Version 1
    'ulCodePageRange1': onlyExisting(bitwise_or),
    'ulCodePageRange2': onlyExisting(bitwise_or),
    # Version 2, 3, 4
    'sxHeight': onlyExisting(max),
    'sCapHeight': onlyExisting(max),
    'usDefaultChar': onlyExisting(first),
    'usBreakChar': onlyExisting(first),
    'usMaxContext': onlyExisting(max),
    # version 5
    'usLowerOpticalPointSize': onlyExisting(min),
    'usUpperOpticalPointSize': onlyExisting(max),
}

@add_method(ttLib.getTableClass('OS/2'))
def merge(self, m, tables):
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self

ttLib.getTableClass('post').mergeMap = {
    '*': first,
    'tableTag': equal,
    'formatType': max,
    'isFixedPitch': min,
    'minMemType42': max,
    'maxMemType42': lambda lst: 0,
    'minMemType1': max,
    'maxMemType1': lambda lst: 0,
    'mapping': onlyExisting(sumDicts),
    'extraNames': lambda lst: [],
}

ttLib.getTableClass('vmtx').mergeMap = ttLib.getTableClass('hmtx').mergeMap = {
    'tableTag': equal,
    'metrics': sumDicts,
}

ttLib.getTableClass('name').mergeMap = {
    'tableTag': equal,
    'names': first, # FIXME? Does mixing name records make sense?
}

ttLib.getTableClass('loca').mergeMap = {
    '*': recalculate,
    'tableTag': equal,
}

ttLib.getTableClass('glyf').mergeMap = {
    'tableTag': equal,
    'glyphs': sumDicts,
    'glyphOrder': sumLists,
}

@add_method(ttLib.getTableClass('glyf'))
def merge(self, m, tables):
    for i,table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)

ttLib.getTableClass('prep').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('fpgm').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('cvt ').mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass('gasp').mergeMap = lambda self, lst: first(lst) # FIXME? Appears irreconcilable

@add_method(ttLib.getTableClass('CFF '))
def merge(self, m, tables):

    if any(hasattr(table, "FDSelect") for table in tables):
        raise NotImplementedError(
            "Merging CID-keyed CFF tables is not supported yet"
        )

    for table in tables:
        table.cff.desubroutinize()

    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    storedNamesStrings = []
    glyphOrderStrings = []
    glyphOrder = set(newfont.getGlyphOrder())

    for name in newfont.strings.strings:
        if name not in glyphOrder:
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)

    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    log.debug("FONT 0 CharStrings: %d.", len(newcs))

    for i, table in enumerate(tables[1:], start=1):
        font = table.cff[0]
        font.Private = private
        fontGlyphOrder = set(font.getGlyphOrder())
        for name in font.strings.strings:
            if name in fontGlyphOrder:
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        gs = table.cff.GlobalSubrs
        log.debug("Font %d CharStrings: %d.", i, len(cs))
        chrset.extend(font.charset)
        if newcs.charStringsAreIndexed:
            for i, name in enumerate(cs.charStrings, start=len(newcs)):
                newcs.charStrings[name] = i
                newcs.charStringsIndex.items.append(None)
        for name in cs.charStrings:
            newcs[name] = cs[name]

    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings

    return newcff

@add_method(ttLib.getTableClass('cmap'))
def merge(self, m, tables):

    # TODO Handle format=14.
    if not hasattr(m, 'cmap'):
        computeMegaCmap(m, tables)
    cmap = m.cmap

    cmapBmpOnly = {uni: gid for uni,gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule('cmap')
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
        cmapTable.platformID = 3
        cmapTable.platEncID = 10
        cmapTable.language = 0
        cmapTable.cmap = cmap
        self.tables.append(cmapTable)
    # always create format-4
    cmapTable = module.cmap_classes[4](4)
    cmapTable.platformID = 3
    cmapTable.platEncID = 1
    cmapTable.language = 0
    cmapTable.cmap = cmapBmpOnly
    # ordered by platform then encoding
    self.tables.insert(0, cmapTable)
    self.tableVersion = 0
    self.numSubTables = len(self.tables)
    return self
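# Net effect (a sketch of the resulting subtable layout): the merged cmap
# always carries a Windows format-4 subtable (platformID 3, platEncID 1)
# with the BMP-only mappings first, and, only when some merged codepoint
# lies beyond U+FFFF, an additional format-12 subtable (platformID 3,
# platEncID 10) holding the full map after it.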
65
venv/Lib/site-packages/fontTools/merge/unicode.py
Normal file
@@ -0,0 +1,65 @@
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.

def is_Default_Ignorable(u):
    # http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
    #
    # TODO Move me to unicodedata module and autogenerate.
    #
    # Unicode 14.0:
    # $ grep '; Default_Ignorable_Code_Point ' DerivedCoreProperties.txt | sed 's/;.*#/#/'
    # 00AD # Cf SOFT HYPHEN
    # 034F # Mn COMBINING GRAPHEME JOINER
    # 061C # Cf ARABIC LETTER MARK
    # 115F..1160 # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
    # 17B4..17B5 # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
    # 180B..180D # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
    # 180E # Cf MONGOLIAN VOWEL SEPARATOR
    # 180F # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
    # 200B..200F # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
    # 202A..202E # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
    # 2060..2064 # Cf [5] WORD JOINER..INVISIBLE PLUS
    # 2065 # Cn <reserved-2065>
    # 2066..206F # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
    # 3164 # Lo HANGUL FILLER
    # FE00..FE0F # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
    # FEFF # Cf ZERO WIDTH NO-BREAK SPACE
    # FFA0 # Lo HALFWIDTH HANGUL FILLER
    # FFF0..FFF8 # Cn [9] <reserved-FFF0>..<reserved-FFF8>
    # 1BCA0..1BCA3 # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
    # 1D173..1D17A # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
    # E0000 # Cn <reserved-E0000>
    # E0001 # Cf LANGUAGE TAG
    # E0002..E001F # Cn [30] <reserved-E0002>..<reserved-E001F>
    # E0020..E007F # Cf [96] TAG SPACE..CANCEL TAG
    # E0080..E00FF # Cn [128] <reserved-E0080>..<reserved-E00FF>
    # E0100..E01EF # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
    # E01F0..E0FFF # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
    return (
        u == 0x00AD or # Cf SOFT HYPHEN
        u == 0x034F or # Mn COMBINING GRAPHEME JOINER
        u == 0x061C or # Cf ARABIC LETTER MARK
        0x115F <= u <= 0x1160 or # Lo [2] HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
        0x17B4 <= u <= 0x17B5 or # Mn [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
        0x180B <= u <= 0x180D or # Mn [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
        u == 0x180E or # Cf MONGOLIAN VOWEL SEPARATOR
        u == 0x180F or # Mn MONGOLIAN FREE VARIATION SELECTOR FOUR
        0x200B <= u <= 0x200F or # Cf [5] ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
        0x202A <= u <= 0x202E or # Cf [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
        0x2060 <= u <= 0x2064 or # Cf [5] WORD JOINER..INVISIBLE PLUS
        u == 0x2065 or # Cn <reserved-2065>
        0x2066 <= u <= 0x206F or # Cf [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
        u == 0x3164 or # Lo HANGUL FILLER
        0xFE00 <= u <= 0xFE0F or # Mn [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
        u == 0xFEFF or # Cf ZERO WIDTH NO-BREAK SPACE
        u == 0xFFA0 or # Lo HALFWIDTH HANGUL FILLER
        0xFFF0 <= u <= 0xFFF8 or # Cn [9] <reserved-FFF0>..<reserved-FFF8>
        0x1BCA0 <= u <= 0x1BCA3 or # Cf [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
        0x1D173 <= u <= 0x1D17A or # Cf [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
        u == 0xE0000 or # Cn <reserved-E0000>
        u == 0xE0001 or # Cf LANGUAGE TAG
        0xE0002 <= u <= 0xE001F or # Cn [30] <reserved-E0002>..<reserved-E001F>
        0xE0020 <= u <= 0xE007F or # Cf [96] TAG SPACE..CANCEL TAG
        0xE0080 <= u <= 0xE00FF or # Cn [128] <reserved-E0080>..<reserved-E00FF>
        0xE0100 <= u <= 0xE01EF or # Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
        0xE01F0 <= u <= 0xE0FFF or # Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
        False)
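# Quick sanity checks for the predicate above (a sketch):
assert is_Default_Ignorable(0x00AD)       # SOFT HYPHEN
assert is_Default_Ignorable(0xFE0F)       # VARIATION SELECTOR-16
assert not is_Default_Ignorable(0x0041)   # LATIN CAPITAL LETTER A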
1
venv/Lib/site-packages/fontTools/misc/__init__.py
Normal file
@@ -0,0 +1 @@
"""Empty __init__.py file to signal Python this directory is a package."""
46
venv/Lib/site-packages/fontTools/misc/cliTools.py
Normal file
@@ -0,0 +1,46 @@
"""Collection of utilities for command-line interfaces and console scripts."""
import os
import re


numberAddedRE = re.compile(r"#\d+$")


def makeOutputFileName(input, outputDir=None, extension=None, overWrite=False):
    """Generates a suitable file name for writing output.

    Often tools will want to take a file, do some kind of transformation to it,
    and write it out again. This function determines an appropriate name for the
    output file, through one or more of the following steps:

    - changing the output directory
    - replacing the file extension
    - suffixing the filename with a number (``#1``, ``#2``, etc.) to avoid
      overwriting an existing file.

    Args:
        input: Name of input file.
        outputDir: Optionally, a new directory to write the file into.
        extension: Optionally, a replacement for the current file extension.
        overWrite: Overwriting an existing file is permitted if true; if false
            and the proposed filename exists, a new name will be generated by
            adding an appropriate number suffix.

    Returns:
        str: Suitable output filename
    """
    dirName, fileName = os.path.split(input)
    fileName, ext = os.path.splitext(fileName)
    if outputDir:
        dirName = outputDir
    fileName = numberAddedRE.split(fileName)[0]
    if extension is None:
        extension = os.path.splitext(input)[1]
    output = os.path.join(dirName, fileName + extension)
    n = 1
    if not overWrite:
        while os.path.exists(output):
            output = os.path.join(
                dirName, fileName + "#" + repr(n) + extension)
            n += 1
    return output
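# Usage sketch, assuming 'font.woff2' does not already exist in the working
# directory (otherwise a '#1' suffix would be appended):
assert makeOutputFileName("font.ttf", extension=".woff2") == "font.woff2"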
25
venv/Lib/site-packages/fontTools/misc/cython.py
Normal file
@@ -0,0 +1,25 @@
""" Exports a no-op 'cython' namespace similar to
https://github.com/cython/cython/blob/master/Cython/Shadow.py

This allows @cython-decorated functions to be optionally compiled
(when cython is available at build time), or to run as pure Python,
without a runtime dependency on the cython module.

We only define the symbols that we use. E.g. see fontTools.cu2qu
"""

from types import SimpleNamespace

def _empty_decorator(x):
    return x

compiled = False

for name in ("double", "complex", "int"):
    globals()[name] = None

for name in ("cfunc", "inline"):
    globals()[name] = _empty_decorator

locals = lambda **_: _empty_decorator
returns = lambda _: _empty_decorator
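# A sketch of how a consumer module uses this shadow (the pattern mirrors
# fontTools.cu2qu): prefer the real Cython when importable, else fall back
# to these no-op symbols so the decorators become identity functions.
try:
    import cython
    COMPILED = cython.compiled
except (AttributeError, ImportError):
    from fontTools.misc import cython  # the no-op shadow module above
    COMPILED = False

@cython.cfunc
@cython.inline
def _midpoint(a, b):  # hypothetical helper, only to show the decorators
    return (a + b) / 2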
66
venv/Lib/site-packages/fontTools/misc/dictTools.py
Normal file
@@ -0,0 +1,66 @@
"""Misc dict tools."""


__all__ = ['hashdict']

# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into
    other dicts.

    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts

    """
    def __key(self):
        return tuple(sorted(self.items()))
    def __repr__(self):
        return "{0}({1})".format(self.__class__.__name__,
            ", ".join("{0}={1}".format(
                str(i[0]), repr(i[1])) for i in self.__key()))

    def __hash__(self):
        return hash(self.__key())
    def __setitem__(self, key, value):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    def __delitem__(self, key):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    def clear(self):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    def pop(self, *args, **kwargs):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    def popitem(self, *args, **kwargs):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    def setdefault(self, *args, **kwargs):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    def update(self, *args, **kwargs):
        raise TypeError("{0} does not support item assignment"
                        .format(self.__class__.__name__))
    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):
        result = hashdict(self)
        dict.update(result, right)
        return result
242
venv/Lib/site-packages/fontTools/misc/filenames.py
Normal file
@@ -0,0 +1,242 @@
"""
This module implements the algorithm for converting between a "user name" -
something that a user can choose arbitrarily inside a font editor - and a file
name suitable for use in a wide range of operating systems and filesystems.

The `UFO 3 specification <http://unifiedfontobject.org/versions/ufo3/conventions/>`_
provides an example of an algorithm for such conversion, which avoids illegal
characters, reserved file names, ambiguity between upper- and lower-case
characters, and clashes with existing files.

This code was originally copied from
`ufoLib <https://github.com/unified-font-object/ufoLib/blob/8747da7/Lib/ufoLib/filenames.py>`_
by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers:

- Erik van Blokland
- Tal Leming
- Just van Rossum
"""


illegalCharacters = r"\" * + / : < > ? [ \ ] | \0".split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]
illegalCharacters += [chr(0x7F)]
reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ")
maxFileNameLength = 255


class NameTranslationError(Exception):
    pass


def userNameToFileName(userName, existing=[], prefix="", suffix=""):
    """Converts from a user name to a file name.

    Takes care to avoid illegal characters, reserved file names, ambiguity between
    upper- and lower-case characters, and clashes with existing files.

    Args:
        userName (str): The input file name.
        existing: A case-insensitive list of all existing file names.
        prefix: Prefix to be prepended to the file name.
        suffix: Suffix to be appended to the file name.

    Returns:
        A suitable filename.

    Raises:
        NameTranslationError: If no suitable name could be generated.

    Examples::

        >>> userNameToFileName("a") == "a"
        True
        >>> userNameToFileName("A") == "A_"
        True
        >>> userNameToFileName("AE") == "A_E_"
        True
        >>> userNameToFileName("Ae") == "A_e"
        True
        >>> userNameToFileName("ae") == "ae"
        True
        >>> userNameToFileName("aE") == "aE_"
        True
        >>> userNameToFileName("a.alt") == "a.alt"
        True
        >>> userNameToFileName("A.alt") == "A_.alt"
        True
        >>> userNameToFileName("A.Alt") == "A_.A_lt"
        True
        >>> userNameToFileName("A.aLt") == "A_.aL_t"
        True
        >>> userNameToFileName(u"A.alT") == "A_.alT_"
        True
        >>> userNameToFileName("T_H") == "T__H_"
        True
        >>> userNameToFileName("T_h") == "T__h"
        True
        >>> userNameToFileName("t_h") == "t_h"
        True
        >>> userNameToFileName("F_F_I") == "F__F__I_"
        True
        >>> userNameToFileName("f_f_i") == "f_f_i"
        True
        >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
        True
        >>> userNameToFileName(".notdef") == "_notdef"
        True
        >>> userNameToFileName("con") == "_con"
        True
        >>> userNameToFileName("CON") == "C_O_N_"
        True
        >>> userNameToFileName("con.alt") == "_con.alt"
        True
        >>> userNameToFileName("alt.con") == "alt._con"
        True
    """
    # the incoming name must be a str
    if not isinstance(userName, str):
        raise ValueError("The value for userName must be a string.")
    # establish the prefix and suffix lengths
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    # replace an initial period with an _
    # if no prefix is to be added
    if not prefix and userName[0] == ".":
        userName = "_" + userName[1:]
    # filter the user name
    filteredUserName = []
    for character in userName:
        # replace illegal characters with _
        if character in illegalCharacters:
            character = "_"
        # add _ to all non-lower characters
        elif character != character.lower():
            character += "_"
        filteredUserName.append(character)
    userName = "".join(filteredUserName)
    # clip to 255
    sliceLength = maxFileNameLength - prefixLength - suffixLength
    userName = userName[:sliceLength]
    # test for illegal file names
    parts = []
    for part in userName.split("."):
        if part.lower() in reservedFileNames:
            part = "_" + part
        parts.append(part)
    userName = ".".join(parts)
    # test for clash
    fullName = prefix + userName + suffix
    if fullName.lower() in existing:
        fullName = handleClash1(userName, existing, prefix, suffix)
    # finished
    return fullName

def handleClash1(userName, existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = ["a" * 5]

    >>> e = list(existing)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000002.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True
    """
    # if the prefix length + user name length + suffix length + 15 is at
    # or past the maximum length, slice 15 characters off of the user name
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
        l = (prefixLength + len(userName) + suffixLength + 15)
        sliceLength = maxFileNameLength - l
        userName = userName[:sliceLength]
    finalName = None
    # try to add numbers to create a unique name
    counter = 1
    while finalName is None:
        name = userName + str(counter).zfill(15)
        fullName = prefix + name + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
        if counter >= 999999999999999:
            break
    # if there is a clash, go to the next fallback
    if finalName is None:
        finalName = handleClash2(existing, prefix, suffix)
    # finished
    return finalName

def handleClash2(existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = [prefix + str(i) + suffix for i in range(100)]

    >>> e = list(existing)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.100.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "1" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.1.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "2" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.2.0000000000')
    True
    """
    # calculate the longest possible string
    maxLength = maxFileNameLength - len(prefix) - len(suffix)
    maxValue = int("9" * maxLength)
    # try to find a number
    finalName = None
    counter = 1
    while finalName is None:
        fullName = prefix + str(counter) + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
        if counter >= maxValue:
            break
    # raise an error if nothing has been found
    if finalName is None:
        raise NameTranslationError("No unique name could be found.")
    # finished
    return finalName

if __name__ == "__main__":
    import doctest
    import sys
    sys.exit(doctest.testmod().failed)
25
venv/Lib/site-packages/fontTools/misc/intTools.py
Normal file
@@ -0,0 +1,25 @@
__all__ = ["popCount"]


try:
    bit_count = int.bit_count
except AttributeError:

    def bit_count(v):
        return bin(v).count("1")


"""Return number of 1 bits (population count) of the absolute value of an integer.

See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count
"""
popCount = bit_count


def bit_indices(v):
    """Return list of indices where bits are set, 0 being the index of the least significant bit.

    >>> bit_indices(0b101)
    [0, 2]
    """
    return [i for i, b in enumerate(bin(v)[::-1]) if b == "1"]
216
venv/Lib/site-packages/fontTools/misc/sstruct.py
Normal file
@@ -0,0 +1,216 @@
"""sstruct.py -- SuperStruct

Higher level layer on top of the struct module, enabling you to
bind names to struct elements. The interface is similar to
struct, except the objects passed and returned are not tuples
(or argument lists), but dictionaries or instances.

Just like struct, we use fmt strings to describe a data
structure, except we use one line per element. Lines are
separated by newlines or semi-colons. Each line contains
either one of the special struct characters ('@', '=', '<',
'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').
Repetitions, as the struct module offers them, are not useful
in this context, except for fixed length strings (eg. 'myInt:5h'
is not allowed but 'myString:5s' is). The 'x' fmt character
(pad byte) is treated as 'special', since it is by definition
anonymous. Extra whitespace is allowed everywhere.

The sstruct module offers one feature that the "normal" struct
module doesn't: support for fixed point numbers. These are spelled
as "n.mF", where n is the number of bits before the point, and m
the number of bits after the point. Fixed point numbers get
converted to floats.

pack(fmt, object):
    'object' is either a dictionary or an instance (or actually
    anything that has a __dict__ attribute). If it is a dictionary,
    its keys are used for names. If it is an instance, its
    attributes are used to grab struct elements from. Returns
    a string containing the data.

unpack(fmt, data, object=None)
    If 'object' is omitted (or None), a new dictionary will be
    returned. If 'object' is a dictionary, it will be used to add
    struct elements to. If it is an instance (or in fact anything
    that has a __dict__ attribute), an attribute will be added for
    each struct element. In the latter two cases, 'object' itself
    is returned.

unpack2(fmt, data, object=None)
    Convenience function. Same as unpack, except data may be longer
    than needed. The returned value is a tuple: (object, leftoverdata).

calcsize(fmt)
    like struct.calcsize(), but uses our own fmt strings:
    it returns the size of the data in bytes.
"""

from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
from fontTools.misc.textTools import tobytes, tostr
import struct
import re

__version__ = "1.2"
__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"


class Error(Exception):
    pass

def pack(fmt, obj):
    formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
    elements = []
    if not isinstance(obj, dict):
        obj = obj.__dict__
    for name in names:
        value = obj[name]
        if name in fixes:
            # fixed point conversion
            value = fl2fi(value, fixes[name])
        elif isinstance(value, str):
            value = tobytes(value)
        elements.append(value)
    data = struct.pack(*(formatstring,) + tuple(elements))
    return data

def unpack(fmt, data, obj=None):
    if obj is None:
        obj = {}
    data = tobytes(data)
    formatstring, names, fixes = getformat(fmt)
    if isinstance(obj, dict):
        d = obj
    else:
        d = obj.__dict__
    elements = struct.unpack(formatstring, data)
    for i in range(len(names)):
        name = names[i]
        value = elements[i]
        if name in fixes:
            # fixed point conversion
            value = fi2fl(value, fixes[name])
        elif isinstance(value, bytes):
            try:
                value = tostr(value)
            except UnicodeDecodeError:
                pass
        d[name] = value
    return obj

def unpack2(fmt, data, obj=None):
    length = calcsize(fmt)
    return unpack(fmt, data[:length], obj), data[length:]

def calcsize(fmt):
    formatstring, names, fixes = getformat(fmt)
    return struct.calcsize(formatstring)
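# A minimal sketch of the fmt convention documented above: two uint16
# fields plus one 16.16 fixed-point field, big-endian (2 + 2 + 4 = 8 bytes).
demofmt = """
    > # big endian
    version: H
    flags:   H
    scale:   16.16F
"""
demodata = pack(demofmt, {"version": 1, "flags": 0, "scale": 1.5})
assert calcsize(demofmt) == len(demodata) == 8
assert unpack(demofmt, demodata)["scale"] == 1.5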


# matches "name:formatchar" (whitespace is allowed)
_elementRE = re.compile(
    r"\s*"                       # whitespace
    r"([A-Za-z_][A-Za-z_0-9]*)"  # name (python identifier)
    r"\s*:\s*"                   # whitespace : whitespace
    r"([xcbB?hHiIlLqQfd]|"       # formatchar...
    r"[0-9]+[ps]|"               # ...formatchar...
    r"([0-9]+)\.([0-9]+)(F))"    # ...formatchar
    r"\s*"                       # whitespace
    r"(#.*)?$"                   # [comment] + end of string
)

# matches the special struct fmt chars and 'x' (pad byte)
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")

# matches an "empty" string, possibly containing whitespace and/or a comment
_emptyRE = re.compile(r"\s*(#.*)?$")

_fixedpointmappings = {
    8: "b",
    16: "h",
    32: "l"}

_formatcache = {}

def getformat(fmt, keep_pad_byte=False):
    fmt = tostr(fmt, encoding="ascii")
    try:
        formatstring, names, fixes = _formatcache[fmt]
    except KeyError:
        lines = re.split("[\n;]", fmt)
        formatstring = ""
        names = []
        fixes = {}
        for line in lines:
            if _emptyRE.match(line):
                continue
            m = _extraRE.match(line)
            if m:
                formatchar = m.group(1)
                if formatchar != 'x' and formatstring:
                    raise Error("a special fmt char must be first")
            else:
                m = _elementRE.match(line)
                if not m:
                    raise Error("syntax error in fmt: '%s'" % line)
                name = m.group(1)
                formatchar = m.group(2)
                if keep_pad_byte or formatchar != "x":
                    names.append(name)
                if m.group(3):
                    # fixed point
                    before = int(m.group(3))
                    after = int(m.group(4))
                    bits = before + after
                    if bits not in [8, 16, 32]:
                        raise Error("fixed point must be 8, 16 or 32 bits long")
                    formatchar = _fixedpointmappings[bits]
                    assert m.group(5) == "F"
                    fixes[name] = after
            formatstring = formatstring + formatchar
        _formatcache[fmt] = formatstring, names, fixes
    return formatstring, names, fixes

def _test():
    fmt = """
        # comments are allowed
        >  # big endian (see documentation for struct)
        # empty lines are allowed:

        ashort:  h
        along:   l
        abyte:   b  # a byte
        achar:   c
        astr:    5s
        afloat:  f; adouble: d  # multiple "statements" are allowed
        afixed:  16.16F
        abool:   ?
        apad:    x
    """

    print('size:', calcsize(fmt))

    class foo(object):
        pass

    i = foo()

    i.ashort = 0x7fff
    i.along = 0x7fffffff
    i.abyte = 0x7f
    i.achar = "a"
    i.astr = "12345"
    i.afloat = 0.5
    i.adouble = 0.5
    i.afixed = 1.5
    i.abool = True

    data = pack(fmt, i)
    print('data:', repr(data))
    print(unpack(fmt, data))
    i2 = foo()
    unpack(fmt, data, i2)
    print(vars(i2))

if __name__ == "__main__":
    _test()
192
venv/Lib/site-packages/fontTools/misc/symfont.py
Normal file
@@ -0,0 +1,192 @@
from fontTools.pens.basePen import BasePen
from functools import partial
from itertools import count
import sympy as sp
import sys

n = 3  # Max Bezier degree; 3 for cubic, 2 for quadratic

t, x, y = sp.symbols('t x y', real=True)
c = sp.symbols('c', real=False)  # Complex representation instead of x/y

X = tuple(sp.symbols('x:%d'%(n+1), real=True))
Y = tuple(sp.symbols('y:%d'%(n+1), real=True))
P = tuple(zip(*(sp.symbols('p:%d[%s]'%(n+1,w), real=True) for w in '01')))
C = tuple(sp.symbols('c:%d'%(n+1), real=False))

# Cubic Bernstein basis functions
BinomialCoefficient = [(1, 0)]
for i in range(1, n+1):
    last = BinomialCoefficient[-1]
    this = tuple(last[j-1]+last[j] for j in range(len(last)))+(0,)
    BinomialCoefficient.append(this)
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
del last, this

BernsteinPolynomial = tuple(
    tuple(c * t**i * (1-t)**(n-i) for i,c in enumerate(coeffs))
    for n,coeffs in enumerate(BinomialCoefficient))

BezierCurve = tuple(
    tuple(sum(P[i][j]*bernstein for i,bernstein in enumerate(bernsteins))
          for j in range(2))
    for n,bernsteins in enumerate(BernsteinPolynomial))
BezierCurveC = tuple(
    sum(C[i]*bernstein for i,bernstein in enumerate(bernsteins))
    for n,bernsteins in enumerate(BernsteinPolynomial))


def green(f, curveXY):
    f = -sp.integrate(sp.sympify(f), y)
    f = f.subs({x:curveXY[0], y:curveXY[1]})
    f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
    return f


class _BezierFuncsLazy(dict):

    def __init__(self, symfunc):
        self._symfunc = symfunc
        self._bezfuncs = {}

    def __missing__(self, i):
        args = ['p%d'%d for d in range(i+1)]
        f = green(self._symfunc, BezierCurve[i])
        f = sp.gcd_terms(f.collect(sum(P,())))  # Optimize
        return sp.lambdify(args, f)

class GreenPen(BasePen):

    _BezierFuncs = {}

    @classmethod
    def _getGreenBezierFuncs(celf, func):
        funcstr = str(func)
        if not funcstr in celf._BezierFuncs:
            celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
        return celf._BezierFuncs[funcstr]

    def __init__(self, func, glyphset=None):
        BasePen.__init__(self, glyphset)
        self._funcs = self._getGreenBezierFuncs(func)
        self.value = 0

    def _moveTo(self, p0):
        self.__startPoint = p0

    def _closePath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            self._lineTo(self.__startPoint)

    def _endPath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            # Green theorem is not defined on open contours.
            raise NotImplementedError

    def _lineTo(self, p1):
        p0 = self._getCurrentPoint()
        self.value += self._funcs[1](p0, p1)

    def _qCurveToOne(self, p1, p2):
        p0 = self._getCurrentPoint()
        self.value += self._funcs[2](p0, p1, p2)

    def _curveToOne(self, p1, p2, p3):
        p0 = self._getCurrentPoint()
        self.value += self._funcs[3](p0, p1, p2, p3)

# Sample pens.
# Do not use this in real code.
# Use fontTools.pens.momentsPen.MomentsPen instead.
AreaPen = partial(GreenPen, func=1)
MomentXPen = partial(GreenPen, func=x)
MomentYPen = partial(GreenPen, func=y)
MomentXXPen = partial(GreenPen, func=x*x)
MomentYYPen = partial(GreenPen, func=y*y)
MomentXYPen = partial(GreenPen, func=x*y)


def printGreenPen(penName, funcs, file=sys.stdout):

    print(
'''from fontTools.pens.basePen import BasePen

class %s(BasePen):

    def __init__(self, glyphset=None):
        BasePen.__init__(self, glyphset)
'''%penName, file=file)
    for name,f in funcs:
        print('        self.%s = 0' % name, file=file)
    print('''
    def _moveTo(self, p0):
        self.__startPoint = p0

    def _closePath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            self._lineTo(self.__startPoint)

    def _endPath(self):
        p0 = self._getCurrentPoint()
        if p0 != self.__startPoint:
            # Green theorem is not defined on open contours.
            raise NotImplementedError
''', end='', file=file)

    for n in (1, 2, 3):

        if n == 1:
            print('''
    def _lineTo(self, p1):
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
''', file=file)
        elif n == 2:
            print('''
    def _qCurveToOne(self, p1, p2):
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
        x2,y2 = p2
''', file=file)
        elif n == 3:
            print('''
    def _curveToOne(self, p1, p2, p3):
        x0,y0 = self._getCurrentPoint()
        x1,y1 = p1
        x2,y2 = p2
        x3,y3 = p3
''', file=file)
        subs = {P[i][j]: [X, Y][j][i] for i in range(n+1) for j in range(2)}
        greens = [green(f, BezierCurve[n]) for name,f in funcs]
        greens = [sp.gcd_terms(f.collect(sum(P,()))) for f in greens]  # Optimize
        greens = [f.subs(subs) for f in greens]  # Convert from p to x/y
        defs, exprs = sp.cse(greens,
                             optimizations='basic',
                             symbols=(sp.Symbol('r%d'%i) for i in count()))
        for name,value in defs:
            print('        %s = %s' % (name, value), file=file)
        print(file=file)
        for name,value in zip([f[0] for f in funcs], exprs):
            print('        self.%s += %s' % (name, value), file=file)

    print('''
if __name__ == '__main__':
    from fontTools.misc.symfont import x, y, printGreenPen
    printGreenPen('%s', ['''%penName, file=file)
    for name,f in funcs:
        print("        ('%s', %s)," % (name, str(f)), file=file)
    print('    ])', file=file)


if __name__ == '__main__':
    pen = AreaPen()
    pen.moveTo((100,100))
    pen.lineTo((100,200))
    pen.lineTo((200,200))
    pen.curveTo((200,250),(300,300),(250,350))
    pen.lineTo((200,100))
    pen.closePath()
    print(pen.value)
|
198
venv/Lib/site-packages/fontTools/misc/testTools.py
Normal file
@@ -0,0 +1,198 @@
"""Helpers for writing unit tests."""

from collections.abc import Iterable
from io import BytesIO
import os
import shutil
import sys
import tempfile
from unittest import TestCase as _TestCase
from fontTools.misc.textTools import tobytes
from fontTools.misc.xmlWriter import XMLWriter


def parseXML(xmlSnippet):
    """Parses a snippet of XML.

    Input can be either a single string (unicode or UTF-8 bytes), or a
    sequence of strings.

    The result is in the same format that would be returned by
    XMLReader, but the parser imposes no constraints on the root
    element so it can be called on small snippets of TTX files.
    """
    # To support snippets with multiple elements, we add a fake root.
    reader = TestXMLReader_()
    xml = b"<root>"
    if isinstance(xmlSnippet, bytes):
        xml += xmlSnippet
    elif isinstance(xmlSnippet, str):
        xml += tobytes(xmlSnippet, 'utf-8')
    elif isinstance(xmlSnippet, Iterable):
        xml += b"".join(tobytes(s, 'utf-8') for s in xmlSnippet)
    else:
        raise TypeError("expected string or sequence of strings; found %r"
                        % type(xmlSnippet).__name__)
    xml += b"</root>"
    reader.parser.Parse(xml, 0)
    return reader.root[2]
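
# Illustrative usage sketch (added for clarity, not part of the upstream
# module): because parseXML() wraps the input in a fake <root>, a snippet
# with several top-level elements parses fine, returning roughly:
#
#     parseXML('<a value="1"/><b/>')
#     # -> [('a', {'value': '1'}, []), ('b', {}, [])]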

def parseXmlInto(font, parseInto, xmlSnippet):
    parsed_xml = [e for e in parseXML(xmlSnippet.strip()) if not isinstance(e, str)]
    for name, attrs, content in parsed_xml:
        parseInto.fromXML(name, attrs, content, font)
    parseInto.populateDefaults()
    return parseInto


class FakeFont:
    def __init__(self, glyphs):
        self.glyphOrder_ = glyphs
        self.reverseGlyphOrderDict_ = {g: i for i, g in enumerate(glyphs)}
        self.lazy = False
        self.tables = {}

    def __getitem__(self, tag):
        return self.tables[tag]

    def __setitem__(self, tag, table):
        self.tables[tag] = table

    def get(self, tag, default=None):
        return self.tables.get(tag, default)

    def getGlyphID(self, name):
        return self.reverseGlyphOrderDict_[name]

    def getGlyphIDMany(self, lst):
        return [self.getGlyphID(gid) for gid in lst]

    def getGlyphName(self, glyphID):
        if glyphID < len(self.glyphOrder_):
            return self.glyphOrder_[glyphID]
        else:
            return "glyph%.5d" % glyphID

    def getGlyphNameMany(self, lst):
        return [self.getGlyphName(gid) for gid in lst]

    def getGlyphOrder(self):
        return self.glyphOrder_

    def getReverseGlyphMap(self):
        return self.reverseGlyphOrderDict_

    def getGlyphNames(self):
        return sorted(self.getGlyphOrder())


class TestXMLReader_(object):
    def __init__(self):
        from xml.parsers.expat import ParserCreate
        self.parser = ParserCreate()
        self.parser.StartElementHandler = self.startElement_
        self.parser.EndElementHandler = self.endElement_
        self.parser.CharacterDataHandler = self.addCharacterData_
        self.root = None
        self.stack = []

    def startElement_(self, name, attrs):
        element = (name, attrs, [])
        if self.stack:
            self.stack[-1][2].append(element)
        else:
            self.root = element
        self.stack.append(element)

    def endElement_(self, name):
        self.stack.pop()

    def addCharacterData_(self, data):
        self.stack[-1][2].append(data)


def makeXMLWriter(newlinestr='\n'):
    # don't write OS-specific new lines
    writer = XMLWriter(BytesIO(), newlinestr=newlinestr)
    # erase XML declaration
    writer.file.seek(0)
    writer.file.truncate()
    return writer


def getXML(func, ttFont=None):
    """Call the passed toXML function and return the written content as a
    list of lines (unicode strings).
    Result is stripped of XML declaration and OS-specific newline characters.
    """
    writer = makeXMLWriter()
    func(writer, ttFont)
    xml = writer.file.getvalue().decode("utf-8")
    # toXML methods must always end with a writer.newline()
    assert xml.endswith("\n")
    return xml.splitlines()


class MockFont(object):
    """A font-like object that automatically adds any looked up glyphname
    to its glyphOrder."""

    def __init__(self):
        self._glyphOrder = ['.notdef']

        class AllocatingDict(dict):
            def __missing__(reverseDict, key):
                self._glyphOrder.append(key)
                gid = len(reverseDict)
                reverseDict[key] = gid
                return gid
        self._reverseGlyphOrder = AllocatingDict({'.notdef': 0})
        self.lazy = False

    def getGlyphID(self, glyph):
        gid = self._reverseGlyphOrder[glyph]
        return gid

    def getReverseGlyphMap(self):
        return self._reverseGlyphOrder

    def getGlyphName(self, gid):
        return self._glyphOrder[gid]

    def getGlyphOrder(self):
        return self._glyphOrder


class TestCase(_TestCase):

    def __init__(self, methodName):
        _TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp


class DataFilesHandler(TestCase):

    def setUp(self):
        self.tempdir = None
        self.num_tempfiles = 0

    def tearDown(self):
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    def getpath(self, testfile):
        folder = os.path.dirname(sys.modules[self.__module__].__file__)
        return os.path.join(folder, "data", testfile)

    def temp_dir(self):
        if not self.tempdir:
            self.tempdir = tempfile.mkdtemp()

    def temp_font(self, font_path, file_name):
        self.temp_dir()
        temppath = os.path.join(self.tempdir, file_name)
        shutil.copy2(font_path, temppath)
        return temppath
154
venv/Lib/site-packages/fontTools/misc/textTools.py
Normal file
@@ -0,0 +1,154 @@
"""fontTools.misc.textTools.py -- miscellaneous routines."""


import ast
import string


# alias kept for backward compatibility
safeEval = ast.literal_eval


class Tag(str):
    @staticmethod
    def transcode(blob):
        if isinstance(blob, bytes):
            blob = blob.decode("latin-1")
        return blob

    def __new__(self, content):
        return str.__new__(self, self.transcode(content))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        return str.__eq__(self, self.transcode(other))

    def __hash__(self):
        return str.__hash__(self)

    def tobytes(self):
        return self.encode("latin-1")
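
# Illustrative sketch (added for clarity, not part of the upstream module):
# Tag normalizes bytes to latin-1 text, so a Tag compares equal to both the
# str and the bytes spelling of the same four characters:
#
#     Tag(b"glyf") == "glyf"     # True
#     Tag("glyf") == b"glyf"     # True
#     Tag("glyf").tobytes()      # b'glyf'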

def readHex(content):
    """Convert a list of hex strings to binary data."""
    return deHexStr(strjoin(chunk for chunk in content if isinstance(chunk, str)))


def deHexStr(hexdata):
    """Convert a hex string to binary data."""
    hexdata = strjoin(hexdata.split())
    if len(hexdata) % 2:
        hexdata = hexdata + "0"
    data = []
    for i in range(0, len(hexdata), 2):
        data.append(bytechr(int(hexdata[i:i+2], 16)))
    return bytesjoin(data)


def hexStr(data):
    """Convert binary data to a hex string."""
    h = string.hexdigits
    r = ''
    for c in data:
        i = byteord(c)
        r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
    return r


def num2binary(l, bits=32):
    items = []
    binary = ""
    for i in range(bits):
        if l & 0x1:
            binary = "1" + binary
        else:
            binary = "0" + binary
        l = l >> 1
        if not ((i+1) % 8):
            items.append(binary)
            binary = ""
    if binary:
        items.append(binary)
    items.reverse()
    assert l in (0, -1), "number doesn't fit in number of bits"
    return ' '.join(items)


def binary2num(bin):
    bin = strjoin(bin.split())
    l = 0
    for digit in bin:
        l = l << 1
        if digit != "0":
            l = l | 0x1
    return l
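
# Illustrative round trip (added for clarity, not part of the upstream
# module): num2binary() groups the bits per byte, and binary2num() ignores
# that whitespace when parsing back:
#
#     num2binary(5, bits=8)      # '00000101'
#     binary2num('0000 0101')    # 5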

def caselessSort(alist):
    """Return a sorted copy of a list. If there are only strings
    in the list, it will not consider case.
    """

    try:
        return sorted(alist, key=lambda a: (a.lower(), a))
    except TypeError:
        return sorted(alist)


def pad(data, size):
    r""" Pad byte string 'data' with null bytes until its length is a
    multiple of 'size'.

    >>> len(pad(b'abcd', 4))
    4
    >>> len(pad(b'abcde', 2))
    6
    >>> len(pad(b'abcde', 4))
    8
    >>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
    True
    """
    data = tobytes(data)
    if size > 1:
        remainder = len(data) % size
        if remainder:
            data += b"\0" * (size - remainder)
    return data


def tostr(s, encoding="ascii", errors="strict"):
    if not isinstance(s, str):
        return s.decode(encoding, errors)
    else:
        return s


def tobytes(s, encoding="ascii", errors="strict"):
    if isinstance(s, str):
        return s.encode(encoding, errors)
    else:
        return bytes(s)


def bytechr(n):
    return bytes([n])


def byteord(c):
    return c if isinstance(c, int) else ord(c)


def strjoin(iterable, joiner=""):
    return tostr(joiner).join(iterable)


def bytesjoin(iterable, joiner=b""):
    return tobytes(joiner).join(tobytes(item) for item in iterable)


if __name__ == "__main__":
    import doctest, sys
    sys.exit(doctest.testmod().failed)
68
venv/Lib/site-packages/fontTools/misc/timeTools.py
Normal file
@@ -0,0 +1,68 @@
"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.
"""

import os
import time
from datetime import datetime, timezone
import calendar


epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))

DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
              "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]


def asctime(t=None):
    """
    Convert a tuple or struct_time representing a time as returned by gmtime()
    or localtime() to a 24-character string of the following form:

    >>> asctime(time.gmtime(0))
    'Thu Jan  1 00:00:00 1970'

    If t is not provided, the current time as returned by localtime() is used.
    Locale information is not used by asctime().

    This is meant to normalise the output of the built-in time.asctime() across
    different platforms and Python versions.
    In Python 3.x, the day of the month is right-justified, whereas on Windows
    Python 2.7 it is padded with zeros.

    See https://github.com/fonttools/fonttools/issues/455
    """
    if t is None:
        t = time.localtime()
    s = "%s %s %2s %s" % (
        DAYNAMES[t.tm_wday], MONTHNAMES[t.tm_mon], t.tm_mday,
        time.strftime("%H:%M:%S %Y", t))
    return s


def timestampToString(value):
    return asctime(time.gmtime(max(0, value + epoch_diff)))


def timestampFromString(value):
    wkday, mnth = value[:7].split()
    t = datetime.strptime(value[7:], ' %d %H:%M:%S %Y')
    t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
    wkday_idx = DAYNAMES.index(wkday)
    assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
    return int(t.timestamp()) - epoch_diff


def timestampNow():
    # https://reproducible-builds.org/specs/source-date-epoch/
    source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
    if source_date_epoch is not None:
        return int(source_date_epoch) - epoch_diff
    return int(time.time() - epoch_diff)
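
# Illustrative sketch (added for clarity, not part of the upstream module):
# timestampNow() honours the reproducible-builds SOURCE_DATE_EPOCH
# convention, so a build can be pinned to a fixed timestamp:
#
#     import os
#     os.environ["SOURCE_DATE_EPOCH"] = "0"  # 1970-01-01T00:00:00Z
#     timestampNow()  # seconds from the 1904 OpenType epoch to that instant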

def timestampSinceEpoch(value):
    return int(value - epoch_diff)


if __name__ == "__main__":
    import sys
    import doctest
    sys.exit(doctest.testmod().failed)
398
venv/Lib/site-packages/fontTools/misc/transform.py
Normal file
@@ -0,0 +1,398 @@
"""Affine 2D transformation matrix class.

The Transform class implements various transformation matrix operations,
both on the matrix itself, as well as on 2D coordinates.

Transform instances are effectively immutable: all methods that operate on the
transformation itself always return a new instance. This has the interesting
side effect that Transform instances are hashable, i.e. they can be used as
dictionary keys.

This module exports the following symbols:

Transform
    this is the main class
Identity
    Transform instance set to the identity transformation
Offset
    Convenience function that returns a translating transformation
Scale
    Convenience function that returns a scaling transformation

:Example:

    >>> t = Transform(2, 0, 0, 3, 0, 0)
    >>> t.transformPoint((100, 100))
    (200, 300)
    >>> t = Scale(2, 3)
    >>> t.transformPoint((100, 100))
    (200, 300)
    >>> t.transformPoint((0, 0))
    (0, 0)
    >>> t = Offset(2, 3)
    >>> t.transformPoint((100, 100))
    (102, 103)
    >>> t.transformPoint((0, 0))
    (2, 3)
    >>> t2 = t.scale(0.5)
    >>> t2.transformPoint((100, 100))
    (52.0, 53.0)
    >>> import math
    >>> t3 = t2.rotate(math.pi / 2)
    >>> t3.transformPoint((0, 0))
    (2.0, 3.0)
    >>> t3.transformPoint((100, 100))
    (-48.0, 53.0)
    >>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2)
    >>> t.transformPoints([(0, 0), (1, 1), (100, 100)])
    [(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)]
    >>>
"""

from typing import NamedTuple


__all__ = ["Transform", "Identity", "Offset", "Scale"]


_EPSILON = 1e-15
_ONE_EPSILON = 1 - _EPSILON
_MINUS_ONE_EPSILON = -1 + _EPSILON


def _normSinCos(v):
    if abs(v) < _EPSILON:
        v = 0
    elif v > _ONE_EPSILON:
        v = 1
    elif v < _MINUS_ONE_EPSILON:
        v = -1
    return v

class Transform(NamedTuple):

    """2x2 transformation matrix plus offset, a.k.a. Affine transform.
    Transform instances are immutable: all transforming methods, e.g.
    rotate(), return a new Transform instance.

    :Example:

        >>> t = Transform()
        >>> t
        <Transform [1 0 0 1 0 0]>
        >>> t.scale(2)
        <Transform [2 0 0 2 0 0]>
        >>> t.scale(2.5, 5.5)
        <Transform [2.5 0 0 5.5 0 0]>
        >>>
        >>> t.scale(2, 3).transformPoint((100, 100))
        (200, 300)

    Transform's constructor takes six arguments, all of which are
    optional, and can be used as keyword arguments::

        >>> Transform(12)
        <Transform [12 0 0 1 0 0]>
        >>> Transform(dx=12)
        <Transform [1 0 0 1 12 0]>
        >>> Transform(yx=12)
        <Transform [1 0 12 1 0 0]>

    Transform instances also behave like sequences of length 6::

        >>> len(Identity)
        6
        >>> list(Identity)
        [1, 0, 0, 1, 0, 0]
        >>> tuple(Identity)
        (1, 0, 0, 1, 0, 0)

    Transform instances are comparable::

        >>> t1 = Identity.scale(2, 3).translate(4, 6)
        >>> t2 = Identity.translate(8, 18).scale(2, 3)
        >>> t1 == t2
        True

    But beware of floating point rounding errors::

        >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
        >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
        >>> t1
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> t2
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> t1 == t2
        False

    Transform instances are hashable, meaning you can use them as
    keys in dictionaries::

        >>> d = {Scale(12, 13): None}
        >>> d
        {<Transform [12 0 0 13 0 0]>: None}

    But again, beware of floating point rounding errors::

        >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
        >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
        >>> t1
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> t2
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> d = {t1: None}
        >>> d
        {<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
        >>> d[t2]
        Traceback (most recent call last):
          File "<stdin>", line 1, in ?
        KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
    """

    xx: float = 1
    xy: float = 0
    yx: float = 0
    yy: float = 1
    dx: float = 0
    dy: float = 0

    def transformPoint(self, p):
        """Transform a point.

        :Example:

            >>> t = Transform()
            >>> t = t.scale(2.5, 5.5)
            >>> t.transformPoint((100, 100))
            (250.0, 550.0)
        """
        (x, y) = p
        xx, xy, yx, yy, dx, dy = self
        return (xx*x + yx*y + dx, xy*x + yy*y + dy)

    def transformPoints(self, points):
        """Transform a list of points.

        :Example:

            >>> t = Scale(2, 3)
            >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
            [(0, 0), (0, 300), (200, 300), (200, 0)]
            >>>
        """
        xx, xy, yx, yy, dx, dy = self
        return [(xx*x + yx*y + dx, xy*x + yy*y + dy) for x, y in points]

    def transformVector(self, v):
        """Transform a (dx, dy) vector, treating translation as zero.

        :Example:

            >>> t = Transform(2, 0, 0, 2, 10, 20)
            >>> t.transformVector((3, -4))
            (6, -8)
            >>>
        """
        (dx, dy) = v
        xx, xy, yx, yy = self[:4]
        return (xx*dx + yx*dy, xy*dx + yy*dy)

    def transformVectors(self, vectors):
        """Transform a list of (dx, dy) vectors, treating translation as zero.

        :Example:
            >>> t = Transform(2, 0, 0, 2, 10, 20)
            >>> t.transformVectors([(3, -4), (5, -6)])
            [(6, -8), (10, -12)]
            >>>
        """
        xx, xy, yx, yy = self[:4]
        return [(xx*dx + yx*dy, xy*dx + yy*dy) for dx, dy in vectors]

    def translate(self, x=0, y=0):
        """Return a new transformation, translated (offset) by x, y.

        :Example:
            >>> t = Transform()
            >>> t.translate(20, 30)
            <Transform [1 0 0 1 20 30]>
            >>>
        """
        return self.transform((1, 0, 0, 1, x, y))

    def scale(self, x=1, y=None):
        """Return a new transformation, scaled by x, y. The 'y' argument
        may be None, which implies to use the x value for y as well.

        :Example:
            >>> t = Transform()
            >>> t.scale(5)
            <Transform [5 0 0 5 0 0]>
            >>> t.scale(5, 6)
            <Transform [5 0 0 6 0 0]>
            >>>
        """
        if y is None:
            y = x
        return self.transform((x, 0, 0, y, 0, 0))

    def rotate(self, angle):
        """Return a new transformation, rotated by 'angle' (radians).

        :Example:
            >>> import math
            >>> t = Transform()
            >>> t.rotate(math.pi / 2)
            <Transform [0 1 -1 0 0 0]>
            >>>
        """
        import math
        c = _normSinCos(math.cos(angle))
        s = _normSinCos(math.sin(angle))
        return self.transform((c, s, -s, c, 0, 0))

    def skew(self, x=0, y=0):
        """Return a new transformation, skewed by x and y.

        :Example:
            >>> import math
            >>> t = Transform()
            >>> t.skew(math.pi / 4)
            <Transform [1 0 1 1 0 0]>
            >>>
        """
        import math
        return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))

    def transform(self, other):
        """Return a new transformation, transformed by another
        transformation.

        :Example:
            >>> t = Transform(2, 0, 0, 3, 1, 6)
            >>> t.transform((4, 3, 2, 1, 5, 6))
            <Transform [8 9 4 3 11 24]>
            >>>
        """
        xx1, xy1, yx1, yy1, dx1, dy1 = other
        xx2, xy2, yx2, yy2, dx2, dy2 = self
        return self.__class__(
            xx1*xx2 + xy1*yx2,
            xx1*xy2 + xy1*yy2,
            yx1*xx2 + yy1*yx2,
            yx1*xy2 + yy1*yy2,
            xx2*dx1 + yx2*dy1 + dx2,
            xy2*dx1 + yy2*dy1 + dy2)

    def reverseTransform(self, other):
        """Return a new transformation, which is the other transformation
        transformed by self. self.reverseTransform(other) is equivalent to
        other.transform(self).

        :Example:
            >>> t = Transform(2, 0, 0, 3, 1, 6)
            >>> t.reverseTransform((4, 3, 2, 1, 5, 6))
            <Transform [8 6 6 3 21 15]>
            >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
            <Transform [8 6 6 3 21 15]>
            >>>
        """
        xx1, xy1, yx1, yy1, dx1, dy1 = self
        xx2, xy2, yx2, yy2, dx2, dy2 = other
        return self.__class__(
            xx1*xx2 + xy1*yx2,
            xx1*xy2 + xy1*yy2,
            yx1*xx2 + yy1*yx2,
            yx1*xy2 + yy1*yy2,
            xx2*dx1 + yx2*dy1 + dx2,
            xy2*dx1 + yy2*dy1 + dy2)

    def inverse(self):
        """Return the inverse transformation.

        :Example:
            >>> t = Identity.translate(2, 3).scale(4, 5)
            >>> t.transformPoint((10, 20))
            (42, 103)
            >>> it = t.inverse()
            >>> it.transformPoint((42, 103))
            (10.0, 20.0)
            >>>
        """
        if self == Identity:
            return self
        xx, xy, yx, yy, dx, dy = self
        det = xx*yy - yx*xy
        xx, xy, yx, yy = yy/det, -xy/det, -yx/det, xx/det
        dx, dy = -xx*dx - yx*dy, -xy*dx - yy*dy
        return self.__class__(xx, xy, yx, yy, dx, dy)

    def toPS(self):
        """Return a PostScript representation

        :Example:

            >>> t = Identity.scale(2, 3).translate(4, 5)
            >>> t.toPS()
            '[2 0 0 3 8 15]'
            >>>
        """
        return "[%s %s %s %s %s %s]" % self

    def __bool__(self):
        """Returns True if transform is not identity, False otherwise.

        :Example:

            >>> bool(Identity)
            False
            >>> bool(Transform())
            False
            >>> bool(Scale(1.))
            False
            >>> bool(Scale(2))
            True
            >>> bool(Offset())
            False
            >>> bool(Offset(0))
            False
            >>> bool(Offset(2))
            True
        """
        return self != Identity

    def __repr__(self):
        return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)


Identity = Transform()


def Offset(x=0, y=0):
    """Return the identity transformation offset by x, y.

    :Example:
        >>> Offset(2, 3)
        <Transform [1 0 0 1 2 3]>
        >>>
    """
    return Transform(1, 0, 0, 1, x, y)


def Scale(x, y=None):
    """Return the identity transformation scaled by x, y. The 'y' argument
    may be None, which implies to use the x value for y as well.

    :Example:
        >>> Scale(2, 3)
        <Transform [2 0 0 3 0 0]>
        >>>
    """
    if y is None:
        y = x
    return Transform(x, 0, 0, y, 0, 0)


if __name__ == "__main__":
    import sys
    import doctest
    sys.exit(doctest.testmod().failed)
171
venv/Lib/site-packages/fontTools/misc/xmlReader.py
Normal file
@@ -0,0 +1,171 @@
from fontTools import ttLib
from fontTools.misc.textTools import safeEval
from fontTools.ttLib.tables.DefaultTable import DefaultTable
import sys
import os
import logging


log = logging.getLogger(__name__)

class TTXParseError(Exception): pass

BUFSIZE = 0x4000


class XMLReader(object):

    def __init__(self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False):
        if fileOrPath == '-':
            fileOrPath = sys.stdin
        if not hasattr(fileOrPath, "read"):
            self.file = open(fileOrPath, "rb")
            self._closeStream = True
        else:
            # assume readable file object
            self.file = fileOrPath
            self._closeStream = False
        self.ttFont = ttFont
        self.progress = progress
        if quiet is not None:
            from fontTools.misc.loggingTools import deprecateArgument
            deprecateArgument("quiet", "configure logging instead")
            self.quiet = quiet
        self.root = None
        self.contentStack = []
        self.contentOnly = contentOnly
        self.stackSize = 0

    def read(self, rootless=False):
        if rootless:
            self.stackSize += 1
        if self.progress:
            self.file.seek(0, 2)
            fileSize = self.file.tell()
            self.progress.set(0, fileSize // 100 or 1)
            self.file.seek(0)
        self._parseFile(self.file)
        if self._closeStream:
            self.close()
        if rootless:
            self.stackSize -= 1

    def close(self):
        self.file.close()

    def _parseFile(self, file):
        from xml.parsers.expat import ParserCreate
        parser = ParserCreate()
        parser.StartElementHandler = self._startElementHandler
        parser.EndElementHandler = self._endElementHandler
        parser.CharacterDataHandler = self._characterDataHandler

        pos = 0
        while True:
            chunk = file.read(BUFSIZE)
            if not chunk:
                parser.Parse(chunk, 1)
                break
            pos = pos + len(chunk)
            if self.progress:
                self.progress.set(pos // 100)
            parser.Parse(chunk, 0)

    def _startElementHandler(self, name, attrs):
        if self.stackSize == 1 and self.contentOnly:
            # We already know the table we're parsing, skip
            # parsing the table tag and continue to
            # stack '2' which begins parsing content
            self.contentStack.append([])
            self.stackSize = 2
            return
        stackSize = self.stackSize
        self.stackSize = stackSize + 1
        subFile = attrs.get("src")
        if subFile is not None:
            if hasattr(self.file, 'name'):
                # if file has a name, get its parent directory
                dirname = os.path.dirname(self.file.name)
            else:
                # else fall back to using the current working directory
                dirname = os.getcwd()
            subFile = os.path.join(dirname, subFile)
        if not stackSize:
            if name != "ttFont":
                raise TTXParseError("illegal root tag: %s" % name)
            if self.ttFont.reader is None and not self.ttFont.tables:
                sfntVersion = attrs.get("sfntVersion")
                if sfntVersion is not None:
                    if len(sfntVersion) != 4:
                        sfntVersion = safeEval('"' + sfntVersion + '"')
                    self.ttFont.sfntVersion = sfntVersion
            self.contentStack.append([])
        elif stackSize == 1:
            if subFile is not None:
                subReader = XMLReader(subFile, self.ttFont, self.progress)
                subReader.read()
                self.contentStack.append([])
                return
            tag = ttLib.xmlToTag(name)
            msg = "Parsing '%s' table..." % tag
            if self.progress:
                self.progress.setLabel(msg)
            log.info(msg)
            if tag == "GlyphOrder":
                tableClass = ttLib.GlyphOrder
            elif "ERROR" in attrs or ('raw' in attrs and safeEval(attrs['raw'])):
                tableClass = DefaultTable
            else:
                tableClass = ttLib.getTableClass(tag)
                if tableClass is None:
                    tableClass = DefaultTable
            if tag == 'loca' and tag in self.ttFont:
                # Special-case the 'loca' table as we need the
                # original if the 'glyf' table isn't recompiled.
                self.currentTable = self.ttFont[tag]
            else:
                self.currentTable = tableClass(tag)
                self.ttFont[tag] = self.currentTable
            self.contentStack.append([])
        elif stackSize == 2 and subFile is not None:
            subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
            subReader.read()
            self.contentStack.append([])
            self.root = subReader.root
        elif stackSize == 2:
            self.contentStack.append([])
            self.root = (name, attrs, self.contentStack[-1])
        else:
            l = []
            self.contentStack[-1].append((name, attrs, l))
            self.contentStack.append(l)

    def _characterDataHandler(self, data):
        if self.stackSize > 1:
            self.contentStack[-1].append(data)

    def _endElementHandler(self, name):
        self.stackSize = self.stackSize - 1
        del self.contentStack[-1]
        if not self.contentOnly:
            if self.stackSize == 1:
                self.root = None
            elif self.stackSize == 2:
                name, attrs, content = self.root
                self.currentTable.fromXML(name, attrs, content, self.ttFont)
                self.root = None
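
# Illustrative usage sketch (added for clarity, not part of the upstream
# module): an XMLReader is normally driven by TTFont.importXML(), but it can
# also be used directly to populate a font ("font.ttx" is a hypothetical path):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont()
#     XMLReader("font.ttx", font).read()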

class ProgressPrinter(object):

    def __init__(self, title, maxval=100):
        print(title)

    def set(self, val, maxval=None):
        pass

    def increment(self, val=1):
        pass

    def setLabel(self, text):
        print(text)
194
venv/Lib/site-packages/fontTools/misc/xmlWriter.py
Normal file
@@ -0,0 +1,194 @@
"""xmlWriter.py -- Simple XML authoring class"""

from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr
import sys
import os
import string

INDENT = "  "


class XMLWriter(object):

    def __init__(self, fileOrPath, indentwhite=INDENT, idlefunc=None, encoding="utf_8",
                 newlinestr="\n"):
        if encoding.lower().replace('-', '').replace('_', '') != 'utf8':
            raise Exception('Only UTF-8 encoding is supported.')
        if fileOrPath == '-':
            fileOrPath = sys.stdout
        if not hasattr(fileOrPath, "write"):
            self.filename = fileOrPath
            self.file = open(fileOrPath, "wb")
            self._closeStream = True
        else:
            self.filename = None
            # assume writable file object
            self.file = fileOrPath
            self._closeStream = False

        # Figure out if writer expects bytes or unicodes
        try:
            # The bytes check should be first. See:
            # https://github.com/fonttools/fonttools/pull/233
            self.file.write(b'')
            self.totype = tobytes
        except TypeError:
            # This better not fail.
            self.file.write('')
            self.totype = tostr
        self.indentwhite = self.totype(indentwhite)
        if newlinestr is None:
            self.newlinestr = self.totype(os.linesep)
        else:
            self.newlinestr = self.totype(newlinestr)
        self.indentlevel = 0
        self.stack = []
        self.needindent = 1
        self.idlefunc = idlefunc
        self.idlecounter = 0
        self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
        self.newline()

    def __enter__(self):
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        self.close()

    def close(self):
        if self._closeStream:
            self.file.close()

    def write(self, string, indent=True):
        """Writes text."""
        self._writeraw(escape(string), indent=indent)

    def writecdata(self, string):
        """Writes text in a CDATA section."""
        self._writeraw("<![CDATA[" + string + "]]>")

    def write8bit(self, data, strip=False):
        """Writes a bytes() sequence into the XML, escaping
        non-ASCII bytes. When this is read in xmlReader,
        the original bytes can be recovered by encoding to
        'latin-1'."""
        self._writeraw(escape8bit(data), strip=strip)

    def write_noindent(self, string):
        """Writes text without indentation."""
        self._writeraw(escape(string), indent=False)

    def _writeraw(self, data, indent=True, strip=False):
        """Writes bytes, possibly indented."""
        if indent and self.needindent:
            self.file.write(self.indentlevel * self.indentwhite)
            self.needindent = 0
        s = self.totype(data, encoding="utf_8")
        if (strip):
            s = s.strip()
        self.file.write(s)

    def newline(self):
        self.file.write(self.newlinestr)
        self.needindent = 1
        idlecounter = self.idlecounter
        if not idlecounter % 100 and self.idlefunc is not None:
            self.idlefunc()
        self.idlecounter = idlecounter + 1

    def comment(self, data):
        data = escape(data)
        lines = data.split("\n")
        self._writeraw("<!-- " + lines[0])
        for line in lines[1:]:
            self.newline()
            self._writeraw("     " + line)
        self._writeraw(" -->")

    def simpletag(self, _TAG_, *args, **kwargs):
        attrdata = self.stringifyattrs(*args, **kwargs)
        data = "<%s%s/>" % (_TAG_, attrdata)
        self._writeraw(data)

    def begintag(self, _TAG_, *args, **kwargs):
        attrdata = self.stringifyattrs(*args, **kwargs)
        data = "<%s%s>" % (_TAG_, attrdata)
        self._writeraw(data)
        self.stack.append(_TAG_)
        self.indent()

    def endtag(self, _TAG_):
        assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
        del self.stack[-1]
        self.dedent()
        data = "</%s>" % _TAG_
        self._writeraw(data)

    def dumphex(self, data):
        linelength = 16
        hexlinelength = linelength * 2
        chunksize = 8
        for i in range(0, len(data), linelength):
            hexline = hexStr(data[i:i+linelength])
            line = ""
            white = ""
            for j in range(0, hexlinelength, chunksize):
                line = line + white + hexline[j:j+chunksize]
                white = " "
            self._writeraw(line)
            self.newline()

    def indent(self):
        self.indentlevel = self.indentlevel + 1

    def dedent(self):
        assert self.indentlevel > 0
        self.indentlevel = self.indentlevel - 1

    def stringifyattrs(self, *args, **kwargs):
        if kwargs:
            assert not args
            attributes = sorted(kwargs.items())
        elif args:
            assert len(args) == 1
            attributes = args[0]
        else:
            return ""
        data = ""
        for attr, value in attributes:
            if not isinstance(value, (bytes, str)):
                value = str(value)
            data = data + ' %s="%s"' % (attr, escapeattr(value))
        return data
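
# Illustrative usage sketch (added for clarity, not part of the upstream
# module): tags are opened and closed in strict LIFO order, which endtag()
# asserts; each call writes markup, and newline() ends the line:
#
#     from io import BytesIO
#     w = XMLWriter(BytesIO())
#     w.begintag("test", attr="value")
#     w.newline()
#     w.simpletag("child")
#     w.newline()
#     w.endtag("test")
#     w.newline()
#     # w.file.getvalue() now holds the declaration plus the nested tags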

def escape(data):
    data = tostr(data, 'utf_8')
    data = data.replace("&", "&amp;")
    data = data.replace("<", "&lt;")
    data = data.replace(">", "&gt;")
    data = data.replace("\r", "&#13;")
    return data


def escapeattr(data):
    data = escape(data)
    data = data.replace('"', "&quot;")
    return data


def escape8bit(data):
    """Input is a latin-1-decodable byte sequence."""
    def escapechar(c):
        n = ord(c)
        if 32 <= n <= 127 and c not in "<&>":
            return c
        else:
            return "&#" + repr(n) + ";"
    return strjoin(map(escapechar, data.decode('latin-1')))


def hexStr(s):
    h = string.hexdigits
    r = ''
    for c in s:
        i = byteord(c)
        r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
    return r
2853
venv/Lib/site-packages/fontTools/otlLib/builder.py
Normal file
File diff suppressed because it is too large
11
venv/Lib/site-packages/fontTools/otlLib/error.py
Normal file
@@ -0,0 +1,11 @@
class OpenTypeLibError(Exception):
    def __init__(self, message, location):
        Exception.__init__(self, message)
        self.location = location

    def __str__(self):
        message = Exception.__str__(self)
        if self.location:
            return f"{self.location}: {message}"
        else:
            return message
439
venv/Lib/site-packages/fontTools/otlLib/optimize/gpos.py
Normal file
@@ -0,0 +1,439 @@
import logging
from collections import defaultdict, namedtuple
from functools import reduce
from itertools import chain
from math import log2
from typing import DefaultDict, Dict, Iterable, List, Sequence, Tuple

from fontTools.misc.intTools import bit_count, bit_indices
from fontTools.ttLib import TTFont
from fontTools.ttLib.tables import otBase, otTables

# NOTE: activating this optimization via the environment variable is
# experimental and may not be supported once an alternative mechanism
# is in place. See: https://github.com/fonttools/fonttools/issues/2349
GPOS_COMPACT_MODE_ENV_KEY = "FONTTOOLS_GPOS_COMPACT_MODE"
GPOS_COMPACT_MODE_DEFAULT = "0"

log = logging.getLogger("fontTools.otlLib.optimize.gpos")


def compact(font: TTFont, mode: str) -> TTFont:
    # Ideal plan:
    #  1. Find lookups of Lookup Type 2: Pair Adjustment Positioning Subtable
    #     https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable
    #  2. Extract glyph-glyph kerning and class-kerning from all present subtables
    #  3. Regroup into different subtable arrangements
    #  4. Put back into the lookup
    #
    # Actual implementation:
    #  2. Only class kerning is optimized currently
    #  3. If the input kerning is already in several subtables, the subtables
    #     are not grouped together first; instead each subtable is treated
    #     independently, so currently this step is:
    #     Split existing subtables into more smaller subtables
    gpos = font["GPOS"]
    for lookup in gpos.table.LookupList.Lookup:
        if lookup.LookupType == 2:
            compact_lookup(font, mode, lookup)
        elif lookup.LookupType == 9 and lookup.SubTable[0].ExtensionLookupType == 2:
            compact_ext_lookup(font, mode, lookup)
    return font
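
# Illustrative usage sketch (added for clarity, not part of the upstream
# module): the optimization is normally toggled via the environment variable
# named above (e.g. FONTTOOLS_GPOS_COMPACT_MODE=5), but compact() can also be
# called directly ("kerned.ttf" is a hypothetical input font with a GPOS table):
#
#     from fontTools.ttLib import TTFont
#     font = TTFont("kerned.ttf")
#     compact(font, mode="5")  # mode is a single digit from "1" to "9"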

def compact_lookup(font: TTFont, mode: str, lookup: otTables.Lookup) -> None:
    new_subtables = compact_pair_pos(font, mode, lookup.SubTable)
    lookup.SubTable = new_subtables
    lookup.SubTableCount = len(new_subtables)


def compact_ext_lookup(font: TTFont, mode: str, lookup: otTables.Lookup) -> None:
    new_subtables = compact_pair_pos(
        font, mode, [ext_subtable.ExtSubTable for ext_subtable in lookup.SubTable]
    )
    new_ext_subtables = []
    for subtable in new_subtables:
        ext_subtable = otTables.ExtensionPos()
        ext_subtable.Format = 1
        ext_subtable.ExtSubTable = subtable
        new_ext_subtables.append(ext_subtable)
    lookup.SubTable = new_ext_subtables
    lookup.SubTableCount = len(new_ext_subtables)


def compact_pair_pos(
    font: TTFont, mode: str, subtables: Sequence[otTables.PairPos]
) -> Sequence[otTables.PairPos]:
    new_subtables = []
    for subtable in subtables:
        if subtable.Format == 1:
            # Not doing anything to Format 1 (yet?)
            new_subtables.append(subtable)
        elif subtable.Format == 2:
            new_subtables.extend(compact_class_pairs(font, mode, subtable))
    return new_subtables


def compact_class_pairs(
    font: TTFont, mode: str, subtable: otTables.PairPos
) -> List[otTables.PairPos]:
    from fontTools.otlLib.builder import buildPairPosClassesSubtable

    subtables = []
    classes1: DefaultDict[int, List[str]] = defaultdict(list)
    for g in subtable.Coverage.glyphs:
        classes1[subtable.ClassDef1.classDefs.get(g, 0)].append(g)
    classes2: DefaultDict[int, List[str]] = defaultdict(list)
    for g, i in subtable.ClassDef2.classDefs.items():
        classes2[i].append(g)
    all_pairs = {}
    for i, class1 in enumerate(subtable.Class1Record):
        for j, class2 in enumerate(class1.Class2Record):
            if is_really_zero(class2):
                continue
            all_pairs[(tuple(sorted(classes1[i])), tuple(sorted(classes2[j])))] = (
                getattr(class2, "Value1", None),
                getattr(class2, "Value2", None),
            )

    if len(mode) == 1 and mode in "123456789":
        grouped_pairs = cluster_pairs_by_class2_coverage_custom_cost(
            font, all_pairs, int(mode)
        )
        for pairs in grouped_pairs:
            subtables.append(
                buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())
            )
    else:
        raise ValueError(f"Bad {GPOS_COMPACT_MODE_ENV_KEY}={mode}")
    return subtables


def is_really_zero(class2: otTables.Class2Record) -> bool:
    v1 = getattr(class2, "Value1", None)
    v2 = getattr(class2, "Value2", None)
    return (v1 is None or v1.getEffectiveFormat() == 0) and (
        v2 is None or v2.getEffectiveFormat() == 0
    )


Pairs = Dict[
    Tuple[Tuple[str, ...], Tuple[str, ...]],
    Tuple[otBase.ValueRecord, otBase.ValueRecord],
]


# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L935-L958
def _getClassRanges(glyphIDs: Iterable[int]):
    glyphIDs = sorted(glyphIDs)
    last = glyphIDs[0]
    ranges = [[last]]
    for glyphID in glyphIDs[1:]:
        if glyphID != last + 1:
            ranges[-1].append(last)
            ranges.append([glyphID])
        last = glyphID
    ranges[-1].append(last)
    return ranges, glyphIDs[0], glyphIDs[-1]
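
# Illustrative sketch (added for clarity, not part of the upstream module):
# consecutive glyph IDs collapse into one range, and the minimum and maximum
# IDs come back alongside the ranges:
#
#     _getClassRanges([1, 2, 3, 7, 9])
#     # -> ([[1, 3], [7, 7], [9, 9]], 1, 9)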

# Adapted from https://github.com/fonttools/fonttools/blob/f64f0b42f2d1163b2d85194e0979def539f5dca3/Lib/fontTools/ttLib/tables/otTables.py#L960-L989
def _classDef_bytes(
    class_data: List[Tuple[List[Tuple[int, int]], int, int]],
    class_ids: List[int],
    coverage=False,
):
    if not class_ids:
        return 0
    first_ranges, min_glyph_id, max_glyph_id = class_data[class_ids[0]]
    range_count = len(first_ranges)
    for i in class_ids[1:]:
        data = class_data[i]
        range_count += len(data[0])
        min_glyph_id = min(min_glyph_id, data[1])
        max_glyph_id = max(max_glyph_id, data[2])
    glyphCount = max_glyph_id - min_glyph_id + 1
    # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-1
    format1_bytes = 6 + glyphCount * 2
    # https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#class-definition-table-format-2
    format2_bytes = 4 + range_count * 6
    return min(format1_bytes, format2_bytes)


ClusteringContext = namedtuple(
    "ClusteringContext",
    [
        "lines",
        "all_class1",
        "all_class1_data",
        "all_class2_data",
        "valueFormat1_bytes",
        "valueFormat2_bytes",
    ],
)


class Cluster:
    # TODO(Python 3.7): Turn this into a dataclass
    # ctx: ClusteringContext
    # indices: int
    # Caches
    # TODO(Python 3.8): use functools.cached_property instead of the
    # manually cached properties, and remove the cache fields listed below.
    # _indices: Optional[List[int]] = None
    # _column_indices: Optional[List[int]] = None
    # _cost: Optional[int] = None

    __slots__ = "ctx", "indices_bitmask", "_indices", "_column_indices", "_cost"

    def __init__(self, ctx: ClusteringContext, indices_bitmask: int):
        self.ctx = ctx
        self.indices_bitmask = indices_bitmask
        self._indices = None
        self._column_indices = None
        self._cost = None

    @property
    def indices(self):
        if self._indices is None:
            self._indices = bit_indices(self.indices_bitmask)
        return self._indices

    @property
    def column_indices(self):
        if self._column_indices is None:
            # Indices of columns that have a 1 in at least 1 line
            # => binary OR all the lines
            bitmask = reduce(int.__or__, (self.ctx.lines[i] for i in self.indices))
            self._column_indices = bit_indices(bitmask)
        return self._column_indices

    @property
    def width(self):
        # Add 1 because Class2=0 cannot be used but needs to be encoded.
        return len(self.column_indices) + 1

    @property
    def cost(self):
        if self._cost is None:
            self._cost = (
                # 2 bytes to store the offset to this subtable in the Lookup table above
                2
                # Contents of the subtable
                # From: https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#pair-adjustment-positioning-format-2-class-pair-adjustment
                # uint16 posFormat Format identifier: format = 2
                + 2
                # Offset16 coverageOffset Offset to Coverage table, from beginning of PairPos subtable.
                + 2
                + self.coverage_bytes
                # uint16 valueFormat1 ValueRecord definition — for the first glyph of the pair (may be zero).
                + 2
                # uint16 valueFormat2 ValueRecord definition — for the second glyph of the pair (may be zero).
                + 2
                # Offset16 classDef1Offset Offset to ClassDef table, from beginning of PairPos subtable — for the first glyph of the pair.
                + 2
                + self.classDef1_bytes
                # Offset16 classDef2Offset Offset to ClassDef table, from beginning of PairPos subtable — for the second glyph of the pair.
                + 2
                + self.classDef2_bytes
                # uint16 class1Count Number of classes in classDef1 table — includes Class 0.
                + 2
                # uint16 class2Count Number of classes in classDef2 table — includes Class 0.
                + 2
                # Class1Record class1Records[class1Count] Array of Class1 records, ordered by classes in classDef1.
                + (self.ctx.valueFormat1_bytes + self.ctx.valueFormat2_bytes)
                * len(self.indices)
                * self.width
            )
        return self._cost

    @property
    def coverage_bytes(self):
        format1_bytes = (
            # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-1
            # uint16 coverageFormat Format identifier — format = 1
            # uint16 glyphCount Number of glyphs in the glyph array
            4
            # uint16 glyphArray[glyphCount] Array of glyph IDs — in numerical order
            + sum(len(self.ctx.all_class1[i]) for i in self.indices) * 2
        )
        ranges = sorted(
            chain.from_iterable(self.ctx.all_class1_data[i][0] for i in self.indices)
        )
        merged_range_count = 0
        last = None
        for (start, end) in ranges:
            if last is not None and start != last + 1:
                merged_range_count += 1
            last = end
        format2_bytes = (
            # From https://docs.microsoft.com/en-us/typography/opentype/spec/chapter2#coverage-format-2
            # uint16 coverageFormat Format identifier — format = 2
            # uint16 rangeCount Number of RangeRecords
            4
            # RangeRecord rangeRecords[rangeCount] Array of glyph ranges — ordered by startGlyphID.
            # uint16 startGlyphID First glyph ID in the range
            # uint16 endGlyphID Last glyph ID in the range
            # uint16 startCoverageIndex Coverage Index of first glyph ID in range
            + merged_range_count * 6
        )
        return min(format1_bytes, format2_bytes)

    @property
    def classDef1_bytes(self):
        # We can skip encoding one of the Class1 definitions, and use
        # Class1=0 to represent it instead, because Class1 is gated by the
        # Coverage definition. Use Class1=0 for the highest byte savings.
        # Going through all options takes too long, pick the biggest class
        # = what happens in otlLib.builder.ClassDefBuilder.classes()
        biggest_index = max(self.indices, key=lambda i: len(self.ctx.all_class1[i]))
        return _classDef_bytes(
            self.ctx.all_class1_data, [i for i in self.indices if i != biggest_index]
        )

    @property
    def classDef2_bytes(self):
        # All Class2 need to be encoded because we can't use Class2=0
        return _classDef_bytes(self.ctx.all_class2_data, self.column_indices)

def cluster_pairs_by_class2_coverage_custom_cost(
    font: TTFont,
    pairs: Pairs,
    compression: int = 5,
) -> List[Pairs]:
    if not pairs:
        # The subtable was actually empty?
        return [pairs]

    # Sorted for reproducibility/determinism
    all_class1 = sorted(set(pair[0] for pair in pairs))
    all_class2 = sorted(set(pair[1] for pair in pairs))

    # Use Python's big ints for binary vectors representing each line
    lines = [
        sum(
            1 << i if (class1, class2) in pairs else 0
            for i, class2 in enumerate(all_class2)
        )
        for class1 in all_class1
    ]

    # Map glyph names to ids and work with ints throughout for ClassDef formats
    name_to_id = font.getReverseGlyphMap()
    # Each entry in the arrays below is (ranges, min_glyph_id, max_glyph_id)
    all_class1_data = [
        _getClassRanges(name_to_id[name] for name in cls) for cls in all_class1
    ]
    all_class2_data = [
        _getClassRanges(name_to_id[name] for name in cls) for cls in all_class2
    ]

    format1 = 0
    format2 = 0
    for pair, value in pairs.items():
        format1 |= value[0].getEffectiveFormat() if value[0] else 0
        format2 |= value[1].getEffectiveFormat() if value[1] else 0
    valueFormat1_bytes = bit_count(format1) * 2
    valueFormat2_bytes = bit_count(format2) * 2

    ctx = ClusteringContext(
        lines,
        all_class1,
        all_class1_data,
        all_class2_data,
        valueFormat1_bytes,
        valueFormat2_bytes,
    )

    cluster_cache: Dict[int, Cluster] = {}

    def make_cluster(indices: int) -> Cluster:
        cluster = cluster_cache.get(indices, None)
        if cluster is not None:
            return cluster
        cluster = Cluster(ctx, indices)
        cluster_cache[indices] = cluster
        return cluster

    def merge(cluster: Cluster, other: Cluster) -> Cluster:
        return make_cluster(cluster.indices_bitmask | other.indices_bitmask)

    # Agglomerative clustering by hand, checking the cost gain of the new
    # cluster against the previously separate clusters
    # Start with 1 cluster per line
    # cluster = set of lines = new subtable
    clusters = [make_cluster(1 << i) for i in range(len(lines))]

    # Cost of 1 cluster with everything
    # `(1 << len) - 1` gives a bitmask full of 1's of length `len`
    cost_before_splitting = make_cluster((1 << len(lines)) - 1).cost
    log.debug(f"        len(clusters) = {len(clusters)}")

    while len(clusters) > 1:
        lowest_cost_change = None
        best_cluster_index = None
        best_other_index = None
        best_merged = None
        for i, cluster in enumerate(clusters):
            for j, other in enumerate(clusters[i + 1 :]):
                merged = merge(cluster, other)
                cost_change = merged.cost - cluster.cost - other.cost
                if lowest_cost_change is None or cost_change < lowest_cost_change:
                    lowest_cost_change = cost_change
                    best_cluster_index = i
                    best_other_index = i + 1 + j
                    best_merged = merged
        assert lowest_cost_change is not None
        assert best_cluster_index is not None
        assert best_other_index is not None
        assert best_merged is not None

        # If the best merge we found is still taking down the file size, then
        # there's no question: we must do it, because it's beneficial in both
        # ways (lower file size and lower number of subtables). However, if the
        # best merge we found is not reducing file size anymore, then we need to
        # look at the other stop criteria = the compression factor.
        if lowest_cost_change > 0:
            # Stop criteria: check whether we should keep merging.
            # Compute size reduction brought by splitting
            cost_after_splitting = sum(c.cost for c in clusters)
            # size_reduction so that after = before * (1 - size_reduction)
            # E.g. before = 1000, after = 800, 1 - 800/1000 = 0.2
            size_reduction = 1 - cost_after_splitting / cost_before_splitting

            # Force more merging by taking into account the compression number.
            # Target behaviour: compression number = 1 to 9, default 5 like gzip
            # - 1 = accept to add 1 subtable to reduce size by 50%
            # - 5 = accept to add 5 subtables to reduce size by 50%
            # See https://github.com/harfbuzz/packtab/blob/master/Lib/packTab/__init__.py#L690-L691
            # Given the size reduction we have achieved so far, compute how many
            # new subtables are acceptable.
            max_new_subtables = -log2(1 - size_reduction) * compression
            log.debug(
                f"        len(clusters) = {len(clusters):3d} size_reduction={size_reduction:5.2f} max_new_subtables={max_new_subtables}",
            )
            if compression == 9:
                # Override level 9 to mean: create any number of subtables
                max_new_subtables = len(clusters)

            # If we have managed to take the number of new subtables below the
            # threshold, then we can stop.
            if len(clusters) <= max_new_subtables + 1:
                break

        # No reason to stop yet, do the merge and move on to the next.
        del clusters[best_other_index]
        clusters[best_cluster_index] = best_merged

    # All clusters are final; turn bitmasks back into the "Pairs" format
    pairs_by_class1: Dict[Tuple[str, ...], Pairs] = defaultdict(dict)
    for pair, values in pairs.items():
        pairs_by_class1[pair[0]][pair] = values
    pairs_groups: List[Pairs] = []
    for cluster in clusters:
        pairs_group: Pairs = dict()
        for i in cluster.indices:
            class1 = all_class1[i]
            pairs_group.update(pairs_by_class1[class1])
        pairs_groups.append(pairs_group)
    return pairs_groups
1
venv/Lib/site-packages/fontTools/pens/__init__.py
Normal file
@@ -0,0 +1 @@
"""Empty __init__.py file to signal Python this directory is a package."""
57
venv/Lib/site-packages/fontTools/pens/areaPen.py
Normal file
@@ -0,0 +1,57 @@
|
||||
"""Calculate the area of a glyph."""
|
||||
|
||||
from fontTools.pens.basePen import BasePen
|
||||
|
||||
|
||||
__all__ = ["AreaPen"]
|
||||
|
||||
|
||||
class AreaPen(BasePen):
|
||||
|
||||
def __init__(self, glyphset=None):
|
||||
BasePen.__init__(self, glyphset)
|
||||
self.value = 0
|
||||
|
||||
def _moveTo(self, p0):
|
||||
self._p0 = self._startPoint = p0
|
||||
|
||||
def _lineTo(self, p1):
|
||||
x0, y0 = self._p0
|
||||
x1, y1 = p1
|
||||
self.value -= (x1 - x0) * (y1 + y0) * .5
|
||||
self._p0 = p1
|
||||
|
||||
def _qCurveToOne(self, p1, p2):
|
||||
# https://github.com/Pomax/bezierinfo/issues/44
|
||||
p0 = self._p0
|
||||
x0, y0 = p0[0], p0[1]
|
||||
x1, y1 = p1[0] - x0, p1[1] - y0
|
||||
x2, y2 = p2[0] - x0, p2[1] - y0
|
||||
self.value -= (x2 * y1 - x1 * y2) / 3
|
||||
self._lineTo(p2)
|
||||
self._p0 = p2
|
||||
|
||||
def _curveToOne(self, p1, p2, p3):
|
||||
# https://github.com/Pomax/bezierinfo/issues/44
|
||||
p0 = self._p0
|
||||
x0, y0 = p0[0], p0[1]
|
||||
x1, y1 = p1[0] - x0, p1[1] - y0
|
||||
x2, y2 = p2[0] - x0, p2[1] - y0
|
||||
x3, y3 = p3[0] - x0, p3[1] - y0
|
||||
self.value -= (
|
||||
x1 * ( - y2 - y3) +
|
||||
x2 * (y1 - 2*y3) +
|
||||
x3 * (y1 + 2*y2 )
|
||||
) * 0.15
|
||||
self._lineTo(p3)
|
||||
self._p0 = p3
|
||||
|
||||
def _closePath(self):
|
||||
self._lineTo(self._startPoint)
|
||||
del self._p0, self._startPoint
|
||||
|
||||
def _endPath(self):
|
||||
if self._p0 != self._startPoint:
|
||||
# Area is not defined for open contours.
|
||||
raise NotImplementedError
|
||||
del self._p0, self._startPoint
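
# Standalone usage sketch (coordinates made up): 'value' accumulates the
# signed area, so its sign encodes the contour's winding direction.
from fontTools.pens.areaPen import AreaPen

pen = AreaPen()
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.lineTo((100, 100))
pen.lineTo((0, 100))
pen.closePath()
print(pen.value)  # 10000.0 for this counter-clockwise 100x100 square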

408
venv/Lib/site-packages/fontTools/pens/basePen.py
Normal file
@@ -0,0 +1,408 @@
"""fontTools.pens.basePen.py -- Tools and base classes to build pen objects.

The Pen Protocol

A Pen is a kind of object that standardizes the way outlines are "drawn":
it is a middle man between an outline and a drawing. In other words, it is
an abstraction for drawing outlines, making sure that outline objects
don't need to know the details about how and where they're being drawn, and
that drawings don't need to know the details of how outlines are stored.

The most basic pattern is this::

    outline.draw(pen)  # 'outline' draws itself onto 'pen'

Pens can be used to render outlines to the screen, but also to construct
new outlines. E.g. an outline object can be both a drawable object (it has a
draw() method) as well as a pen itself: you *build* an outline using pen
methods.

The AbstractPen class defines the Pen protocol. It implements almost
nothing (only no-op closePath() and endPath() methods), but is useful
for documentation purposes. Subclassing it basically tells the reader:
"this class implements the Pen protocol." An example of an AbstractPen
subclass is :py:class:`fontTools.pens.transformPen.TransformPen`.

The BasePen class is a base implementation useful for pens that actually
draw (for example a pen that renders outlines using a native graphics
engine). BasePen contains a lot of base functionality, making it very easy
to build a pen that fully conforms to the pen protocol. Note that if you
subclass BasePen, you *don't* override moveTo(), lineTo(), etc., but
_moveTo(), _lineTo(), etc. See the BasePen doc string for details. Examples
of BasePen subclasses are fontTools.pens.boundsPen.BoundsPen and
fontTools.pens.cocoaPen.CocoaPen.

Coordinates are usually expressed as (x, y) tuples, but generally any
sequence of length 2 will do.
"""
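
# Standalone sketch of the protocol described above (class name
# hypothetical): a BasePen subclass overrides only the _-prefixed hooks.
from fontTools.pens.basePen import BasePen

class SegmentCounterPen(BasePen):
    """Count the line/curve segments drawn onto the pen."""
    def __init__(self, glyphSet=None):
        BasePen.__init__(self, glyphSet)
        self.count = 0

    def _moveTo(self, pt):
        pass

    def _lineTo(self, pt):
        self.count += 1

    def _curveToOne(self, pt1, pt2, pt3):
        self.count += 1

pen = SegmentCounterPen()
pen.moveTo((0, 0))
pen.lineTo((10, 0))
pen.curveTo((15, 5), (15, 10), (10, 10))
pen.closePath()
print(pen.count)  # 2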

from typing import Tuple

from fontTools.misc.loggingTools import LogMixin

__all__ = ["AbstractPen", "NullPen", "BasePen", "PenError",
           "decomposeSuperBezierSegment", "decomposeQuadraticSegment"]


class PenError(Exception):
    """Represents an error during penning."""


class AbstractPen:

    def moveTo(self, pt: Tuple[float, float]) -> None:
        """Begin a new sub path, set the current point to 'pt'. You must
        end each sub path with a call to pen.closePath() or pen.endPath().
        """
        raise NotImplementedError

    def lineTo(self, pt: Tuple[float, float]) -> None:
        """Draw a straight line from the current point to 'pt'."""
        raise NotImplementedError

    def curveTo(self, *points: Tuple[float, float]) -> None:
        """Draw a cubic bezier with an arbitrary number of control points.

        The last point specified is on-curve, all others are off-curve
        (control) points. If the number of control points is > 2, the
        segment is split into multiple bezier segments. This works
        like this:

        Let n be the number of control points (which is the number of
        arguments to this call minus 1). If n==2, a plain vanilla cubic
        bezier is drawn. If n==1, we fall back to a quadratic segment and
        if n==0 we draw a straight line. It gets interesting when n>2:
        n-1 PostScript-style cubic segments will be drawn as if it were
        one curve. See decomposeSuperBezierSegment().

        The conversion algorithm used for n>2 is inspired by NURB
        splines, and is conceptually equivalent to the TrueType "implied
        points" principle. See also decomposeQuadraticSegment().
        """
        raise NotImplementedError

    def qCurveTo(self, *points: Tuple[float, float]) -> None:
        """Draw a whole string of quadratic curve segments.

        The last point specified is on-curve, all others are off-curve
        points.

        This method implements TrueType-style curves, breaking up curves
        using 'implied points': between each two consecutive off-curve points,
        there is one implied point exactly in the middle between them. See
        also decomposeQuadraticSegment().

        The last argument (normally the on-curve point) may be None.
        This is to support contours that have NO on-curve points (a rarely
        seen feature of TrueType outlines).
        """
        raise NotImplementedError

    def closePath(self) -> None:
        """Close the current sub path. You must call either pen.closePath()
        or pen.endPath() after each sub path.
        """
        pass

    def endPath(self) -> None:
        """End the current sub path, but don't close it. You must call
        either pen.closePath() or pen.endPath() after each sub path.
        """
        pass

    def addComponent(
        self,
        glyphName: str,
        transformation: Tuple[float, float, float, float, float, float]
    ) -> None:
        """Add a sub glyph. The 'transformation' argument must be a 6-tuple
        containing an affine transformation, or a Transform object from the
        fontTools.misc.transform module. More precisely: it should be a
        sequence containing 6 numbers.
        """
        raise NotImplementedError


class NullPen(AbstractPen):

    """A pen that does nothing."""

    def moveTo(self, pt):
        pass

    def lineTo(self, pt):
        pass

    def curveTo(self, *points):
        pass

    def qCurveTo(self, *points):
        pass

    def closePath(self):
        pass

    def endPath(self):
        pass

    def addComponent(self, glyphName, transformation):
        pass


class LoggingPen(LogMixin, AbstractPen):
    """A pen with a ``log`` property (see fontTools.misc.loggingTools.LogMixin)."""
    pass


class MissingComponentError(KeyError):
    """Indicates a component pointing to a non-existent glyph in the glyphset."""


class DecomposingPen(LoggingPen):

    """Implements an 'addComponent' method that decomposes components
    (i.e. draws them onto self as simple contours).
    It can also be used as a mixin class (e.g. see ContourRecordingPen).

    You must override moveTo, lineTo, curveTo and qCurveTo. You may
    additionally override closePath, endPath and addComponent.

    By default a warning message is logged when a base glyph is missing;
    set the class variable ``skipMissingComponents`` to False if you want
    to raise a :class:`MissingComponentError` exception.
    """

    skipMissingComponents = True

    def __init__(self, glyphSet):
        """Takes a single 'glyphSet' argument (dict), in which the glyphs
        that are referenced as components are looked up by their name.
        """
        super(DecomposingPen, self).__init__()
        self.glyphSet = glyphSet

    def addComponent(self, glyphName, transformation):
        """Transform the points of the base glyph and draw it onto self."""
        from fontTools.pens.transformPen import TransformPen
        try:
            glyph = self.glyphSet[glyphName]
        except KeyError:
            if not self.skipMissingComponents:
                raise MissingComponentError(glyphName)
            self.log.warning(
                "glyph '%s' is missing from glyphSet; skipped" % glyphName)
        else:
            tPen = TransformPen(self, transformation)
            glyph.draw(tPen)


class BasePen(DecomposingPen):

    """Base class for drawing pens. You must override _moveTo, _lineTo and
    _curveToOne. You may additionally override _closePath, _endPath,
    addComponent and/or _qCurveToOne. You should not override any other
    methods.
    """

    def __init__(self, glyphSet=None):
        super(BasePen, self).__init__(glyphSet)
        self.__currentPoint = None

    # must override

    def _moveTo(self, pt):
        raise NotImplementedError

    def _lineTo(self, pt):
        raise NotImplementedError

    def _curveToOne(self, pt1, pt2, pt3):
        raise NotImplementedError

    # may override

    def _closePath(self):
        pass

    def _endPath(self):
        pass

    def _qCurveToOne(self, pt1, pt2):
        """This method implements the basic quadratic curve type. The
        default implementation delegates the work to the cubic curve
        function. Optionally override with a native implementation.
        """
        pt0x, pt0y = self.__currentPoint
        pt1x, pt1y = pt1
        pt2x, pt2y = pt2
        mid1x = pt0x + 0.66666666666666667 * (pt1x - pt0x)
        mid1y = pt0y + 0.66666666666666667 * (pt1y - pt0y)
        mid2x = pt2x + 0.66666666666666667 * (pt1x - pt2x)
        mid2y = pt2y + 0.66666666666666667 * (pt1y - pt2y)
        self._curveToOne((mid1x, mid1y), (mid2x, mid2y), pt2)

    # don't override

    def _getCurrentPoint(self):
        """Return the current point. This is not part of the public
        interface, yet is useful for subclasses.
        """
        return self.__currentPoint

    def closePath(self):
        self._closePath()
        self.__currentPoint = None

    def endPath(self):
        self._endPath()
        self.__currentPoint = None

    def moveTo(self, pt):
        self._moveTo(pt)
        self.__currentPoint = pt

    def lineTo(self, pt):
        self._lineTo(pt)
        self.__currentPoint = pt

    def curveTo(self, *points):
        n = len(points) - 1  # 'n' is the number of control points
        assert n >= 0
        if n == 2:
            # The common case, we have exactly two BCPs, so this is a standard
            # cubic bezier. Even though decomposeSuperBezierSegment() handles
            # this case just fine, we special-case it anyway since it's so
            # common.
            self._curveToOne(*points)
            self.__currentPoint = points[-1]
        elif n > 2:
            # n is the number of control points; split curve into n-1 cubic
            # bezier segments. The algorithm used here is inspired by NURB
            # splines and the TrueType "implied point" principle, and ensures
            # the smoothest possible connection between two curve segments,
            # with no disruption in the curvature. It is practical since it
            # allows one to construct multiple bezier segments with a much
            # smaller amount of points.
            _curveToOne = self._curveToOne
            for pt1, pt2, pt3 in decomposeSuperBezierSegment(points):
                _curveToOne(pt1, pt2, pt3)
                self.__currentPoint = pt3
        elif n == 1:
            self.qCurveTo(*points)
        elif n == 0:
            self.lineTo(points[0])
        else:
            raise AssertionError("can't get there from here")

    def qCurveTo(self, *points):
        n = len(points) - 1  # 'n' is the number of control points
        assert n >= 0
        if points[-1] is None:
            # Special case for TrueType quadratics: it is possible to
            # define a contour with NO on-curve points. BasePen supports
            # this by allowing the final argument (the expected on-curve
            # point) to be None. We simulate the feature by making the implied
            # on-curve point between the last and the first off-curve points
            # explicit.
            x, y = points[-2]  # last off-curve point
            nx, ny = points[0]  # first off-curve point
            impliedStartPoint = (0.5 * (x + nx), 0.5 * (y + ny))
            self.__currentPoint = impliedStartPoint
            self._moveTo(impliedStartPoint)
            points = points[:-1] + (impliedStartPoint,)
        if n > 0:
            # Split the string of points into discrete quadratic curve
            # segments. Between any two consecutive off-curve points
            # there's an implied on-curve point exactly in the middle.
            # This is where the segment splits.
            _qCurveToOne = self._qCurveToOne
            for pt1, pt2 in decomposeQuadraticSegment(points):
                _qCurveToOne(pt1, pt2)
                self.__currentPoint = pt2
        else:
            self.lineTo(points[0])


def decomposeSuperBezierSegment(points):
    """Split the SuperBezier described by 'points' into a list of regular
    bezier segments. The 'points' argument must be a sequence with length
    3 or greater, containing (x, y) coordinates. The last point is the
    destination on-curve point, the rest of the points are off-curve points.
    The start point should not be supplied.

    This function returns a list of (pt1, pt2, pt3) tuples, which each
    specify a regular curveto-style bezier segment.
    """
    n = len(points) - 1
    assert n > 1
    bezierSegments = []
    pt1, pt2, pt3 = points[0], None, None
    for i in range(2, n+1):
        # calculate points in between control points.
        nDivisions = min(i, 3, n-i+2)
        for j in range(1, nDivisions):
            factor = j / nDivisions
            temp1 = points[i-1]
            temp2 = points[i-2]
            temp = (temp2[0] + factor * (temp1[0] - temp2[0]),
                    temp2[1] + factor * (temp1[1] - temp2[1]))
            if pt2 is None:
                pt2 = temp
            else:
                pt3 = (0.5 * (pt2[0] + temp[0]),
                       0.5 * (pt2[1] + temp[1]))
                bezierSegments.append((pt1, pt2, pt3))
                pt1, pt2, pt3 = temp, None, None
    bezierSegments.append((pt1, points[-2], points[-1]))
    return bezierSegments
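
# Standalone sketch (coordinates made up): three off-curve points plus the
# final on-curve point decompose into n-1 = 2 plain cubic segments.
from fontTools.pens.basePen import decomposeSuperBezierSegment

segments = decomposeSuperBezierSegment(
    [(0, 100), (50, 150), (100, 100), (150, 0)])
for pt1, pt2, pt3 in segments:
    print(pt1, pt2, pt3)
# (0, 100) (25.0, 125.0) (50.0, 125.0)
# (75.0, 125.0) (100, 100) (150, 0)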

def decomposeQuadraticSegment(points):
    """Split the quadratic curve segment described by 'points' into a list
    of "atomic" quadratic segments. The 'points' argument must be a sequence
    with length 2 or greater, containing (x, y) coordinates. The last point
    is the destination on-curve point, the rest of the points are off-curve
    points. The start point should not be supplied.

    This function returns a list of (pt1, pt2) tuples, which each specify a
    plain quadratic bezier segment.
    """
    n = len(points) - 1
    assert n > 0
    quadSegments = []
    for i in range(n - 1):
        x, y = points[i]
        nx, ny = points[i+1]
        impliedPt = (0.5 * (x + nx), 0.5 * (y + ny))
        quadSegments.append((points[i], impliedPt))
    quadSegments.append((points[-2], points[-1]))
    return quadSegments
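
# Standalone sketch (coordinates made up): the implied on-curve point sits
# exactly midway between two consecutive off-curve points.
from fontTools.pens.basePen import decomposeQuadraticSegment

print(decomposeQuadraticSegment([(0, 100), (100, 100), (100, 0)]))
# [((0, 100), (50.0, 100.0)), ((100, 100), (100, 0))]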

class _TestPen(BasePen):
    """Test class that prints PostScript to stdout."""
    def _moveTo(self, pt):
        print("%s %s moveto" % (pt[0], pt[1]))
    def _lineTo(self, pt):
        print("%s %s lineto" % (pt[0], pt[1]))
    def _curveToOne(self, bcp1, bcp2, pt):
        print("%s %s %s %s %s %s curveto" % (bcp1[0], bcp1[1],
                                             bcp2[0], bcp2[1], pt[0], pt[1]))
    def _closePath(self):
        print("closepath")


if __name__ == "__main__":
    pen = _TestPen(None)
    pen.moveTo((0, 0))
    pen.lineTo((0, 100))
    pen.curveTo((50, 75), (60, 50), (50, 25), (0, 0))
    pen.closePath()

    pen = _TestPen(None)
    # testing the "no on-curve point" scenario
    pen.qCurveTo((0, 0), (0, 100), (100, 100), (100, 0), None)
    pen.closePath()

98
venv/Lib/site-packages/fontTools/pens/boundsPen.py
Normal file
@@ -0,0 +1,98 @@
from fontTools.misc.arrayTools import updateBounds, pointInRect, unionRect
from fontTools.misc.bezierTools import calcCubicBounds, calcQuadraticBounds
from fontTools.pens.basePen import BasePen


__all__ = ["BoundsPen", "ControlBoundsPen"]


class ControlBoundsPen(BasePen):

    """Pen to calculate the "control bounds" of a shape. This is the
    bounding box of all control points, so may be larger than the
    actual bounding box if there are curves that don't have points
    on their extremes.

    When the shape has been drawn, the bounds are available as the
    ``bounds`` attribute of the pen object. It's a 4-tuple::

        (xMin, yMin, xMax, yMax)

    If ``ignoreSinglePoints`` is True, single points are ignored.
    """

    def __init__(self, glyphSet, ignoreSinglePoints=False):
        BasePen.__init__(self, glyphSet)
        self.ignoreSinglePoints = ignoreSinglePoints
        self.init()

    def init(self):
        self.bounds = None
        self._start = None

    def _moveTo(self, pt):
        self._start = pt
        if not self.ignoreSinglePoints:
            self._addMoveTo()

    def _addMoveTo(self):
        if self._start is None:
            return
        bounds = self.bounds
        if bounds:
            self.bounds = updateBounds(bounds, self._start)
        else:
            x, y = self._start
            self.bounds = (x, y, x, y)
        self._start = None

    def _lineTo(self, pt):
        self._addMoveTo()
        self.bounds = updateBounds(self.bounds, pt)

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, bcp1)
        bounds = updateBounds(bounds, bcp2)
        bounds = updateBounds(bounds, pt)
        self.bounds = bounds

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, bcp)
        bounds = updateBounds(bounds, pt)
        self.bounds = bounds


class BoundsPen(ControlBoundsPen):

    """Pen to calculate the bounds of a shape. It calculates the
    correct bounds even when the shape contains curves that don't
    have points on their extremes. This is somewhat slower to compute
    than the "control bounds".

    When the shape has been drawn, the bounds are available as the
    ``bounds`` attribute of the pen object. It's a 4-tuple::

        (xMin, yMin, xMax, yMax)
    """

    def _curveToOne(self, bcp1, bcp2, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, pt)
        if not pointInRect(bcp1, bounds) or not pointInRect(bcp2, bounds):
            bounds = unionRect(bounds, calcCubicBounds(
                self._getCurrentPoint(), bcp1, bcp2, pt))
        self.bounds = bounds

    def _qCurveToOne(self, bcp, pt):
        self._addMoveTo()
        bounds = self.bounds
        bounds = updateBounds(bounds, pt)
        if not pointInRect(bcp, bounds):
            bounds = unionRect(bounds, calcQuadraticBounds(
                self._getCurrentPoint(), bcp, pt))
        self.bounds = bounds
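
# Standalone sketch contrasting the two pens (coordinates made up): the
# control box reaches the off-curve points at y=150, while the true box
# stops at the curve's actual extreme, about y=112.5.
from fontTools.pens.boundsPen import BoundsPen, ControlBoundsPen

for penClass in (ControlBoundsPen, BoundsPen):
    pen = penClass(glyphSet=None)
    pen.moveTo((0, 0))
    pen.curveTo((50, 150), (100, 150), (150, 0))
    pen.closePath()
    print(penClass.__name__, pen.bounds)
# ControlBoundsPen (0, 0, 150, 150)
# BoundsPen (0, 0, 150, 112.5)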

26
venv/Lib/site-packages/fontTools/pens/cocoaPen.py
Normal file
@@ -0,0 +1,26 @@
from fontTools.pens.basePen import BasePen


__all__ = ["CocoaPen"]


class CocoaPen(BasePen):

    def __init__(self, glyphSet, path=None):
        BasePen.__init__(self, glyphSet)
        if path is None:
            from AppKit import NSBezierPath
            path = NSBezierPath.bezierPath()
        self.path = path

    def _moveTo(self, p):
        self.path.moveToPoint_(p)

    def _lineTo(self, p):
        self.path.lineToPoint_(p)

    def _curveToOne(self, p1, p2, p3):
        self.path.curveToPoint_controlPoint1_controlPoint2_(p3, p1, p2)

    def _closePath(self):
        self.path.closePath()

260
venv/Lib/site-packages/fontTools/pens/cu2quPen.py
Normal file
@@ -0,0 +1,260 @@
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from fontTools.cu2qu import curve_to_quadratic
from fontTools.pens.basePen import AbstractPen, decomposeSuperBezierSegment
from fontTools.pens.reverseContourPen import ReverseContourPen
from fontTools.pens.pointPen import BasePointToSegmentPen
from fontTools.pens.pointPen import ReverseContourPointPen


class Cu2QuPen(AbstractPen):
    """A filter pen to convert cubic bezier curves to quadratic b-splines
    using the FontTools SegmentPen protocol.

    Args:

        other_pen: another SegmentPen used to draw the transformed outline.
        max_err: maximum approximation error in font units. For optimal results,
            if you know the UPEM of the font, we recommend setting this to a
            value equal, or close to UPEM / 1000.
        reverse_direction: flip the contours' direction but keep starting point.
        stats: a dictionary counting the point numbers of quadratic segments.
        ignore_single_points: don't emit contours containing only a single point.

    NOTE: The "ignore_single_points" argument is deprecated since v1.3.0,
    which dropped Robofab support. It's no longer needed to special-case
    UFO2-style anchors (aka "named points") when using ufoLib >= 2.0,
    as these are no longer drawn onto pens as single-point contours,
    but are handled separately as anchors.
    """

    def __init__(self, other_pen, max_err, reverse_direction=False,
                 stats=None, ignore_single_points=False):
        if reverse_direction:
            self.pen = ReverseContourPen(other_pen)
        else:
            self.pen = other_pen
        self.max_err = max_err
        self.stats = stats
        if ignore_single_points:
            import warnings
            warnings.warn("ignore_single_points is deprecated and "
                          "will be removed in future versions",
                          UserWarning, stacklevel=2)
        self.ignore_single_points = ignore_single_points
        self.start_pt = None
        self.current_pt = None

    def _check_contour_is_open(self):
        if self.current_pt is None:
            raise AssertionError("moveTo is required")

    def _check_contour_is_closed(self):
        if self.current_pt is not None:
            raise AssertionError("closePath or endPath is required")

    def _add_moveTo(self):
        if self.start_pt is not None:
            self.pen.moveTo(self.start_pt)
            self.start_pt = None

    def moveTo(self, pt):
        self._check_contour_is_closed()
        self.start_pt = self.current_pt = pt
        if not self.ignore_single_points:
            self._add_moveTo()

    def lineTo(self, pt):
        self._check_contour_is_open()
        self._add_moveTo()
        self.pen.lineTo(pt)
        self.current_pt = pt

    def qCurveTo(self, *points):
        self._check_contour_is_open()
        n = len(points)
        if n == 1:
            self.lineTo(points[0])
        elif n > 1:
            self._add_moveTo()
            self.pen.qCurveTo(*points)
            self.current_pt = points[-1]
        else:
            raise AssertionError("illegal qcurve segment point count: %d" % n)

    def _curve_to_quadratic(self, pt1, pt2, pt3):
        curve = (self.current_pt, pt1, pt2, pt3)
        quadratic = curve_to_quadratic(curve, self.max_err)
        if self.stats is not None:
            n = str(len(quadratic) - 2)
            self.stats[n] = self.stats.get(n, 0) + 1
        self.qCurveTo(*quadratic[1:])

    def curveTo(self, *points):
        self._check_contour_is_open()
        n = len(points)
        if n == 3:
            # this is the most common case, so we special-case it
            self._curve_to_quadratic(*points)
        elif n > 3:
            for segment in decomposeSuperBezierSegment(points):
                self._curve_to_quadratic(*segment)
        elif n == 2:
            self.qCurveTo(*points)
        elif n == 1:
            self.lineTo(points[0])
        else:
            raise AssertionError("illegal curve segment point count: %d" % n)

    def closePath(self):
        self._check_contour_is_open()
        if self.start_pt is None:
            # if 'start_pt' is _not_ None, we are ignoring single-point paths
            self.pen.closePath()
        self.current_pt = self.start_pt = None

    def endPath(self):
        self._check_contour_is_open()
        if self.start_pt is None:
            self.pen.endPath()
        self.current_pt = self.start_pt = None

    def addComponent(self, glyphName, transformation):
        self._check_contour_is_closed()
        self.pen.addComponent(glyphName, transformation)
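
# Standalone usage sketch (coordinates made up; max_err of 1.0 assumes a
# ~1000 UPEM font): cubics go in, quadratics come out on the wrapped pen.
from fontTools.pens.cu2quPen import Cu2QuPen
from fontTools.pens.recordingPen import RecordingPen

rec = RecordingPen()
pen = Cu2QuPen(rec, max_err=1.0)
pen.moveTo((0, 0))
pen.curveTo((0, 100), (100, 100), (100, 0))
pen.closePath()
print(rec.value)  # the curveTo arrives at 'rec' as a single qCurveTo call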

class Cu2QuPointPen(BasePointToSegmentPen):
    """A filter pen to convert cubic bezier curves to quadratic b-splines
    using the RoboFab PointPen protocol.

    Args:
        other_point_pen: another PointPen used to draw the transformed outline.
        max_err: maximum approximation error in font units. For optimal results,
            if you know the UPEM of the font, we recommend setting this to a
            value equal, or close to UPEM / 1000.
        reverse_direction: reverse the winding direction of all contours.
        stats: a dictionary counting the point numbers of quadratic segments.
    """

    def __init__(self, other_point_pen, max_err, reverse_direction=False,
                 stats=None):
        BasePointToSegmentPen.__init__(self)
        if reverse_direction:
            self.pen = ReverseContourPointPen(other_point_pen)
        else:
            self.pen = other_point_pen
        self.max_err = max_err
        self.stats = stats

    def _flushContour(self, segments):
        assert len(segments) >= 1
        closed = segments[0][0] != "move"
        new_segments = []
        prev_points = segments[-1][1]
        prev_on_curve = prev_points[-1][0]
        for segment_type, points in segments:
            if segment_type == 'curve':
                for sub_points in self._split_super_bezier_segments(points):
                    on_curve, smooth, name, kwargs = sub_points[-1]
                    bcp1, bcp2 = sub_points[0][0], sub_points[1][0]
                    cubic = [prev_on_curve, bcp1, bcp2, on_curve]
                    quad = curve_to_quadratic(cubic, self.max_err)
                    if self.stats is not None:
                        n = str(len(quad) - 2)
                        self.stats[n] = self.stats.get(n, 0) + 1
                    new_points = [(pt, False, None, {}) for pt in quad[1:-1]]
                    new_points.append((on_curve, smooth, name, kwargs))
                    new_segments.append(["qcurve", new_points])
                    prev_on_curve = sub_points[-1][0]
            else:
                new_segments.append([segment_type, points])
                prev_on_curve = points[-1][0]
        if closed:
            # the BasePointToSegmentPen.endPath method that calls _flushContour
            # rotates the point list of closed contours so that they end with
            # the first on-curve point. We restore the original starting point.
            new_segments = new_segments[-1:] + new_segments[:-1]
        self._drawPoints(new_segments)

    def _split_super_bezier_segments(self, points):
        sub_segments = []
        # n is the number of control points
        n = len(points) - 1
        if n == 2:
            # a simple bezier curve segment
            sub_segments.append(points)
        elif n > 2:
            # a "super" bezier; decompose it
            on_curve, smooth, name, kwargs = points[-1]
            num_sub_segments = n - 1
            for i, sub_points in enumerate(decomposeSuperBezierSegment([
                    pt for pt, _, _, _ in points])):
                new_segment = []
                for point in sub_points[:-1]:
                    new_segment.append((point, False, None, {}))
                if i == (num_sub_segments - 1):
                    # the last on-curve keeps its original attributes
                    new_segment.append((on_curve, smooth, name, kwargs))
                else:
                    # on-curves of sub-segments are always "smooth"
                    new_segment.append((sub_points[-1], True, None, {}))
                sub_segments.append(new_segment)
        else:
            raise AssertionError(
                "expected 2 control points, found: %d" % n)
        return sub_segments

    def _drawPoints(self, segments):
        pen = self.pen
        pen.beginPath()
        last_offcurves = []
        for i, (segment_type, points) in enumerate(segments):
            if segment_type in ("move", "line"):
                assert len(points) == 1, (
                    "illegal line segment point count: %d" % len(points))
                pt, smooth, name, kwargs = points[0]
                pen.addPoint(pt, segment_type, smooth, name, **kwargs)
            elif segment_type == "qcurve":
                assert len(points) >= 2, (
                    "illegal qcurve segment point count: %d" % len(points))
                offcurves = points[:-1]
                if offcurves:
                    if i == 0:
                        # any off-curve points preceding the first on-curve
                        # will be appended at the end of the contour
                        last_offcurves = offcurves
                    else:
                        for (pt, smooth, name, kwargs) in offcurves:
                            pen.addPoint(pt, None, smooth, name, **kwargs)
                pt, smooth, name, kwargs = points[-1]
                if pt is None:
                    # special quadratic contour with no on-curve points:
                    # we need to skip the "None" point. See also the Pen
                    # protocol's qCurveTo() method and fontTools.pens.basePen
                    pass
                else:
                    pen.addPoint(pt, segment_type, smooth, name, **kwargs)
            else:
                # 'curve' segments must have been converted to 'qcurve' by now
                raise AssertionError(
                    "unexpected segment type: %r" % segment_type)
        for (pt, smooth, name, kwargs) in last_offcurves:
            pen.addPoint(pt, None, smooth, name, **kwargs)
        pen.endPath()

    def addComponent(self, baseGlyphName, transformation):
        assert self.currentPath is None
        self.pen.addComponent(baseGlyphName, transformation)

158
venv/Lib/site-packages/fontTools/pens/filterPen.py
Normal file
@@ -0,0 +1,158 @@
from fontTools.pens.basePen import AbstractPen
from fontTools.pens.pointPen import AbstractPointPen
from fontTools.pens.recordingPen import RecordingPen


class _PassThruComponentsMixin(object):

    def addComponent(self, glyphName, transformation, **kwargs):
        self._outPen.addComponent(glyphName, transformation, **kwargs)


class FilterPen(_PassThruComponentsMixin, AbstractPen):

    """Base class for pens that apply some transformation to the coordinates
    they receive and pass them to another pen.

    You can override any of its methods. The default implementation does
    nothing, but passes the commands unmodified to the other pen.

    >>> from fontTools.pens.recordingPen import RecordingPen
    >>> rec = RecordingPen()
    >>> pen = FilterPen(rec)
    >>> v = iter(rec.value)

    >>> pen.moveTo((0, 0))
    >>> next(v)
    ('moveTo', ((0, 0),))

    >>> pen.lineTo((1, 1))
    >>> next(v)
    ('lineTo', ((1, 1),))

    >>> pen.curveTo((2, 2), (3, 3), (4, 4))
    >>> next(v)
    ('curveTo', ((2, 2), (3, 3), (4, 4)))

    >>> pen.qCurveTo((5, 5), (6, 6), (7, 7), (8, 8))
    >>> next(v)
    ('qCurveTo', ((5, 5), (6, 6), (7, 7), (8, 8)))

    >>> pen.closePath()
    >>> next(v)
    ('closePath', ())

    >>> pen.moveTo((9, 9))
    >>> next(v)
    ('moveTo', ((9, 9),))

    >>> pen.endPath()
    >>> next(v)
    ('endPath', ())

    >>> pen.addComponent('foo', (1, 0, 0, 1, 0, 0))
    >>> next(v)
    ('addComponent', ('foo', (1, 0, 0, 1, 0, 0)))
    """

    def __init__(self, outPen):
        self._outPen = outPen

    def moveTo(self, pt):
        self._outPen.moveTo(pt)

    def lineTo(self, pt):
        self._outPen.lineTo(pt)

    def curveTo(self, *points):
        self._outPen.curveTo(*points)

    def qCurveTo(self, *points):
        self._outPen.qCurveTo(*points)

    def closePath(self):
        self._outPen.closePath()

    def endPath(self):
        self._outPen.endPath()


class ContourFilterPen(_PassThruComponentsMixin, RecordingPen):
    """A "buffered" filter pen that accumulates contour data, passes
    it through a ``filterContour`` method when the contour is closed or ended,
    and finally draws the result with the output pen.

    Components are passed through unchanged.
    """

    def __init__(self, outPen):
        super(ContourFilterPen, self).__init__()
        self._outPen = outPen

    def closePath(self):
        super(ContourFilterPen, self).closePath()
        self._flushContour()

    def endPath(self):
        super(ContourFilterPen, self).endPath()
        self._flushContour()

    def _flushContour(self):
        result = self.filterContour(self.value)
        if result is not None:
            self.value = result
        self.replay(self._outPen)
        self.value = []

    def filterContour(self, contour):
        """Subclasses must override this to perform the filtering.

        The contour is a list of pen (operator, operands) tuples.
        Operators are strings corresponding to the AbstractPen methods:
        "moveTo", "lineTo", "curveTo", "qCurveTo", "closePath" and
        "endPath". The operands are the positional arguments that are
        passed to each method.

        If the method doesn't return a value (i.e. returns None), it's
        assumed that the argument was modified in-place.
        Otherwise, the return value is drawn with the output pen.
        """
        return  # or return contour
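
# Standalone sketch of a filterContour override (class name hypothetical):
# translate every point of each contour before it reaches the output pen.
# (Ignores the all-off-curve qCurveTo special case where a point is None.)
from fontTools.pens.filterPen import ContourFilterPen

class ShiftContourPen(ContourFilterPen):
    def __init__(self, outPen, dx=0, dy=0):
        super().__init__(outPen)
        self.dx, self.dy = dx, dy

    def filterContour(self, contour):
        # Return a new list of (operator, operands) tuples to be drawn.
        return [
            (op, tuple((x + self.dx, y + self.dy) for x, y in args))
            for op, args in contour
        ]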

class FilterPointPen(_PassThruComponentsMixin, AbstractPointPen):
    """Baseclass for point pens that apply some transformation to the
    coordinates they receive and pass them to another point pen.

    You can override any of its methods. The default implementation does
    nothing, but passes the commands unmodified to the other pen.

    >>> from fontTools.pens.recordingPen import RecordingPointPen
    >>> rec = RecordingPointPen()
    >>> pen = FilterPointPen(rec)
    >>> v = iter(rec.value)
    >>> pen.beginPath(identifier="abc")
    >>> next(v)
    ('beginPath', (), {'identifier': 'abc'})
    >>> pen.addPoint((1, 2), "line", False)
    >>> next(v)
    ('addPoint', ((1, 2), 'line', False, None), {})
    >>> pen.addComponent("a", (2, 0, 0, 2, 10, -10), identifier="0001")
    >>> next(v)
    ('addComponent', ('a', (2, 0, 0, 2, 10, -10)), {'identifier': '0001'})
    >>> pen.endPath()
    >>> next(v)
    ('endPath', (), {})
    """

    def __init__(self, outPointPen):
        self._outPen = outPointPen

    def beginPath(self, **kwargs):
        self._outPen.beginPath(**kwargs)

    def endPath(self):
        self._outPen.endPath()

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)

493
venv/Lib/site-packages/fontTools/pens/pointPen.py
Normal file
@@ -0,0 +1,493 @@
"""
=========
PointPens
=========

Where **SegmentPens** have an intuitive approach to drawing
(if you're familiar with postscript anyway), the **PointPen**
is geared towards accessing all the data in the contours of
the glyph. A PointPen has a very simple interface: it just
steps through all the points in a call from glyph.drawPoints().
This allows the caller to provide more data for each point.
For instance, whether or not a point is smooth, and its name.
"""
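
# Standalone sketch of the interface described above (class name
# hypothetical): a point pen that prints whatever glyph.drawPoints() sends.
from fontTools.pens.pointPen import AbstractPointPen

class PrintPointPen(AbstractPointPen):
    def beginPath(self, identifier=None, **kwargs):
        print("beginPath")

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, **kwargs):
        print(pt, segmentType, "smooth" if smooth else "sharp", name)

    def endPath(self):
        print("endPath")

    def addComponent(self, baseGlyphName, transformation, **kwargs):
        print("component", baseGlyphName, transformation)

# usage: glyph.drawPoints(PrintPointPen())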

import math
from typing import Any, Optional, Tuple

from fontTools.pens.basePen import AbstractPen, PenError

__all__ = [
    "AbstractPointPen",
    "BasePointToSegmentPen",
    "PointToSegmentPen",
    "SegmentToPointPen",
    "GuessSmoothPointPen",
    "ReverseContourPointPen",
]


class AbstractPointPen:
    """Baseclass for all PointPens."""

    def beginPath(self, identifier: Optional[str] = None, **kwargs: Any) -> None:
        """Start a new sub path."""
        raise NotImplementedError

    def endPath(self) -> None:
        """End the current sub path."""
        raise NotImplementedError

    def addPoint(
        self,
        pt: Tuple[float, float],
        segmentType: Optional[str] = None,
        smooth: bool = False,
        name: Optional[str] = None,
        identifier: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Add a point to the current sub path."""
        raise NotImplementedError

    def addComponent(
        self,
        baseGlyphName: str,
        transformation: Tuple[float, float, float, float, float, float],
        identifier: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        """Add a sub glyph."""
        raise NotImplementedError


class BasePointToSegmentPen(AbstractPointPen):
    """
    Base class for retrieving the outline in a segment-oriented
    way. The PointPen protocol is simple yet also a little tricky,
    so when you need an outline presented as segments but you have
    it as points, do use this base implementation as it properly
    takes care of all the edge cases.
    """

    def __init__(self):
        self.currentPath = None

    def beginPath(self, identifier=None, **kwargs):
        if self.currentPath is not None:
            raise PenError("Path already begun.")
        self.currentPath = []

    def _flushContour(self, segments):
        """Override this method.

        It will be called for each non-empty sub path with a list
        of segments: the 'segments' argument.

        The segments list contains tuples of length 2:
        (segmentType, points)

        segmentType is one of "move", "line", "curve" or "qcurve".
        "move" may only occur as the first segment, and it signifies
        an OPEN path. A CLOSED path does NOT start with a "move", in
        fact it will not contain a "move" at ALL.

        The 'points' field in the 2-tuple is a list of point info
        tuples. The list has 1 or more items, a point tuple has
        four items:
        (point, smooth, name, kwargs)
        'point' is an (x, y) coordinate pair.

        For a closed path, the initial moveTo point is defined as
        the last point of the last segment.

        The 'points' list of "move" and "line" segments always contains
        exactly one point tuple.
        """
        raise NotImplementedError

    def endPath(self):
        if self.currentPath is None:
            raise PenError("Path not begun.")
        points = self.currentPath
        self.currentPath = None
        if not points:
            return
        if len(points) == 1:
            # Not much more we can do than output a single move segment.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments = [("move", [(pt, smooth, name, kwargs)])]
            self._flushContour(segments)
            return
        segments = []
        if points[0][1] == "move":
            # It's an open contour, insert a "move" segment for the first
            # point and remove that first point from the point list.
            pt, segmentType, smooth, name, kwargs = points[0]
            segments.append(("move", [(pt, smooth, name, kwargs)]))
            points.pop(0)
        else:
            # It's a closed contour. Locate the first on-curve point, and
            # rotate the point list so that it _ends_ with an on-curve
            # point.
            firstOnCurve = None
            for i in range(len(points)):
                segmentType = points[i][1]
                if segmentType is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # Special case for quadratics: a contour with no on-curve
                # points. Add a "None" point. (See also the Pen protocol's
                # qCurveTo() method and fontTools.pens.basePen.py.)
                points.append((None, "qcurve", None, None, None))
            else:
                points = points[firstOnCurve+1:] + points[:firstOnCurve+1]

        currentSegment = []
        for pt, segmentType, smooth, name, kwargs in points:
            currentSegment.append((pt, smooth, name, kwargs))
            if segmentType is None:
                continue
            segments.append((segmentType, currentSegment))
            currentSegment = []

        self._flushContour(segments)

    def addPoint(self, pt, segmentType=None, smooth=False, name=None,
                 identifier=None, **kwargs):
        if self.currentPath is None:
            raise PenError("Path not begun")
        self.currentPath.append((pt, segmentType, smooth, name, kwargs))


class PointToSegmentPen(BasePointToSegmentPen):
    """
    Adapter class that converts the PointPen protocol to the
    (Segment)Pen protocol.

    NOTE: The segment pen does not support and will drop point names,
    identifiers and kwargs.
    """

    def __init__(self, segmentPen, outputImpliedClosingLine=False):
        BasePointToSegmentPen.__init__(self)
        self.pen = segmentPen
        self.outputImpliedClosingLine = outputImpliedClosingLine

    def _flushContour(self, segments):
        if not segments:
            raise PenError("Must have at least one segment.")
        pen = self.pen
        if segments[0][0] == "move":
            # It's an open path.
            closed = False
            points = segments[0][1]
            if len(points) != 1:
                raise PenError(f"Illegal move segment point count: {len(points)}")
            movePt, _, _, _ = points[0]
            del segments[0]
        else:
            # It's a closed path, do a moveTo to the last
            # point of the last segment.
            closed = True
            segmentType, points = segments[-1]
            movePt, _, _, _ = points[-1]
        if movePt is None:
            # quad special case: a contour with no on-curve points contains
            # one "qcurve" segment that ends with a point that's None. We
            # must not output a moveTo() in that case.
            pass
        else:
            pen.moveTo(movePt)
        outputImpliedClosingLine = self.outputImpliedClosingLine
        nSegments = len(segments)
        lastPt = movePt
        for i in range(nSegments):
            segmentType, points = segments[i]
            points = [pt for pt, _, _, _ in points]
            if segmentType == "line":
                if len(points) != 1:
                    raise PenError(f"Illegal line segment point count: {len(points)}")
                pt = points[0]
                # For closed contours, a 'lineTo' is always implied from the last oncurve
                # point to the starting point, thus we can omit it when the last and
                # starting point don't overlap.
                # However, when the last oncurve point is a "line" segment and has same
                # coordinates as the starting point of a closed contour, we need to output
                # the closing 'lineTo' explicitly (regardless of the value of the
                # 'outputImpliedClosingLine' option) in order to disambiguate this case from
                # the implied closing 'lineTo', otherwise the duplicate point would be lost.
                # See https://github.com/googlefonts/fontmake/issues/572.
                if (
                    i + 1 != nSegments
                    or outputImpliedClosingLine
                    or not closed
                    or pt == lastPt
                ):
                    pen.lineTo(pt)
                    lastPt = pt
            elif segmentType == "curve":
                pen.curveTo(*points)
                lastPt = points[-1]
            elif segmentType == "qcurve":
                pen.qCurveTo(*points)
                lastPt = points[-1]
            else:
                raise PenError(f"Illegal segmentType: {segmentType}")
        if closed:
            pen.closePath()
        else:
            pen.endPath()

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        del identifier  # unused
        del kwargs  # unused
        self.pen.addComponent(glyphName, transform)


class SegmentToPointPen(AbstractPen):
    """
    Adapter class that converts the (Segment)Pen protocol to the
    PointPen protocol.
    """

    def __init__(self, pointPen, guessSmooth=True):
        if guessSmooth:
            self.pen = GuessSmoothPointPen(pointPen)
        else:
            self.pen = pointPen
        self.contour = None

    def _flushContour(self):
        pen = self.pen
        pen.beginPath()
        for pt, segmentType in self.contour:
            pen.addPoint(pt, segmentType=segmentType)
        pen.endPath()

    def moveTo(self, pt):
        self.contour = []
        self.contour.append((pt, "move"))

    def lineTo(self, pt):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self.contour.append((pt, "line"))

    def curveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        self.contour.append((pts[-1], "curve"))

    def qCurveTo(self, *pts):
        if not pts:
            raise TypeError("Must pass in at least one point")
        if pts[-1] is None:
            self.contour = []
        else:
            if self.contour is None:
                raise PenError("Contour missing required initial moveTo")
        for pt in pts[:-1]:
            self.contour.append((pt, None))
        if pts[-1] is not None:
            self.contour.append((pts[-1], "qcurve"))

    def closePath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        if len(self.contour) > 1 and self.contour[0][0] == self.contour[-1][0]:
            self.contour[0] = self.contour[-1]
            del self.contour[-1]
        else:
            # There's an implied line at the end, replace "move" with "line"
            # for the first point
            pt, tp = self.contour[0]
            if tp == "move":
                self.contour[0] = pt, "line"
        self._flushContour()
        self.contour = None

    def endPath(self):
        if self.contour is None:
            raise PenError("Contour missing required initial moveTo")
        self._flushContour()
        self.contour = None

    def addComponent(self, glyphName, transform):
        if self.contour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform)
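
# Standalone sketch chaining the two adapters into a round trip: segment
# commands in, PointPen calls in the middle, segment commands out again.
from fontTools.pens.pointPen import PointToSegmentPen, SegmentToPointPen
from fontTools.pens.recordingPen import RecordingPen

rec = RecordingPen()
pen = SegmentToPointPen(PointToSegmentPen(rec))
pen.moveTo((0, 0))
pen.lineTo((100, 0))
pen.lineTo((100, 100))
pen.closePath()
print(rec.value)
# [('moveTo', ((0, 0),)), ('lineTo', ((100, 0),)),
#  ('lineTo', ((100, 100),)), ('closePath', ())]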

class GuessSmoothPointPen(AbstractPointPen):
    """
    Filtering PointPen that tries to determine whether an on-curve point
    should be "smooth", ie. that it's a "tangent" point or a "curve" point.
    """

    def __init__(self, outPen, error=0.05):
        self._outPen = outPen
        self._error = error
        self._points = None

    def _flushContour(self):
        if self._points is None:
            raise PenError("Path not begun")
        points = self._points
        nPoints = len(points)
        if not nPoints:
            return
        if points[0][1] == "move":
            # Open path.
            indices = range(1, nPoints - 1)
        elif nPoints > 1:
            # Closed path. To avoid having to mod the contour index, we
            # simply abuse Python's negative index feature, and start at -1
            indices = range(-1, nPoints - 1)
        else:
            # closed path containing 1 point (!), ignore.
            indices = []
        for i in indices:
            pt, segmentType, _, name, kwargs = points[i]
            if segmentType is None:
                continue
            prev = i - 1
            next = i + 1
            if points[prev][1] is not None and points[next][1] is not None:
                continue
            # At least one of our neighbors is an off-curve point
            pt = points[i][0]
            prevPt = points[prev][0]
            nextPt = points[next][0]
            if pt != prevPt and pt != nextPt:
                dx1, dy1 = pt[0] - prevPt[0], pt[1] - prevPt[1]
                dx2, dy2 = nextPt[0] - pt[0], nextPt[1] - pt[1]
                a1 = math.atan2(dy1, dx1)
                a2 = math.atan2(dy2, dx2)
                if abs(a1 - a2) < self._error:
                    points[i] = pt, segmentType, True, name, kwargs

        for pt, segmentType, smooth, name, kwargs in points:
            self._outPen.addPoint(pt, segmentType, smooth, name, **kwargs)

    def beginPath(self, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Path already begun")
        self._points = []
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.beginPath(**kwargs)

    def endPath(self):
        self._flushContour()
        self._outPen.endPath()
        self._points = None

    def addPoint(self, pt, segmentType=None, smooth=False, name=None,
                 identifier=None, **kwargs):
        if self._points is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._points.append((pt, segmentType, False, name, kwargs))

    def addComponent(self, glyphName, transformation, identifier=None, **kwargs):
        if self._points is not None:
            raise PenError("Components must be added before or after contours")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self._outPen.addComponent(glyphName, transformation, **kwargs)


class ReverseContourPointPen(AbstractPointPen):
    """
    This is a PointPen that passes outline data to another PointPen, but
    reversing the winding direction of all contours. Components are simply
    passed through unchanged.

    Closed contours are reversed in such a way that the first point remains
    the first point.
    """

    def __init__(self, outputPointPen):
        self.pen = outputPointPen
        # a place to store the points for the current sub path
        self.currentContour = None

    def _flushContour(self):
        pen = self.pen
        contour = self.currentContour
        if not contour:
            pen.beginPath(identifier=self.currentContourIdentifier)
            pen.endPath()
            return

        closed = contour[0][1] != "move"
        if not closed:
            lastSegmentType = "move"
        else:
            # Remove the first point and insert it at the end. When
            # the list of points gets reversed, this point will then
            # again be at the start. In other words, the following
            # will hold:
            #   for N in range(len(originalContour)):
            #       originalContour[N] == reversedContour[-N]
            contour.append(contour.pop(0))
            # Find the first on-curve point.
            firstOnCurve = None
            for i in range(len(contour)):
                if contour[i][1] is not None:
                    firstOnCurve = i
                    break
            if firstOnCurve is None:
                # There are no on-curve points, we basically have to
                # do nothing but contour.reverse().
                lastSegmentType = None
            else:
                lastSegmentType = contour[firstOnCurve][1]

        contour.reverse()
        if not closed:
            # Open paths must start with a move, so we simply dump
            # all off-curve points leading up to the first on-curve.
            while contour[0][1] is None:
                contour.pop(0)
        pen.beginPath(identifier=self.currentContourIdentifier)
        for pt, nextSegmentType, smooth, name, kwargs in contour:
            if nextSegmentType is not None:
                segmentType = lastSegmentType
                lastSegmentType = nextSegmentType
            else:
                segmentType = None
            pen.addPoint(pt, segmentType=segmentType, smooth=smooth, name=name, **kwargs)
        pen.endPath()

    def beginPath(self, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Path already begun")
        self.currentContour = []
        self.currentContourIdentifier = identifier
        self.onCurve = []

    def endPath(self):
        if self.currentContour is None:
            raise PenError("Path not begun")
        self._flushContour()
        self.currentContour = None

    def addPoint(self, pt, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
        if self.currentContour is None:
            raise PenError("Path not begun")
        if identifier is not None:
            kwargs["identifier"] = identifier
        self.currentContour.append((pt, segmentType, smooth, name, kwargs))

    def addComponent(self, glyphName, transform, identifier=None, **kwargs):
        if self.currentContour is not None:
            raise PenError("Components must be added before or after contours")
        self.pen.addComponent(glyphName, transform, identifier=identifier, **kwargs)

45
venv/Lib/site-packages/fontTools/pens/quartzPen.py
Normal file
@@ -0,0 +1,45 @@
from fontTools.pens.basePen import BasePen

from Quartz.CoreGraphics import CGPathCreateMutable, CGPathMoveToPoint
from Quartz.CoreGraphics import CGPathAddLineToPoint, CGPathAddCurveToPoint
from Quartz.CoreGraphics import CGPathAddQuadCurveToPoint, CGPathCloseSubpath


__all__ = ["QuartzPen"]


class QuartzPen(BasePen):

    """A pen that creates a CGPath.

    Parameters
    - path: an optional CGPath to add to
    - xform: an optional CGAffineTransform to apply to the path
    """

    def __init__(self, glyphSet, path=None, xform=None):
        BasePen.__init__(self, glyphSet)
        if path is None:
            path = CGPathCreateMutable()
        self.path = path
        self.xform = xform

    def _moveTo(self, pt):
        x, y = pt
        CGPathMoveToPoint(self.path, self.xform, x, y)

    def _lineTo(self, pt):
        x, y = pt
        CGPathAddLineToPoint(self.path, self.xform, x, y)

    def _curveToOne(self, p1, p2, p3):
        (x1, y1), (x2, y2), (x3, y3) = p1, p2, p3
        CGPathAddCurveToPoint(self.path, self.xform, x1, y1, x2, y2, x3, y3)

    def _qCurveToOne(self, p1, p2):
        (x1, y1), (x2, y2) = p1, p2
        CGPathAddQuadCurveToPoint(self.path, self.xform, x1, y1, x2, y2)

    def _closePath(self):
        CGPathCloseSubpath(self.path)

65
venv/Lib/site-packages/fontTools/pens/t2CharStringPen.py
Normal file
@@ -0,0 +1,65 @@
# Copyright (c) 2009 Type Supply LLC
# Author: Tal Leming

from fontTools.misc.roundTools import otRound, roundFunc
from fontTools.misc.psCharStrings import T2CharString
from fontTools.pens.basePen import BasePen
from fontTools.cffLib.specializer import specializeCommands, commandsToProgram


class T2CharStringPen(BasePen):
    """Pen to draw Type 2 CharStrings.

    The 'roundTolerance' argument controls the rounding of point coordinates.
    It is defined as the maximum absolute difference between the original
    float and the rounded integer value.
    The default tolerance of 0.5 means that all floats are rounded to integer;
    a value of 0 disables rounding; values in between will only round floats
    which are close to their integral part within the tolerated range.
    """

    def __init__(self, width, glyphSet, roundTolerance=0.5, CFF2=False):
        super(T2CharStringPen, self).__init__(glyphSet)
        self.round = roundFunc(roundTolerance)
        self._CFF2 = CFF2
        self._width = width
        self._commands = []
        self._p0 = (0, 0)

    def _p(self, pt):
        p0 = self._p0
        pt = self._p0 = (self.round(pt[0]), self.round(pt[1]))
        return [pt[0]-p0[0], pt[1]-p0[1]]

    def _moveTo(self, pt):
        self._commands.append(('rmoveto', self._p(pt)))

    def _lineTo(self, pt):
        self._commands.append(('rlineto', self._p(pt)))

    def _curveToOne(self, pt1, pt2, pt3):
        _p = self._p
        self._commands.append(('rrcurveto', _p(pt1)+_p(pt2)+_p(pt3)))

    def _closePath(self):
        pass

    def _endPath(self):
        pass

    def getCharString(self, private=None, globalSubrs=None, optimize=True):
        commands = self._commands
        if optimize:
            maxstack = 48 if not self._CFF2 else 513
            commands = specializeCommands(commands,
                                          generalizeFirst=False,
                                          maxstack=maxstack)
        program = commandsToProgram(commands)
        if self._width is not None:
            assert not self._CFF2, "CFF2 does not allow encoding glyph width in CharString."
            program.insert(0, otRound(self._width))
        if not self._CFF2:
            program.append('endchar')
        charString = T2CharString(
            program=program, private=private, globalSubrs=globalSubrs)
        return charString
|
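The docstring above explains roundTolerance; a minimal sketch of the draw-then-extract flow (the width and the triangle coordinates are made up, and glyphSet can be None since no components are drawn):

    pen = T2CharStringPen(width=600, glyphSet=None)
    pen.moveTo((0, 0))
    pen.lineTo((100, 0))
    pen.lineTo((50, 100))
    pen.closePath()
    charstring = pen.getCharString()  # a fontTools T2CharString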
6
venv/Lib/site-packages/fontTools/subset/__main__.py
Normal file
@@ -0,0 +1,6 @@
import sys
from fontTools.subset import main


if __name__ == '__main__':
    sys.exit(main())
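The same entry point can be driven programmatically; a sketch assuming the standard fontTools subset flags and a hypothetical input file:

    from fontTools.subset import main

    main(["MyFont.ttf", "--unicodes=U+0041-005A", "--output-file=MyFont.subset.ttf"])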
61
venv/Lib/site-packages/fontTools/svgLib/path/__init__.py
Normal file
@@ -0,0 +1,61 @@
from fontTools.pens.transformPen import TransformPen
from fontTools.misc import etree
from fontTools.misc.textTools import tostr
from .parser import parse_path
from .shapes import PathBuilder


__all__ = [tostr(s) for s in ("SVGPath", "parse_path")]


class SVGPath(object):
    """ Parse SVG ``path`` elements from a file or string, and draw them
    onto a glyph object that supports the FontTools Pen protocol.

    For example, reading from an SVG file and drawing to a Defcon Glyph:

        import defcon
        glyph = defcon.Glyph()
        pen = glyph.getPen()
        svg = SVGPath("path/to/a.svg")
        svg.draw(pen)

    Or reading from a string containing SVG data, using the alternative
    'fromstring' (a class method):

        data = '<?xml version="1.0" ...'
        svg = SVGPath.fromstring(data)
        svg.draw(pen)

    Both constructors can optionally take a 'transform' matrix (6-float
    tuple, or a FontTools Transform object) to modify the draw output.
    """

    def __init__(self, filename=None, transform=None):
        if filename is None:
            self.root = etree.ElementTree()
        else:
            tree = etree.parse(filename)
            self.root = tree.getroot()
        self.transform = transform

    @classmethod
    def fromstring(cls, data, transform=None):
        self = cls(transform=transform)
        self.root = etree.fromstring(data)
        return self

    def draw(self, pen):
        if self.transform:
            pen = TransformPen(pen, self.transform)
        pb = PathBuilder()
        # xpath | doesn't seem to reliably work so just walk it
        for el in self.root.iter():
            pb.add_path_from_element(el)
        original_pen = pen
        for path, transform in zip(pb.paths, pb.transforms):
            if transform:
                pen = TransformPen(original_pen, transform)
            else:
                pen = original_pen
            parse_path(path, pen)
369
venv/Lib/site-packages/fontTools/t1Lib/__init__.py
Normal file
@@ -0,0 +1,369 @@
"""fontTools.t1Lib.py -- Tools for PostScript Type 1 fonts (Python2 only)

Functions for reading and writing raw Type 1 data:

read(path)
    reads any Type 1 font file, returns the raw data and a type indicator:
    'LWFN', 'PFB' or 'OTHER', depending on the format of the file pointed
    to by 'path'.
    Raises an error when the file does not contain valid Type 1 data.

write(path, data, kind='OTHER', dohex=False)
    writes raw Type 1 data to the file pointed to by 'path'.
    'kind' can be one of 'LWFN', 'PFB' or 'OTHER'; it defaults to 'OTHER'.
    'dohex' is a flag which determines whether the eexec encrypted
    part should be written as hexadecimal or binary, but only if kind
    is 'OTHER'.
"""
from fontTools.misc import eexec
from fontTools.misc.macCreatorType import getMacCreatorAndType
from fontTools.misc.textTools import bytechr, byteord, bytesjoin
import os
import re

__author__ = "jvr"
__version__ = "1.0b2"
DEBUG = 0


try:
    try:
        from Carbon import Res
    except ImportError:
        import Res  # MacPython < 2.2
except ImportError:
    haveMacSupport = 0
else:
    haveMacSupport = 1


class T1Error(Exception): pass


class T1Font(object):

    """Type 1 font class.

    Uses a minimal interpreter that supports just about enough PS to parse
    Type 1 fonts.
    """

    def __init__(self, path, encoding="ascii", kind=None):
        if kind is None:
            self.data, _ = read(path)
        elif kind == "LWFN":
            self.data = readLWFN(path)
        elif kind == "PFB":
            self.data = readPFB(path)
        elif kind == "OTHER":
            self.data = readOther(path)
        else:
            raise ValueError(kind)
        self.encoding = encoding

    def saveAs(self, path, type, dohex=False):
        write(path, self.getData(), type, dohex)

    def getData(self):
        # XXX Todo: if the data has been converted to Python object,
        # recreate the PS stream
        return self.data

    def getGlyphSet(self):
        """Return a generic GlyphSet, which is a dict-like object
        mapping glyph names to glyph objects. The returned glyph objects
        have a .draw() method that supports the Pen protocol, and will
        have an attribute named 'width', but only *after* the .draw() method
        has been called.

        In the case of Type 1, the GlyphSet is simply the CharStrings dict.
        """
        return self["CharStrings"]

    def __getitem__(self, key):
        if not hasattr(self, "font"):
            self.parse()
        return self.font[key]

    def parse(self):
        from fontTools.misc import psLib
        from fontTools.misc import psCharStrings
        self.font = psLib.suckfont(self.data, self.encoding)
        charStrings = self.font["CharStrings"]
        lenIV = self.font["Private"].get("lenIV", 4)
        assert lenIV >= 0
        subrs = self.font["Private"]["Subrs"]
        for glyphName, charString in charStrings.items():
            charString, R = eexec.decrypt(charString, 4330)
            charStrings[glyphName] = psCharStrings.T1CharString(charString[lenIV:],
                                                                subrs=subrs)
        for i in range(len(subrs)):
            charString, R = eexec.decrypt(subrs[i], 4330)
            subrs[i] = psCharStrings.T1CharString(charString[lenIV:], subrs=subrs)
        del self.data


# low level T1 data read and write functions

def read(path, onlyHeader=False):
    """reads any Type 1 font file, returns raw data"""
    _, ext = os.path.splitext(path)
    ext = ext.lower()
    creator, typ = getMacCreatorAndType(path)
    if typ == 'LWFN':
        return readLWFN(path, onlyHeader), 'LWFN'
    if ext == '.pfb':
        return readPFB(path, onlyHeader), 'PFB'
    else:
        return readOther(path), 'OTHER'

def write(path, data, kind='OTHER', dohex=False):
    assertType1(data)
    kind = kind.upper()
    try:
        os.remove(path)
    except os.error:
        pass
    err = 1
    try:
        if kind == 'LWFN':
            writeLWFN(path, data)
        elif kind == 'PFB':
            writePFB(path, data)
        else:
            writeOther(path, data, dohex)
        err = 0
    finally:
        if err and not DEBUG:
            try:
                os.remove(path)
            except os.error:
                pass


# -- internal --

LWFNCHUNKSIZE = 2000
HEXLINELENGTH = 80


def readLWFN(path, onlyHeader=False):
    """reads an LWFN font file, returns raw data"""
    from fontTools.misc.macRes import ResourceReader
    reader = ResourceReader(path)
    try:
        data = []
        for res in reader.get('POST', []):
            code = byteord(res.data[0])
            if byteord(res.data[1]) != 0:
                raise T1Error('corrupt LWFN file')
            if code in [1, 2]:
                if onlyHeader and code == 2:
                    break
                data.append(res.data[2:])
            elif code in [3, 5]:
                break
            elif code == 4:
                with open(path, "rb") as f:
                    data.append(f.read())
            elif code == 0:
                pass  # comment, ignore
            else:
                raise T1Error('bad chunk code: ' + repr(code))
    finally:
        reader.close()
    data = bytesjoin(data)
    assertType1(data)
    return data

def readPFB(path, onlyHeader=False):
    """reads a PFB font file, returns raw data"""
    data = []
    with open(path, "rb") as f:
        while True:
            if f.read(1) != bytechr(128):
                raise T1Error('corrupt PFB file')
            code = byteord(f.read(1))
            if code in [1, 2]:
                chunklen = stringToLong(f.read(4))
                chunk = f.read(chunklen)
                assert len(chunk) == chunklen
                data.append(chunk)
            elif code == 3:
                break
            else:
                raise T1Error('bad chunk code: ' + repr(code))
            if onlyHeader:
                break
    data = bytesjoin(data)
    assertType1(data)
    return data

def readOther(path):
    """reads any (font) file, returns raw data"""
    with open(path, "rb") as f:
        data = f.read()
    assertType1(data)
    chunks = findEncryptedChunks(data)
    data = []
    for isEncrypted, chunk in chunks:
        if isEncrypted and isHex(chunk[:4]):
            data.append(deHexString(chunk))
        else:
            data.append(chunk)
    return bytesjoin(data)

# file writing tools

def writeLWFN(path, data):
    # Res.FSpCreateResFile was deprecated in OS X 10.5
    Res.FSpCreateResFile(path, "just", "LWFN", 0)
    resRef = Res.FSOpenResFile(path, 2)  # write-only
    try:
        Res.UseResFile(resRef)
        resID = 501
        chunks = findEncryptedChunks(data)
        for isEncrypted, chunk in chunks:
            if isEncrypted:
                code = 2
            else:
                code = 1
            while chunk:
                res = Res.Resource(bytechr(code) + '\0' + chunk[:LWFNCHUNKSIZE - 2])
                res.AddResource('POST', resID, '')
                chunk = chunk[LWFNCHUNKSIZE - 2:]
                resID = resID + 1
        res = Res.Resource(bytechr(5) + '\0')
        res.AddResource('POST', resID, '')
    finally:
        Res.CloseResFile(resRef)

def writePFB(path, data):
    chunks = findEncryptedChunks(data)
    with open(path, "wb") as f:
        for isEncrypted, chunk in chunks:
            if isEncrypted:
                code = 2
            else:
                code = 1
            f.write(bytechr(128) + bytechr(code))
            f.write(longToString(len(chunk)))
            f.write(chunk)
        f.write(bytechr(128) + bytechr(3))

def writeOther(path, data, dohex=False):
    chunks = findEncryptedChunks(data)
    with open(path, "wb") as f:
        hexlinelen = HEXLINELENGTH // 2
        for isEncrypted, chunk in chunks:
            if isEncrypted:
                code = 2
            else:
                code = 1
            if code == 2 and dohex:
                while chunk:
                    f.write(eexec.hexString(chunk[:hexlinelen]))
                    f.write(b'\r')
                    chunk = chunk[hexlinelen:]
            else:
                f.write(chunk)


# decryption tools

EEXECBEGIN = b"currentfile eexec"
# The spec allows for 512 ASCII zeros interrupted by arbitrary whitespace to
# follow eexec
EEXECEND = re.compile(b'(0[ \t\r\n]*){512}', flags=re.M)
EEXECINTERNALEND = b"currentfile closefile"
EEXECBEGINMARKER = b"%-- eexec start\r"
EEXECENDMARKER = b"%-- eexec end\r"

_ishexRE = re.compile(b'[0-9A-Fa-f]*$')

def isHex(text):
    return _ishexRE.match(text) is not None


def decryptType1(data):
    chunks = findEncryptedChunks(data)
    data = []
    for isEncrypted, chunk in chunks:
        if isEncrypted:
            if isHex(chunk[:4]):
                chunk = deHexString(chunk)
            decrypted, R = eexec.decrypt(chunk, 55665)
            decrypted = decrypted[4:]
            if decrypted[-len(EEXECINTERNALEND)-1:-1] != EEXECINTERNALEND \
                    and decrypted[-len(EEXECINTERNALEND)-2:-2] != EEXECINTERNALEND:
                raise T1Error("invalid end of eexec part")
            decrypted = decrypted[:-len(EEXECINTERNALEND)-2] + b'\r'
            data.append(EEXECBEGINMARKER + decrypted + EEXECENDMARKER)
        else:
            if chunk[-len(EEXECBEGIN)-1:-1] == EEXECBEGIN:
                data.append(chunk[:-len(EEXECBEGIN)-1])
            else:
                data.append(chunk)
    return bytesjoin(data)

def findEncryptedChunks(data):
    chunks = []
    while True:
        eBegin = data.find(EEXECBEGIN)
        if eBegin < 0:
            break
        eBegin = eBegin + len(EEXECBEGIN) + 1
        endMatch = EEXECEND.search(data, eBegin)
        if endMatch is None:
            raise T1Error("can't find end of eexec part")
        eEnd = endMatch.start()
        cypherText = data[eBegin:eEnd + 2]
        if isHex(cypherText[:4]):
            cypherText = deHexString(cypherText)
        plainText, R = eexec.decrypt(cypherText, 55665)
        eEndLocal = plainText.find(EEXECINTERNALEND)
        if eEndLocal < 0:
            raise T1Error("can't find end of eexec part")
        chunks.append((0, data[:eBegin]))
        chunks.append((1, cypherText[:eEndLocal + len(EEXECINTERNALEND) + 1]))
        data = data[eEnd:]
    chunks.append((0, data))
    return chunks

def deHexString(hexstring):
    return eexec.deHexString(bytesjoin(hexstring.split()))


# Type 1 assertion

_fontType1RE = re.compile(br"/FontType\s+1\s+def")

def assertType1(data):
    for head in [b'%!PS-AdobeFont', b'%!FontType1']:
        if data[:len(head)] == head:
            break
    else:
        raise T1Error("not a PostScript font")
    if not _fontType1RE.search(data):
        raise T1Error("not a Type 1 font")
    if data.find(b"currentfile eexec") < 0:
        raise T1Error("not an encrypted Type 1 font")
    # XXX what else?
    return data


# pfb helpers

def longToString(long):
    s = b""
    for i in range(4):
        s += bytechr((long & (0xff << (i * 8))) >> i * 8)
    return s

def stringToLong(s):
    if len(s) != 4:
        raise ValueError('string must be 4 bytes long')
    l = 0
    for i in range(4):
        l += byteord(s[i]) << (i * 8)
    return l
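A short sketch of the high-level API documented in the module docstring above ("Foo.pfb" is a hypothetical path; parsing happens lazily on first item access):

    font = T1Font("Foo.pfb")
    glyphs = font.getGlyphSet()  # the CharStrings dict
    print(sorted(glyphs.keys())[:10])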
17
venv/Lib/site-packages/fontTools/ttLib/__init__.py
Normal file
@@ -0,0 +1,17 @@
"""fontTools.ttLib -- a package for dealing with TrueType fonts."""

from fontTools.misc.loggingTools import deprecateFunction
import logging


log = logging.getLogger(__name__)

class TTLibError(Exception): pass

@deprecateFunction("use logging instead", category=DeprecationWarning)
def debugmsg(msg):
    import time
    print(msg + time.strftime("  (%H:%M:%S)", time.localtime(time.time())))

from fontTools.ttLib.ttFont import *
from fontTools.ttLib.ttCollection import TTCollection
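The star import above re-exports TTFont, the package's usual entry point; a one-line sketch with a hypothetical path:

    from fontTools.ttLib import TTFont

    font = TTFont("MyFont.ttf")  # hypothetical path
    print(font.getGlyphOrder()[:5])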
52
venv/Lib/site-packages/fontTools/ttLib/macUtils.py
Normal file
@@ -0,0 +1,52 @@
"""ttLib.macUtils.py -- Various Mac-specific stuff."""
from io import BytesIO
from fontTools.misc.macRes import ResourceReader, ResourceError


def getSFNTResIndices(path):
    """Determine whether a file has a 'sfnt' resource fork or not."""
    try:
        reader = ResourceReader(path)
        indices = reader.getIndices('sfnt')
        reader.close()
        return indices
    except ResourceError:
        return []


def openTTFonts(path):
    """Given a pathname, return a list of TTFont objects. In the case
    of a flat TTF/OTF file, the list will contain just one font object;
    but in the case of a Mac font suitcase it will contain as many
    font objects as there are sfnt resources in the file.
    """
    from fontTools import ttLib
    fonts = []
    sfnts = getSFNTResIndices(path)
    if not sfnts:
        fonts.append(ttLib.TTFont(path))
    else:
        for index in sfnts:
            fonts.append(ttLib.TTFont(path, index))
        if not fonts:
            raise ttLib.TTLibError("no fonts found in file '%s'" % path)
    return fonts


class SFNTResourceReader(BytesIO):

    """Simple read-only file wrapper for 'sfnt' resources."""

    def __init__(self, path, res_name_or_index):
        from fontTools import ttLib
        reader = ResourceReader(path)
        if isinstance(res_name_or_index, str):
            rsrc = reader.getNamedResource('sfnt', res_name_or_index)
        else:
            rsrc = reader.getIndResource('sfnt', res_name_or_index)
        if rsrc is None:
            raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
        reader.close()
        self.rsrc = rsrc
        super(SFNTResourceReader, self).__init__(rsrc.data)
        self.name = path
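A sketch of openTTFonts as documented above (flat files yield one TTFont, Mac suitcases several; "MyFont.ttf" is hypothetical):

    fonts = openTTFonts("MyFont.ttf")
    for font in fonts:
        print(font["name"].getDebugName(4))  # full font name from the name table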
248
venv/Lib/site-packages/fontTools/ttLib/removeOverlaps.py
Normal file
@@ -0,0 +1,248 @@
""" Simplify TrueType glyphs by merging overlapping contours/components.

Requires https://github.com/fonttools/skia-pathops
"""

import itertools
import logging
from typing import Callable, Iterable, Optional, Mapping

from fontTools.misc.roundTools import otRound
from fontTools.ttLib import ttFont
from fontTools.ttLib.tables import _g_l_y_f
from fontTools.ttLib.tables import _h_m_t_x
from fontTools.pens.ttGlyphPen import TTGlyphPen

import pathops


__all__ = ["removeOverlaps"]


class RemoveOverlapsError(Exception):
    pass


log = logging.getLogger("fontTools.ttLib.removeOverlaps")

_TTGlyphMapping = Mapping[str, ttFont._TTGlyph]


def skPathFromGlyph(glyphName: str, glyphSet: _TTGlyphMapping) -> pathops.Path:
    path = pathops.Path()
    pathPen = path.getPen(glyphSet=glyphSet)
    glyphSet[glyphName].draw(pathPen)
    return path


def skPathFromGlyphComponent(
    component: _g_l_y_f.GlyphComponent, glyphSet: _TTGlyphMapping
):
    baseGlyphName, transformation = component.getComponentInfo()
    path = skPathFromGlyph(baseGlyphName, glyphSet)
    return path.transform(*transformation)


def componentsOverlap(glyph: _g_l_y_f.Glyph, glyphSet: _TTGlyphMapping) -> bool:
    if not glyph.isComposite():
        raise ValueError("This method only works with TrueType composite glyphs")
    if len(glyph.components) < 2:
        return False  # single component, no overlaps

    component_paths = {}

    def _get_nth_component_path(index: int) -> pathops.Path:
        if index not in component_paths:
            component_paths[index] = skPathFromGlyphComponent(
                glyph.components[index], glyphSet
            )
        return component_paths[index]

    return any(
        pathops.op(
            _get_nth_component_path(i),
            _get_nth_component_path(j),
            pathops.PathOp.INTERSECTION,
            fix_winding=False,
            keep_starting_points=False,
        )
        for i, j in itertools.combinations(range(len(glyph.components)), 2)
    )


def ttfGlyphFromSkPath(path: pathops.Path) -> _g_l_y_f.Glyph:
    # Skia paths have no 'components', no need for glyphSet
    ttPen = TTGlyphPen(glyphSet=None)
    path.draw(ttPen)
    glyph = ttPen.glyph()
    assert not glyph.isComposite()
    # compute glyph.xMin (glyfTable parameter unused for non composites)
    glyph.recalcBounds(glyfTable=None)
    return glyph


def _round_path(
    path: pathops.Path, round: Callable[[float], float] = otRound
) -> pathops.Path:
    rounded_path = pathops.Path()
    for verb, points in path:
        rounded_path.add(verb, *((round(p[0]), round(p[1])) for p in points))
    return rounded_path


def _simplify(path: pathops.Path, debugGlyphName: str) -> pathops.Path:
    # skia-pathops has a bug where it sometimes fails to simplify paths when there
    # are float coordinates and control points are very close to one another.
    # Rounding coordinates to integers works around the bug.
    # Since we are going to round glyf coordinates later on anyway, here it is
    # ok(-ish) to also round before simplify. Better than failing the whole process
    # for the entire font.
    # https://bugs.chromium.org/p/skia/issues/detail?id=11958
    # https://github.com/google/fonts/issues/3365
    # TODO(anthrotype): remove once this Skia bug is fixed
    try:
        return pathops.simplify(path, clockwise=path.clockwise)
    except pathops.PathOpsError:
        pass

    path = _round_path(path)
    try:
        path = pathops.simplify(path, clockwise=path.clockwise)
        log.debug(
            "skia-pathops failed to simplify '%s' with float coordinates, "
            "but succeeded using rounded integer coordinates",
            debugGlyphName,
        )
        return path
    except pathops.PathOpsError as e:
        if log.isEnabledFor(logging.DEBUG):
            path.dump()
        raise RemoveOverlapsError(
            f"Failed to remove overlaps from glyph {debugGlyphName!r}"
        ) from e

    raise AssertionError("Unreachable")


def removeTTGlyphOverlaps(
    glyphName: str,
    glyphSet: _TTGlyphMapping,
    glyfTable: _g_l_y_f.table__g_l_y_f,
    hmtxTable: _h_m_t_x.table__h_m_t_x,
    removeHinting: bool = True,
) -> bool:
    glyph = glyfTable[glyphName]
    # decompose composite glyphs only if components overlap each other
    if (
        glyph.numberOfContours > 0
        or glyph.isComposite()
        and componentsOverlap(glyph, glyphSet)
    ):
        path = skPathFromGlyph(glyphName, glyphSet)

        # remove overlaps
        path2 = _simplify(path, glyphName)

        # replace TTGlyph if simplified path is different (ignoring contour order)
        if {tuple(c) for c in path.contours} != {tuple(c) for c in path2.contours}:
            glyfTable[glyphName] = glyph = ttfGlyphFromSkPath(path2)
            # simplified glyph is always unhinted
            assert not glyph.program
            # also ensure hmtx LSB == glyph.xMin so glyph origin is at x=0
            width, lsb = hmtxTable[glyphName]
            if lsb != glyph.xMin:
                hmtxTable[glyphName] = (width, glyph.xMin)
            return True

    if removeHinting:
        glyph.removeHinting()
    return False


def removeOverlaps(
    font: ttFont.TTFont,
    glyphNames: Optional[Iterable[str]] = None,
    removeHinting: bool = True,
    ignoreErrors=False,
) -> None:
    """Simplify glyphs in TTFont by merging overlapping contours.

    Overlapping components are first decomposed to simple contours, then merged.

    Currently this only works with TrueType fonts with 'glyf' table.
    Raises NotImplementedError if 'glyf' table is absent.

    Note that removing overlaps invalidates the hinting. By default we drop hinting
    from all glyphs whether or not overlaps are removed from a given one, as it would
    look weird if only some glyphs are left (un)hinted.

    Args:
        font: input TTFont object, modified in place.
        glyphNames: optional iterable of glyph names (str) to remove overlaps from.
            By default, all glyphs in the font are processed.
        removeHinting (bool): set to False to keep hinting for unmodified glyphs.
        ignoreErrors (bool): set to True to ignore errors while removing overlaps,
            thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363).
    """
    try:
        glyfTable = font["glyf"]
    except KeyError:
        raise NotImplementedError("removeOverlaps currently only works with TTFs")

    hmtxTable = font["hmtx"]
    # wraps the underlying glyf Glyphs, takes care of interfacing with drawing pens
    glyphSet = font.getGlyphSet()

    if glyphNames is None:
        glyphNames = font.getGlyphOrder()

    # process all simple glyphs first, then composites with increasing component depth,
    # so that by the time we test for component intersections the respective base glyphs
    # have already been simplified
    glyphNames = sorted(
        glyphNames,
        key=lambda name: (
            glyfTable[name].getCompositeMaxpValues(glyfTable).maxComponentDepth
            if glyfTable[name].isComposite()
            else 0,
            name,
        ),
    )
    modified = set()
    for glyphName in glyphNames:
        try:
            if removeTTGlyphOverlaps(
                glyphName, glyphSet, glyfTable, hmtxTable, removeHinting
            ):
                modified.add(glyphName)
        except RemoveOverlapsError:
            if not ignoreErrors:
                raise
            log.error("Failed to remove overlaps for '%s'", glyphName)

    log.debug("Removed overlaps for %s glyphs:\n%s", len(modified), " ".join(modified))


def main(args=None):
    import sys

    if args is None:
        args = sys.argv[1:]

    if len(args) < 2:
        print(
            f"usage: fonttools ttLib.removeOverlaps INPUT.ttf OUTPUT.ttf [GLYPHS ...]"
        )
        sys.exit(1)

    src = args[0]
    dst = args[1]
    glyphNames = args[2:] or None

    with ttFont.TTFont(src) as f:
        removeOverlaps(f, glyphNames)
        f.save(dst)


if __name__ == "__main__":
    main()
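The removeOverlaps docstring above describes the public API; a minimal sketch (assumes skia-pathops is installed and "MyFont.ttf" is a hypothetical TrueType file):

    from fontTools.ttLib import TTFont
    from fontTools.ttLib.removeOverlaps import removeOverlaps

    font = TTFont("MyFont.ttf")
    removeOverlaps(font)  # modifies the font in place; hinting is dropped
    font.save("MyFont-nooverlaps.ttf")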
14
venv/Lib/site-packages/fontTools/ttLib/tables/C_F_F__2.py
Normal file
@@ -0,0 +1,14 @@
from io import BytesIO
from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_


class table_C_F_F__2(table_C_F_F_):

    def decompile(self, data, otFont):
        self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        f = BytesIO()
        self.cff.compile(f, otFont, isCFF2=True)
        return f.getvalue()
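This table class is normally reached through a TTFont rather than instantiated directly; a sketch assuming a hypothetical CFF2-flavoured variable OTF:

    from fontTools.ttLib import TTFont

    font = TTFont("MyVariable.otf")
    cff2 = font["CFF2"].cff  # the underlying cffLib.CFFFontSet, decompiled lazily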
5
venv/Lib/site-packages/fontTools/ttLib/tables/G_D_E_F_.py
Normal file
@@ -0,0 +1,5 @@
from .otBase import BaseTTXConverter


class table_G_D_E_F_(BaseTTXConverter):
    pass
5
venv/Lib/site-packages/fontTools/ttLib/tables/M_V_A_R_.py
Normal file
@@ -0,0 +1,5 @@
from .otBase import BaseTTXConverter


class table_M_V_A_R_(BaseTTXConverter):
    pass
93
venv/Lib/site-packages/fontTools/ttLib/tables/S_I_N_G_.py
Normal file
@@ -0,0 +1,93 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
from . import DefaultTable

SINGFormat = """
		>	# big endian
		tableVersionMajor:	H
		tableVersionMinor:	H
		glyphletVersion:	H
		permissions:		h
		mainGID:			H
		unitsPerEm:			H
		vertAdvance:		h
		vertOrigin:			h
		uniqueName:			28s
		METAMD5:			16s
		nameLength:			1s
"""
# baseGlyphName is a byte string which follows the record above.


class table_S_I_N_G_(DefaultTable.DefaultTable):

    dependencies = []

    def decompile(self, data, ttFont):
        dummy, rest = sstruct.unpack2(SINGFormat, data, self)
        self.uniqueName = self.decompileUniqueName(self.uniqueName)
        self.nameLength = byteord(self.nameLength)
        assert len(rest) == self.nameLength
        self.baseGlyphName = tostr(rest)

        rawMETAMD5 = self.METAMD5
        self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
        for char in rawMETAMD5[1:]:
            self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
        self.METAMD5 = self.METAMD5 + "]"

    def decompileUniqueName(self, data):
        name = ""
        for char in data:
            val = byteord(char)
            if val == 0:
                break
            if (val > 31) or (val < 128):
                name += chr(val)
            else:
                octString = oct(val)
                if len(octString) > 3:
                    octString = octString[1:]  # chop off that leading zero.
                elif len(octString) < 3:
                    octString.zfill(3)
                name += "\\" + octString
        return name

    def compile(self, ttFont):
        d = self.__dict__.copy()
        d["nameLength"] = bytechr(len(self.baseGlyphName))
        d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
        METAMD5List = eval(self.METAMD5)
        d["METAMD5"] = b""
        for val in METAMD5List:
            d["METAMD5"] += bytechr(val)
        assert (len(d["METAMD5"]) == 16), "Failed to pack 16 byte MD5 hash in SING table"
        data = sstruct.pack(SINGFormat, d)
        data = data + tobytes(self.baseGlyphName)
        return data

    def compilecompileUniqueName(self, name, length):
        nameLen = len(name)
        if length <= nameLen:
            name = name[:length-1] + "\000"
        else:
            name += (nameLen - length) * "\000"
        return name

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(SINGFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("baseGlyphName", value=self.baseGlyphName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
            setattr(self, name, value)
        else:
            setattr(self, name, safeEval(value))
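The SINGFormat string above is an sstruct declaration: field name, then a struct format code, big-endian. A one-line sketch of how such a record's fixed size is derived:

    from fontTools.misc import sstruct

    print(sstruct.calcsize(SINGFormat))  # bytes in the fixed header before baseGlyphName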
885
venv/Lib/site-packages/fontTools/ttLib/tables/S__i_l_f.py
Normal file
@@ -0,0 +1,885 @@
from fontTools.misc import sstruct
from fontTools.misc.fixedTools import floatToFixedToStr
from fontTools.misc.textTools import byteord, safeEval
# from itertools import *
from . import DefaultTable
from . import grUtils
from array import array
from functools import reduce
import struct, re, sys

Silf_hdr_format = '''
    >
    version:            16.16F
'''

Silf_hdr_format_3 = '''
    >
    version:            16.16F
    compilerVersion:    L
    numSilf:            H
                        x
                        x
'''

Silf_part1_format_v3 = '''
    >
    ruleVersion:        16.16F
    passOffset:         H
    pseudosOffset:      H
'''

Silf_part1_format = '''
    >
    maxGlyphID:         H
    extraAscent:        h
    extraDescent:       h
    numPasses:          B
    iSubst:             B
    iPos:               B
    iJust:              B
    iBidi:              B
    flags:              B
    maxPreContext:      B
    maxPostContext:     B
    attrPseudo:         B
    attrBreakWeight:    B
    attrDirectionality: B
    attrMirroring:      B
    attrSkipPasses:     B
    numJLevels:         B
'''

Silf_justify_format = '''
    >
    attrStretch:        B
    attrShrink:         B
    attrStep:           B
    attrWeight:         B
    runto:              B
                        x
                        x
                        x
'''

Silf_part2_format = '''
    >
    numLigComp:         H
    numUserDefn:        B
    maxCompPerLig:      B
    direction:          B
    attCollisions:      B
                        x
                        x
                        x
    numCritFeatures:    B
'''

Silf_pseudomap_format = '''
    >
    unicode:            L
    nPseudo:            H
'''

Silf_pseudomap_format_h = '''
    >
    unicode:            H
    nPseudo:            H
'''

Silf_classmap_format = '''
    >
    numClass:           H
    numLinear:          H
'''

Silf_lookupclass_format = '''
    >
    numIDs:             H
    searchRange:        H
    entrySelector:      H
    rangeShift:         H
'''

Silf_lookuppair_format = '''
    >
    glyphId:            H
    index:              H
'''

Silf_pass_format = '''
    >
    flags:              B
    maxRuleLoop:        B
    maxRuleContext:     B
    maxBackup:          B
    numRules:           H
    fsmOffset:          H
    pcCode:             L
    rcCode:             L
    aCode:              L
    oDebug:             L
    numRows:            H
    numTransitional:    H
    numSuccess:         H
    numColumns:         H
'''

aCode_info = (
    ("NOP", 0),
    ("PUSH_BYTE", "b"),
    ("PUSH_BYTE_U", "B"),
    ("PUSH_SHORT", ">h"),
    ("PUSH_SHORT_U", ">H"),
    ("PUSH_LONG", ">L"),
    ("ADD", 0),
    ("SUB", 0),
    ("MUL", 0),
    ("DIV", 0),
    ("MIN", 0),
    ("MAX", 0),
    ("NEG", 0),
    ("TRUNC8", 0),
    ("TRUNC16", 0),
    ("COND", 0),
    ("AND", 0),  # x10
    ("OR", 0),
    ("NOT", 0),
    ("EQUAL", 0),
    ("NOT_EQ", 0),
    ("LESS", 0),
    ("GTR", 0),
    ("LESS_EQ", 0),
    ("GTR_EQ", 0),
    ("NEXT", 0),
    ("NEXT_N", "b"),
    ("COPY_NEXT", 0),
    ("PUT_GLYPH_8BIT_OBS", "B"),
    ("PUT_SUBS_8BIT_OBS", "bBB"),
    ("PUT_COPY", "b"),
    ("INSERT", 0),
    ("DELETE", 0),  # x20
    ("ASSOC", -1),
    ("CNTXT_ITEM", "bB"),
    ("ATTR_SET", "B"),
    ("ATTR_ADD", "B"),
    ("ATTR_SUB", "B"),
    ("ATTR_SET_SLOT", "B"),
    ("IATTR_SET_SLOT", "BB"),
    ("PUSH_SLOT_ATTR", "Bb"),
    ("PUSH_GLYPH_ATTR_OBS", "Bb"),
    ("PUSH_GLYPH_METRIC", "Bbb"),
    ("PUSH_FEAT", "Bb"),
    ("PUSH_ATT_TO_GATTR_OBS", "Bb"),
    ("PUSH_ATT_TO_GLYPH_METRIC", "Bbb"),
    ("PUSH_ISLOT_ATTR", "Bbb"),
    ("PUSH_IGLYPH_ATTR", "Bbb"),
    ("POP_RET", 0),  # x30
    ("RET_ZERO", 0),
    ("RET_TRUE", 0),
    ("IATTR_SET", "BB"),
    ("IATTR_ADD", "BB"),
    ("IATTR_SUB", "BB"),
    ("PUSH_PROC_STATE", "B"),
    ("PUSH_VERSION", 0),
    ("PUT_SUBS", ">bHH"),
    ("PUT_SUBS2", 0),
    ("PUT_SUBS3", 0),
    ("PUT_GLYPH", ">H"),
    ("PUSH_GLYPH_ATTR", ">Hb"),
    ("PUSH_ATT_TO_GLYPH_ATTR", ">Hb"),
    ("BITOR", 0),
    ("BITAND", 0),
    ("BITNOT", 0),  # x40
    ("BITSET", ">HH"),
    ("SET_FEAT", "Bb")
)
aCode_map = dict([(x[0], (i, x[1])) for i, x in enumerate(aCode_info)])

def disassemble(aCode):
    codelen = len(aCode)
    pc = 0
    res = []
    while pc < codelen:
        opcode = byteord(aCode[pc:pc+1])
        if opcode > len(aCode_info):
            instr = aCode_info[0]
        else:
            instr = aCode_info[opcode]
        pc += 1
        if instr[1] != 0 and pc >= codelen: return res
        if instr[1] == -1:
            count = byteord(aCode[pc])
            fmt = "%dB" % count
            pc += 1
        elif instr[1] == 0:
            fmt = ""
        else:
            fmt = instr[1]
        if fmt == "":
            res.append(instr[0])
            continue
        parms = struct.unpack_from(fmt, aCode[pc:])
        res.append(instr[0] + "(" + ", ".join(map(str, parms)) + ")")
        pc += struct.calcsize(fmt)
    return res

instre = re.compile(r"^\s*([^(]+)\s*(?:\(([^)]+)\))?")
def assemble(instrs):
    res = b""
    for inst in instrs:
        m = instre.match(inst)
        if not m or not m.group(1) in aCode_map:
            continue
        opcode, parmfmt = aCode_map[m.group(1)]
        res += struct.pack("B", opcode)
        if m.group(2):
            if parmfmt == 0:
                continue
            parms = [int(x) for x in re.split(r",\s*", m.group(2))]
            if parmfmt == -1:
                l = len(parms)
                res += struct.pack(("%dB" % (l+1)), l, *parms)
            else:
                res += struct.pack(parmfmt, *parms)
    return res

def writecode(tag, writer, instrs):
    writer.begintag(tag)
    writer.newline()
    for l in disassemble(instrs):
        writer.write(l)
        writer.newline()
    writer.endtag(tag)
    writer.newline()

def readcode(content):
    res = []
    for e in content_string(content).split('\n'):
        e = e.strip()
        if not len(e): continue
        res.append(e)
    return assemble(res)

attrs_info = ('flags', 'extraAscent', 'extraDescent', 'maxGlyphID',
    'numLigComp', 'numUserDefn', 'maxCompPerLig', 'direction', 'lbGID')
attrs_passindexes = ('iSubst', 'iPos', 'iJust', 'iBidi')
attrs_contexts = ('maxPreContext', 'maxPostContext')
attrs_attributes = ('attrPseudo', 'attrBreakWeight', 'attrDirectionality',
    'attrMirroring', 'attrSkipPasses', 'attCollisions')
pass_attrs_info = ('flags', 'maxRuleLoop', 'maxRuleContext', 'maxBackup',
    'minRulePreContext', 'maxRulePreContext', 'collisionThreshold')
pass_attrs_fsm = ('numRows', 'numTransitional', 'numSuccess', 'numColumns')

def writesimple(tag, self, writer, *attrkeys):
    attrs = dict([(k, getattr(self, k)) for k in attrkeys])
    writer.simpletag(tag, **attrs)
    writer.newline()

def getSimple(self, attrs, *attr_list):
    for k in attr_list:
        if k in attrs:
            setattr(self, k, int(safeEval(attrs[k])))

def content_string(contents):
    res = ""
    for element in contents:
        if isinstance(element, tuple): continue
        res += element
    return res.strip()

def wrapline(writer, dat, length=80):
    currline = ""
    for d in dat:
        if len(currline) > length:
            writer.write(currline[:-1])
            writer.newline()
            currline = ""
        currline += d + " "
    if len(currline):
        writer.write(currline[:-1])
        writer.newline()

class _Object():
    pass

class table_S__i_l_f(DefaultTable.DefaultTable):
    '''Silf table support'''

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.silfs = []

    def decompile(self, data, ttFont):
        sstruct.unpack2(Silf_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version >= 5.0:
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)
        elif self.version < 3.0:
            self.numSilf = struct.unpack('>H', data[4:6])
            self.scheme = 0
            self.compilerVersion = 0
            base = 8
        else:
            self.scheme = 0
            sstruct.unpack2(Silf_hdr_format_3, data, self)
            base = sstruct.calcsize(Silf_hdr_format_3)

        silfoffsets = struct.unpack_from(('>%dL' % self.numSilf), data[base:])
        for offset in silfoffsets:
            s = Silf()
            self.silfs.append(s)
            s.decompile(data[offset:], ttFont, self.version)

    def compile(self, ttFont):
        self.numSilf = len(self.silfs)
        if self.version < 3.0:
            hdr = sstruct.pack(Silf_hdr_format, self)
            hdr += struct.pack(">HH", self.numSilf, 0)
        else:
            hdr = sstruct.pack(Silf_hdr_format_3, self)
        offset = len(hdr) + 4 * self.numSilf
        data = b""
        for s in self.silfs:
            hdr += struct.pack(">L", offset)
            subdata = s.compile(ttFont, self.version)
            offset += len(subdata)
            data += subdata
        if self.version >= 5.0:
            return grUtils.compress(self.scheme, hdr+data)
        return hdr+data

    def toXML(self, writer, ttFont):
        writer.comment('Attributes starting with _ are informative only')
        writer.newline()
        writer.simpletag('version', version=self.version,
            compilerVersion=self.compilerVersion, compressionScheme=self.scheme)
        writer.newline()
        for s in self.silfs:
            writer.begintag('silf')
            writer.newline()
            s.toXML(writer, ttFont, self.version)
            writer.endtag('silf')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == 'version':
            self.scheme = int(safeEval(attrs['compressionScheme']))
            self.version = float(safeEval(attrs['version']))
            self.compilerVersion = int(safeEval(attrs['compilerVersion']))
            return
        if name == 'silf':
            s = Silf()
            self.silfs.append(s)
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                s.fromXML(tag, attrs, subcontent, ttFont, self.version)

class Silf(object):
    '''A particular Silf subtable'''

    def __init__(self):
        self.passes = []
        self.scriptTags = []
        self.critFeatures = []
        self.jLevels = []
        self.pMap = {}

    def decompile(self, data, ttFont, version=2.0):
        if version >= 3.0:
            _, data = sstruct.unpack2(Silf_part1_format_v3, data, self)
            self.ruleVersion = float(floatToFixedToStr(self.ruleVersion, precisionBits=16))
        _, data = sstruct.unpack2(Silf_part1_format, data, self)
        for jlevel in range(self.numJLevels):
            j, data = sstruct.unpack2(Silf_justify_format, data, _Object())
            self.jLevels.append(j)
        _, data = sstruct.unpack2(Silf_part2_format, data, self)
        if self.numCritFeatures:
            self.critFeatures = struct.unpack_from(('>%dH' % self.numCritFeatures), data)
        data = data[self.numCritFeatures * 2 + 1:]
        (numScriptTag,) = struct.unpack_from('B', data)
        if numScriptTag:
            self.scriptTags = [struct.unpack("4s", data[x:x+4])[0].decode("ascii") for x in range(1, 1 + 4 * numScriptTag, 4)]
        data = data[1 + 4 * numScriptTag:]
        (self.lbGID,) = struct.unpack('>H', data[:2])
        if self.numPasses:
            self.oPasses = struct.unpack(('>%dL' % (self.numPasses+1)), data[2:6+4*self.numPasses])
        data = data[6 + 4 * self.numPasses:]
        (numPseudo,) = struct.unpack(">H", data[:2])
        for i in range(numPseudo):
            if version >= 3.0:
                pseudo = sstruct.unpack(Silf_pseudomap_format, data[8+6*i:14+6*i], _Object())
            else:
                pseudo = sstruct.unpack(Silf_pseudomap_format_h, data[8+4*i:12+4*i], _Object())
            self.pMap[pseudo.unicode] = ttFont.getGlyphName(pseudo.nPseudo)
        data = data[8 + 6 * numPseudo:]
        currpos = (sstruct.calcsize(Silf_part1_format)
                   + sstruct.calcsize(Silf_justify_format) * self.numJLevels
                   + sstruct.calcsize(Silf_part2_format) + 2 * self.numCritFeatures
                   + 1 + 1 + 4 * numScriptTag + 6 + 4 * self.numPasses + 8 + 6 * numPseudo)
        if version >= 3.0:
            currpos += sstruct.calcsize(Silf_part1_format_v3)
        self.classes = Classes()
        self.classes.decompile(data, ttFont, version)
        for i in range(self.numPasses):
            p = Pass()
            self.passes.append(p)
            p.decompile(data[self.oPasses[i]-currpos:self.oPasses[i+1]-currpos],
                        ttFont, version)

    def compile(self, ttFont, version=2.0):
        self.numPasses = len(self.passes)
        self.numJLevels = len(self.jLevels)
        self.numCritFeatures = len(self.critFeatures)
        numPseudo = len(self.pMap)
        data = b""
        if version >= 3.0:
            hdroffset = sstruct.calcsize(Silf_part1_format_v3)
        else:
            hdroffset = 0
        data += sstruct.pack(Silf_part1_format, self)
        for j in self.jLevels:
            data += sstruct.pack(Silf_justify_format, j)
        data += sstruct.pack(Silf_part2_format, self)
        if self.numCritFeatures:
            data += struct.pack((">%dH" % self.numCritFeatures), *self.critFeatures)
        data += struct.pack("BB", 0, len(self.scriptTags))
        if len(self.scriptTags):
            tdata = [struct.pack("4s", x.encode("ascii")) for x in self.scriptTags]
            data += b"".join(tdata)
        data += struct.pack(">H", self.lbGID)
        self.passOffset = len(data)

        data1 = grUtils.bininfo(numPseudo, 6)
        currpos = hdroffset + len(data) + 4 * (self.numPasses + 1)
        self.pseudosOffset = currpos + len(data1)
        for u, p in sorted(self.pMap.items()):
            data1 += struct.pack((">LH" if version >= 3.0 else ">HH"),
                                 u, ttFont.getGlyphID(p))
        data1 += self.classes.compile(ttFont, version)
        currpos += len(data1)
        data2 = b""
        datao = b""
        for i, p in enumerate(self.passes):
            base = currpos + len(data2)
            datao += struct.pack(">L", base)
            data2 += p.compile(ttFont, base, version)
        datao += struct.pack(">L", currpos + len(data2))

        if version >= 3.0:
            data3 = sstruct.pack(Silf_part1_format_v3, self)
        else:
            data3 = b""
        return data3 + data + datao + data1 + data2


    def toXML(self, writer, ttFont, version=2.0):
        if version >= 3.0:
            writer.simpletag('version', ruleVersion=self.ruleVersion)
            writer.newline()
        writesimple('info', self, writer, *attrs_info)
        writesimple('passindexes', self, writer, *attrs_passindexes)
        writesimple('contexts', self, writer, *attrs_contexts)
        writesimple('attributes', self, writer, *attrs_attributes)
        if len(self.jLevels):
            writer.begintag('justifications')
            writer.newline()
            jformat, jnames, jfixes = sstruct.getformat(Silf_justify_format)
            for i, j in enumerate(self.jLevels):
                attrs = dict([(k, getattr(j, k)) for k in jnames])
                writer.simpletag('justify', **attrs)
                writer.newline()
            writer.endtag('justifications')
            writer.newline()
        if len(self.critFeatures):
            writer.begintag('critFeatures')
            writer.newline()
            writer.write(" ".join(map(str, self.critFeatures)))
            writer.newline()
            writer.endtag('critFeatures')
            writer.newline()
        if len(self.scriptTags):
            writer.begintag('scriptTags')
            writer.newline()
            writer.write(" ".join(self.scriptTags))
            writer.newline()
            writer.endtag('scriptTags')
            writer.newline()
        if self.pMap:
            writer.begintag('pseudoMap')
            writer.newline()
            for k, v in sorted(self.pMap.items()):
                writer.simpletag('pseudo', unicode=hex(k), pseudo=v)
                writer.newline()
            writer.endtag('pseudoMap')
            writer.newline()
        self.classes.toXML(writer, ttFont, version)
        if len(self.passes):
            writer.begintag('passes')
            writer.newline()
            for i, p in enumerate(self.passes):
                writer.begintag('pass', _index=i)
                writer.newline()
                p.toXML(writer, ttFont, version)
                writer.endtag('pass')
                writer.newline()
            writer.endtag('passes')
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'version':
            self.ruleVersion = float(safeEval(attrs.get('ruleVersion', "0")))
        if name == 'info':
            getSimple(self, attrs, *attrs_info)
        elif name == 'passindexes':
            getSimple(self, attrs, *attrs_passindexes)
        elif name == 'contexts':
            getSimple(self, attrs, *attrs_contexts)
        elif name == 'attributes':
            getSimple(self, attrs, *attrs_attributes)
        elif name == 'justifications':
            for element in content:
                if not isinstance(element, tuple): continue
                (tag, attrs, subcontent) = element
                if tag == 'justify':
                    j = _Object()
                    for k, v in attrs.items():
                        setattr(j, k, int(v))
                    self.jLevels.append(j)
        elif name == 'critFeatures':
            self.critFeatures = []
            element = content_string(content)
            self.critFeatures.extend(map(int, element.split()))
        elif name == 'scriptTags':
            self.scriptTags = []
            element = content_string(content)
            for n in element.split():
                self.scriptTags.append(n)
        elif name == 'pseudoMap':
            self.pMap = {}
            for element in content:
                if not isinstance(element, tuple): continue
                (tag, attrs, subcontent) = element
                if tag == 'pseudo':
                    k = int(attrs['unicode'], 16)
                    v = attrs['pseudo']
                    self.pMap[k] = v
        elif name == 'classes':
            self.classes = Classes()
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                self.classes.fromXML(tag, attrs, subcontent, ttFont, version)
        elif name == 'passes':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'pass':
                    p = Pass()
                    for e in subcontent:
                        if not isinstance(e, tuple): continue
                        p.fromXML(e[0], e[1], e[2], ttFont, version)
                    self.passes.append(p)


class Classes(object):

    def __init__(self):
        self.linear = []
        self.nonLinear = []

    def decompile(self, data, ttFont, version=2.0):
        sstruct.unpack2(Silf_classmap_format, data, self)
        if version >= 4.0:
            oClasses = struct.unpack((">%dL" % (self.numClass+1)),
                                     data[4:8+4*self.numClass])
        else:
            oClasses = struct.unpack((">%dH" % (self.numClass+1)),
                                     data[4:6+2*self.numClass])
        for s, e in zip(oClasses[:self.numLinear], oClasses[1:self.numLinear+1]):
            self.linear.append(ttFont.getGlyphName(x) for x in
                               struct.unpack((">%dH" % ((e-s)/2)), data[s:e]))
        for s, e in zip(oClasses[self.numLinear:self.numClass],
                        oClasses[self.numLinear+1:self.numClass+1]):
            nonLinids = [struct.unpack(">HH", data[x:x+4]) for x in range(s+8, e, 4)]
            nonLin = dict([(ttFont.getGlyphName(x[0]), x[1]) for x in nonLinids])
            self.nonLinear.append(nonLin)

    def compile(self, ttFont, version=2.0):
        data = b""
        oClasses = []
        if version >= 4.0:
            offset = 8 + 4 * (len(self.linear) + len(self.nonLinear))
        else:
            offset = 6 + 2 * (len(self.linear) + len(self.nonLinear))
        for l in self.linear:
            oClasses.append(len(data) + offset)
            gs = [ttFont.getGlyphID(x) for x in l]
            data += struct.pack((">%dH" % len(l)), *gs)
        for l in self.nonLinear:
            oClasses.append(len(data) + offset)
            gs = [(ttFont.getGlyphID(x[0]), x[1]) for x in l.items()]
            data += grUtils.bininfo(len(gs))
            data += b"".join([struct.pack(">HH", *x) for x in sorted(gs)])
        oClasses.append(len(data) + offset)
        self.numClass = len(oClasses) - 1
        self.numLinear = len(self.linear)
        return sstruct.pack(Silf_classmap_format, self) + \
               struct.pack(((">%dL" if version >= 4.0 else ">%dH") % len(oClasses)),
                           *oClasses) + data

    def toXML(self, writer, ttFont, version=2.0):
        writer.begintag('classes')
        writer.newline()
        writer.begintag('linearClasses')
        writer.newline()
        for i, l in enumerate(self.linear):
            writer.begintag('linear', _index=i)
            writer.newline()
            wrapline(writer, l)
            writer.endtag('linear')
            writer.newline()
        writer.endtag('linearClasses')
        writer.newline()
        writer.begintag('nonLinearClasses')
        writer.newline()
        for i, l in enumerate(self.nonLinear):
            writer.begintag('nonLinear', _index=i + self.numLinear)
            writer.newline()
            for inp, ind in l.items():
                writer.simpletag('map', glyph=inp, index=ind)
                writer.newline()
            writer.endtag('nonLinear')
            writer.newline()
        writer.endtag('nonLinearClasses')
        writer.newline()
        writer.endtag('classes')
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, version=2.0):
        if name == 'linearClasses':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'linear':
                    l = content_string(subcontent).split()
                    self.linear.append(l)
        elif name == 'nonLinearClasses':
            for element in content:
                if not isinstance(element, tuple): continue
                tag, attrs, subcontent = element
                if tag == 'nonLinear':
                    l = {}
                    for e in subcontent:
                        if not isinstance(e, tuple): continue
                        tag, attrs, subsubcontent = e
                        if tag == 'map':
                            l[attrs['glyph']] = int(safeEval(attrs['index']))
                    self.nonLinear.append(l)

class Pass(object):

    def __init__(self):
        self.colMap = {}
        self.rules = []
        self.rulePreContexts = []
        self.ruleSortKeys = []
        self.ruleConstraints = []
        self.passConstraints = b""
        self.actions = []
        self.stateTrans = []
        self.startStates = []

    def decompile(self, data, ttFont, version=2.0):
        _, data = sstruct.unpack2(Silf_pass_format, data, self)
        (numRange, _, _, _) = struct.unpack(">4H", data[:8])
        data = data[8:]
        for i in range(numRange):
            (first, last, col) = struct.unpack(">3H", data[6*i:6*i+6])
            for g in range(first, last+1):
                self.colMap[ttFont.getGlyphName(g)] = col
        data = data[6*numRange:]
        oRuleMap = struct.unpack_from((">%dH" % (self.numSuccess + 1)), data)
        data = data[2+2*self.numSuccess:]
        rules = struct.unpack_from((">%dH" % oRuleMap[-1]), data)
        self.rules = [rules[s:e] for (s, e) in zip(oRuleMap, oRuleMap[1:])]
        data = data[2*oRuleMap[-1]:]
        (self.minRulePreContext, self.maxRulePreContext) = struct.unpack('BB', data[:2])
        numStartStates = self.maxRulePreContext - self.minRulePreContext + 1
        self.startStates = struct.unpack((">%dH" % numStartStates),
                                         data[2:2 + numStartStates * 2])
        data = data[2+numStartStates*2:]
        self.ruleSortKeys = struct.unpack((">%dH" % self.numRules), data[:2 * self.numRules])
        data = data[2*self.numRules:]
        self.rulePreContexts = struct.unpack(("%dB" % self.numRules), data[:self.numRules])
        data = data[self.numRules:]
        (self.collisionThreshold, pConstraint) = struct.unpack(">BH", data[:3])
        oConstraints = list(struct.unpack((">%dH" % (self.numRules + 1)),
                                          data[3:5 + self.numRules * 2]))
        data = data[5 + self.numRules * 2:]
        oActions = list(struct.unpack((">%dH" % (self.numRules + 1)),
                                      data[:2 + self.numRules * 2]))
        data = data[2 * self.numRules + 2:]
        for i in range(self.numTransitional):
            a = array("H", data[i*self.numColumns*2:(i+1)*self.numColumns*2])
            if sys.byteorder != "big": a.byteswap()
            self.stateTrans.append(a)
        data = data[self.numTransitional * self.numColumns * 2 + 1:]
        self.passConstraints = data[:pConstraint]
        data = data[pConstraint:]
        for i in range(len(oConstraints)-2, -1, -1):
            if oConstraints[i] == 0:
                oConstraints[i] = oConstraints[i+1]
        self.ruleConstraints = [(data[s:e] if (e-s > 1) else b"") for (s, e) in zip(oConstraints, oConstraints[1:])]
        data = data[oConstraints[-1]:]
        self.actions = [(data[s:e] if (e-s > 1) else "") for (s, e) in zip(oActions, oActions[1:])]
        data = data[oActions[-1]:]
        # not using debug

    def compile(self, ttFont, base, version=2.0):
        # build it all up backwards
        oActions = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.actions + [b""], (0, []))[1]
        oConstraints = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.ruleConstraints + [b""], (1, []))[1]
        constraintCode = b"\000" + b"".join(self.ruleConstraints)
        transes = []
        for t in self.stateTrans:
            if sys.byteorder != "big": t.byteswap()
            transes.append(t.tobytes())
            if sys.byteorder != "big": t.byteswap()
        if not len(transes):
            self.startStates = [0]
        oRuleMap = reduce(lambda a, x: (a[0]+len(x), a[1]+[a[0]]), self.rules+[[]], (0, []))[1]
        passRanges = []
        gidcolmap = dict([(ttFont.getGlyphID(x[0]), x[1]) for x in self.colMap.items()])
        for e in grUtils.entries(gidcolmap, sameval=True):
            if e[1]:
                passRanges.append((e[0], e[0]+e[1]-1, e[2][0]))
        self.numRules = len(self.actions)
        self.fsmOffset = (sstruct.calcsize(Silf_pass_format) + 8 + len(passRanges) * 6
                          + len(oRuleMap) * 2 + 2 * oRuleMap[-1] + 2
                          + 2 * len(self.startStates) + 3 * self.numRules + 3
                          + 4 * self.numRules + 4)
        self.pcCode = self.fsmOffset + 2*self.numTransitional*self.numColumns + 1 + base
        self.rcCode = self.pcCode + len(self.passConstraints)
        self.aCode = self.rcCode + len(constraintCode)
        self.oDebug = 0
        # now generate output
        data = sstruct.pack(Silf_pass_format, self)
        data += grUtils.bininfo(len(passRanges), 6)
        data += b"".join(struct.pack(">3H", *p) for p in passRanges)
        data += struct.pack((">%dH" % len(oRuleMap)), *oRuleMap)
        flatrules = reduce(lambda a, x: a+x, self.rules, [])
        data += struct.pack((">%dH" % oRuleMap[-1]), *flatrules)
        data += struct.pack("BB", self.minRulePreContext, self.maxRulePreContext)
        data += struct.pack((">%dH" % len(self.startStates)), *self.startStates)
        data += struct.pack((">%dH" % self.numRules), *self.ruleSortKeys)
        data += struct.pack(("%dB" % self.numRules), *self.rulePreContexts)
        data += struct.pack(">BH", self.collisionThreshold, len(self.passConstraints))
        data += struct.pack((">%dH" % (self.numRules+1)), *oConstraints)
        data += struct.pack((">%dH" % (self.numRules+1)), *oActions)
        return data + b"".join(transes) + struct.pack("B", 0) + \
               self.passConstraints + constraintCode + b"".join(self.actions)

    def toXML(self, writer, ttFont, version=2.0):
        writesimple('info', self, writer, *pass_attrs_info)
        writesimple('fsminfo', self, writer, *pass_attrs_fsm)
        writer.begintag('colmap')
        writer.newline()
        wrapline(writer, ["{}={}".format(*x) for x in sorted(self.colMap.items(),
                          key=lambda x: ttFont.getGlyphID(x[0]))])
        writer.endtag('colmap')
        writer.newline()
        writer.begintag('staterulemap')
        writer.newline()
        for i, r in enumerate(self.rules):
            writer.simpletag('state', number=self.numRows - self.numSuccess + i,
                             rules=" ".join(map(str, r)))
            writer.newline()
        writer.endtag('staterulemap')
        writer.newline()
        writer.begintag('rules')
        writer.newline()
        for i in range(len(self.actions)):
            writer.begintag('rule', index=i, precontext=self.rulePreContexts[i],
                            sortkey=self.ruleSortKeys[i])
            writer.newline()
            if len(self.ruleConstraints[i]):
                writecode('constraint', writer, self.ruleConstraints[i])
            writecode('action', writer, self.actions[i])
            writer.endtag('rule')
            writer.newline()
        writer.endtag('rules')
        writer.newline()
        if len(self.passConstraints):
            writecode('passConstraint', writer, self.passConstraints)
        if len(self.stateTrans):
writer.begintag('fsm')
|
||||
writer.newline()
|
||||
writer.begintag('starts')
|
||||
writer.write(" ".join(map(str, self.startStates)))
|
||||
writer.endtag('starts')
|
||||
writer.newline()
|
||||
for i, s in enumerate(self.stateTrans):
|
||||
writer.begintag('row', _i=i)
|
||||
# no newlines here
|
||||
writer.write(" ".join(map(str, s)))
|
||||
writer.endtag('row')
|
||||
writer.newline()
|
||||
writer.endtag('fsm')
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont, version=2.0):
|
||||
if name == 'info':
|
||||
getSimple(self, attrs, *pass_attrs_info)
|
||||
elif name == 'fsminfo':
|
||||
getSimple(self, attrs, *pass_attrs_fsm)
|
||||
elif name == 'colmap':
|
||||
e = content_string(content)
|
||||
for w in e.split():
|
||||
x = w.split('=')
|
||||
if len(x) != 2 or x[0] == '' or x[1] == '': continue
|
||||
self.colMap[x[0]] = int(x[1])
|
||||
elif name == 'staterulemap':
|
||||
for e in content:
|
||||
if not isinstance(e, tuple): continue
|
||||
tag, a, c = e
|
||||
if tag == 'state':
|
||||
self.rules.append([int(x) for x in a['rules'].split(" ")])
|
||||
elif name == 'rules':
|
||||
for element in content:
|
||||
if not isinstance(element, tuple): continue
|
||||
tag, a, c = element
|
||||
if tag != 'rule': continue
|
||||
self.rulePreContexts.append(int(a['precontext']))
|
||||
self.ruleSortKeys.append(int(a['sortkey']))
|
||||
con = b""
|
||||
act = b""
|
||||
for e in c:
|
||||
if not isinstance(e, tuple): continue
|
||||
tag, a, subc = e
|
||||
if tag == 'constraint':
|
||||
con = readcode(subc)
|
||||
elif tag == 'action':
|
||||
act = readcode(subc)
|
||||
self.actions.append(act)
|
||||
self.ruleConstraints.append(con)
|
||||
elif name == 'passConstraint':
|
||||
self.passConstraints = readcode(content)
|
||||
elif name == 'fsm':
|
||||
for element in content:
|
||||
if not isinstance(element, tuple): continue
|
||||
tag, a, c = element
|
||||
if tag == 'row':
|
||||
s = array('H')
|
||||
e = content_string(c)
|
||||
s.extend(map(int, e.split()))
|
||||
self.stateTrans.append(s)
|
||||
elif tag == 'starts':
|
||||
s = []
|
||||
e = content_string(c)
|
||||
s.extend(map(int, e.split()))
|
||||
self.startStates = s
|
||||
|
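Pass.compile() above derives its offset arrays (oActions, oConstraints, oRuleMap) with a reduce-based running sum over variable-length chunks. A minimal sketch of that idiom, with hypothetical chunks; the trailing b"" sentinel makes the result one entry longer than the input, so the last offset is the total length:

from functools import reduce

# Fold (running_total, offsets_so_far) over the chunks, keep only the offsets.
chunks = [b"abc", b"", b"defgh"]  # hypothetical rule actions
offsets = reduce(lambda a, x: (a[0] + len(x), a[1] + [a[0]]), chunks + [b""], (0, []))[1]
assert offsets == [0, 3, 3, 8]
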
@@ -0,0 +1,4 @@
from .T_S_I_V_ import table_T_S_I_V_

class table_T_S_I_S_(table_T_S_I_V_):
    pass
54
venv/Lib/site-packages/fontTools/ttLib/tables/T_S_I__0.py
Normal file
@@ -0,0 +1,54 @@
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
tool to store its hinting source data.

TSI0 is the index table containing the lengths and offsets for the glyph
programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained
in the TSI1 table.
"""
from . import DefaultTable
import struct

tsi0Format = '>HHL'

def fixlongs(glyphID, textLength, textOffset):
    return int(glyphID), int(textLength), textOffset


class table_T_S_I__0(DefaultTable.DefaultTable):

    dependencies = ["TSI1"]

    def decompile(self, data, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        indices = []
        size = struct.calcsize(tsi0Format)
        for i in range(numGlyphs + 5):
            glyphID, textLength, textOffset = fixlongs(*struct.unpack(tsi0Format, data[:size]))
            indices.append((glyphID, textLength, textOffset))
            data = data[size:]
        assert len(data) == 0
        assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number"
        self.indices = indices[:-5]
        self.extra_indices = indices[-4:]

    def compile(self, ttFont):
        if not hasattr(self, "indices"):
            # We have no corresponding table (TSI1 or TSI3); let's return
            # no data, which effectively means "ignore us".
            return b""
        data = b""
        for index, textLength, textOffset in self.indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34)
        for index, textLength, textOffset in self.extra_indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        return data

    def set(self, indices, extra_indices):
        # gets called by 'TSI1' or 'TSI3'
        self.indices = indices
        self.extra_indices = extra_indices

    def toXML(self, writer, ttFont):
        writer.comment("This table will be calculated by the compiler")
        writer.newline()
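Each TSI0 record is one '>HHL' struct: uint16 glyphID, uint16 textLength, uint32 textOffset, so struct.calcsize(tsi0Format) is 8 bytes. A quick self-contained check with a hypothetical record:

import struct

data = struct.pack('>HHL', 3, 12, 0x40)  # hypothetical record
glyphID, textLength, textOffset = struct.unpack('>HHL', data[:8])
assert (glyphID, textLength, textOffset) == (3, 12, 0x40)
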
1845
venv/Lib/site-packages/fontTools/ttLib/tables/_g_l_y_f.py
Normal file
File diff suppressed because it is too large
292
venv/Lib/site-packages/fontTools/ttLib/tables/_p_o_s_t.py
Normal file
@@ -0,0 +1,292 @@
from fontTools import ttLib
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
from fontTools.misc import sstruct
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
from . import DefaultTable
import sys
import struct
import array
import logging

log = logging.getLogger(__name__)

postFormat = """
    >
    formatType:         16.16F
    italicAngle:        16.16F  # italic angle in degrees
    underlinePosition:  h
    underlineThickness: h
    isFixedPitch:       L
    minMemType42:       L  # minimum memory if TrueType font is downloaded
    maxMemType42:       L  # maximum memory if TrueType font is downloaded
    minMemType1:        L  # minimum memory if Type1 font is downloaded
    maxMemType1:        L  # maximum memory if Type1 font is downloaded
"""

postFormatSize = sstruct.calcsize(postFormat)


class table__p_o_s_t(DefaultTable.DefaultTable):

    def decompile(self, data, ttFont):
        sstruct.unpack(postFormat, data[:postFormatSize], self)
        data = data[postFormatSize:]
        if self.formatType == 1.0:
            self.decode_format_1_0(data, ttFont)
        elif self.formatType == 2.0:
            self.decode_format_2_0(data, ttFont)
        elif self.formatType == 3.0:
            self.decode_format_3_0(data, ttFont)
        elif self.formatType == 4.0:
            self.decode_format_4_0(data, ttFont)
        else:
            # unsupported format
            raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)

    def compile(self, ttFont):
        data = sstruct.pack(postFormat, self)
        if self.formatType == 1.0:
            pass  # we're done
        elif self.formatType == 2.0:
            data = data + self.encode_format_2_0(ttFont)
        elif self.formatType == 3.0:
            pass  # we're done
        elif self.formatType == 4.0:
            data = data + self.encode_format_4_0(ttFont)
        else:
            # unsupported format
            raise ttLib.TTLibError("'post' table format %f not supported" % self.formatType)
        return data

    def getGlyphOrder(self):
        """This function will get called by a ttLib.TTFont instance.
        Do not call this function yourself, use TTFont().getGlyphOrder()
        or its relatives instead!
        """
        if not hasattr(self, "glyphOrder"):
            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        glyphOrder = self.glyphOrder
        del self.glyphOrder
        return glyphOrder

    def decode_format_1_0(self, data, ttFont):
        self.glyphOrder = standardGlyphOrder[:ttFont["maxp"].numGlyphs]

    def decode_format_2_0(self, data, ttFont):
        numGlyphs, = struct.unpack(">H", data[:2])
        numGlyphs = int(numGlyphs)
        if numGlyphs > ttFont['maxp'].numGlyphs:
            # Assume the numGlyphs field is bogus, so sync with maxp.
            # I've seen this in one font, and if the assumption is
            # wrong elsewhere, well, so be it: it's hard enough to
            # work around _one_ non-conforming post format...
            numGlyphs = ttFont['maxp'].numGlyphs
        data = data[2:]
        indices = array.array("H")
        indices.frombytes(data[:2*numGlyphs])
        if sys.byteorder != "big": indices.byteswap()
        data = data[2*numGlyphs:]
        maxIndex = max(indices)
        self.extraNames = extraNames = unpackPStrings(data, maxIndex-257)
        self.glyphOrder = glyphOrder = [""] * int(ttFont['maxp'].numGlyphs)
        for glyphID in range(numGlyphs):
            index = indices[glyphID]
            if index > 257:
                try:
                    name = extraNames[index-258]
                except IndexError:
                    name = ""
            else:
                # fetch names from standard list
                name = standardGlyphOrder[index]
            glyphOrder[glyphID] = name
        self.build_psNameMapping(ttFont)

    def build_psNameMapping(self, ttFont):
        mapping = {}
        allNames = {}
        for i in range(ttFont['maxp'].numGlyphs):
            glyphName = psName = self.glyphOrder[i]
            if glyphName == "":
                glyphName = "glyph%.5d" % i
            if glyphName in allNames:
                # make up a new glyphName that's unique
                n = allNames[glyphName]
                while (glyphName + "#" + str(n)) in allNames:
                    n += 1
                allNames[glyphName] = n + 1
                glyphName = glyphName + "#" + str(n)

            self.glyphOrder[i] = glyphName
            allNames[glyphName] = 1
            if glyphName != psName:
                mapping[glyphName] = psName

        self.mapping = mapping

    def decode_format_3_0(self, data, ttFont):
        # Setting self.glyphOrder to None will cause the TTFont object
        # try and construct glyph names from a Unicode cmap table.
        self.glyphOrder = None

    def decode_format_4_0(self, data, ttFont):
        from fontTools import agl
        numGlyphs = ttFont['maxp'].numGlyphs
        indices = array.array("H")
        indices.frombytes(data)
        if sys.byteorder != "big": indices.byteswap()
        # In some older fonts, the size of the post table doesn't match
        # the number of glyphs. Sometimes it's bigger, sometimes smaller.
        self.glyphOrder = glyphOrder = [''] * int(numGlyphs)
        for i in range(min(len(indices), numGlyphs)):
            if indices[i] == 0xFFFF:
                self.glyphOrder[i] = ''
            elif indices[i] in agl.UV2AGL:
                self.glyphOrder[i] = agl.UV2AGL[indices[i]]
            else:
                self.glyphOrder[i] = "uni%04X" % indices[i]
        self.build_psNameMapping(ttFont)

    def encode_format_2_0(self, ttFont):
        numGlyphs = ttFont['maxp'].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        extraDict = {}
        extraNames = self.extraNames = [
            n for n in self.extraNames if n not in standardGlyphOrder]
        for i in range(len(extraNames)):
            extraDict[extraNames[i]] = i
        for glyphID in range(numGlyphs):
            glyphName = glyphOrder[glyphID]
            if glyphName in self.mapping:
                psName = self.mapping[glyphName]
            else:
                psName = glyphName
            if psName in extraDict:
                index = 258 + extraDict[psName]
            elif psName in standardGlyphOrder:
                index = standardGlyphOrder.index(psName)
            else:
                index = 258 + len(extraNames)
                extraDict[psName] = len(extraNames)
                extraNames.append(psName)
            indices.append(index)
        if sys.byteorder != "big": indices.byteswap()
        return struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)

    def encode_format_4_0(self, ttFont):
        from fontTools import agl
        numGlyphs = ttFont['maxp'].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        for glyphID in glyphOrder:
            glyphID = glyphID.split('#')[0]
            if glyphID in agl.AGL2UV:
                indices.append(agl.AGL2UV[glyphID])
            elif len(glyphID) == 7 and glyphID[:3] == 'uni':
                indices.append(int(glyphID[3:], 16))
            else:
                indices.append(0xFFFF)
        if sys.byteorder != "big": indices.byteswap()
        return indices.tobytes()

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(postFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        if hasattr(self, "mapping"):
            writer.begintag("psNames")
            writer.newline()
            writer.comment("This file uses unique glyph names based on the information\n"
                           "found in the 'post' table. Since these names might not be unique,\n"
                           "we have to invent artificial names in case of clashes. In order to\n"
                           "be able to retain the original information, we need a name to\n"
                           "ps name mapping for those cases where they differ. That's what\n"
                           "you see below.\n")
            writer.newline()
            items = sorted(self.mapping.items())
            for name, psName in items:
                writer.simpletag("psName", name=name, psName=psName)
                writer.newline()
            writer.endtag("psNames")
            writer.newline()
        if hasattr(self, "extraNames"):
            writer.begintag("extraNames")
            writer.newline()
            writer.comment("following are the names that are not taken from the standard Mac glyph order")
            writer.newline()
            for name in self.extraNames:
                writer.simpletag("psName", name=name)
                writer.newline()
            writer.endtag("extraNames")
            writer.newline()
        if hasattr(self, "data"):
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(self.data)
            writer.endtag("hexdata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name not in ("psNames", "extraNames", "hexdata"):
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "psNames":
            self.mapping = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.mapping[attrs["name"]] = attrs["psName"]
        elif name == "extraNames":
            self.extraNames = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.extraNames.append(attrs["name"])
        else:
            self.data = readHex(content)


def unpackPStrings(data, n):
    # extract n Pascal strings from data.
    # if there is not enough data, use ""

    strings = []
    index = 0
    dataLen = len(data)

    for _ in range(n):
        if dataLen <= index:
            length = 0
        else:
            length = byteord(data[index])
        index += 1

        if dataLen <= index + length - 1:
            name = ""
        else:
            name = tostr(data[index:index+length], encoding="latin1")
        strings.append(name)
        index += length

    if index < dataLen:
        log.warning("%d extra bytes in post.stringData array", dataLen - index)

    elif dataLen < index:
        log.warning("not enough data in post.stringData array")

    return strings


def packPStrings(strings):
    data = b""
    for s in strings:
        data = data + bytechr(len(s)) + tobytes(s, encoding="latin1")
    return data
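The two helpers above implement the 'post' format 2.0 string pool: each extra name is stored as a Pascal string, a length byte followed by Latin-1 bytes. A round-trip sketch using the functions as defined above:

names = ["alpha", "beta.alt"]
blob = packPStrings(names)         # length byte + bytes, per name
assert blob == b"\x05alpha\x08beta.alt"
assert unpackPStrings(blob, 2) == names
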
@@ -0,0 +1,6 @@
from .otBase import BaseTTXConverter


# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html
class table__p_r_o_p(BaseTTXConverter):
    pass
118
venv/Lib/site-packages/fontTools/ttLib/tables/_v_h_e_a.py
Normal file
@@ -0,0 +1,118 @@
from fontTools.misc import sstruct
from fontTools.misc.textTools import safeEval
from fontTools.misc.fixedTools import (
    ensureVersionIsLong as fi2ve, versionToFixed as ve2fi)
from . import DefaultTable
import math


vheaFormat = """
    >   # big endian
    tableVersion:           L
    ascent:                 h
    descent:                h
    lineGap:                h
    advanceHeightMax:       H
    minTopSideBearing:      h
    minBottomSideBearing:   h
    yMaxExtent:             h
    caretSlopeRise:         h
    caretSlopeRun:          h
    caretOffset:            h
    reserved1:              h
    reserved2:              h
    reserved3:              h
    reserved4:              h
    metricDataFormat:       h
    numberOfVMetrics:       H
"""

class table__v_h_e_a(DefaultTable.DefaultTable):

    # Note: Keep in sync with table__h_h_e_a

    dependencies = ['vmtx', 'glyf', 'CFF ', 'CFF2']

    def decompile(self, data, ttFont):
        sstruct.unpack(vheaFormat, data, self)

    def compile(self, ttFont):
        if ttFont.recalcBBoxes and (ttFont.isLoaded('glyf') or ttFont.isLoaded('CFF ') or ttFont.isLoaded('CFF2')):
            self.recalc(ttFont)
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(vheaFormat, self)

    def recalc(self, ttFont):
        if 'vmtx' in ttFont:
            vmtxTable = ttFont['vmtx']
            self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())

        boundsHeightDict = {}
        if 'glyf' in ttFont:
            glyfTable = ttFont['glyf']
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "yMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsHeightDict[name] = g.yMax - g.yMin
        elif 'CFF ' in ttFont or 'CFF2' in ttFont:
            if 'CFF ' in ttFont:
                topDict = ttFont['CFF '].cff.topDictIndex[0]
            else:
                topDict = ttFont['CFF2'].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    boundsHeightDict[name] = int(
                        math.ceil(bounds[3]) - math.floor(bounds[1]))

        if boundsHeightDict:
            minTopSideBearing = float('inf')
            minBottomSideBearing = float('inf')
            yMaxExtent = -float('inf')
            for name, boundsHeight in boundsHeightDict.items():
                advanceHeight, tsb = vmtxTable[name]
                bsb = advanceHeight - tsb - boundsHeight
                extent = tsb + boundsHeight
                minTopSideBearing = min(minTopSideBearing, tsb)
                minBottomSideBearing = min(minBottomSideBearing, bsb)
                yMaxExtent = max(yMaxExtent, extent)
            self.minTopSideBearing = minTopSideBearing
            self.minBottomSideBearing = minBottomSideBearing
            self.yMaxExtent = yMaxExtent

        else:  # No glyph has outlines.
            self.minTopSideBearing = 0
            self.minBottomSideBearing = 0
            self.yMaxExtent = 0

    def toXML(self, writer, ttFont):
        formatstring, names, fixes = sstruct.getformat(vheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))

    # reserved0 is caretOffset for legacy reasons
    @property
    def reserved0(self):
        return self.caretOffset

    @reserved0.setter
    def reserved0(self, value):
        self.caretOffset = value
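The recalc() loop above is plain vertical-metrics arithmetic: the bottom side bearing is the advance height minus the top side bearing minus the bounds height, and the extent is the top side bearing plus the bounds height. A worked check with hypothetical values:

advanceHeight, tsb, boundsHeight = 1000, 150, 700
bsb = advanceHeight - tsb - boundsHeight  # 1000 - 150 - 700 = 150
extent = tsb + boundsHeight               # 150 + 700 = 850
assert (bsb, extent) == (150, 850)
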
1086
venv/Lib/site-packages/fontTools/ttLib/ttFont.py
Normal file
File diff suppressed because it is too large
2308
venv/Lib/site-packages/fontTools/ufoLib/__init__.py
Normal file
File diff suppressed because it is too large
16
venv/Lib/site-packages/fontTools/ufoLib/errors.py
Normal file
@@ -0,0 +1,16 @@


class UFOLibError(Exception):
    pass


class UnsupportedUFOFormat(UFOLibError):
    pass


class GlifLibError(UFOLibError):
    pass


class UnsupportedGLIFFormat(GlifLibError):
    pass
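Since the classes above form a single hierarchy rooted at UFOLibError, one broad handler covers the whole family; a minimal sketch:

try:
    raise UnsupportedGLIFFormat("GLIF format 99 is not supported")
except UFOLibError as e:  # also catches GlifLibError subclasses
    print(type(e).__name__, e)
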
5
venv/Lib/site-packages/fontTools/ufoLib/etree.py
Normal file
@@ -0,0 +1,5 @@
"""DEPRECATED - This module is kept here only as a backward compatibility shim
for the old ufoLib.etree module, which was moved to fontTools.misc.etree.
Please use the latter instead.
"""
from fontTools.misc.etree import *
1811
venv/Lib/site-packages/fontTools/ufoLib/glifLib.py
Normal file
File diff suppressed because it is too large
89
venv/Lib/site-packages/fontTools/ufoLib/kerning.py
Normal file
@@ -0,0 +1,89 @@


def lookupKerningValue(pair, kerning, groups, fallback=0, glyphToFirstGroup=None, glyphToSecondGroup=None):
    """
    Note: This expects kerning to be a flat dictionary
    of kerning pairs, not the nested structure used
    in kerning.plist.

    >>> groups = {
    ...     "public.kern1.O" : ["O", "D", "Q"],
    ...     "public.kern2.E" : ["E", "F"]
    ... }
    >>> kerning = {
    ...     ("public.kern1.O", "public.kern2.E") : -100,
    ...     ("public.kern1.O", "F") : -200,
    ...     ("D", "F") : -300
    ... }
    >>> lookupKerningValue(("D", "F"), kerning, groups)
    -300
    >>> lookupKerningValue(("O", "F"), kerning, groups)
    -200
    >>> lookupKerningValue(("O", "E"), kerning, groups)
    -100
    >>> lookupKerningValue(("O", "O"), kerning, groups)
    0
    >>> lookupKerningValue(("E", "E"), kerning, groups)
    0
    >>> lookupKerningValue(("E", "O"), kerning, groups)
    0
    >>> lookupKerningValue(("X", "X"), kerning, groups)
    0
    >>> lookupKerningValue(("public.kern1.O", "public.kern2.E"),
    ...     kerning, groups)
    -100
    >>> lookupKerningValue(("public.kern1.O", "F"), kerning, groups)
    -200
    >>> lookupKerningValue(("O", "public.kern2.E"), kerning, groups)
    -100
    >>> lookupKerningValue(("public.kern1.X", "public.kern2.X"), kerning, groups)
    0
    """
    # quickly check to see if the pair is in the kerning dictionary
    if pair in kerning:
        return kerning[pair]
    # create glyph to group mapping
    if glyphToFirstGroup is not None:
        assert glyphToSecondGroup is not None
    if glyphToSecondGroup is not None:
        assert glyphToFirstGroup is not None
    if glyphToFirstGroup is None:
        glyphToFirstGroup = {}
        glyphToSecondGroup = {}
        for group, groupMembers in groups.items():
            if group.startswith("public.kern1."):
                for glyph in groupMembers:
                    glyphToFirstGroup[glyph] = group
            elif group.startswith("public.kern2."):
                for glyph in groupMembers:
                    glyphToSecondGroup[glyph] = group
    # get group names and make sure first and second are glyph names
    first, second = pair
    firstGroup = secondGroup = None
    if first.startswith("public.kern1."):
        firstGroup = first
        first = None
    else:
        firstGroup = glyphToFirstGroup.get(first)
    if second.startswith("public.kern2."):
        secondGroup = second
        second = None
    else:
        secondGroup = glyphToSecondGroup.get(second)
    # make an ordered list of pairs to look up
    pairs = [
        (first, second),
        (first, secondGroup),
        (firstGroup, second),
        (firstGroup, secondGroup)
    ]
    # look up the pairs and return any matches
    for pair in pairs:
        if pair in kerning:
            return kerning[pair]
    # use the fallback value
    return fallback


if __name__ == "__main__":
    import doctest
    doctest.testmod()
75
venv/Lib/site-packages/fontTools/ufoLib/utils.py
Normal file
@@ -0,0 +1,75 @@
"""The module contains miscellaneous helpers.
It's not considered part of the public ufoLib API.
"""
import warnings
import functools


numberTypes = (int, float)


def deprecated(msg=""):
    """Decorator factory to mark functions as deprecated with given message.

    >>> @deprecated("Enough!")
    ... def some_function():
    ...     "I just print 'hello world'."
    ...     print("hello world")
    >>> some_function()
    hello world
    >>> some_function.__doc__ == "I just print 'hello world'."
    True
    """

    def deprecated_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            warnings.warn(
                f"{func.__name__} function is deprecated. {msg}",
                category=DeprecationWarning,
                stacklevel=2,
            )
            return func(*args, **kwargs)

        return wrapper

    return deprecated_decorator


# To be mixed with enum.Enum in UFOFormatVersion and GLIFFormatVersion
class _VersionTupleEnumMixin:
    @property
    def major(self):
        return self.value[0]

    @property
    def minor(self):
        return self.value[1]

    @classmethod
    def _missing_(cls, value):
        # allow to initialize a version enum from a single (major) integer
        if isinstance(value, int):
            return cls((value, 0))
        # or from None to obtain the current default version
        if value is None:
            return cls.default()
        return super()._missing_(value)

    def __str__(self):
        return f"{self.major}.{self.minor}"

    @classmethod
    def default(cls):
        # get the latest defined version (i.e. the max of all versions)
        return max(cls.__members__.values())

    @classmethod
    def supported_versions(cls):
        return frozenset(cls.__members__.values())


if __name__ == "__main__":
    import doctest

    doctest.testmod()
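The mixin above hooks enum's _missing_, so a version enum built on it accepts a (major, minor) tuple, a bare major int, or None for the current default. A minimal sketch with a hypothetical enum, mixing in tuple so that members compare and the max() in default() works:

import enum

class MyFormatVersion(tuple, _VersionTupleEnumMixin, enum.Enum):
    FORMAT_1_0 = (1, 0)
    FORMAT_2_0 = (2, 0)

assert MyFormatVersion(1) is MyFormatVersion.FORMAT_1_0     # bare major int
assert MyFormatVersion(None) is MyFormatVersion.FORMAT_2_0  # latest default
assert str(MyFormatVersion.FORMAT_2_0) == "2.0"
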
137
venv/Lib/site-packages/fontTools/varLib/builder.py
Normal file
@@ -0,0 +1,137 @@
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot

# VariationStore

def buildVarRegionAxis(axisSupport):
    self = ot.VarRegionAxis()
    self.StartCoord, self.PeakCoord, self.EndCoord = [float(v) for v in axisSupport]
    return self

def buildVarRegion(support, axisTags):
    assert all(tag in axisTags for tag in support.keys()), ("Unknown axis tag found.", support, axisTags)
    self = ot.VarRegion()
    self.VarRegionAxis = []
    for tag in axisTags:
        self.VarRegionAxis.append(buildVarRegionAxis(support.get(tag, (0, 0, 0))))
    return self

def buildVarRegionList(supports, axisTags):
    self = ot.VarRegionList()
    self.RegionAxisCount = len(axisTags)
    self.Region = []
    for support in supports:
        self.Region.append(buildVarRegion(support, axisTags))
    self.RegionCount = len(self.Region)
    return self


def _reorderItem(lst, mapping):
    return [lst[i] for i in mapping]

def VarData_calculateNumShorts(self, optimize=False):
    count = self.VarRegionCount
    items = self.Item
    bit_lengths = [0] * count
    for item in items:
        # The "+ (i < -1)" magic is to handle two's complement.
        # That is, we want to get back 7 for -128, whereas
        # bit_length() returns 8. Similarly for -65536.
        # The reason "i < -1" is used instead of "i < 0" is that
        # the latter would make it return 0 for "-1" instead of 1.
        bl = [(i + (i < -1)).bit_length() for i in item]
        bit_lengths = [max(*pair) for pair in zip(bl, bit_lengths)]
    # The addition of 8, instead of 7, is to account for the sign bit.
    # This "((b + 8) >> 3) if b else 0" when combined with the above
    # "(i + (i < -1)).bit_length()" is a faster way to compute byte-lengths
    # conforming to:
    #
    # byte_length = (0 if i == 0 else
    #                1 if -128 <= i < 128 else
    #                2 if -65536 <= i < 65536 else
    #                ...)
    byte_lengths = [((b + 8) >> 3) if b else 0 for b in bit_lengths]

    # https://github.com/fonttools/fonttools/issues/2279
    longWords = any(b > 2 for b in byte_lengths)

    if optimize:
        # Reorder columns such that wider columns come before narrower columns
        mapping = []
        mapping.extend(i for i, b in enumerate(byte_lengths) if b > 2)
        mapping.extend(i for i, b in enumerate(byte_lengths) if b == 2)
        mapping.extend(i for i, b in enumerate(byte_lengths) if b == 1)

        byte_lengths = _reorderItem(byte_lengths, mapping)
        self.VarRegionIndex = _reorderItem(self.VarRegionIndex, mapping)
        self.VarRegionCount = len(self.VarRegionIndex)
        for i in range(len(items)):
            items[i] = _reorderItem(items[i], mapping)

    if longWords:
        self.NumShorts = max((i for i, b in enumerate(byte_lengths) if b > 2), default=-1) + 1
        self.NumShorts |= 0x8000
    else:
        self.NumShorts = max((i for i, b in enumerate(byte_lengths) if b > 1), default=-1) + 1

    self.VarRegionCount = len(self.VarRegionIndex)
    return self

ot.VarData.calculateNumShorts = VarData_calculateNumShorts

def VarData_CalculateNumShorts(self, optimize=True):
    """Deprecated name for VarData_calculateNumShorts() which
    defaults to optimize=True. Use varData.calculateNumShorts()
    or varData.optimize()."""
    return VarData_calculateNumShorts(self, optimize=optimize)

def VarData_optimize(self):
    return VarData_calculateNumShorts(self, optimize=True)

ot.VarData.optimize = VarData_optimize


def buildVarData(varRegionIndices, items, optimize=True):
    self = ot.VarData()
    self.VarRegionIndex = list(varRegionIndices)
    regionCount = self.VarRegionCount = len(self.VarRegionIndex)
    records = self.Item = []
    if items:
        for item in items:
            assert len(item) == regionCount
            records.append(list(item))
    self.ItemCount = len(self.Item)
    self.calculateNumShorts(optimize=optimize)
    return self


def buildVarStore(varRegionList, varDataList):
    self = ot.VarStore()
    self.Format = 1
    self.VarRegionList = varRegionList
    self.VarData = list(varDataList)
    self.VarDataCount = len(self.VarData)
    return self


# Variation helpers

def buildVarIdxMap(varIdxes, glyphOrder):
    self = ot.VarIdxMap()
    self.mapping = {g: v for g, v in zip(glyphOrder, varIdxes)}
    return self


def buildDeltaSetIndexMap(varIdxes):
    self = ot.DeltaSetIndexMap()
    self.mapping = list(varIdxes)
    self.Format = 1 if len(varIdxes) > 0xFFFF else 0
    return self


def buildVarDevTable(varIdx):
    self = ot.Device()
    self.DeltaFormat = 0x8000
    self.StartSize = varIdx >> 16
    self.EndSize = varIdx & 0xFFFF
    return self
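The builders above compose bottom-up: axis supports become regions, region indices plus delta rows become a VarData, and the region list plus VarData become a VarStore. A minimal sketch wiring them together; the axis tag, support and delta rows are made-up illustration values:

axisTags = ["wght"]
supports = [{"wght": (0.0, 1.0, 1.0)}]       # one region peaking at wght=1
regionList = buildVarRegionList(supports, axisTags)
varData = buildVarData([0], [[100], [-30]])  # two delta rows, one region
store = buildVarStore(regionList, [varData])
assert store.VarDataCount == 1 and store.VarRegionList.RegionCount == 1
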
190
venv/Lib/site-packages/fontTools/varLib/errors.py
Normal file
@@ -0,0 +1,190 @@
import textwrap


class VarLibError(Exception):
    """Base exception for the varLib module."""


class VarLibValidationError(VarLibError):
    """Raised when input data is invalid from varLib's point of view."""


class VarLibMergeError(VarLibError):
    """Raised when input data cannot be merged into a variable font."""

    def __init__(self, merger=None, **kwargs):
        self.merger = merger
        if not kwargs:
            kwargs = {}
        if "stack" in kwargs:
            self.stack = kwargs["stack"]
            del kwargs["stack"]
        else:
            self.stack = []
        self.cause = kwargs

    @property
    def reason(self):
        return self.__doc__

    def _master_name(self, ix):
        if self.merger is not None:
            ttf = self.merger.ttfs[ix]
            if (
                "name" in ttf
                and ttf["name"].getDebugName(1)
                and ttf["name"].getDebugName(2)
            ):
                return ttf["name"].getDebugName(1) + " " + ttf["name"].getDebugName(2)
            elif hasattr(ttf.reader, "file") and hasattr(ttf.reader.file, "name"):
                return ttf.reader.file.name
        return f"master number {ix}"

    @property
    def offender(self):
        if "expected" in self.cause and "got" in self.cause:
            index = [x == self.cause["expected"] for x in self.cause["got"]].index(
                False
            )
            return index, self._master_name(index)
        return None, None

    @property
    def details(self):
        if "expected" in self.cause and "got" in self.cause:
            offender_index, offender = self.offender
            got = self.cause["got"][offender_index]
            return f"Expected to see {self.stack[0]}=={self.cause['expected']}, instead saw {got}\n"
        return ""

    def __str__(self):
        offender_index, offender = self.offender
        location = ""
        if offender:
            location = f"\n\nThe problem is likely to be in {offender}:\n"
        context = "".join(reversed(self.stack))
        basic = textwrap.fill(
            f"Couldn't merge the fonts, because {self.reason}. "
            f"This happened while performing the following operation: {context}",
            width=78,
        )
        return "\n\n" + basic + location + self.details


class ShouldBeConstant(VarLibMergeError):
    """some values were different, but should have been the same"""

    @property
    def details(self):
        if self.stack[0] != ".FeatureCount" or self.merger is None:
            return super().details
        offender_index, offender = self.offender
        bad_ttf = self.merger.ttfs[offender_index]
        good_ttf = self.merger.ttfs[offender_index - 1]

        good_features = [
            x.FeatureTag
            for x in good_ttf[self.stack[-1]].table.FeatureList.FeatureRecord
        ]
        bad_features = [
            x.FeatureTag
            for x in bad_ttf[self.stack[-1]].table.FeatureList.FeatureRecord
        ]
        return (
            "\nIncompatible features between masters.\n"
            f"Expected: {', '.join(good_features)}.\n"
            f"Got: {', '.join(bad_features)}.\n"
        )


class FoundANone(VarLibMergeError):
    """one of the values in a list was empty when it shouldn't have been"""

    @property
    def offender(self):
        # read from self.cause; the original `self.argv[0]` attribute
        # does not exist (VarLibMergeError.__init__ stores kwargs on cause)
        index = [x is None for x in self.cause["got"]].index(True)
        return index, self._master_name(index)

    @property
    def details(self):
        # likewise read from self.stack/self.cause rather than self.args,
        # which __init__ never populates
        return f"{self.stack[0]}=={self.cause['got']}\n"


class MismatchedTypes(VarLibMergeError):
    """data had inconsistent types"""


class LengthsDiffer(VarLibMergeError):
    """a list of objects had inconsistent lengths"""


class KeysDiffer(VarLibMergeError):
    """a list of objects had different keys"""


class InconsistentGlyphOrder(VarLibMergeError):
    """the glyph order was inconsistent between masters"""


class InconsistentExtensions(VarLibMergeError):
    """the masters use extension lookups in inconsistent ways"""


class UnsupportedFormat(VarLibMergeError):
    """an OpenType subtable (%s) had a format I didn't expect"""

    @property
    def reason(self):
        # read from self.cause rather than self.args, which __init__
        # never populates
        return self.__doc__ % self.cause["subtable"]


# was also named UnsupportedFormat, which shadowed the class above
class InconsistentFormats(UnsupportedFormat):
    """an OpenType subtable (%s) had inconsistent formats between masters"""


class VarLibCFFMergeError(VarLibError):
    pass


class VarLibCFFDictMergeError(VarLibCFFMergeError):
    """Raised when a CFF PrivateDict cannot be merged."""

    def __init__(self, key, value, values):
        error_msg = (
            f"For the Private Dict key '{key}', the default font value list:"
            f"\n\t{value}\nhad a different number of values than a region font:"
        )
        for region_value in values:
            error_msg += f"\n\t{region_value}"
        self.args = (error_msg,)


class VarLibCFFPointTypeMergeError(VarLibCFFMergeError):
    """Raised when a CFF glyph cannot be merged because of point type differences."""

    def __init__(self, point_type, pt_index, m_index, default_type, glyph_name):
        error_msg = (
            f"Glyph '{glyph_name}': '{point_type}' at point index {pt_index} in "
            f"master index {m_index} differs from the default font point type "
            f"'{default_type}'"
        )
        self.args = (error_msg,)


class VarLibCFFHintTypeMergeError(VarLibCFFMergeError):
    """Raised when a CFF glyph cannot be merged because of hint type differences."""

    def __init__(self, hint_type, cmd_index, m_index, default_type, glyph_name):
        error_msg = (
            f"Glyph '{glyph_name}': '{hint_type}' at index {cmd_index} in "
            f"master index {m_index} differs from the default font hint type "
            f"'{default_type}'"
        )
        self.args = (error_msg,)


class VariationModelError(VarLibError):
    """Raised when a variation model is faulty."""
1097
venv/Lib/site-packages/fontTools/varLib/merger.py
Normal file
File diff suppressed because it is too large
530
venv/Lib/site-packages/fontTools/varLib/models.py
Normal file
@@ -0,0 +1,530 @@
|
||||
"""Variation fonts interpolation models."""
|
||||
|
||||
__all__ = [
|
||||
"nonNone",
|
||||
"allNone",
|
||||
"allEqual",
|
||||
"allEqualTo",
|
||||
"subList",
|
||||
"normalizeValue",
|
||||
"normalizeLocation",
|
||||
"supportScalar",
|
||||
"VariationModel",
|
||||
]
|
||||
|
||||
from fontTools.misc.roundTools import noRound
|
||||
from .errors import VariationModelError
|
||||
|
||||
|
||||
def nonNone(lst):
|
||||
return [l for l in lst if l is not None]
|
||||
|
||||
|
||||
def allNone(lst):
|
||||
return all(l is None for l in lst)
|
||||
|
||||
|
||||
def allEqualTo(ref, lst, mapper=None):
|
||||
if mapper is None:
|
||||
return all(ref == item for item in lst)
|
||||
|
||||
mapped = mapper(ref)
|
||||
return all(mapped == mapper(item) for item in lst)
|
||||
|
||||
|
||||
def allEqual(lst, mapper=None):
|
||||
if not lst:
|
||||
return True
|
||||
it = iter(lst)
|
||||
try:
|
||||
first = next(it)
|
||||
except StopIteration:
|
||||
return True
|
||||
return allEqualTo(first, it, mapper=mapper)
|
||||
|
||||
|
||||
def subList(truth, lst):
|
||||
assert len(truth) == len(lst)
|
||||
return [l for l, t in zip(lst, truth) if t]
|
||||
|
||||
|
||||
def normalizeValue(v, triple):
|
||||
"""Normalizes value based on a min/default/max triple.
|
||||
>>> normalizeValue(400, (100, 400, 900))
|
||||
0.0
|
||||
>>> normalizeValue(100, (100, 400, 900))
|
||||
-1.0
|
||||
>>> normalizeValue(650, (100, 400, 900))
|
||||
0.5
|
||||
"""
|
||||
lower, default, upper = triple
|
||||
if not (lower <= default <= upper):
|
||||
raise ValueError(
|
||||
f"Invalid axis values, must be minimum, default, maximum: "
|
||||
f"{lower:3.3f}, {default:3.3f}, {upper:3.3f}"
|
||||
)
|
||||
v = max(min(v, upper), lower)
|
||||
if v == default:
|
||||
v = 0.0
|
||||
elif v < default:
|
||||
v = (v - default) / (default - lower)
|
||||
else:
|
||||
v = (v - default) / (upper - default)
|
||||
return v
|
||||
|
||||
|
||||
def normalizeLocation(location, axes):
|
||||
"""Normalizes location based on axis min/default/max values from axes.
|
||||
>>> axes = {"wght": (100, 400, 900)}
|
||||
>>> normalizeLocation({"wght": 400}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": 100}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> normalizeLocation({"wght": 900}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> normalizeLocation({"wght": 650}, axes)
|
||||
{'wght': 0.5}
|
||||
>>> normalizeLocation({"wght": 1000}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> normalizeLocation({"wght": 0}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> axes = {"wght": (0, 0, 1000)}
|
||||
>>> normalizeLocation({"wght": 0}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": -1}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": 1000}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> normalizeLocation({"wght": 500}, axes)
|
||||
{'wght': 0.5}
|
||||
>>> normalizeLocation({"wght": 1001}, axes)
|
||||
{'wght': 1.0}
|
||||
>>> axes = {"wght": (0, 1000, 1000)}
|
||||
>>> normalizeLocation({"wght": 0}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> normalizeLocation({"wght": -1}, axes)
|
||||
{'wght': -1.0}
|
||||
>>> normalizeLocation({"wght": 500}, axes)
|
||||
{'wght': -0.5}
|
||||
>>> normalizeLocation({"wght": 1000}, axes)
|
||||
{'wght': 0.0}
|
||||
>>> normalizeLocation({"wght": 1001}, axes)
|
||||
{'wght': 0.0}
|
||||
"""
|
||||
out = {}
|
||||
for tag, triple in axes.items():
|
||||
v = location.get(tag, triple[1])
|
||||
out[tag] = normalizeValue(v, triple)
|
||||
return out
|
||||
|
||||
|
||||
def supportScalar(location, support, ot=True):
|
||||
"""Returns the scalar multiplier at location, for a master
|
||||
with support. If ot is True, then a peak value of zero
|
||||
for support of an axis means "axis does not participate". That
|
||||
is how OpenType Variation Font technology works.
|
||||
>>> supportScalar({}, {})
|
||||
1.0
|
||||
>>> supportScalar({'wght':.2}, {})
|
||||
1.0
|
||||
>>> supportScalar({'wght':.2}, {'wght':(0,2,3)})
|
||||
0.1
|
||||
>>> supportScalar({'wght':2.5}, {'wght':(0,2,4)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)}, ot=False)
|
||||
0.375
|
||||
>>> supportScalar({'wght':2.5, 'wdth':0}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
|
||||
0.75
|
||||
>>> supportScalar({'wght':2.5, 'wdth':.5}, {'wght':(0,2,4), 'wdth':(-1,0,+1)})
|
||||
0.75
|
||||
"""
|
||||
scalar = 1.0
|
||||
for axis, (lower, peak, upper) in support.items():
|
||||
if ot:
|
||||
# OpenType-specific case handling
|
||||
if peak == 0.0:
|
||||
continue
|
||||
if lower > peak or peak > upper:
|
||||
continue
|
||||
if lower < 0.0 and upper > 0.0:
|
||||
continue
|
||||
v = location.get(axis, 0.0)
|
||||
else:
|
||||
assert axis in location
|
||||
v = location[axis]
|
||||
if v == peak:
|
||||
continue
|
||||
if v <= lower or upper <= v:
|
||||
scalar = 0.0
|
||||
break
|
||||
if v < peak:
|
||||
scalar *= (v - lower) / (peak - lower)
|
||||
else: # v > peak
|
||||
scalar *= (v - upper) / (peak - upper)
|
||||
return scalar
|
||||
|
||||
|
||||
class VariationModel(object):
|
||||
|
||||
"""
|
||||
Locations must be in normalized space. Ie. base master
|
||||
is at origin (0)::
|
||||
|
||||
>>> from pprint import pprint
|
||||
>>> locations = [ \
|
||||
{'wght':100}, \
|
||||
{'wght':-100}, \
|
||||
{'wght':-180}, \
|
||||
{'wdth':+.3}, \
|
||||
{'wght':+120,'wdth':.3}, \
|
||||
{'wght':+120,'wdth':.2}, \
|
||||
{}, \
|
||||
{'wght':+180,'wdth':.3}, \
|
||||
{'wght':+180}, \
|
||||
]
|
||||
>>> model = VariationModel(locations, axisOrder=['wght'])
|
||||
>>> pprint(model.locations)
|
||||
[{},
|
||||
{'wght': -100},
|
||||
{'wght': -180},
|
||||
{'wght': 100},
|
||||
{'wght': 180},
|
||||
{'wdth': 0.3},
|
||||
{'wdth': 0.3, 'wght': 180},
|
||||
{'wdth': 0.3, 'wght': 120},
|
||||
{'wdth': 0.2, 'wght': 120}]
|
||||
>>> pprint(model.deltaWeights)
|
||||
[{},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0},
|
||||
{0: 1.0, 4: 1.0, 5: 1.0},
|
||||
{0: 1.0, 3: 0.75, 4: 0.25, 5: 1.0, 6: 0.6666666666666666},
|
||||
{0: 1.0,
|
||||
3: 0.75,
|
||||
4: 0.25,
|
||||
5: 0.6666666666666667,
|
||||
6: 0.4444444444444445,
|
||||
7: 0.6666666666666667}]
|
||||
"""
|
||||
|
||||
def __init__(self, locations, axisOrder=None):
|
||||
if len(set(tuple(sorted(l.items())) for l in locations)) != len(locations):
|
||||
raise VariationModelError("Locations must be unique.")
|
||||
|
||||
self.origLocations = locations
|
||||
self.axisOrder = axisOrder if axisOrder is not None else []
|
||||
|
||||
locations = [{k: v for k, v in loc.items() if v != 0.0} for loc in locations]
|
||||
keyFunc = self.getMasterLocationsSortKeyFunc(
|
||||
locations, axisOrder=self.axisOrder
|
||||
)
|
||||
self.locations = sorted(locations, key=keyFunc)
|
||||
|
||||
# Mapping from user's master order to our master order
|
||||
self.mapping = [self.locations.index(l) for l in locations]
|
||||
self.reverseMapping = [locations.index(l) for l in self.locations]
|
||||
|
||||
self._computeMasterSupports()
|
||||
self._subModels = {}
|
||||
|
||||
def getSubModel(self, items):
|
||||
if None not in items:
|
||||
return self, items
|
||||
key = tuple(v is not None for v in items)
|
||||
subModel = self._subModels.get(key)
|
||||
if subModel is None:
|
||||
subModel = VariationModel(subList(key, self.origLocations), self.axisOrder)
|
||||
self._subModels[key] = subModel
|
||||
return subModel, subList(key, items)
|
||||
|
||||
@staticmethod
|
||||
def getMasterLocationsSortKeyFunc(locations, axisOrder=[]):
|
||||
if {} not in locations:
|
||||
raise VariationModelError("Base master not found.")
|
||||
axisPoints = {}
|
||||
for loc in locations:
|
||||
if len(loc) != 1:
|
||||
continue
|
||||
axis = next(iter(loc))
|
||||
value = loc[axis]
|
||||
if axis not in axisPoints:
|
||||
axisPoints[axis] = {0.0}
|
||||
assert (
|
||||
value not in axisPoints[axis]
|
||||
), 'Value "%s" in axisPoints["%s"] --> %s' % (value, axis, axisPoints)
|
||||
axisPoints[axis].add(value)
|
||||
|
||||
def getKey(axisPoints, axisOrder):
|
||||
def sign(v):
|
||||
return -1 if v < 0 else +1 if v > 0 else 0
|
||||
|
||||
def key(loc):
|
||||
rank = len(loc)
|
||||
onPointAxes = [
|
||||
axis
|
||||
for axis, value in loc.items()
|
||||
if axis in axisPoints and value in axisPoints[axis]
|
||||
]
|
||||
orderedAxes = [axis for axis in axisOrder if axis in loc]
|
||||
orderedAxes.extend(
|
||||
[axis for axis in sorted(loc.keys()) if axis not in axisOrder]
|
||||
)
|
||||
return (
|
||||
rank, # First, order by increasing rank
|
||||
-len(onPointAxes), # Next, by decreasing number of onPoint axes
|
||||
tuple(
|
||||
axisOrder.index(axis) if axis in axisOrder else 0x10000
|
||||
for axis in orderedAxes
|
||||
), # Next, by known axes
|
||||
tuple(orderedAxes), # Next, by all axes
|
||||
tuple(
|
||||
sign(loc[axis]) for axis in orderedAxes
|
||||
), # Next, by signs of axis values
|
||||
tuple(
|
||||
abs(loc[axis]) for axis in orderedAxes
|
||||
), # Next, by absolute value of axis values
|
||||
)
|
||||
|
||||
return key
|
||||
|
||||
ret = getKey(axisPoints, axisOrder)
|
||||
return ret
|
||||
|
||||
def reorderMasters(self, master_list, mapping):
|
||||
# For changing the master data order without
|
||||
# recomputing supports and deltaWeights.
|
||||
new_list = [master_list[idx] for idx in mapping]
|
||||
self.origLocations = [self.origLocations[idx] for idx in mapping]
|
||||
locations = [
|
||||
{k: v for k, v in loc.items() if v != 0.0} for loc in self.origLocations
|
||||
]
|
||||
self.mapping = [self.locations.index(l) for l in locations]
|
||||
self.reverseMapping = [locations.index(l) for l in self.locations]
|
||||
self._subModels = {}
|
||||
return new_list
|
||||
|
||||
def _computeMasterSupports(self):
|
||||
self.supports = []
|
||||
regions = self._locationsToRegions()
|
||||
for i, region in enumerate(regions):
|
||||
locAxes = set(region.keys())
|
||||
# Walk over previous masters now
|
||||
for prev_region in regions[:i]:
|
||||
# Master with extra axes do not participte
|
||||
if not set(prev_region.keys()).issubset(locAxes):
|
||||
continue
|
||||
# If it's NOT in the current box, it does not participate
|
||||
relevant = True
|
||||
for axis, (lower, peak, upper) in region.items():
|
||||
if axis not in prev_region or not (
|
||||
prev_region[axis][1] == peak
|
||||
or lower < prev_region[axis][1] < upper
|
||||
):
|
||||
relevant = False
|
||||
break
|
||||
if not relevant:
|
||||
continue
|
||||
|
||||
# Split the box for new master; split in whatever direction
|
||||
# that has largest range ratio.
|
||||
#
|
||||
# For symmetry, we actually cut across multiple axes
|
||||
# if they have the largest, equal, ratio.
|
||||
# https://github.com/fonttools/fonttools/commit/7ee81c8821671157968b097f3e55309a1faa511e#commitcomment-31054804
|
||||
|
||||
bestAxes = {}
|
||||
bestRatio = -1
|
||||
for axis in prev_region.keys():
|
||||
val = prev_region[axis][1]
|
||||
assert axis in region
|
||||
lower, locV, upper = region[axis]
|
||||
newLower, newUpper = lower, upper
|
||||
if val < locV:
|
||||
newLower = val
|
||||
ratio = (val - locV) / (lower - locV)
|
||||
elif locV < val:
|
||||
newUpper = val
|
||||
ratio = (val - locV) / (upper - locV)
|
||||
else: # val == locV
|
||||
# Can't split box in this direction.
|
||||
continue
|
||||
if ratio > bestRatio:
|
||||
bestAxes = {}
|
||||
bestRatio = ratio
|
||||
if ratio == bestRatio:
|
||||
bestAxes[axis] = (newLower, locV, newUpper)
|
||||
|
||||
for axis, triple in bestAxes.items():
|
||||
region[axis] = triple
|
||||
self.supports.append(region)
|
||||
self._computeDeltaWeights()
|
||||
|
||||
def _locationsToRegions(self):
|
||||
locations = self.locations
|
||||
# Compute min/max across each axis, use it as total range.
|
||||
# TODO Take this as input from outside?
|
||||
minV = {}
|
||||
maxV = {}
|
||||
for l in locations:
|
||||
for k, v in l.items():
|
||||
minV[k] = min(v, minV.get(k, v))
|
||||
maxV[k] = max(v, maxV.get(k, v))
|
||||
|
||||
regions = []
|
||||
for loc in locations:
|
||||
region = {}
|
||||
for axis, locV in loc.items():
|
||||
if locV > 0:
|
||||
region[axis] = (0, locV, maxV[axis])
|
||||
else:
|
||||
region[axis] = (minV[axis], locV, 0)
|
||||
regions.append(region)
|
||||
return regions
|
||||
|
||||
def _computeDeltaWeights(self):
|
||||
self.deltaWeights = []
|
||||
for i, loc in enumerate(self.locations):
|
||||
deltaWeight = {}
|
||||
# Walk over previous masters now, populate deltaWeight
|
||||
for j, support in enumerate(self.supports[:i]):
|
||||
scalar = supportScalar(loc, support)
|
||||
if scalar:
|
||||
deltaWeight[j] = scalar
|
||||
self.deltaWeights.append(deltaWeight)
|
||||
|
||||
def getDeltas(self, masterValues, *, round=noRound):
|
||||
assert len(masterValues) == len(self.deltaWeights)
|
||||
mapping = self.reverseMapping
|
||||
out = []
|
||||
for i, weights in enumerate(self.deltaWeights):
|
||||
delta = masterValues[mapping[i]]
|
||||
for j, weight in weights.items():
|
||||
if weight == 1:
|
||||
delta -= out[j]
|
||||
else:
|
||||
delta -= out[j] * weight
|
||||
out.append(round(delta))
|
||||
return out
|
||||
|
||||
def getDeltasAndSupports(self, items, *, round=noRound):
|
||||
model, items = self.getSubModel(items)
|
||||
return model.getDeltas(items, round=round), model.supports
|
||||
|
||||
def getScalars(self, loc):
|
||||
return [supportScalar(loc, support) for support in self.supports]
|
||||
|
||||
@staticmethod
|
||||
def interpolateFromDeltasAndScalars(deltas, scalars):
|
||||
v = None
|
||||
assert len(deltas) == len(scalars)
|
||||
for delta, scalar in zip(deltas, scalars):
|
||||
if not scalar:
|
||||
continue
|
||||
contribution = delta * scalar
|
||||
if v is None:
|
||||
v = contribution
|
||||
else:
|
||||
v += contribution
|
||||
return v
|
||||
|
||||
def interpolateFromDeltas(self, loc, deltas):
|
||||
scalars = self.getScalars(loc)
|
||||
return self.interpolateFromDeltasAndScalars(deltas, scalars)
|
||||
|
||||
def interpolateFromMasters(self, loc, masterValues, *, round=noRound):
|
||||
deltas = self.getDeltas(masterValues, round=round)
|
||||
return self.interpolateFromDeltas(loc, deltas)
|
||||
|
||||
def interpolateFromMastersAndScalars(self, masterValues, scalars, *, round=noRound):
|
||||
deltas = self.getDeltas(masterValues, round=round)
|
||||
return self.interpolateFromDeltasAndScalars(deltas, scalars)
|
||||
|
||||
|
||||
def piecewiseLinearMap(v, mapping):
    keys = mapping.keys()
    if not keys:
        return v
    if v in keys:
        return mapping[v]
    k = min(keys)
    if v < k:
        return v + mapping[k] - k
    k = max(keys)
    if v > k:
        return v + mapping[k] - k
    # Interpolate
    a = max(k for k in keys if k < v)
    b = min(k for k in keys if k > v)
    va = mapping[a]
    vb = mapping[b]
    return va + (vb - va) * (v - a) / (b - a)
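
# Worked example, using an avar-style segment map on a normalized axis:
# 0.75 falls between the mapped keys 0.5 and 1.0, so the result is
# 0.75 + (1.0 - 0.75) * (0.75 - 0.5) / (1.0 - 0.5) = 0.875; inputs outside
# the outermost keys are shifted by the edge mapping's delta instead.
#
#   >>> piecewiseLinearMap(0.75, {0.0: 0.0, 0.5: 0.75, 1.0: 1.0})
#   0.875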


def main(args=None):
    """Normalize locations on a given designspace"""
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.models",
        description=main.__doc__,
    )
    parser.add_argument(
        "--loglevel",
        metavar="LEVEL",
        default="INFO",
        help="Logging level (defaults to INFO)",
    )

    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("-d", "--designspace", metavar="DESIGNSPACE", type=str)
    group.add_argument(
        "-l",
        "--locations",
        metavar="LOCATION",
        nargs="+",
        help="Master locations as comma-separated coordinates. One must be all zeros.",
    )

    args = parser.parse_args(args)

    configLogger(level=args.loglevel)
    from pprint import pprint

    if args.designspace:
        from fontTools.designspaceLib import DesignSpaceDocument

        doc = DesignSpaceDocument()
        doc.read(args.designspace)
        locs = [s.location for s in doc.sources]
        print("Original locations:")
        pprint(locs)
        doc.normalize()
        print("Normalized locations:")
        locs = [s.location for s in doc.sources]
        pprint(locs)
    else:
        axes = [chr(c) for c in range(ord("A"), ord("Z") + 1)]
        locs = [
            dict(zip(axes, (float(v) for v in s.split(",")))) for s in args.locations
        ]

    model = VariationModel(locs)
    print("Sorted locations:")
    pprint(model.locations)
    print("Supports:")
    pprint(model.supports)


if __name__ == "__main__":
    import doctest, sys

    if len(sys.argv) > 1:
        sys.exit(main())

    sys.exit(doctest.testmod().failed)
461
venv/Lib/site-packages/fontTools/varLib/mutator.py
Normal file
@@ -0,0 +1,461 @@
"""
|
||||
Instantiate a variation font. Run, eg:
|
||||
|
||||
$ fonttools varLib.mutator ./NotoSansArabic-VF.ttf wght=140 wdth=85
|
||||
"""
|
||||
from fontTools.misc.fixedTools import floatToFixedToFloat, floatToFixed
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.pens.boundsPen import BoundsPen
|
||||
from fontTools.ttLib import TTFont, newTable
|
||||
from fontTools.ttLib.tables import ttProgram
|
||||
from fontTools.ttLib.tables._g_l_y_f import GlyphCoordinates, flagOverlapSimple, OVERLAP_COMPOUND
|
||||
from fontTools.varLib.models import (
|
||||
supportScalar,
|
||||
normalizeLocation,
|
||||
piecewiseLinearMap,
|
||||
)
|
||||
from fontTools.varLib.merger import MutatorMerger
|
||||
from fontTools.varLib.varStore import VarStoreInstancer
|
||||
from fontTools.varLib.mvar import MVAR_ENTRIES
|
||||
from fontTools.varLib.iup import iup_delta
|
||||
import fontTools.subset.cff
|
||||
import os.path
|
||||
import logging
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.varlib.mutator")
|
||||
|
||||
# map 'wdth' axis (1..200) to OS/2.usWidthClass (1..9), rounding to closest
|
||||
OS2_WIDTH_CLASS_VALUES = {}
|
||||
percents = [50.0, 62.5, 75.0, 87.5, 100.0, 112.5, 125.0, 150.0, 200.0]
|
||||
for i, (prev, curr) in enumerate(zip(percents[:-1], percents[1:]), start=1):
|
||||
half = (prev + curr) / 2
|
||||
OS2_WIDTH_CLASS_VALUES[half] = i
|
||||
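
# The resulting table maps each bucket midpoint to the usWidthClass of the
# bucket below it; a queried 'wdth' later picks the first midpoint above it,
# and anything at or above the last midpoint (175.0) falls through to class 9.
#
#   >>> OS2_WIDTH_CLASS_VALUES[93.75]  # midpoint of 87.5..100, so class 4
#   4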


def interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas):
    pd_blend_lists = ("BlueValues", "OtherBlues", "FamilyBlues",
                      "FamilyOtherBlues", "StemSnapH",
                      "StemSnapV")
    pd_blend_values = ("BlueScale", "BlueShift",
                       "BlueFuzz", "StdHW", "StdVW")
    for fontDict in topDict.FDArray:
        pd = fontDict.Private
        vsindex = pd.vsindex if hasattr(pd, 'vsindex') else 0
        for key, value in pd.rawDict.items():
            if (key in pd_blend_values) and isinstance(value, list):
                delta = interpolateFromDeltas(vsindex, value[1:])
                pd.rawDict[key] = otRound(value[0] + delta)
            elif (key in pd_blend_lists) and isinstance(value[0], list):
                # If any argument in a BlueValues list is a blend list,
                # then they all are. The first value of each list is an
                # absolute value. The delta tuples are calculated from
                # relative master values, hence we need to append all the
                # deltas to date to each successive absolute value.
                delta = 0
                for i, val_list in enumerate(value):
                    delta += otRound(interpolateFromDeltas(vsindex,
                                                           val_list[1:]))
                    value[i] = val_list[0] + delta


def interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder):
    charstrings = topDict.CharStrings
    for gname in glyphOrder:
        # Interpolate charstring
        # e.g. replace blend op args with regular args,
        # and use and discard vsindex op.
        charstring = charstrings[gname]
        new_program = []
        vsindex = 0
        last_i = 0
        for i, token in enumerate(charstring.program):
            if token == 'vsindex':
                vsindex = charstring.program[i - 1]
                if last_i != 0:
                    new_program.extend(charstring.program[last_i:i - 1])
                last_i = i + 1
            elif token == 'blend':
                num_regions = charstring.getNumRegions(vsindex)
                numMasters = 1 + num_regions
                num_args = charstring.program[i - 1]
                # The program list starting at program[i] is now:
                # ..args for following operations
                # num_args values from the default font
                # num_args tuples, each with numMasters-1 delta values
                # num_blend_args
                # 'blend'
                argi = i - (num_args * numMasters + 1)
                end_args = tuplei = argi + num_args
                while argi < end_args:
                    next_ti = tuplei + num_regions
                    deltas = charstring.program[tuplei:next_ti]
                    delta = interpolateFromDeltas(vsindex, deltas)
                    charstring.program[argi] += otRound(delta)
                    tuplei = next_ti
                    argi += 1
                new_program.extend(charstring.program[last_i:end_args])
                last_i = i + 1
        if last_i != 0:
            new_program.extend(charstring.program[last_i:])
            charstring.program = new_program


def interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc):
    """Unlike TrueType glyphs, neither advance width nor bounding box
    info is stored in a CFF2 charstring. The width data exists only in
    the hmtx and HVAR tables. Since LSB data cannot be interpolated
    reliably from the master LSB values in the hmtx table, we traverse
    the charstring to determine the actual bounding box."""

    charstrings = topDict.CharStrings
    boundsPen = BoundsPen(glyphOrder)
    hmtx = varfont['hmtx']
    hvar_table = None
    if 'HVAR' in varfont:
        hvar_table = varfont['HVAR'].table
        fvar = varfont['fvar']
        varStoreInstancer = VarStoreInstancer(hvar_table.VarStore, fvar.axes, loc)

    for gid, gname in enumerate(glyphOrder):
        entry = list(hmtx[gname])
        # get width delta.
        if hvar_table:
            if hvar_table.AdvWidthMap:
                width_idx = hvar_table.AdvWidthMap.mapping[gname]
            else:
                width_idx = gid
            width_delta = otRound(varStoreInstancer[width_idx])
        else:
            width_delta = 0

        # get LSB.
        boundsPen.init()
        charstring = charstrings[gname]
        charstring.draw(boundsPen)
        if boundsPen.bounds is None:
            # Happens with non-marking glyphs
            lsb_delta = 0
        else:
            lsb = otRound(boundsPen.bounds[0])
            lsb_delta = entry[1] - lsb

        if lsb_delta or width_delta:
            if width_delta:
                entry[0] += width_delta
            if lsb_delta:
                entry[1] = lsb
            hmtx[gname] = tuple(entry)


def instantiateVariableFont(varfont, location, inplace=False, overlap=True):
    """Generate a static instance from a variable TTFont and a dictionary
    defining the desired location along the variable font's axes.
    The location values must be specified as user-space coordinates, e.g.:

        {'wght': 400, 'wdth': 100}

    By default, a new TTFont object is returned. If ``inplace`` is True, the
    input varfont is modified and reduced to a static font.

    When the overlap parameter is defined as True,
    OVERLAP_SIMPLE and OVERLAP_COMPOUND bits are set to 1.  See
    https://docs.microsoft.com/en-us/typography/opentype/spec/glyf
    """
    if not inplace:
        # make a copy to leave input varfont unmodified
        stream = BytesIO()
        varfont.save(stream)
        stream.seek(0)
        varfont = TTFont(stream)

    fvar = varfont['fvar']
    axes = {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in fvar.axes}
    loc = normalizeLocation(location, axes)
    if 'avar' in varfont:
        maps = varfont['avar'].segments
        loc = {k: piecewiseLinearMap(v, maps[k]) for k, v in loc.items()}
    # Quantize to F2Dot14, to avoid surprise interpolations.
    loc = {k: floatToFixedToFloat(v, 14) for k, v in loc.items()}
    # Location is normalized now
    log.info("Normalized location: %s", loc)

    if 'gvar' in varfont:
        log.info("Mutating glyf/gvar tables")
        gvar = varfont['gvar']
        glyf = varfont['glyf']
        hMetrics = varfont['hmtx'].metrics
        vMetrics = getattr(varfont.get('vmtx'), 'metrics', None)
        # get list of glyph names in gvar sorted by component depth
        glyphnames = sorted(
            gvar.variations.keys(),
            key=lambda name: (
                glyf[name].getCompositeMaxpValues(glyf).maxComponentDepth
                if glyf[name].isComposite() else 0,
                name))
        for glyphname in glyphnames:
            variations = gvar.variations[glyphname]
            coordinates, _ = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
            origCoords, endPts = None, None
            for var in variations:
                scalar = supportScalar(loc, var.axes)
                if not scalar:
                    continue
                delta = var.coordinates
                if None in delta:
                    if origCoords is None:
                        origCoords, g = glyf._getCoordinatesAndControls(glyphname, hMetrics, vMetrics)
                    delta = iup_delta(delta, origCoords, g.endPts)
                coordinates += GlyphCoordinates(delta) * scalar
            glyf._setCoordinates(glyphname, coordinates, hMetrics, vMetrics)
    else:
        glyf = None

    if 'cvar' in varfont:
        log.info("Mutating cvt/cvar tables")
        cvar = varfont['cvar']
        cvt = varfont['cvt ']
        deltas = {}
        for var in cvar.variations:
            scalar = supportScalar(loc, var.axes)
            if not scalar:
                continue
            for i, c in enumerate(var.coordinates):
                if c is not None:
                    deltas[i] = deltas.get(i, 0) + scalar * c
        for i, delta in deltas.items():
            cvt[i] += otRound(delta)

    if 'CFF2' in varfont:
        log.info("Mutating CFF2 table")
        glyphOrder = varfont.getGlyphOrder()
        CFF2 = varfont['CFF2']
        topDict = CFF2.cff.topDictIndex[0]
        vsInstancer = VarStoreInstancer(topDict.VarStore.otVarStore, fvar.axes, loc)
        interpolateFromDeltas = vsInstancer.interpolateFromDeltas
        interpolate_cff2_PrivateDict(topDict, interpolateFromDeltas)
        CFF2.desubroutinize()
        interpolate_cff2_charstrings(topDict, interpolateFromDeltas, glyphOrder)
        interpolate_cff2_metrics(varfont, topDict, glyphOrder, loc)
        del topDict.rawDict['VarStore']
        del topDict.VarStore

    if 'MVAR' in varfont:
        log.info("Mutating MVAR table")
        mvar = varfont['MVAR'].table
        varStoreInstancer = VarStoreInstancer(mvar.VarStore, fvar.axes, loc)
        records = mvar.ValueRecord
        for rec in records:
            mvarTag = rec.ValueTag
            if mvarTag not in MVAR_ENTRIES:
                continue
            tableTag, itemName = MVAR_ENTRIES[mvarTag]
            delta = otRound(varStoreInstancer[rec.VarIdx])
            if not delta:
                continue
            setattr(varfont[tableTag], itemName,
                    getattr(varfont[tableTag], itemName) + delta)

    log.info("Mutating FeatureVariations")
    for tableTag in 'GSUB', 'GPOS':
        if tableTag not in varfont:
            continue
        table = varfont[tableTag].table
        if not getattr(table, 'FeatureVariations', None):
            continue
        variations = table.FeatureVariations
        for record in variations.FeatureVariationRecord:
            applies = True
            for condition in record.ConditionSet.ConditionTable:
                if condition.Format == 1:
                    axisIdx = condition.AxisIndex
                    axisTag = fvar.axes[axisIdx].axisTag
                    Min = condition.FilterRangeMinValue
                    Max = condition.FilterRangeMaxValue
                    v = loc[axisTag]
                    if not (Min <= v <= Max):
                        applies = False
                else:
                    applies = False
                if not applies:
                    break

            if applies:
                assert record.FeatureTableSubstitution.Version == 0x00010000
                for rec in record.FeatureTableSubstitution.SubstitutionRecord:
                    table.FeatureList.FeatureRecord[rec.FeatureIndex].Feature = rec.Feature
                break
        del table.FeatureVariations

    if 'GDEF' in varfont and varfont['GDEF'].table.Version >= 0x00010003:
        log.info("Mutating GDEF/GPOS/GSUB tables")
        gdef = varfont['GDEF'].table
        instancer = VarStoreInstancer(gdef.VarStore, fvar.axes, loc)

        merger = MutatorMerger(varfont, instancer)
        merger.mergeTables(varfont, [varfont], ['GDEF', 'GPOS'])

        # Downgrade GDEF.
        del gdef.VarStore
        gdef.Version = 0x00010002
        if gdef.MarkGlyphSetsDef is None:
            del gdef.MarkGlyphSetsDef
            gdef.Version = 0x00010000

        if not (gdef.LigCaretList or
                gdef.MarkAttachClassDef or
                gdef.GlyphClassDef or
                gdef.AttachList or
                (gdef.Version >= 0x00010002 and gdef.MarkGlyphSetsDef)):
            del varfont['GDEF']

    addidef = False
    if glyf:
        for glyph in glyf.glyphs.values():
            if hasattr(glyph, "program"):
                instructions = glyph.program.getAssembly()
                # If GETVARIATION opcode is used in bytecode of any glyph, add IDEF
                addidef = any(op.startswith("GETVARIATION") for op in instructions)
                if addidef:
                    break
        if overlap:
            for glyph_name in glyf.keys():
                glyph = glyf[glyph_name]
                # Set OVERLAP_COMPOUND bit for compound glyphs
                if glyph.isComposite():
                    glyph.components[0].flags |= OVERLAP_COMPOUND
                # Set OVERLAP_SIMPLE bit for simple glyphs
                elif glyph.numberOfContours > 0:
                    glyph.flags[0] |= flagOverlapSimple
    if addidef:
        log.info("Adding IDEF to fpgm table for GETVARIATION opcode")
        asm = []
        if 'fpgm' in varfont:
            fpgm = varfont['fpgm']
            asm = fpgm.program.getAssembly()
        else:
            fpgm = newTable('fpgm')
            fpgm.program = ttProgram.Program()
            varfont['fpgm'] = fpgm
        asm.append("PUSHB[000] 145")
        asm.append("IDEF[ ]")
        args = [str(len(loc))]
        for a in fvar.axes:
            args.append(str(floatToFixed(loc[a.axisTag], 14)))
        asm.append("NPUSHW[ ] " + ' '.join(args))
        asm.append("ENDF[ ]")
        fpgm.program.fromAssembly(asm)

        # Change maxp attributes as IDEF is added
        if 'maxp' in varfont:
            maxp = varfont['maxp']
            setattr(maxp, "maxInstructionDefs", 1 + getattr(maxp, "maxInstructionDefs", 0))
            setattr(maxp, "maxStackElements", max(len(loc), getattr(maxp, "maxStackElements", 0)))

    if 'name' in varfont:
        log.info("Pruning name table")
        exclude = {a.axisNameID for a in fvar.axes}
        for i in fvar.instances:
            exclude.add(i.subfamilyNameID)
            exclude.add(i.postscriptNameID)
        if 'ltag' in varfont:
            # Drop the whole 'ltag' table if all its language tags are referenced by
            # name records to be pruned.
            # TODO: prune unused ltag tags and re-enumerate langIDs accordingly
            excludedUnicodeLangIDs = [
                n.langID for n in varfont['name'].names
                if n.nameID in exclude and n.platformID == 0 and n.langID != 0xFFFF
            ]
            if set(excludedUnicodeLangIDs) == set(range(len(varfont['ltag'].tags))):
                del varfont['ltag']
        varfont['name'].names[:] = [
            n for n in varfont['name'].names
            if n.nameID not in exclude
        ]

    if "wght" in location and "OS/2" in varfont:
        varfont["OS/2"].usWeightClass = otRound(
            max(1, min(location["wght"], 1000))
        )
    if "wdth" in location:
        wdth = location["wdth"]
        for percent, widthClass in sorted(OS2_WIDTH_CLASS_VALUES.items()):
            if wdth < percent:
                varfont["OS/2"].usWidthClass = widthClass
                break
        else:
            varfont["OS/2"].usWidthClass = 9
    if "slnt" in location and "post" in varfont:
        varfont["post"].italicAngle = max(-90, min(location["slnt"], 90))

    log.info("Removing variable tables")
    for tag in ('avar', 'cvar', 'fvar', 'gvar', 'HVAR', 'MVAR', 'VVAR', 'STAT'):
        if tag in varfont:
            del varfont[tag]

    return varfont


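# Library usage sketch for the function above (file names here are
# placeholders):
#
#   from fontTools.ttLib import TTFont
#   varfont = TTFont("MyFont-VF.ttf")
#   static = instantiateVariableFont(varfont, {"wght": 700, "wdth": 80})
#   static.save("MyFont-Instance.ttf")

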
def main(args=None):
    """Instantiate a variation font"""
    from fontTools import configLogger
    import argparse

    parser = argparse.ArgumentParser(
        "fonttools varLib.mutator", description="Instantiate a variable font")
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input variable TTF file.")
    parser.add_argument(
        "locargs", metavar="AXIS=LOC", nargs="*",
        help="List of space-separated locations. A location consists of "
        "the name of a variation axis, followed by '=' and a number. E.g.: "
        " wght=700 wdth=80. The default is the location of the base master.")
    parser.add_argument(
        "-o", "--output", metavar="OUTPUT.ttf", default=None,
        help="Output instance TTF file (default: INPUT-instance.ttf).")
    logging_group = parser.add_mutually_exclusive_group(required=False)
    logging_group.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely.")
    logging_group.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off.")
    parser.add_argument(
        "--no-overlap",
        dest="overlap",
        action="store_false",
        help="Don't set OVERLAP_SIMPLE/OVERLAP_COMPOUND glyf flags."
    )
    options = parser.parse_args(args)

    varfilename = options.input
    outfile = (
        os.path.splitext(varfilename)[0] + '-instance.ttf'
        if not options.output else options.output)
    configLogger(level=(
        "DEBUG" if options.verbose else
        "ERROR" if options.quiet else
        "INFO"))

    loc = {}
    for arg in options.locargs:
        try:
            tag, val = arg.split('=')
            assert len(tag) <= 4
            loc[tag.ljust(4)] = float(val)
        except (ValueError, AssertionError):
            parser.error("invalid location argument format: %r" % arg)
    log.info("Location: %s", loc)

    log.info("Loading variable font")
    varfont = TTFont(varfilename)

    instantiateVariableFont(varfont, loc, inplace=True, overlap=options.overlap)

    log.info("Saving instance font %s", outfile)
    varfont.save(outfile)


if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest
    sys.exit(doctest.testmod().failed)
606
venv/Lib/site-packages/fontTools/varLib/varStore.py
Normal file
@@ -0,0 +1,606 @@
from fontTools.misc.roundTools import noRound, otRound
from fontTools.ttLib.tables import otTables as ot
from fontTools.varLib.models import supportScalar
from fontTools.varLib.builder import (buildVarRegionList, buildVarStore,
                                      buildVarRegion, buildVarData)
from functools import partial
from collections import defaultdict


def _getLocationKey(loc):
    return tuple(sorted(loc.items(), key=lambda kv: kv[0]))


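# Keys are sorted by axis tag, so equal locations produce the same key
# regardless of dict insertion order:
#
#   >>> _getLocationKey({'wght': 1.0, 'wdth': 0.5})
#   (('wdth', 0.5), ('wght', 1.0))

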
class OnlineVarStoreBuilder(object):

    def __init__(self, axisTags):
        self._axisTags = axisTags
        self._regionMap = {}
        self._regionList = buildVarRegionList([], axisTags)
        self._store = buildVarStore(self._regionList, [])
        self._data = None
        self._model = None
        self._supports = None
        self._varDataIndices = {}
        self._varDataCaches = {}
        self._cache = {}

    def setModel(self, model):
        self.setSupports(model.supports)
        self._model = model

    def setSupports(self, supports):
        self._model = None
        self._supports = list(supports)
        if not self._supports[0]:
            del self._supports[0]  # Drop base master support
        self._cache = {}
        self._data = None

    def finish(self, optimize=True):
        self._regionList.RegionCount = len(self._regionList.Region)
        self._store.VarDataCount = len(self._store.VarData)
        for data in self._store.VarData:
            data.ItemCount = len(data.Item)
            data.calculateNumShorts(optimize=optimize)
        return self._store

    def _add_VarData(self):
        regionMap = self._regionMap
        regionList = self._regionList

        regions = self._supports
        regionIndices = []
        for region in regions:
            key = _getLocationKey(region)
            idx = regionMap.get(key)
            if idx is None:
                varRegion = buildVarRegion(region, self._axisTags)
                idx = regionMap[key] = len(regionList.Region)
                regionList.Region.append(varRegion)
            regionIndices.append(idx)

        # Check if we have one already...
        key = tuple(regionIndices)
        varDataIdx = self._varDataIndices.get(key)
        if varDataIdx is not None:
            self._outer = varDataIdx
            self._data = self._store.VarData[varDataIdx]
            self._cache = self._varDataCaches[key]
            if len(self._data.Item) == 0xFFFF:
                # This is full.  Need new one.
                varDataIdx = None

        if varDataIdx is None:
            self._data = buildVarData(regionIndices, [], optimize=False)
            self._outer = len(self._store.VarData)
            self._store.VarData.append(self._data)
            self._varDataIndices[key] = self._outer
            if key not in self._varDataCaches:
                self._varDataCaches[key] = {}
            self._cache = self._varDataCaches[key]

    def storeMasters(self, master_values):
        deltas = self._model.getDeltas(master_values, round=round)
        base = deltas.pop(0)
        return base, self.storeDeltas(deltas, round=noRound)

    def storeDeltas(self, deltas, *, round=round):
        deltas = [round(d) for d in deltas]
        if len(deltas) == len(self._supports) + 1:
            deltas = tuple(deltas[1:])
        else:
            assert len(deltas) == len(self._supports)
            deltas = tuple(deltas)

        varIdx = self._cache.get(deltas)
        if varIdx is not None:
            return varIdx

        if not self._data:
            self._add_VarData()
        inner = len(self._data.Item)
        if inner == 0xFFFF:
            # Full array. Start new one.
            self._add_VarData()
            return self.storeDeltas(deltas)
        self._data.addItem(deltas, round=noRound)

        varIdx = (self._outer << 16) + inner
        self._cache[deltas] = varIdx
        return varIdx

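# Builder sketch, assuming one 'wght' axis and a base master plus one
# extra master (VariationModel comes from fontTools.varLib.models):
#
#   builder = OnlineVarStoreBuilder(["wght"])
#   builder.setModel(VariationModel([{}, {"wght": 1.0}]))
#   base, varIdx = builder.storeMasters([10, 20])  # base == 10, delta row (10,)
#   store = builder.finish()
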
def VarData_addItem(self, deltas, *, round=round):
    deltas = [round(d) for d in deltas]

    countUs = self.VarRegionCount
    countThem = len(deltas)
    if countUs + 1 == countThem:
        deltas = tuple(deltas[1:])
    else:
        assert countUs == countThem, (countUs, countThem)
        deltas = tuple(deltas)
    self.Item.append(list(deltas))
    self.ItemCount = len(self.Item)

ot.VarData.addItem = VarData_addItem

def VarRegion_get_support(self, fvar_axes):
    return {
        fvar_axes[i].axisTag: (reg.StartCoord, reg.PeakCoord, reg.EndCoord)
        for i, reg in enumerate(self.VarRegionAxis)
        if reg.PeakCoord != 0
    }

ot.VarRegion.get_support = VarRegion_get_support

class VarStoreInstancer(object):

    def __init__(self, varstore, fvar_axes, location={}):
        self.fvar_axes = fvar_axes
        assert varstore is None or varstore.Format == 1
        self._varData = varstore.VarData if varstore else []
        self._regions = varstore.VarRegionList.Region if varstore else []
        self.setLocation(location)

    def setLocation(self, location):
        self.location = dict(location)
        self._clearCaches()

    def _clearCaches(self):
        self._scalars = {}

    def _getScalar(self, regionIdx):
        scalar = self._scalars.get(regionIdx)
        if scalar is None:
            support = self._regions[regionIdx].get_support(self.fvar_axes)
            scalar = supportScalar(self.location, support)
            self._scalars[regionIdx] = scalar
        return scalar

    @staticmethod
    def interpolateFromDeltasAndScalars(deltas, scalars):
        delta = 0.
        for d, s in zip(deltas, scalars):
            if not s:
                continue
            delta += d * s
        return delta

    def __getitem__(self, varidx):
        major, minor = varidx >> 16, varidx & 0xFFFF
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in varData[major].VarRegionIndex]
        deltas = varData[major].Item[minor]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)

    def interpolateFromDeltas(self, varDataIndex, deltas):
        varData = self._varData
        scalars = [self._getScalar(ri) for ri in
                   varData[varDataIndex].VarRegionIndex]
        return self.interpolateFromDeltasAndScalars(deltas, scalars)


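# Instancer sketch: a VarIdx packs (major << 16) | minor, so item 7 of
# VarData number 3 is looked up as below ('store' is an ot.VarStore and
# 'fvar' the font's fvar table, both assumed already loaded):
#
#   inst = VarStoreInstancer(store, fvar.axes, {"wght": 0.5})
#   delta = inst[(3 << 16) | 7]

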
#
# Optimizations
#
# retainFirstMap - If true, major 0 mappings are retained. Deltas for unused indices are zeroed
# advIdxes - Set of major 0 indices for advance deltas to be listed first. Other major 0 indices follow.

def VarStore_subset_varidxes(self, varIdxes, optimize=True, retainFirstMap=False, advIdxes=set()):

    # Sort out used varIdxes by major/minor.
    used = {}
    for varIdx in varIdxes:
        major = varIdx >> 16
        minor = varIdx & 0xFFFF
        d = used.get(major)
        if d is None:
            d = used[major] = set()
        d.add(minor)
    del varIdxes

    #
    # Subset VarData
    #

    varData = self.VarData
    newVarData = []
    varDataMap = {}
    for major, data in enumerate(varData):
        usedMinors = used.get(major)
        if usedMinors is None:
            continue
        newMajor = len(newVarData)
        newVarData.append(data)

        items = data.Item
        newItems = []
        if major == 0 and retainFirstMap:
            for minor in range(len(items)):
                newItems.append(items[minor] if minor in usedMinors else [0] * len(items[minor]))
                varDataMap[minor] = minor
        else:
            if major == 0:
                minors = sorted(advIdxes) + sorted(usedMinors - advIdxes)
            else:
                minors = sorted(usedMinors)
            for minor in minors:
                newMinor = len(newItems)
                newItems.append(items[minor])
                varDataMap[(major << 16) + minor] = (newMajor << 16) + newMinor

        data.Item = newItems
        data.ItemCount = len(data.Item)

        data.calculateNumShorts(optimize=optimize)

    self.VarData = newVarData
    self.VarDataCount = len(self.VarData)

    self.prune_regions()

    return varDataMap

ot.VarStore.subset_varidxes = VarStore_subset_varidxes

def VarStore_prune_regions(self):
    """Remove unused VarRegions."""
    #
    # Subset VarRegionList
    #

    # Collect.
    usedRegions = set()
    for data in self.VarData:
        usedRegions.update(data.VarRegionIndex)
    # Subset.
    regionList = self.VarRegionList
    regions = regionList.Region
    newRegions = []
    regionMap = {}
    for i in sorted(usedRegions):
        regionMap[i] = len(newRegions)
        newRegions.append(regions[i])
    regionList.Region = newRegions
    regionList.RegionCount = len(regionList.Region)
    # Map.
    for data in self.VarData:
        data.VarRegionIndex = [regionMap[i] for i in data.VarRegionIndex]

ot.VarStore.prune_regions = VarStore_prune_regions


def _visit(self, func):
    """Recurse down from self, if type of an object is ot.Device,
    call func() on it.  Works on otData-style classes."""

    if type(self) == ot.Device:
        func(self)

    elif isinstance(self, list):
        for that in self:
            _visit(that, func)

    elif hasattr(self, 'getConverters') and not hasattr(self, 'postRead'):
        for conv in self.getConverters():
            that = getattr(self, conv.name, None)
            if that is not None:
                _visit(that, func)

    elif isinstance(self, ot.ValueRecord):
        for that in self.__dict__.values():
            _visit(that, func)

def _Device_recordVarIdx(self, s):
    """Add VarIdx in this Device table (if any) to the set s."""
    if self.DeltaFormat == 0x8000:
        s.add((self.StartSize << 16) + self.EndSize)

def Object_collect_device_varidxes(self, varidxes):
    adder = partial(_Device_recordVarIdx, s=varidxes)
    _visit(self, adder)

ot.GDEF.collect_device_varidxes = Object_collect_device_varidxes
ot.GPOS.collect_device_varidxes = Object_collect_device_varidxes

def _Device_mapVarIdx(self, mapping, done):
    """Map VarIdx in this Device table (if any) through mapping."""
    if id(self) in done:
        return
    done.add(id(self))
    if self.DeltaFormat == 0x8000:
        varIdx = mapping[(self.StartSize << 16) + self.EndSize]
        self.StartSize = varIdx >> 16
        self.EndSize = varIdx & 0xFFFF

def Object_remap_device_varidxes(self, varidxes_map):
    mapper = partial(_Device_mapVarIdx, mapping=varidxes_map, done=set())
    _visit(self, mapper)

ot.GDEF.remap_device_varidxes = Object_remap_device_varidxes
ot.GPOS.remap_device_varidxes = Object_remap_device_varidxes
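
# Collect and remap are meant to bracket a subsetting/optimization pass
# (sketch; 'font' is a loaded TTFont and 'store' its GDEF VarStore):
#
#   varidxes = set()
#   font['GPOS'].table.collect_device_varidxes(varidxes)
#   mapping = store.subset_varidxes(varidxes)
#   font['GPOS'].table.remap_device_varidxes(mapping)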


class _Encoding(object):

    def __init__(self, chars):
        self.chars = chars
        self.width = self._popcount(chars)
        self.overhead = self._characteristic_overhead(chars)
        self.items = set()

    def append(self, row):
        self.items.add(row)

    def extend(self, lst):
        self.items.update(lst)

    def get_room(self):
        """Maximum number of bytes that can be added to characteristic
        while still being beneficial to merge it into another one."""
        count = len(self.items)
        return max(0, (self.overhead - 1) // count - self.width)
    room = property(get_room)

    @property
    def gain(self):
        """Maximum possible byte gain from merging this into another
        characteristic."""
        count = len(self.items)
        return max(0, self.overhead - count * (self.width + 1))

    def sort_key(self):
        return self.width, self.chars

    def __len__(self):
        return len(self.items)

    def can_encode(self, chars):
        return not (chars & ~self.chars)

    def __sub__(self, other):
        return self._popcount(self.chars & ~other.chars)

    @staticmethod
    def _popcount(n):
        # Apparently this is the fastest native way to do it...
        # https://stackoverflow.com/a/9831671
        return bin(n).count('1')

    @staticmethod
    def _characteristic_overhead(chars):
        """Returns overhead in bytes of encoding this characteristic
        as a VarData."""
        c = 6
        while chars:
            if chars & 0b1111:
                c += 2
            chars >>= 4
        return c

    def _find_yourself_best_new_encoding(self, done_by_width):
        self.best_new_encoding = None
        for new_width in range(self.width + 1, self.width + self.room + 1):
            for new_encoding in done_by_width[new_width]:
                if new_encoding.can_encode(self.chars):
                    break
            else:
                new_encoding = None
            self.best_new_encoding = new_encoding


class _EncodingDict(dict):

    def __missing__(self, chars):
        r = self[chars] = _Encoding(chars)
        return r

    def add_row(self, row):
        chars = self._row_characteristics(row)
        self[chars].append(row)

    @staticmethod
    def _row_characteristics(row):
        """Returns encoding characteristics for a row."""
        longWords = False

        chars = 0
        i = 1
        for v in row:
            if v:
                chars += i
            if not (-128 <= v <= 127):
                chars += i * 0b0010
            if not (-32768 <= v <= 32767):
                longWords = True
                break
            i <<= 4

        if longWords:
            # Redo; only allow 2byte/4byte encoding
            chars = 0
            i = 1
            for v in row:
                if v:
                    chars += i * 0b0011
                if not (-32768 <= v <= 32767):
                    chars += i * 0b1100
                i <<= 4

        return chars


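# Worked example for _row_characteristics: one nibble per column, least
# significant first.  For the row (0, 1, 300): column 0 is all-zero
# (0b0000), column 1 fits in a byte (0b0001), column 2 needs a short
# (0b0011), giving 0x310:
#
#   >>> hex(_EncodingDict._row_characteristics((0, 1, 300)))
#   '0x310'

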
def VarStore_optimize(self):
    """Optimize storage. Returns mapping from old VarIdxes to new ones."""

    # TODO
    # Check that no two VarRegions are the same; if they are, fold them.

    n = len(self.VarRegionList.Region)  # Number of columns
    zeroes = [0] * n

    front_mapping = {}  # Map from old VarIdxes to full row tuples

    encodings = _EncodingDict()

    # Collect all items into a set of full rows (with lots of zeroes.)
    for major, data in enumerate(self.VarData):
        regionIndices = data.VarRegionIndex

        for minor, item in enumerate(data.Item):

            row = list(zeroes)
            for regionIdx, v in zip(regionIndices, item):
                row[regionIdx] += v
            row = tuple(row)

            encodings.add_row(row)
            front_mapping[(major << 16) + minor] = row

    # Separate encodings that have no gain (are decided) and those having
    # possible gain (possibly to be merged into others.)
    encodings = sorted(encodings.values(), key=_Encoding.__len__, reverse=True)
    done_by_width = defaultdict(list)
    todo = []
    for encoding in encodings:
        if not encoding.gain:
            done_by_width[encoding.width].append(encoding)
        else:
            todo.append(encoding)

    # For each encoding that is possibly to be merged, find the best match
    # in the decided encodings, and record that.
    todo.sort(key=_Encoding.get_room)
    for encoding in todo:
        encoding._find_yourself_best_new_encoding(done_by_width)

    # Walk through todo encodings; for each, see if merging it with
    # another todo encoding gains more than each of them merging with
    # their best decided encoding.  If yes, merge them and add the
    # resulting encoding back to the todo queue.  If not, move the
    # encoding to the decided list.  Repeat till done.
    while todo:
        encoding = todo.pop()
        best_idx = None
        best_gain = 0
        for i, other_encoding in enumerate(todo):
            combined_chars = other_encoding.chars | encoding.chars
            combined_width = _Encoding._popcount(combined_chars)
            combined_overhead = _Encoding._characteristic_overhead(combined_chars)
            combined_gain = (
                + encoding.overhead
                + other_encoding.overhead
                - combined_overhead
                - (combined_width - encoding.width) * len(encoding)
                - (combined_width - other_encoding.width) * len(other_encoding)
            )
            this_gain = 0 if encoding.best_new_encoding is None else (
                + encoding.overhead
                - (encoding.best_new_encoding.width - encoding.width) * len(encoding)
            )
            other_gain = 0 if other_encoding.best_new_encoding is None else (
                + other_encoding.overhead
                - (other_encoding.best_new_encoding.width - other_encoding.width) * len(other_encoding)
            )
            separate_gain = this_gain + other_gain

            if combined_gain > separate_gain:
                best_idx = i
                best_gain = combined_gain - separate_gain

        if best_idx is None:
            # Encoding is decided as is
            done_by_width[encoding.width].append(encoding)
        else:
            other_encoding = todo[best_idx]
            combined_chars = other_encoding.chars | encoding.chars
            combined_encoding = _Encoding(combined_chars)
            combined_encoding.extend(encoding.items)
            combined_encoding.extend(other_encoding.items)
            combined_encoding._find_yourself_best_new_encoding(done_by_width)
            del todo[best_idx]
            todo.append(combined_encoding)

    # Assemble final store.
    back_mapping = {}  # Mapping from full rows to new VarIdxes
    encodings = sum(done_by_width.values(), [])
    encodings.sort(key=_Encoding.sort_key)
    self.VarData = []
    for major, encoding in enumerate(encodings):
        data = ot.VarData()
        self.VarData.append(data)
        data.VarRegionIndex = range(n)
        data.VarRegionCount = len(data.VarRegionIndex)
        data.Item = sorted(encoding.items)
        for minor, item in enumerate(data.Item):
            back_mapping[item] = (major << 16) + minor

    # Compile final mapping.
    varidx_map = {}
    for k, v in front_mapping.items():
        varidx_map[k] = back_mapping[v]

    # Remove unused regions.
    self.prune_regions()

    # Recalculate things and go home.
    self.VarRegionList.RegionCount = len(self.VarRegionList.Region)
    self.VarDataCount = len(self.VarData)
    for data in self.VarData:
        data.ItemCount = len(data.Item)
        data.optimize()

    return varidx_map

ot.VarStore.optimize = VarStore_optimize


def main(args=None):
    """Optimize a font's GDEF variation store"""
    from argparse import ArgumentParser
    from fontTools import configLogger
    from fontTools.ttLib import TTFont
    from fontTools.ttLib.tables.otBase import OTTableWriter

    parser = ArgumentParser(prog='varLib.varStore', description=main.__doc__)
    parser.add_argument('fontfile')
    parser.add_argument('outfile', nargs='?')
    options = parser.parse_args(args)

    # TODO: allow user to configure logging via command-line options
    configLogger(level="INFO")

    fontfile = options.fontfile
    outfile = options.outfile

    font = TTFont(fontfile)
    gdef = font['GDEF']
    store = gdef.table.VarStore

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("Before: %7d bytes" % size)

    varidx_map = store.optimize()

    gdef.table.remap_device_varidxes(varidx_map)
    if 'GPOS' in font:
        font['GPOS'].table.remap_device_varidxes(varidx_map)

    writer = OTTableWriter()
    store.compile(writer, font)
    size = len(writer.getAllData())
    print("After:  %7d bytes" % size)

    if outfile is not None:
        font.save(outfile)


if __name__ == "__main__":
    import sys
    if len(sys.argv) > 1:
        sys.exit(main())
    import doctest
    sys.exit(doctest.testmod().failed)
Some files were not shown because too many files have changed in this diff.