8375057: Update HarfBuzz to 12.3.2

Reviewed-by: prr, kizune
This commit is contained in:
Damon Nguyen 2026-02-02 21:53:02 +00:00
parent 5607a4620c
commit 4db0f7f291
173 changed files with 11918 additions and 9815 deletions

View File

@ -1,4 +1,4 @@
## Harfbuzz 11.2.0
## Harfbuzz 12.3.2
### Harfbuzz License

View File

@ -104,7 +104,7 @@ public:
foreground (foreground_),
instancer (instancer_)
{
if (font->is_synthetic ())
if (font->is_synthetic)
{
font = hb_font_create_sub_font (font);
hb_font_set_synthetic_bold (font, 0, 0, true);
@ -178,7 +178,10 @@ struct hb_colrv1_closure_context_t :
{ glyphs->add (glyph_id); }
void add_layer_indices (unsigned first_layer_index, unsigned num_of_layers)
{ layer_indices->add_range (first_layer_index, first_layer_index + num_of_layers - 1); }
{
if (num_of_layers == 0) return;
layer_indices->add_range (first_layer_index, first_layer_index + num_of_layers - 1);
}
void add_palette_index (unsigned palette_index)
{ palette_indices->add (palette_index); }
@ -650,10 +653,10 @@ struct PaintColrLayers
TRACE_SUBSET (this);
auto *out = c->serializer->embed (this);
if (unlikely (!out)) return_trace (false);
return_trace (c->serializer->check_assign (out->firstLayerIndex, c->plan->colrv1_layers.get (firstLayerIndex),
HB_SERIALIZE_ERROR_INT_OVERFLOW));
return_trace (true);
uint32_t first_layer_index = numLayers ? c->plan->colrv1_layers.get (firstLayerIndex) : 0;
return_trace (c->serializer->check_assign (out->firstLayerIndex, first_layer_index,
HB_SERIALIZE_ERROR_INT_OVERFLOW));
}
bool sanitize (hb_sanitize_context_t *c) const
@ -1075,9 +1078,9 @@ struct PaintTranslate
float ddx = dx + c->instancer (varIdxBase, 0);
float ddy = dy + c->instancer (varIdxBase, 1);
bool p1 = c->funcs->push_translate (c->data, ddx, ddy);
c->funcs->push_translate (c->data, ddx, ddy);
c->recurse (this+src);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 14(noVar) or 15 (Var) */
@ -1124,9 +1127,9 @@ struct PaintScale
float sx = scaleX.to_float (c->instancer (varIdxBase, 0));
float sy = scaleY.to_float (c->instancer (varIdxBase, 1));
bool p1 = c->funcs->push_scale (c->data, sx, sy);
c->funcs->push_scale (c->data, sx, sy);
c->recurse (this+src);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 16 (noVar) or 17(Var) */
@ -1177,13 +1180,9 @@ struct PaintScaleAroundCenter
float tCenterX = centerX + c->instancer (varIdxBase, 2);
float tCenterY = centerY + c->instancer (varIdxBase, 3);
bool p1 = c->funcs->push_translate (c->data, +tCenterX, +tCenterY);
bool p2 = c->funcs->push_scale (c->data, sx, sy);
bool p3 = c->funcs->push_translate (c->data, -tCenterX, -tCenterY);
c->funcs->push_scale_around_center (c->data, sx, sy, tCenterX, tCenterY);
c->recurse (this+src);
if (p3) c->funcs->pop_transform (c->data);
if (p2) c->funcs->pop_transform (c->data);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 18 (noVar) or 19(Var) */
@ -1228,9 +1227,9 @@ struct PaintScaleUniform
TRACE_PAINT (this);
float s = scale.to_float (c->instancer (varIdxBase, 0));
bool p1 = c->funcs->push_scale (c->data, s, s);
c->funcs->push_scale (c->data, s, s);
c->recurse (this+src);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 20 (noVar) or 21(Var) */
@ -1278,13 +1277,9 @@ struct PaintScaleUniformAroundCenter
float tCenterX = centerX + c->instancer (varIdxBase, 1);
float tCenterY = centerY + c->instancer (varIdxBase, 2);
bool p1 = c->funcs->push_translate (c->data, +tCenterX, +tCenterY);
bool p2 = c->funcs->push_scale (c->data, s, s);
bool p3 = c->funcs->push_translate (c->data, -tCenterX, -tCenterY);
c->funcs->push_scale_around_center (c->data, s, s, tCenterX, tCenterY);
c->recurse (this+src);
if (p3) c->funcs->pop_transform (c->data);
if (p2) c->funcs->pop_transform (c->data);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 22 (noVar) or 23(Var) */
@ -1328,9 +1323,9 @@ struct PaintRotate
TRACE_PAINT (this);
float a = angle.to_float (c->instancer (varIdxBase, 0));
bool p1 = c->funcs->push_rotate (c->data, a);
c->funcs->push_rotate (c->data, a);
c->recurse (this+src);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 24 (noVar) or 25(Var) */
@ -1378,13 +1373,9 @@ struct PaintRotateAroundCenter
float tCenterX = centerX + c->instancer (varIdxBase, 1);
float tCenterY = centerY + c->instancer (varIdxBase, 2);
bool p1 = c->funcs->push_translate (c->data, +tCenterX, +tCenterY);
bool p2 = c->funcs->push_rotate (c->data, a);
bool p3 = c->funcs->push_translate (c->data, -tCenterX, -tCenterY);
c->funcs->push_rotate_around_center (c->data, a, tCenterX, tCenterY);
c->recurse (this+src);
if (p3) c->funcs->pop_transform (c->data);
if (p2) c->funcs->pop_transform (c->data);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 26 (noVar) or 27(Var) */
@ -1432,9 +1423,9 @@ struct PaintSkew
float sx = xSkewAngle.to_float(c->instancer (varIdxBase, 0));
float sy = ySkewAngle.to_float(c->instancer (varIdxBase, 1));
bool p1 = c->funcs->push_skew (c->data, sx, sy);
c->funcs->push_skew (c->data, sx, sy);
c->recurse (this+src);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 28(noVar) or 29 (Var) */
@ -1485,13 +1476,9 @@ struct PaintSkewAroundCenter
float tCenterX = centerX + c->instancer (varIdxBase, 2);
float tCenterY = centerY + c->instancer (varIdxBase, 3);
bool p1 = c->funcs->push_translate (c->data, +tCenterX, +tCenterY);
bool p2 = c->funcs->push_skew (c->data, sx, sy);
bool p3 = c->funcs->push_translate (c->data, -tCenterX, -tCenterY);
c->funcs->push_skew_around_center (c->data, sx, sy, tCenterX, tCenterY);
c->recurse (this+src);
if (p3) c->funcs->pop_transform (c->data);
if (p2) c->funcs->pop_transform (c->data);
if (p1) c->funcs->pop_transform (c->data);
c->funcs->pop_transform (c->data);
}
HBUINT8 format; /* format = 30(noVar) or 31 (Var) */
@ -1626,7 +1613,7 @@ struct ClipBox
const ItemVarStoreInstancer &instancer) const
{
TRACE_SUBSET (this);
switch (u.format) {
switch (u.format.v) {
case 1: return_trace (u.format1.subset (c, instancer, VarIdx::NO_VARIATION));
case 2: return_trace (u.format2.subset (c, instancer));
default:return_trace (c->default_return_value ());
@ -1635,7 +1622,7 @@ struct ClipBox
void closurev1 (hb_colrv1_closure_context_t* c) const
{
switch (u.format) {
switch (u.format.v) {
case 2: u.format2.closurev1 (c); return;
default:return;
}
@ -1644,9 +1631,9 @@ struct ClipBox
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
@ -1657,7 +1644,7 @@ struct ClipBox
const ItemVarStoreInstancer &instancer) const
{
ClipBoxData clip_box;
switch (u.format) {
switch (u.format.v) {
case 1:
u.format1.get_clip_box (clip_box, instancer);
break;
@ -1677,7 +1664,7 @@ struct ClipBox
protected:
union {
HBUINT8 format; /* Format identifier */
struct { HBUINT8 v; } format; /* Format identifier */
ClipBoxFormat1 format1;
ClipBoxFormat2 format2;
} u;
@ -1857,9 +1844,9 @@ struct Paint
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.paintformat1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.paintformat2, std::forward<Ts> (ds)...));
case 3: return_trace (c->dispatch (u.paintformat3, std::forward<Ts> (ds)...));
@ -1898,7 +1885,7 @@ struct Paint
protected:
union {
HBUINT8 format;
struct { HBUINT8 v; } format;
PaintColrLayers paintformat1;
NoVariable<PaintSolid> paintformat2;
Variable<PaintSolid> paintformat3;
@ -2073,7 +2060,7 @@ struct delta_set_index_map_subset_plan_t
outer_bit_count = 1;
inner_bit_count = 1;
if (unlikely (!output_map.resize (map_count, false))) return false;
if (unlikely (!output_map.resize_dirty (map_count))) return false;
for (unsigned idx = 0; idx < map_count; idx++)
{
@ -2693,7 +2680,8 @@ struct COLR
{
ItemVarStoreInstancer instancer (get_var_store_ptr (),
get_delta_set_index_map_ptr (),
hb_array (font->coords, font->num_coords));
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0));
hb_paint_context_t c (this, funcs, data, font, palette_index, foreground, instancer);
hb_decycler_node_t node (c.glyphs_decycler);

View File

@ -307,6 +307,7 @@ struct CPAL
if (first_color_to_layer_index.has (first_color_record_idx)) continue;
first_color_index_for_layer.push (first_color_record_idx);
if (unlikely (!c->serializer->propagate_error (first_color_index_for_layer))) return_trace (false);
first_color_to_layer_index.set (first_color_record_idx,
first_color_index_for_layer.length - 1);
}

View File

@ -46,7 +46,7 @@ struct Coverage
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
CoverageFormat1_3<SmallTypes> format1;
CoverageFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
@ -55,7 +55,7 @@ struct Coverage
#endif
} u;
public:
DEFINE_SIZE_UNION (2, format);
DEFINE_SIZE_UNION (2, format.v);
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
@ -63,9 +63,9 @@ struct Coverage
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format)
switch (u.format.v)
{
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
@ -86,7 +86,7 @@ struct Coverage
unsigned int get (hb_codepoint_t k) const { return get_coverage (k); }
unsigned int get_coverage (hb_codepoint_t glyph_id) const
{
switch (u.format) {
switch (u.format.v) {
case 1: return u.format1.get_coverage (glyph_id);
case 2: return u.format2.get_coverage (glyph_id);
#ifndef HB_NO_BEYOND_64K
@ -97,18 +97,38 @@ struct Coverage
}
}
unsigned int get_coverage (hb_codepoint_t glyph_id,
hb_ot_lookup_cache_t *cache) const
hb_ot_layout_mapping_cache_t *cache) const
{
unsigned coverage;
if (cache && cache->get (glyph_id, &coverage)) return coverage;
if (cache && cache->get (glyph_id, &coverage)) return coverage < cache->MAX_VALUE ? coverage : NOT_COVERED;
coverage = get_coverage (glyph_id);
if (cache) cache->set (glyph_id, coverage);
if (cache) {
if (coverage == NOT_COVERED)
cache->set_unchecked (glyph_id, cache->MAX_VALUE);
else if (likely (coverage < cache->MAX_VALUE))
cache->set_unchecked (glyph_id, coverage);
}
return coverage;
}
unsigned int get_coverage_binary (hb_codepoint_t glyph_id,
hb_ot_layout_binary_cache_t *cache) const
{
unsigned coverage;
if (cache && cache->get (glyph_id, &coverage)) return coverage < cache->MAX_VALUE ? coverage : NOT_COVERED;
coverage = get_coverage (glyph_id);
if (cache) {
if (coverage == NOT_COVERED)
cache->set_unchecked (glyph_id, cache->MAX_VALUE);
else
cache->set_unchecked (glyph_id, 0);
}
return coverage;
}
unsigned get_population () const
{
switch (u.format) {
switch (u.format.v) {
case 1: return u.format1.get_population ();
case 2: return u.format2.get_population ();
#ifndef HB_NO_BEYOND_64K
@ -140,11 +160,11 @@ struct Coverage
last = g;
if (g > max) max = g;
}
u.format = !unsorted && count <= num_ranges * 3 ? 1 : 2;
u.format.v = !unsorted && count <= num_ranges * 3 ? 1 : 2;
#ifndef HB_NO_BEYOND_64K
if (max > 0xFFFFu)
u.format += 2;
u.format.v += 2;
if (unlikely (max > 0xFFFFFFu))
#else
if (unlikely (max > 0xFFFFu))
@ -154,7 +174,7 @@ struct Coverage
return_trace (false);
}
switch (u.format)
switch (u.format.v)
{
case 1: return_trace (u.format1.serialize (c, glyphs));
case 2: return_trace (u.format2.serialize (c, glyphs));
@ -185,7 +205,7 @@ struct Coverage
bool intersects (const hb_set_t *glyphs) const
{
switch (u.format)
switch (u.format.v)
{
case 1: return u.format1.intersects (glyphs);
case 2: return u.format2.intersects (glyphs);
@ -198,7 +218,7 @@ struct Coverage
}
bool intersects_coverage (const hb_set_t *glyphs, unsigned int index) const
{
switch (u.format)
switch (u.format.v)
{
case 1: return u.format1.intersects_coverage (glyphs, index);
case 2: return u.format2.intersects_coverage (glyphs, index);
@ -212,7 +232,7 @@ struct Coverage
unsigned cost () const
{
switch (u.format) {
switch (u.format.v) {
case 1: hb_barrier (); return u.format1.cost ();
case 2: hb_barrier (); return u.format2.cost ();
#ifndef HB_NO_BEYOND_64K
@ -228,7 +248,7 @@ struct Coverage
template <typename set_t>
bool collect_coverage (set_t *glyphs) const
{
switch (u.format)
switch (u.format.v)
{
case 1: return u.format1.collect_coverage (glyphs);
case 2: return u.format2.collect_coverage (glyphs);
@ -244,7 +264,7 @@ struct Coverage
hb_requires (hb_is_sink_of (IterableOut, hb_codepoint_t))>
void intersect_set (const hb_set_t &glyphs, IterableOut&& intersect_glyphs) const
{
switch (u.format)
switch (u.format.v)
{
case 1: return u.format1.intersect_set (glyphs, intersect_glyphs);
case 2: return u.format2.intersect_set (glyphs, intersect_glyphs);
@ -262,7 +282,7 @@ struct Coverage
iter_t (const Coverage &c_ = Null (Coverage))
{
hb_memset (this, 0, sizeof (*this));
format = c_.u.format;
format = c_.u.format.v;
switch (format)
{
case 1: u.format1.init (c_.u.format1); return;
@ -332,7 +352,7 @@ struct Coverage
}
iter_t __end__ () const
{
iter_t it = {};
iter_t it;
it.format = format;
switch (format)
{

View File

@ -41,11 +41,11 @@ struct CoverageFormat1_3
{
friend struct Coverage;
protected:
public:
HBUINT16 coverageFormat; /* Format identifier--format = 1 */
SortedArray16Of<typename Types::HBGlyphID>
glyphArray; /* Array of GlyphIDs--in numerical order */
public:
DEFINE_SIZE_ARRAY (4, glyphArray);
private:

View File

@ -40,7 +40,7 @@ struct CoverageFormat2_4
{
friend struct Coverage;
protected:
public:
HBUINT16 coverageFormat; /* Format identifier--format = 2 */
SortedArray16Of<RangeRecord<Types>>
rangeRecord; /* Array of glyph ranges--ordered by

View File

@ -252,7 +252,7 @@ struct CaretValue
hb_codepoint_t glyph_id,
const ItemVariationStore &var_store) const
{
switch (u.format) {
switch (u.format.v) {
case 1: return u.format1.get_caret_value (font, direction);
case 2: return u.format2.get_caret_value (font, direction, glyph_id);
case 3: return u.format3.get_caret_value (font, direction, var_store);
@ -263,9 +263,9 @@ struct CaretValue
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
case 3: return_trace (c->dispatch (u.format3, std::forward<Ts> (ds)...));
@ -275,7 +275,7 @@ struct CaretValue
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
switch (u.format) {
switch (u.format.v) {
case 1:
case 2:
return;
@ -289,9 +289,9 @@ struct CaretValue
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
case 3: return_trace (u.format3.sanitize (c));
@ -301,13 +301,13 @@ struct CaretValue
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
CaretValueFormat1 format1;
CaretValueFormat2 format2;
CaretValueFormat3 format3;
} u;
public:
DEFINE_SIZE_UNION (2, format);
DEFINE_SIZE_UNION (2, format.v);
};
struct LigGlyph
@ -519,7 +519,7 @@ struct MarkGlyphSets
{
bool covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{
switch (u.format) {
switch (u.format.v) {
case 1: return u.format1.covers (set_index, glyph_id);
default:return false;
}
@ -528,7 +528,7 @@ struct MarkGlyphSets
template <typename set_t>
void collect_coverage (hb_vector_t<set_t> &sets) const
{
switch (u.format) {
switch (u.format.v) {
case 1: u.format1.collect_coverage (sets); return;
default:return;
}
@ -537,7 +537,7 @@ struct MarkGlyphSets
void collect_used_mark_sets (const hb_set_t& glyph_set,
hb_set_t& used_mark_sets /* OUT */) const
{
switch (u.format) {
switch (u.format.v) {
case 1: u.format1.collect_used_mark_sets (glyph_set, used_mark_sets); return;
default:return;
}
@ -546,7 +546,7 @@ struct MarkGlyphSets
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
switch (u.format) {
switch (u.format.v) {
case 1: return_trace (u.format1.subset (c));
default:return_trace (false);
}
@ -555,9 +555,9 @@ struct MarkGlyphSets
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 1: return_trace (u.format1.sanitize (c));
default:return_trace (true);
}
@ -565,11 +565,11 @@ struct MarkGlyphSets
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
MarkGlyphSetsFormat1 format1;
} u;
public:
DEFINE_SIZE_UNION (2, format);
DEFINE_SIZE_UNION (2, format.v);
};
@ -977,7 +977,7 @@ struct GDEF
}
#ifndef HB_NO_GDEF_CACHE
table->get_mark_glyph_sets ().collect_coverage (mark_glyph_set_digests);
table->get_mark_glyph_sets ().collect_coverage (mark_glyph_sets);
#endif
}
~accelerator_t () { table.destroy (); }
@ -1002,18 +1002,34 @@ struct GDEF
}
HB_ALWAYS_INLINE
bool mark_set_covers (unsigned int set_index, hb_codepoint_t glyph_id) const
{
return
#ifndef HB_NO_GDEF_CACHE
mark_glyph_set_digests[set_index].may_have (glyph_id) &&
// We can access arrayZ directly because of sanitize_lookup_props() guarantee.
mark_glyph_sets.arrayZ[set_index].may_have (glyph_id) &&
#endif
table->mark_set_covers (set_index, glyph_id);
table->mark_set_covers (set_index, glyph_id)
;
}
unsigned sanitize_lookup_props (unsigned lookup_props) const
{
#ifndef HB_NO_GDEF_CACHE
if (lookup_props & LookupFlag::UseMarkFilteringSet &&
(lookup_props >> 16) >= mark_glyph_sets.length)
{
// Invalid mark filtering set index; unset the flag.
lookup_props &= ~LookupFlag::UseMarkFilteringSet;
}
#endif
return lookup_props;
}
hb_blob_ptr_t<GDEF> table;
#ifndef HB_NO_GDEF_CACHE
hb_vector_t<hb_set_digest_t> mark_glyph_set_digests;
hb_vector_t<hb_set_digest_t> mark_glyph_sets;
mutable hb_cache_t<21, 3> glyph_props_cache;
static_assert (sizeof (glyph_props_cache) == 512, "");
#endif

View File

@ -13,20 +13,20 @@ struct Anchor
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
AnchorFormat1 format1;
AnchorFormat2 format2;
AnchorFormat3 format3;
} u;
public:
DEFINE_SIZE_UNION (2, format);
DEFINE_SIZE_UNION (2, format.v);
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 1: return_trace (u.format1.sanitize (c));
case 2: return_trace (u.format2.sanitize (c));
case 3: return_trace (u.format3.sanitize (c));
@ -38,7 +38,7 @@ struct Anchor
float *x, float *y) const
{
*x = *y = 0;
switch (u.format) {
switch (u.format.v) {
case 1: u.format1.get_anchor (c, glyph_id, x, y); return;
case 2: u.format2.get_anchor (c, glyph_id, x, y); return;
case 3: u.format3.get_anchor (c, glyph_id, x, y); return;
@ -49,7 +49,7 @@ struct Anchor
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
switch (u.format) {
switch (u.format.v) {
case 1: return_trace (bool (reinterpret_cast<Anchor *> (u.format1.copy (c->serializer))));
case 2:
if (c->plan->flags & HB_SUBSET_FLAGS_NO_HINTING)
@ -66,7 +66,7 @@ struct Anchor
void collect_variation_indices (hb_collect_variation_indices_context_t *c) const
{
switch (u.format) {
switch (u.format.v) {
case 1: case 2:
return;
case 3:

View File

@ -37,12 +37,12 @@ struct AnchorFormat3
*x = font->em_fscale_x (xCoordinate);
*y = font->em_fscale_y (yCoordinate);
if ((font->x_ppem || font->num_coords) && xDeviceTable.sanitize (&c->sanitizer, this))
if ((font->x_ppem || font->has_nonzero_coords) && xDeviceTable.sanitize (&c->sanitizer, this))
{
hb_barrier ();
*x += (this+xDeviceTable).get_x_delta (font, c->var_store, c->var_store_cache);
}
if ((font->y_ppem || font->num_coords) && yDeviceTable.sanitize (&c->sanitizer, this))
if ((font->y_ppem || font->has_nonzero_coords) && yDeviceTable.sanitize (&c->sanitizer, this))
{
hb_barrier ();
*y += (this+yDeviceTable).get_y_delta (font, c->var_store, c->var_store_cache);
@ -91,10 +91,13 @@ struct AnchorFormat3
}
}
/* in case that all axes are pinned or no variations after instantiation,
* both var_idxes will be mapped to HB_OT_LAYOUT_NO_VARIATIONS_INDEX */
if (x_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX &&
y_varidx == HB_OT_LAYOUT_NO_VARIATIONS_INDEX)
bool no_downgrade = (!xDeviceTable.is_null () && !(this+xDeviceTable).is_variation_device ()) ||
x_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX ||
y_varidx != HB_OT_LAYOUT_NO_VARIATIONS_INDEX ||
(!yDeviceTable.is_null () && !(this+yDeviceTable).is_variation_device ());
if (!no_downgrade)
return_trace (c->serializer->check_assign (out->format, 1, HB_SERIALIZE_ERROR_INT_OVERFLOW));
if (!c->serializer->embed (xDeviceTable)) return_trace (false);

View File

@ -77,6 +77,13 @@ struct AnchorMatrix
return_trace (true);
}
bool offset_is_null (unsigned row, unsigned col, unsigned num_cols) const
{
if (unlikely (row >= rows || col >= num_cols)) return true;
auto &offset = matrixZ[row * num_cols + col];
return offset.is_null ();
}
};

View File

@ -11,7 +11,7 @@ struct CursivePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
CursivePosFormat1 format1;
} u;
@ -19,9 +19,9 @@ struct CursivePos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
}

View File

@ -50,8 +50,9 @@ struct EntryExitRecord
DEFINE_SIZE_STATIC (4);
};
static void
reverse_cursive_minor_offset (hb_glyph_position_t *pos, unsigned int i, hb_direction_t direction, unsigned int new_parent) {
static inline void
reverse_cursive_minor_offset (hb_glyph_position_t *pos, unsigned int i, hb_direction_t direction, unsigned int new_parent)
{
int chain = pos[i].attach_chain(), type = pos[i].attach_type();
if (likely (!chain || 0 == (type & ATTACH_TYPE_CURSIVE)))
return;
@ -130,7 +131,7 @@ struct CursivePosFormat1
unlikely (!this_record.entryAnchor.sanitize (&c->sanitizer, this))) return_trace (false);
hb_barrier ();
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_from;
if (unlikely (!skippy_iter.prev (&unsafe_from)))
@ -229,8 +230,13 @@ struct CursivePosFormat1
*/
reverse_cursive_minor_offset (pos, child, c->direction, parent);
pos[child].attach_type() = ATTACH_TYPE_CURSIVE;
pos[child].attach_chain() = (int) parent - (int) child;
if (pos[child].attach_chain() != (int) parent - (int) child)
{
pos[child].attach_chain() = 0;
goto overflow;
}
pos[child].attach_type() = ATTACH_TYPE_CURSIVE;
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
if (likely (HB_DIRECTION_IS_HORIZONTAL (c->direction)))
pos[child].y_offset = y_offset;
@ -256,6 +262,7 @@ struct CursivePosFormat1
i, j);
}
overflow:
buffer->idx++;
return_trace (true);
}

View File

@ -80,9 +80,8 @@ propagate_attachment_offsets (hb_glyph_position_t *pos,
{
/* Adjusts offsets of attached glyphs (both cursive and mark) to accumulate
* offset of glyph they are attached to. */
int chain = pos[i].attach_chain(), type = pos[i].attach_type();
if (likely (!chain))
return;
int chain = pos[i].attach_chain();
int type = pos[i].attach_type();
pos[i].attach_chain() = 0;
@ -94,7 +93,8 @@ propagate_attachment_offsets (hb_glyph_position_t *pos,
if (unlikely (!nesting_level))
return;
propagate_attachment_offsets (pos, len, j, direction, nesting_level - 1);
if (pos[j].attach_chain())
propagate_attachment_offsets (pos, len, j, direction, nesting_level - 1);
assert (!!(type & GPOS_impl::ATTACH_TYPE_MARK) ^ !!(type & GPOS_impl::ATTACH_TYPE_CURSIVE));
@ -110,17 +110,37 @@ propagate_attachment_offsets (hb_glyph_position_t *pos,
pos[i].x_offset += pos[j].x_offset;
pos[i].y_offset += pos[j].y_offset;
assert (j < i);
if (HB_DIRECTION_IS_FORWARD (direction))
for (unsigned int k = j; k < i; k++) {
pos[i].x_offset -= pos[k].x_advance;
pos[i].y_offset -= pos[k].y_advance;
}
else
for (unsigned int k = j + 1; k < i + 1; k++) {
pos[i].x_offset += pos[k].x_advance;
pos[i].y_offset += pos[k].y_advance;
}
// i is the position of the mark; j is the base.
if (j < i)
{
/* This is the common case: mark follows base.
* And currently the only way in OpenType. */
if (HB_DIRECTION_IS_FORWARD (direction))
for (unsigned int k = j; k < i; k++) {
pos[i].x_offset -= pos[k].x_advance;
pos[i].y_offset -= pos[k].y_advance;
}
else
for (unsigned int k = j + 1; k < i + 1; k++) {
pos[i].x_offset += pos[k].x_advance;
pos[i].y_offset += pos[k].y_advance;
}
}
else // j > i
{
/* This can happen with `kerx`: a mark attaching
* to a base after it in the logical order. */
if (HB_DIRECTION_IS_FORWARD (direction))
for (unsigned int k = i; k < j; k++) {
pos[i].x_offset += pos[k].x_advance;
pos[i].y_offset += pos[k].y_advance;
}
else
for (unsigned int k = i + 1; k < j + 1; k++) {
pos[i].x_offset -= pos[k].x_advance;
pos[i].y_offset -= pos[k].y_advance;
}
}
}
}
@ -149,8 +169,20 @@ GPOS::position_finish_offsets (hb_font_t *font, hb_buffer_t *buffer)
/* Handle attachments */
if (buffer->scratch_flags & HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT)
for (unsigned i = 0; i < len; i++)
propagate_attachment_offsets (pos, len, i, direction);
{
auto *pos = buffer->pos;
// https://github.com/harfbuzz/harfbuzz/issues/5514
if (HB_DIRECTION_IS_FORWARD (direction))
{
for (unsigned i = 0; i < len; i++)
if (pos[i].attach_chain())
propagate_attachment_offsets (pos, len, i, direction);
} else {
for (unsigned i = len; i-- > 0; )
if (pos[i].attach_chain())
propagate_attachment_offsets (pos, len, i, direction);
}
}
if (unlikely (font->slant_xy) &&
HB_DIRECTION_IS_HORIZONTAL (direction))

View File

@ -19,22 +19,30 @@ struct LigatureArray : List16OfOffset16To<LigatureAttach>
bool subset (hb_subset_context_t *c,
Iterator coverage,
unsigned class_count,
const hb_map_t *klass_mapping) const
const hb_map_t *klass_mapping,
hb_sorted_vector_t<hb_codepoint_t> &new_coverage /* OUT */) const
{
TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = c->plan->glyph_map_gsub;
auto *out = c->serializer->start_embed (this);
if (unlikely (!c->serializer->extend_min (out))) return_trace (false);
bool ret = false;
for (const auto _ : + hb_zip (coverage, *this)
| hb_filter (glyphset, hb_first))
| hb_filter (glyph_map, hb_first))
{
const LigatureAttach& src = (this + _.second);
bool non_empty = + hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
| hb_map ([&] (const unsigned index) { return !src.offset_is_null (index / class_count, index % class_count, class_count); })
| hb_any;
if (!non_empty) continue;
auto *matrix = out->serialize_append (c->serializer);
if (unlikely (!matrix)) return_trace (false);
const LigatureAttach& src = (this + _.second);
auto indexes =
+ hb_range (src.rows * class_count)
| hb_filter ([=] (unsigned index) { return klass_mapping->has (index % class_count); })
@ -44,6 +52,9 @@ struct LigatureArray : List16OfOffset16To<LigatureAttach>
this,
src.rows,
indexes);
hb_codepoint_t new_gid = glyph_map.get (_.first);
new_coverage.push (new_gid);
}
return_trace (ret);
}

View File

@ -47,10 +47,15 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
}
hb_glyph_position_t &o = buffer->cur_pos();
o.attach_chain() = (int) glyph_pos - (int) buffer->idx;
if (o.attach_chain() != (int) glyph_pos - (int) buffer->idx)
{
o.attach_chain() = 0;
goto overflow;
}
o.attach_type() = ATTACH_TYPE_MARK;
o.x_offset = roundf (base_x - mark_x);
o.y_offset = roundf (base_y - mark_y);
o.attach_type() = ATTACH_TYPE_MARK;
o.attach_chain() = (int) glyph_pos - (int) buffer->idx;
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
if (HB_BUFFER_MESSAGE_MORE && c->buffer->messaging ())
@ -60,6 +65,7 @@ struct MarkArray : Array16Of<MarkRecord> /* Array of MarkRecords--in Cove
c->buffer->idx, glyph_pos);
}
overflow:
buffer->idx++;
return_trace (true);
}

View File

@ -11,7 +11,7 @@ struct MarkBasePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
MarkBasePosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkBasePosFormat1_2<MediumTypes> format2;
@ -22,9 +22,9 @@ struct MarkBasePos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));

View File

@ -119,7 +119,7 @@ struct MarkBasePosFormat1_2
/* Now we search backwards for a non-mark glyph.
* We don't use skippy_iter.prev() to avoid O(n^2) behavior. */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_input;
skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
if (c->last_base_until > buffer->idx)
@ -209,19 +209,22 @@ struct MarkBasePosFormat1_2
;
new_coverage.reset ();
+ base_iter
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
if (!out->baseCoverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
hb_sorted_vector_t<unsigned> base_indexes;
for (const unsigned row : + base_iter
| hb_map (hb_second))
auto &base_array = (this+baseArray);
for (const auto _ : + base_iter)
{
unsigned row = _.second;
bool non_empty = + hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return !base_array.offset_is_null (row, col, (unsigned) classCount); })
| hb_any
;
if (!non_empty) continue;
hb_codepoint_t new_g = glyph_map.get ( _.first);
new_coverage.push (new_g);
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
@ -229,8 +232,12 @@ struct MarkBasePosFormat1_2
;
}
if (!new_coverage) return_trace (false);
if (!out->baseCoverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
return_trace (out->baseArray.serialize_subset (c, baseArray, this,
base_iter.len (),
new_coverage.length,
base_indexes.iter ()));
}
};

View File

@ -11,7 +11,7 @@ struct MarkLigPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
MarkLigPosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkLigPosFormat1_2<MediumTypes> format2;
@ -22,9 +22,9 @@ struct MarkLigPos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));

View File

@ -101,7 +101,7 @@ struct MarkLigPosFormat1_2
/* Now we search backwards for a non-mark glyph */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_input;
skippy_iter.set_lookup_props (LookupFlag::IgnoreMarks);
if (c->last_base_until > buffer->idx)
@ -200,19 +200,13 @@ struct MarkLigPosFormat1_2
&klass_mapping)))
return_trace (false);
auto new_ligature_coverage =
+ hb_iter (this + ligatureCoverage)
| hb_take ((this + ligatureArray).len)
| hb_map_retains_sorting (glyph_map)
| hb_filter ([] (hb_codepoint_t glyph) { return glyph != HB_MAP_VALUE_INVALID; })
;
if (!out->ligatureCoverage.serialize_serialize (c->serializer, new_ligature_coverage))
hb_sorted_vector_t<hb_codepoint_t> new_lig_coverage;
if (!out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage),
classCount, &klass_mapping, new_lig_coverage))
return_trace (false);
return_trace (out->ligatureArray.serialize_subset (c, ligatureArray, this,
hb_iter (this+ligatureCoverage),
classCount, &klass_mapping));
return_trace (out->ligatureCoverage.serialize_serialize (c->serializer, new_lig_coverage.iter ()));
}
};

View File

@ -11,7 +11,7 @@ struct MarkMarkPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
MarkMarkPosFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MarkMarkPosFormat1_2<MediumTypes> format2;
@ -22,9 +22,9 @@ struct MarkMarkPos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));

View File

@ -100,7 +100,7 @@ struct MarkMarkPosFormat1_2
if (likely (mark1_index == NOT_COVERED)) return_trace (false);
/* now we search backwards for a suitable mark glyph until a non-mark glyph */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
skippy_iter.set_lookup_props (c->lookup_props & ~(uint32_t)LookupFlag::IgnoreFlags);
unsigned unsafe_from;
@ -196,19 +196,23 @@ struct MarkMarkPosFormat1_2
;
new_coverage.reset ();
+ mark2_iter
| hb_map (hb_first)
| hb_map (glyph_map)
| hb_sink (new_coverage)
;
if (!out->mark2Coverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
hb_sorted_vector_t<unsigned> mark2_indexes;
for (const unsigned row : + mark2_iter
| hb_map (hb_second))
auto &mark2_array = (this+mark2Array);
for (const auto _ : + mark2_iter)
{
unsigned row = _.second;
bool non_empty = + hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return !mark2_array.offset_is_null (row, col, (unsigned) classCount); })
| hb_any
;
if (!non_empty) continue;
hb_codepoint_t new_g = glyph_map.get ( _.first);
new_coverage.push (new_g);
+ hb_range ((unsigned) classCount)
| hb_filter (klass_mapping)
| hb_map ([&] (const unsigned col) { return row * (unsigned) classCount + col; })
@ -216,6 +220,10 @@ struct MarkMarkPosFormat1_2
;
}
if (!new_coverage) return_trace (false);
if (!out->mark2Coverage.serialize_serialize (c->serializer, new_coverage.iter ()))
return_trace (false);
return_trace (out->mark2Array.serialize_subset (c, mark2Array, this,
mark2_iter.len (),
mark2_indexes.iter ()));

View File

@ -12,7 +12,7 @@ struct PairPos
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
PairPosFormat1_3<SmallTypes> format1;
PairPosFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
@ -25,9 +25,9 @@ struct PairPos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K

View File

@ -103,52 +103,35 @@ struct PairPosFormat1_3
const Coverage &get_coverage () const { return this+coverage; }
unsigned cache_cost () const
struct external_cache_t
{
return (this+coverage).cost ();
}
static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
hb_ot_layout_mapping_cache_t coverage;
};
void *external_cache_create () const
{
switch (op)
external_cache_t *cache = (external_cache_t *) hb_malloc (sizeof (external_cache_t));
if (likely (cache))
{
case hb_ot_lookup_cache_op_t::CREATE:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) hb_malloc (sizeof (hb_ot_lookup_cache_t));
if (likely (cache))
cache->clear ();
return cache;
}
case hb_ot_lookup_cache_op_t::ENTER:
return (void *) true;
case hb_ot_lookup_cache_op_t::LEAVE:
return nullptr;
case hb_ot_lookup_cache_op_t::DESTROY:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) p;
hb_free (cache);
return nullptr;
}
cache->coverage.clear ();
}
return nullptr;
return cache;
}
bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
bool _apply (hb_ot_apply_context_t *c, bool cached) const
bool apply (hb_ot_apply_context_t *c, void *external_cache) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
hb_ot_lookup_cache_t *cache = cached ? (hb_ot_lookup_cache_t *) c->lookup_accel->cache : nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache);
external_cache_t *cache = (external_cache_t *) external_cache;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache ? &cache->coverage : nullptr);
#else
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
#endif
if (index == NOT_COVERED) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (unlikely (!skippy_iter.next (&unsafe_to)))

View File

@ -123,63 +123,39 @@ struct PairPosFormat2_4 : ValueBase
const Coverage &get_coverage () const { return this+coverage; }
struct pair_pos_cache_t
struct external_cache_t
{
hb_ot_lookup_cache_t coverage;
hb_ot_lookup_cache_t first;
hb_ot_lookup_cache_t second;
hb_ot_layout_mapping_cache_t coverage;
hb_ot_layout_mapping_cache_t first;
hb_ot_layout_mapping_cache_t second;
};
unsigned cache_cost () const
void *external_cache_create () const
{
return (this+coverage).cost () + (this+classDef1).cost () + (this+classDef2).cost ();
}
static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
{
switch (op)
external_cache_t *cache = (external_cache_t *) hb_malloc (sizeof (external_cache_t));
if (likely (cache))
{
case hb_ot_lookup_cache_op_t::CREATE:
{
pair_pos_cache_t *cache = (pair_pos_cache_t *) hb_malloc (sizeof (pair_pos_cache_t));
if (likely (cache))
{
cache->coverage.clear ();
cache->first.clear ();
cache->second.clear ();
}
return cache;
}
case hb_ot_lookup_cache_op_t::ENTER:
return (void *) true;
case hb_ot_lookup_cache_op_t::LEAVE:
return nullptr;
case hb_ot_lookup_cache_op_t::DESTROY:
{
pair_pos_cache_t *cache = (pair_pos_cache_t *) p;
hb_free (cache);
return nullptr;
}
cache->coverage.clear ();
cache->first.clear ();
cache->second.clear ();
}
return nullptr;
return cache;
}
bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
bool _apply (hb_ot_apply_context_t *c, bool cached) const
bool apply (hb_ot_apply_context_t *c, void *external_cache) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
pair_pos_cache_t *cache = cached ? (pair_pos_cache_t *) c->lookup_accel->cache : nullptr;
external_cache_t *cache = (external_cache_t *) external_cache;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache ? &cache->coverage : nullptr);
#else
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
#endif
if (index == NOT_COVERED) return_trace (false);
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_input;
skippy_iter.reset_fast (buffer->idx);
unsigned unsafe_to;
if (unlikely (!skippy_iter.next (&unsafe_to)))

View File

@ -12,7 +12,7 @@ struct SinglePos
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
SinglePosFormat1 format1;
SinglePosFormat2 format2;
} u;
@ -41,7 +41,7 @@ struct SinglePos
const hb_hashmap_t<unsigned, hb_pair_t<unsigned, int>> *layout_variation_idx_delta_map,
unsigned newFormat)
{
if (unlikely (!c->extend_min (u.format))) return;
if (unlikely (!c->extend_min (u.format.v))) return;
unsigned format = 2;
ValueFormat new_format;
new_format = newFormat;
@ -49,8 +49,8 @@ struct SinglePos
if (glyph_val_iter_pairs)
format = get_format (glyph_val_iter_pairs);
u.format = format;
switch (u.format) {
u.format.v = format;
switch (u.format.v) {
case 1: u.format1.serialize (c,
src,
glyph_val_iter_pairs,
@ -70,9 +70,9 @@ struct SinglePos
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());

View File

@ -56,9 +56,14 @@ struct ValueFormat : HBUINT16
* PosTable (may be NULL) */
#endif
IntType& operator = (uint16_t i) { v = i; return *this; }
NumType& operator = (uint16_t i) { v = i; return *this; }
unsigned int get_len () const { return hb_popcount ((unsigned int) *this); }
// Note: spec says skip 2 bytes per bit in the valueformat. But reports
// from Microsoft developers indicate that only the fields that are
// currently defined are counted. We don't expect any new fields to
// be added to ValueFormat. As such, we use the faster hb_popcount8
// that only processes the lowest 8 bits.
unsigned int get_len () const { return hb_popcount8 ((uint8_t) *this); }
unsigned int get_size () const { return get_len () * Value::static_size; }
hb_vector_t<unsigned> get_device_table_indices () const {
@ -111,8 +116,8 @@ struct ValueFormat : HBUINT16
if (!has_device ()) return ret;
bool use_x_device = font->x_ppem || font->num_coords;
bool use_y_device = font->y_ppem || font->num_coords;
bool use_x_device = font->x_ppem || font->has_nonzero_coords;
bool use_y_device = font->y_ppem || font->has_nonzero_coords;
if (!use_x_device && !use_y_device) return ret;

View File

@ -91,6 +91,19 @@ struct AlternateSet
return alternates.len;
}
void
collect_alternates (hb_codepoint_t gid,
hb_map_t *alternate_count /* IN/OUT */,
hb_map_t *alternate_glyphs /* IN/OUT */) const
{
+ hb_enumerate (alternates)
| hb_map ([gid] (hb_pair_t<unsigned, hb_codepoint_t> _) { return hb_pair (gid + (_.first << 24), _.second); })
| hb_apply ([&] (const hb_pair_t<hb_codepoint_t, hb_codepoint_t> &p) -> void
{ _hb_collect_glyph_alternates_add (p.first, p.second,
alternate_count, alternate_glyphs); })
;
}
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c,

View File

@ -12,7 +12,7 @@ struct AlternateSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
AlternateSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
AlternateSubstFormat1_2<MediumTypes> format2;
@ -23,9 +23,9 @@ struct AlternateSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
@ -42,10 +42,10 @@ struct AlternateSubst
hb_array_t<const HBGlyphID16> alternate_glyphs_list)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
if (unlikely (!c->extend_min (u.format.v))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
u.format.v = format;
switch (u.format.v) {
case 1: return_trace (u.format1.serialize (c, glyphs, alternate_len_list, alternate_glyphs_list));
default:return_trace (false);
}

View File

@ -69,6 +69,19 @@ struct AlternateSubstFormat1_2
{ return (this+alternateSet[(this+coverage).get_coverage (gid)])
.get_alternates (start_offset, alternate_count, alternate_glyphs); }
void
collect_glyph_alternates (hb_map_t *alternate_count /* IN/OUT */,
hb_map_t *alternate_glyphs /* IN/OUT */) const
{
+ hb_iter (alternateSet)
| hb_map (hb_add (this))
| hb_zip (this+coverage)
| hb_apply ([&] (const hb_pair_t<const AlternateSet<Types> &, hb_codepoint_t> _) {
_.first.collect_alternates (_.second, alternate_count, alternate_glyphs);
})
;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);

View File

@ -44,6 +44,18 @@ struct Ligature
c->output->add (ligGlyph);
}
template <typename set_t>
void collect_second (set_t &s) const
{
if (unlikely (!component.get_length ()))
{
// A ligature without any components. Anything matches.
s = set_t::full ();
return;
}
s.add (component.arrayZ[0]);
}
bool would_apply (hb_would_apply_context_t *c) const
{
if (c->len != component.lenP1)
@ -91,15 +103,6 @@ struct Ligature
unsigned int total_component_count = 0;
if (unlikely (count > HB_MAX_CONTEXT_LENGTH)) return false;
unsigned match_positions_stack[4];
unsigned *match_positions = match_positions_stack;
if (unlikely (count > ARRAY_LENGTH (match_positions_stack)))
{
match_positions = (unsigned *) hb_malloc (hb_max (count, 1u) * sizeof (unsigned));
if (unlikely (!match_positions))
return_trace (false);
}
unsigned int match_end = 0;
if (likely (!match_input (c, count,
@ -107,12 +110,9 @@ struct Ligature
match_glyph,
nullptr,
&match_end,
match_positions,
&total_component_count)))
{
c->buffer->unsafe_to_concat (c->buffer->idx, match_end);
if (match_positions != match_positions_stack)
hb_free (match_positions);
return_trace (false);
}
@ -129,10 +129,10 @@ struct Ligature
match_end += delta;
for (unsigned i = 0; i < count; i++)
{
match_positions[i] += delta;
c->match_positions[i] += delta;
if (i)
*p++ = ',';
snprintf (p, sizeof(buf) - (p - buf), "%u", match_positions[i]);
snprintf (p, sizeof(buf) - (p - buf), "%u", c->match_positions[i]);
p += strlen(p);
}
@ -143,7 +143,6 @@ struct Ligature
ligate_input (c,
count,
match_positions,
match_end,
ligGlyph,
total_component_count);
@ -156,8 +155,6 @@ struct Ligature
pos);
}
if (match_positions != match_positions_stack)
hb_free (match_positions);
return_trace (true);
}

View File

@ -11,11 +11,11 @@ namespace GSUB_impl {
template <typename Types>
struct LigatureSet
{
protected:
public:
Array16OfOffset16To<Ligature<Types>>
ligature; /* Array LigatureSet tables
* ordered by preference */
public:
DEFINE_SIZE_ARRAY (2, ligature);
bool sanitize (hb_sanitize_context_t *c) const
@ -62,6 +62,15 @@ struct LigatureSet
;
}
template <typename set_t>
void collect_seconds (set_t &s) const
{
+ hb_iter (ligature)
| hb_map (hb_add (this))
| hb_apply ([&s] (const Ligature<Types> &_) { _.collect_second (s); })
;
}
bool would_apply (hb_would_apply_context_t *c) const
{
return
@ -72,14 +81,14 @@ struct LigatureSet
;
}
bool apply (hb_ot_apply_context_t *c) const
bool apply (hb_ot_apply_context_t *c, const hb_set_digest_t *seconds = nullptr) const
{
TRACE_APPLY (this);
unsigned int num_ligs = ligature.len;
#ifndef HB_NO_OT_RULESETS_FAST_PATH
if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 4)
if (HB_OPTIMIZE_SIZE_VAL || num_ligs <= 1)
#endif
{
slow:
@ -91,21 +100,21 @@ struct LigatureSet
return_trace (false);
}
/* This version is optimized for speed by matching the first component
/* This version is optimized for speed by matching the second component
* of the ligature here, instead of calling into the ligation code.
*
* This is replicated in ChainRuleSet and RuleSet. */
hb_ot_apply_context_t::skipping_iterator_t &skippy_iter = c->iter_input;
auto &skippy_iter = c->iter_context;
skippy_iter.reset (c->buffer->idx);
skippy_iter.set_match_func (match_always, nullptr);
skippy_iter.set_glyph_data ((HBUINT16 *) nullptr);
unsigned unsafe_to;
hb_codepoint_t first = (unsigned) -1;
hb_codepoint_t second = (unsigned) -1;
bool matched = skippy_iter.next (&unsafe_to);
if (likely (matched))
{
first = c->buffer->info[skippy_iter.idx].codepoint;
second = c->buffer->info[skippy_iter.idx].codepoint;
unsafe_to = skippy_iter.idx + 1;
if (skippy_iter.may_skip (c->buffer->info[skippy_iter.idx]))
@ -118,13 +127,14 @@ struct LigatureSet
else
goto slow;
if (seconds && !seconds->may_have (second))
return_trace (false);
bool unsafe_to_concat = false;
for (unsigned int i = 0; i < num_ligs; i++)
{
const auto &lig = this+ligature.arrayZ[i];
if (unlikely (lig.component.lenP1 <= 1) ||
lig.component.arrayZ[0] == first)
lig.component.arrayZ[0] == second)
{
if (lig.apply (c))
{

View File

@ -12,7 +12,7 @@ struct LigatureSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
LigatureSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
LigatureSubstFormat1_2<MediumTypes> format2;
@ -23,9 +23,9 @@ struct LigatureSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
@ -45,10 +45,10 @@ struct LigatureSubst
hb_array_t<const HBGlyphID16> component_list /* Starting from second for each ligature */)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
if (unlikely (!c->extend_min (u.format.v))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
u.format.v = format;
switch (u.format.v) {
case 1: return_trace (u.format1.serialize (c,
first_glyphs,
ligature_per_first_glyph_count_list,

View File

@ -78,52 +78,44 @@ struct LigatureSubstFormat1_2
return lig_set.would_apply (c);
}
unsigned cache_cost () const
struct external_cache_t
{
return (this+coverage).cost ();
}
static void * cache_func (void *p, hb_ot_lookup_cache_op_t op)
hb_ot_layout_mapping_cache_t coverage;
hb_set_digest_t seconds;
};
void *external_cache_create () const
{
switch (op)
external_cache_t *cache = (external_cache_t *) hb_malloc (sizeof (external_cache_t));
if (likely (cache))
{
case hb_ot_lookup_cache_op_t::CREATE:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) hb_malloc (sizeof (hb_ot_lookup_cache_t));
if (likely (cache))
cache->clear ();
return cache;
}
case hb_ot_lookup_cache_op_t::ENTER:
return (void *) true;
case hb_ot_lookup_cache_op_t::LEAVE:
return nullptr;
case hb_ot_lookup_cache_op_t::DESTROY:
{
hb_ot_lookup_cache_t *cache = (hb_ot_lookup_cache_t *) p;
hb_free (cache);
return nullptr;
}
cache->coverage.clear ();
cache->seconds.init ();
+ hb_iter (ligatureSet)
| hb_map (hb_add (this))
| hb_apply ([cache] (const LigatureSet<Types> &_) { _.collect_seconds (cache->seconds); })
;
}
return nullptr;
return cache;
}
bool apply_cached (hb_ot_apply_context_t *c) const { return _apply (c, true); }
bool apply (hb_ot_apply_context_t *c) const { return _apply (c, false); }
bool _apply (hb_ot_apply_context_t *c, bool cached) const
bool apply (hb_ot_apply_context_t *c, void *external_cache) const
{
TRACE_APPLY (this);
hb_buffer_t *buffer = c->buffer;
#ifndef HB_NO_OT_LAYOUT_LOOKUP_CACHE
hb_ot_lookup_cache_t *cache = cached ? (hb_ot_lookup_cache_t *) c->lookup_accel->cache : nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache);
external_cache_t *cache = (external_cache_t *) external_cache;
const hb_set_digest_t *seconds = cache ? &cache->seconds : nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint, cache ? &cache->coverage : nullptr);
#else
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
const hb_set_digest_t *seconds = nullptr;
unsigned int index = (this+coverage).get_coverage (buffer->cur().codepoint);
#endif
if (index == NOT_COVERED) return_trace (false);
const auto &lig_set = this+ligatureSet[index];
return_trace (lig_set.apply (c));
return_trace (lig_set.apply (c, seconds));
}
bool serialize (hb_serialize_context_t *c,

View File

@ -12,7 +12,7 @@ struct MultipleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
MultipleSubstFormat1_2<SmallTypes> format1;
#ifndef HB_NO_BEYOND_64K
MultipleSubstFormat1_2<MediumTypes> format2;
@ -24,9 +24,9 @@ struct MultipleSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
@ -41,10 +41,10 @@ struct MultipleSubst
Iterator it)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
if (unlikely (!c->extend_min (u.format.v))) return_trace (false);
unsigned int format = 1;
u.format = format;
switch (u.format) {
u.format.v = format;
switch (u.format.v) {
case 1: return_trace (u.format1.serialize (c, it));
default:return_trace (false);
}

View File

@ -12,7 +12,7 @@ struct ReverseChainSingleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
ReverseChainSingleSubstFormat1 format1;
} u;
@ -20,9 +20,9 @@ struct ReverseChainSingleSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
default:return_trace (c->default_return_value ());
}

View File

@ -115,7 +115,7 @@ struct Sequence
for (unsigned i = c->buffer->idx - count; i < c->buffer->idx; i++)
{
if (buf < p)
if (buf < p && sizeof(buf) - 1u > unsigned (p - buf))
*p++ = ',';
snprintf (p, sizeof(buf) - (p - buf), "%u", i);
p += strlen(p);

View File

@ -13,7 +13,7 @@ struct SingleSubst
{
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
SingleSubstFormat1_3<SmallTypes> format1;
SingleSubstFormat2_4<SmallTypes> format2;
#ifndef HB_NO_BEYOND_64K
@ -27,9 +27,9 @@ struct SingleSubst
template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{
if (unlikely (!c->may_dispatch (this, &u.format))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format);
switch (u.format) {
if (unlikely (!c->may_dispatch (this, &u.format.v))) return c->no_dispatch_return_value ();
TRACE_DISPATCH (this, u.format.v);
switch (u.format.v) {
case 1: return_trace (c->dispatch (u.format1, std::forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2, std::forward<Ts> (ds)...));
#ifndef HB_NO_BEYOND_64K
@ -47,7 +47,7 @@ struct SingleSubst
Iterator glyphs)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false);
if (unlikely (!c->extend_min (u.format.v))) return_trace (false);
unsigned format = 2;
unsigned delta = 0;
if (glyphs)
@ -71,8 +71,8 @@ struct SingleSubst
if (!hb_all (++(+glyphs), delta, get_delta)) format += 1;
}
u.format = format;
switch (u.format) {
u.format.v = format;
switch (u.format.v) {
case 1: return_trace (u.format1.serialize (c,
+ glyphs
| hb_map_retains_sorting (hb_first),

View File

@ -123,6 +123,21 @@ struct SingleSubstFormat1_3
return 1;
}
void
collect_glyph_alternates (hb_map_t *alternate_count /* IN/OUT */,
hb_map_t *alternate_glyphs /* IN/OUT */) const
{
hb_codepoint_t d = deltaGlyphID;
hb_codepoint_t mask = get_mask ();
+ hb_iter (this+coverage)
| hb_map ([d, mask] (hb_codepoint_t g) { return hb_pair (g, (g + d) & mask); })
| hb_apply ([&] (const hb_pair_t<hb_codepoint_t, hb_codepoint_t> &p) -> void
{ _hb_collect_glyph_alternates_add (p.first, p.second,
alternate_count, alternate_glyphs); })
;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);

View File

@ -100,6 +100,17 @@ struct SingleSubstFormat2_4
return 1;
}
void
collect_glyph_alternates (hb_map_t *alternate_count /* IN/OUT */,
hb_map_t *alternate_glyphs /* IN/OUT */) const
{
+ hb_zip (this+coverage, substitute)
| hb_apply ([&] (const hb_pair_t<hb_codepoint_t, hb_codepoint_t> &p) -> void
{ _hb_collect_glyph_alternates_add (p.first, p.second,
alternate_count, alternate_glyphs); })
;
}
bool apply (hb_ot_apply_context_t *c) const
{
TRACE_APPLY (this);

View File

@ -29,8 +29,11 @@
#ifndef OT_LAYOUT_TYPES_HH
#define OT_LAYOUT_TYPES_HH
using hb_ot_lookup_cache_t = hb_cache_t<15, 8, 7>;
static_assert (sizeof (hb_ot_lookup_cache_t) == 256, "");
using hb_ot_layout_mapping_cache_t = hb_cache_t<16, 8, 8>;
static_assert (sizeof (hb_ot_layout_mapping_cache_t) == 512, "");
using hb_ot_layout_binary_cache_t = hb_cache_t<14, 1, 8>;
static_assert (sizeof (hb_ot_layout_binary_cache_t) == 256, "");
namespace OT {
namespace Layout {

View File

@ -0,0 +1,421 @@
#include "VARC.hh"
#ifndef HB_NO_VAR_COMPOSITES
#include "../../../hb-draw.hh"
#include "../../../hb-ot-layout-common.hh"
#include "../../../hb-ot-layout-gdef-table.hh"
namespace OT {
//namespace Var {
#ifndef HB_NO_DRAW
struct hb_transforming_pen_context_t
{
hb_transform_t<> transform;
hb_draw_funcs_t *dfuncs;
void *data;
hb_draw_state_t *st;
};
static void
hb_transforming_pen_move_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (to_x, to_y);
c->dfuncs->move_to (c->data, *c->st, to_x, to_y);
}
static void
hb_transforming_pen_line_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (to_x, to_y);
c->dfuncs->line_to (c->data, *c->st, to_x, to_y);
}
static void
hb_transforming_pen_quadratic_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float control_x, float control_y,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (control_x, control_y);
c->transform.transform_point (to_x, to_y);
c->dfuncs->quadratic_to (c->data, *c->st, control_x, control_y, to_x, to_y);
}
static void
hb_transforming_pen_cubic_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
float control1_x, float control1_y,
float control2_x, float control2_y,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->transform.transform_point (control1_x, control1_y);
c->transform.transform_point (control2_x, control2_y);
c->transform.transform_point (to_x, to_y);
c->dfuncs->cubic_to (c->data, *c->st, control1_x, control1_y, control2_x, control2_y, to_x, to_y);
}
static void
hb_transforming_pen_close_path (hb_draw_funcs_t *dfuncs HB_UNUSED,
void *data,
hb_draw_state_t *st,
void *user_data HB_UNUSED)
{
hb_transforming_pen_context_t *c = (hb_transforming_pen_context_t *) data;
c->dfuncs->close_path (c->data, *c->st);
}
static inline void free_static_transforming_pen_funcs ();
static struct hb_transforming_pen_funcs_lazy_loader_t : hb_draw_funcs_lazy_loader_t<hb_transforming_pen_funcs_lazy_loader_t>
{
static hb_draw_funcs_t *create ()
{
hb_draw_funcs_t *funcs = hb_draw_funcs_create ();
hb_draw_funcs_set_move_to_func (funcs, hb_transforming_pen_move_to, nullptr, nullptr);
hb_draw_funcs_set_line_to_func (funcs, hb_transforming_pen_line_to, nullptr, nullptr);
hb_draw_funcs_set_quadratic_to_func (funcs, hb_transforming_pen_quadratic_to, nullptr, nullptr);
hb_draw_funcs_set_cubic_to_func (funcs, hb_transforming_pen_cubic_to, nullptr, nullptr);
hb_draw_funcs_set_close_path_func (funcs, hb_transforming_pen_close_path, nullptr, nullptr);
hb_draw_funcs_make_immutable (funcs);
hb_atexit (free_static_transforming_pen_funcs);
return funcs;
}
} static_transforming_pen_funcs;
/* atexit hook: release the lazily-created transforming-pen draw funcs. */
static inline void
free_static_transforming_pen_funcs ()
{
  static_transforming_pen_funcs.free_instance ();
}
/* Accessor for the shared, immutable transforming-pen draw-funcs singleton. */
static hb_draw_funcs_t *
hb_transforming_pen_get_funcs ()
{
  hb_draw_funcs_t *funcs = static_transforming_pen_funcs.get_unconst ();
  return funcs;
}
/* Parse one variable-composite component out of `total_record`, apply its
 * condition / axis overrides / transform, recurse into the referenced glyph
 * via VARC::get_path_at, and return the bytes remaining after this
 * component.  Returning an empty hb_ubytes_t () signals a malformed
 * (truncated) record to the caller. */
hb_ubytes_t
VarComponent::get_path_at (const hb_varc_context_t &c,
			   hb_codepoint_t parent_gid,
			   hb_array_t<const int> coords,
			   hb_transform_t<> total_transform,
			   hb_ubytes_t total_record,
			   hb_scalar_cache_t *cache) const
{
  const unsigned char *end = total_record.arrayZ + total_record.length;
  const unsigned char *record = total_record.arrayZ;
  auto &VARC = *c.font->face->table.VARC->table;
  auto &varStore = &VARC+VARC.varStore;
  /* Bounds-checked read of a variable-length uint32 (HBUINT32VAR);
   * bails out of the whole function with an empty result on truncation. */
#define READ_UINT32VAR(name) \
  HB_STMT_START { \
    if (unlikely (unsigned (end - record) < HBUINT32VAR::min_size)) return hb_ubytes_t (); \
    hb_barrier (); \
    auto &varint = * (const HBUINT32VAR *) record; \
    unsigned size = varint.get_size (); \
    if (unlikely (unsigned (end - record) < size)) return hb_ubytes_t (); \
    name = (uint32_t) varint; \
    record += size; \
  } HB_STMT_END
  uint32_t flags;
  READ_UINT32VAR (flags);
  // gid: the referenced component glyph; stored as 24-bit or 16-bit per flags.
  hb_codepoint_t gid = 0;
  if (flags & (unsigned) flags_t::GID_IS_24BIT)
  {
    if (unlikely (unsigned (end - record) < HBGlyphID24::static_size))
      return hb_ubytes_t ();
    hb_barrier ();
    gid = * (const HBGlyphID24 *) record;
    record += HBGlyphID24::static_size;
  }
  else
  {
    if (unlikely (unsigned (end - record) < HBGlyphID16::static_size))
      return hb_ubytes_t ();
    hb_barrier ();
    gid = * (const HBGlyphID16 *) record;
    record += HBGlyphID16::static_size;
  }
  // Condition: may turn drawing off, but parsing must still continue
  // below so that `record` advances past the whole component.
  bool show = true;
  if (flags & (unsigned) flags_t::HAVE_CONDITION)
  {
    unsigned conditionIndex;
    READ_UINT32VAR (conditionIndex);
    const auto &condition = (&VARC+VARC.conditionList)[conditionIndex];
    auto instancer = MultiItemVarStoreInstancer(&varStore, nullptr, coords, cache);
    show = condition.evaluate (coords.arrayZ, coords.length, &instancer);
  }
  // Axis values: per-component overrides of selected axes, decoded from
  // a shared axis-index list plus packed tuple values.
  auto &axisIndices = c.scratch.axisIndices;
  axisIndices.clear ();
  auto &axisValues = c.scratch.axisValues;
  axisValues.clear ();
  if (flags & (unsigned) flags_t::HAVE_AXES)
  {
    unsigned axisIndicesIndex;
    READ_UINT32VAR (axisIndicesIndex);
    axisIndices.extend ((&VARC+VARC.axisIndicesList)[axisIndicesIndex]);
    axisValues.resize (axisIndices.length);
    const HBUINT8 *p = (const HBUINT8 *) record;
    TupleValues::decompile (p, axisValues, (const HBUINT8 *) end);
    record = (const unsigned char *) p;
  }
  // Apply variations if any.  Note: the varIdx must be consumed from the
  // record even when `show` is false or there are no coords.
  if (flags & (unsigned) flags_t::AXIS_VALUES_HAVE_VARIATION)
  {
    uint32_t axisValuesVarIdx;
    READ_UINT32VAR (axisValuesVarIdx);
    if (show && coords && !axisValues.in_error ())
      varStore.get_delta (axisValuesVarIdx, coords, axisValues.as_array (), cache);
  }
  auto component_coords = coords;
  /* Copying coords is expensive; so we have put an arbitrary
   * limit on the max number of coords for now. */
  if ((flags & (unsigned) flags_t::RESET_UNSPECIFIED_AXES) ||
      coords.length > HB_VAR_COMPOSITE_MAX_AXES)
    component_coords = hb_array (c.font->coords, c.font->num_coords);
  // Transform
  uint32_t transformVarIdx = VarIdx::NO_VARIATION;
  if (flags & (unsigned) flags_t::TRANSFORM_HAS_VARIATION)
    READ_UINT32VAR (transformVarIdx);
  /* Expands to one PROCESS_TRANSFORM_COMPONENT per optional transform
   * field, in wire order; each expansion site defines its own
   * PROCESS_TRANSFORM_COMPONENT before invoking this. */
#define PROCESS_TRANSFORM_COMPONENTS \
	HB_STMT_START { \
	PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TRANSLATE_X, translateX); \
	PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TRANSLATE_Y, translateY); \
	PROCESS_TRANSFORM_COMPONENT (F4DOT12, HB_PI, HAVE_ROTATION, rotation); \
	PROCESS_TRANSFORM_COMPONENT (F6DOT10, 1.0f, HAVE_SCALE_X, scaleX); \
	PROCESS_TRANSFORM_COMPONENT (F6DOT10, 1.0f, HAVE_SCALE_Y, scaleY); \
	PROCESS_TRANSFORM_COMPONENT (F4DOT12, HB_PI, HAVE_SKEW_X, skewX); \
	PROCESS_TRANSFORM_COMPONENT (F4DOT12, HB_PI, HAVE_SKEW_Y, skewY); \
	PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TCENTER_X, tCenterX); \
	PROCESS_TRANSFORM_COMPONENT (FWORD, 1.0f, HAVE_TCENTER_Y, tCenterY); \
	} HB_STMT_END
  hb_transform_decomposed_t<> transform;
  // Read transform components: each present field is a raw int16 on the
  // wire; scale it by `mult` here (HB_PI for the angle-like fields).
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
	if (flags & (unsigned) flags_t::flag) \
	{ \
	  static_assert (type::static_size == HBINT16::static_size, ""); \
	  if (unlikely (unsigned (end - record) < HBINT16::static_size)) \
	    return hb_ubytes_t (); \
	  hb_barrier (); \
	  transform.name = mult * * (const HBINT16 *) record; \
	  record += HBINT16::static_size; \
	}
  PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
  // Read reserved records: one uint32var per set reserved-flag bit
  // (`i &= i - 1` clears the lowest set bit each iteration).
  unsigned i = flags & (unsigned) flags_t::RESERVED_MASK;
  while (i)
  {
    HB_UNUSED uint32_t discard;
    READ_UINT32VAR (discard);
    i &= i - 1;
  }
  /* Parsing is over now. */
  if (show)
  {
    // Only use coord_setter if there's actually any axis overrides.
    coord_setter_t coord_setter (axisIndices ? component_coords : hb_array<int> ());
    // Go backwards, to reduce coord_setter vector reallocations.
    for (unsigned i = axisIndices.length; i; i--)
      coord_setter[axisIndices[i - 1]] = axisValues[i - 1];
    if (axisIndices)
      component_coords = coord_setter.get_coords ();
    // Apply transform variations if any: pack the present fields into a
    // flat array (in un-multiplied wire units), apply deltas, unpack.
    if (transformVarIdx != VarIdx::NO_VARIATION && coords)
    {
      float transformValues[9];
      unsigned numTransformValues = 0;
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
	if (flags & (unsigned) flags_t::flag) \
	  transformValues[numTransformValues++] = transform.name / mult;
      PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
      varStore.get_delta (transformVarIdx, coords, hb_array (transformValues, numTransformValues), cache);
      numTransformValues = 0;
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
	if (flags & (unsigned) flags_t::flag) \
	  transform.name = transformValues[numTransformValues++] * mult;
      PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
    }
    // Divide them by their divisors: round to the raw int16 wire value and
    // reinterpret through the fixed-point type, which applies that type's
    // implicit divisor (e.g. 4096 for F4DOT12, 1024 for F6DOT10).
#define PROCESS_TRANSFORM_COMPONENT(type, mult, flag, name) \
	if (flags & (unsigned) flags_t::flag) \
	{ \
	  HBINT16 int_v; \
	  int_v = roundf (transform.name); \
	  type typed_v = * (const type *) &int_v; \
	  float float_v = (float) typed_v; \
	  transform.name = float_v; \
	}
    PROCESS_TRANSFORM_COMPONENTS;
#undef PROCESS_TRANSFORM_COMPONENT
    // Absent scaleY defaults to uniform scaling.
    if (!(flags & (unsigned) flags_t::HAVE_SCALE_Y))
      transform.scaleY = transform.scaleX;
    total_transform.transform (transform.to_transform ());
    // Undo the font scale here; the nested VARC::get_path_at call
    // re-applies x_multf/y_multf before recursing.
    total_transform.scale (c.font->x_mult ? 1.f / c.font->x_multf : 0.f,
			   c.font->y_mult ? 1.f / c.font->y_multf : 0.f);
    // The scalar cache is only valid for the same coord array; drop it
    // if this component switched to different coords.
    bool same_coords = component_coords.length == coords.length &&
		       component_coords.arrayZ == coords.arrayZ;
    c.depth_left--;
    VARC.get_path_at (c, gid,
		      component_coords, total_transform,
		      parent_gid,
		      same_coords ? cache : nullptr);
    c.depth_left++;
  }
#undef PROCESS_TRANSFORM_COMPONENTS
#undef READ_UINT32VAR
  return hb_ubytes_t (record, end - record);
}
/* Draw (or measure, when c.extents is set) `glyph` under `transform`.
 * If the glyph is not in the VARC coverage (or equals its own parent),
 * it is rendered as a base outline via glyf/CFF2/CFF1; otherwise its
 * VARC composite record is walked, recursing per component.
 * Returns false only when a base outline could not be produced. */
bool
VARC::get_path_at (const hb_varc_context_t &c,
		   hb_codepoint_t glyph,
		   hb_array_t<const int> coords,
		   hb_transform_t<> transform,
		   hb_codepoint_t parent_glyph,
		   hb_scalar_cache_t *parent_cache) const
{
  // Don't recurse on the same glyph.
  unsigned idx = glyph == parent_glyph ?
		 NOT_COVERED :
		 (this+coverage).get_coverage (glyph);
  if (idx == NOT_COVERED)
  {
    if (c.draw_session)
    {
      // Build a transforming pen to apply the transform.
      hb_draw_funcs_t *transformer_funcs = hb_transforming_pen_get_funcs ();
      hb_transforming_pen_context_t context {transform,
					     c.draw_session->funcs,
					     c.draw_session->draw_data,
					     &c.draw_session->st};
      hb_draw_session_t transformer_session {transformer_funcs, &context};
      // Skip the transforming pen entirely for the identity transform.
      hb_draw_session_t &shape_draw_session = transform.is_identity () ? *c.draw_session : transformer_session;
      // Try the outline sources in order: glyf, then CFF2, then CFF1.
      if (c.font->face->table.glyf->get_path_at (c.font, glyph, shape_draw_session, coords, c.scratch.glyf_scratch)) return true;
#ifndef HB_NO_CFF
      if (c.font->face->table.cff2->get_path_at (c.font, glyph, shape_draw_session, coords)) return true;
      if (c.font->face->table.cff1->get_path (c.font, glyph, shape_draw_session)) return true; // Doesn't have variations
#endif
      return false;
    }
    else if (c.extents)
    {
      // Extents mode: fetch untransformed extents, map them through the
      // component transform, and union into the accumulated result.
      hb_glyph_extents_t glyph_extents;
      if (!c.font->face->table.glyf->get_extents_at (c.font, glyph, &glyph_extents, coords))
#ifndef HB_NO_CFF
      if (!c.font->face->table.cff2->get_extents_at (c.font, glyph, &glyph_extents, coords))
      if (!c.font->face->table.cff1->get_extents (c.font, glyph, &glyph_extents)) // Doesn't have variations
#endif
	return false;
      hb_extents_t<> comp_extents (glyph_extents);
      transform.transform_extents (comp_extents);
      c.extents->union_ (comp_extents);
    }
    return true;
  }
  // Recursion budgets: give up quietly (not an error) when exhausted.
  if (c.depth_left <= 0)
    return true;
  if (c.edges_left <= 0)
    return true;
  (c.edges_left)--;
  // Cycle guard: refuse to revisit a glyph already on the current path.
  hb_decycler_node_t node (c.decycler);
  if (unlikely (!node.visit (glyph)))
    return true;
  hb_ubytes_t record = (this+glyphRecords)[idx];
  // Reuse the caller's scalar cache when given; otherwise create a
  // local one (and destroy it below).
  hb_scalar_cache_t static_cache;
  hb_scalar_cache_t *cache = parent_cache ?
			     parent_cache :
			     (this+varStore).create_cache (&static_cache);
  transform.scale (c.font->x_multf, c.font->y_multf);
  VarCompositeGlyph::get_path_at (c,
				  glyph,
				  coords, transform,
				  record,
				  cache);
  if (cache != parent_cache)
    (this+varStore).destroy_cache (cache, &static_cache);
  return true;
}
#endif
//} // namespace Var
} // namespace OT
#endif

View File

@ -32,7 +32,7 @@ struct hb_varc_context_t
{
hb_font_t *font;
hb_draw_session_t *draw_session;
hb_extents_t *extents;
hb_extents_t<> *extents;
mutable hb_decycler_t decycler;
mutable signed edges_left;
mutable signed depth_left;
@ -65,9 +65,9 @@ struct VarComponent
get_path_at (const hb_varc_context_t &c,
hb_codepoint_t parent_gid,
hb_array_t<const int> coords,
hb_transform_t transform,
hb_transform_t<> transform,
hb_ubytes_t record,
VarRegionList::cache_t *cache = nullptr) const;
hb_scalar_cache_t *cache = nullptr) const;
};
struct VarCompositeGlyph
@ -76,9 +76,9 @@ struct VarCompositeGlyph
get_path_at (const hb_varc_context_t &c,
hb_codepoint_t gid,
hb_array_t<const int> coords,
hb_transform_t transform,
hb_transform_t<> transform,
hb_ubytes_t record,
VarRegionList::cache_t *cache)
hb_scalar_cache_t *cache)
{
while (record)
{
@ -104,9 +104,9 @@ struct VARC
get_path_at (const hb_varc_context_t &c,
hb_codepoint_t gid,
hb_array_t<const int> coords,
hb_transform_t transform = HB_TRANSFORM_IDENTITY,
hb_transform_t<> transform = HB_TRANSFORM_IDENTITY,
hb_codepoint_t parent_gid = HB_CODEPOINT_INVALID,
VarRegionList::cache_t *parent_cache = nullptr) const;
hb_scalar_cache_t *parent_cache = nullptr) const;
bool
get_path (hb_font_t *font,
@ -129,7 +129,7 @@ struct VARC
bool
get_extents (hb_font_t *font,
hb_codepoint_t gid,
hb_extents_t *extents,
hb_extents_t<> *extents,
hb_varc_scratch_t &scratch) const
{
hb_varc_context_t c {font,
@ -194,9 +194,10 @@ struct VARC
hb_codepoint_t gid,
hb_glyph_extents_t *extents) const
{
#ifndef HB_NO_DRAW
if (!table->has_data ()) return false;
hb_extents_t f_extents;
hb_extents_t<> f_extents;
auto *scratch = acquire_scratch ();
if (unlikely (!scratch)) return true;
@ -207,6 +208,9 @@ struct VARC
*extents = f_extents.to_glyph_extents (font->x_scale < 0, font->y_scale < 0);
return ret;
#else
return false;
#endif
}
private:

View File

@ -102,17 +102,15 @@ struct Glyph
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
// Duplicated code.
int lsb = 0;
int h_delta = face->table.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb) ?
(int) header->xMin - lsb : 0;
face->table.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb);
int h_delta = (int) header->xMin - lsb;
HB_UNUSED int tsb = 0;
int v_orig = (int) header->yMax +
#ifndef HB_NO_VERTICAL
((void) face->table.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb), tsb)
#else
0
face->table.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb);
#endif
;
int v_orig = (int) header->yMax + tsb;
unsigned h_adv = face->table.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
@ -314,6 +312,7 @@ struct Glyph
bool use_my_metrics = true,
bool phantom_only = false,
hb_array_t<const int> coords = hb_array_t<const int> (),
hb_scalar_cache_t *gvar_cache = nullptr,
unsigned int depth = 0,
unsigned *edge_count = nullptr) const
{
@ -328,7 +327,7 @@ struct Glyph
head_maxp_info->maxComponentDepth = hb_max (head_maxp_info->maxComponentDepth, depth);
}
if (!coords)
if (!coords && font->has_nonzero_coords)
coords = hb_array (font->coords, font->num_coords);
contour_point_vector_t &points = type == SIMPLE ? all_points : scratch.comp_points;
@ -357,17 +356,15 @@ struct Glyph
if (unlikely (!points.resize (points.length + PHANTOM_COUNT))) return false;
hb_array_t<contour_point_t> phantoms = points.as_array ().sub_array (points.length - PHANTOM_COUNT, PHANTOM_COUNT);
{
// Duplicated code.
int lsb = 0;
int h_delta = glyf_accelerator.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb) ?
(int) header->xMin - lsb : 0;
glyf_accelerator.hmtx->get_leading_bearing_without_var_unscaled (gid, &lsb);
int h_delta = (int) header->xMin - lsb;
HB_UNUSED int tsb = 0;
int v_orig = (int) header->yMax +
#ifndef HB_NO_VERTICAL
((void) glyf_accelerator.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb), tsb)
#else
0
glyf_accelerator.vmtx->get_leading_bearing_without_var_unscaled (gid, &tsb);
#endif
;
int v_orig = (int) header->yMax + tsb;
unsigned h_adv = glyf_accelerator.hmtx->get_advance_without_var_unscaled (gid);
unsigned v_adv =
#ifndef HB_NO_VERTICAL
@ -383,7 +380,7 @@ struct Glyph
}
#ifndef HB_NO_VAR
if (coords)
if (hb_any (coords))
{
#ifndef HB_NO_BEYOND_64K
if (glyf_accelerator.GVAR->has_data ())
@ -391,6 +388,7 @@ struct Glyph
coords,
points.as_array ().sub_array (old_length),
scratch,
gvar_cache,
phantom_only && type == SIMPLE);
else
#endif
@ -398,6 +396,7 @@ struct Glyph
coords,
points.as_array ().sub_array (old_length),
scratch,
gvar_cache,
phantom_only && type == SIMPLE);
}
#endif
@ -447,6 +446,7 @@ struct Glyph
use_my_metrics,
phantom_only,
coords,
gvar_cache,
depth + 1,
edge_count)))
{
@ -533,7 +533,11 @@ struct Glyph
bool get_extents_without_var_scaled (hb_font_t *font, const glyf_accelerator_t &glyf_accelerator,
hb_glyph_extents_t *extents) const
{
if (type == EMPTY) return true; /* Empty glyph; zero extents. */
if (type == EMPTY)
{
*extents = {0, 0, 0, 0};
return true; /* Empty glyph; zero extents. */
}
return header->get_extents_without_var_scaled (font, glyf_accelerator, gid, extents);
}

View File

@ -189,7 +189,7 @@ struct SimpleGlyph
unsigned old_length = points.length;
points.alloc (points.length + num_points + 4); // Allocate for phantom points, to avoid a possible copy
if (unlikely (!points.resize (points.length + num_points, false))) return false;
if (unlikely (!points.resize_dirty (points.length + num_points))) return false;
auto points_ = points.as_array ().sub_array (old_length);
if (!phantom_only)
hb_memset (points_.arrayZ, 0, sizeof (contour_point_t) * num_points);

View File

@ -220,7 +220,8 @@ struct glyf_accelerator_t
template<typename T>
bool get_points (hb_font_t *font, hb_codepoint_t gid, T consumer,
hb_array_t<const int> coords,
hb_glyf_scratch_t &scratch) const
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (gid >= num_glyphs) return false;
@ -228,7 +229,7 @@ struct glyf_accelerator_t
all_points.resize (0);
bool phantom_only = !consumer.is_consuming_contour_points ();
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, scratch, nullptr, nullptr, nullptr, true, true, phantom_only, coords)))
if (unlikely (!glyph_for_gid (gid).get_points (font, *this, all_points, scratch, nullptr, nullptr, nullptr, true, true, phantom_only, coords, gvar_cache)))
return false;
unsigned count = all_points.length;
@ -371,28 +372,28 @@ struct glyf_accelerator_t
contour_point_t *get_phantoms_sink () { return phantoms; }
};
#ifndef HB_NO_VAR
unsigned
get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t gid, bool is_vertical) const
get_advance_with_var_unscaled (hb_codepoint_t gid,
hb_font_t *font,
bool is_vertical,
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (unlikely (gid >= num_glyphs)) return 0;
bool success = false;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
if (font->num_coords)
{
hb_glyf_scratch_t scratch;
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false),
hb_array (font->coords, font->num_coords),
scratch);
}
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false),
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0),
scratch, gvar_cache);
if (unlikely (!success))
return
#ifndef HB_NO_VERTICAL
is_vertical ? vmtx->get_advance_without_var_unscaled (gid) :
#endif
hmtx->get_advance_without_var_unscaled (gid);
{
unsigned upem = font->face->get_upem ();
return is_vertical ? upem : upem / 2;
}
float result = is_vertical
? phantoms[glyf_impl::PHANTOM_TOP].y - phantoms[glyf_impl::PHANTOM_BOTTOM].y
@ -400,40 +401,38 @@ struct glyf_accelerator_t
return hb_clamp (roundf (result), 0.f, (float) UINT_MAX / 2);
}
bool get_leading_bearing_with_var_unscaled (hb_font_t *font, hb_codepoint_t gid, bool is_vertical, int *lsb) const
float
get_v_origin_with_var_unscaled (hb_codepoint_t gid,
hb_font_t *font,
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (unlikely (gid >= num_glyphs)) return false;
if (unlikely (gid >= num_glyphs)) return 0;
bool success = false;
hb_glyph_extents_t extents;
hb_glyf_scratch_t scratch;
contour_point_t phantoms[glyf_impl::PHANTOM_COUNT];
if (unlikely (!get_points (font, gid, points_aggregator_t (font, &extents, phantoms, false),
hb_array (font->coords, font->num_coords),
scratch)))
return false;
success = get_points (font, gid, points_aggregator_t (font, nullptr, phantoms, false),
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0),
scratch, gvar_cache);
if (unlikely (!success))
{
return font->face->get_upem ();
}
*lsb = is_vertical
? roundf (phantoms[glyf_impl::PHANTOM_TOP].y) - extents.y_bearing
: roundf (phantoms[glyf_impl::PHANTOM_LEFT].x);
return true;
return phantoms[glyf_impl::PHANTOM_TOP].y;
}
#endif
bool get_leading_bearing_without_var_unscaled (hb_codepoint_t gid, bool is_vertical, int *lsb) const
{
if (unlikely (gid >= num_glyphs)) return false;
if (is_vertical) return false; // TODO Humm, what to do here?
*lsb = glyph_for_gid (gid).get_header ()->xMin;
return true;
}
#endif
public:
bool get_extents (hb_font_t *font,
hb_codepoint_t gid,
hb_glyph_extents_t *extents) const
{ return get_extents_at (font, gid, extents, hb_array (font->coords, font->num_coords)); }
{ return get_extents_at (font, gid, extents, hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0)); }
bool get_extents_at (hb_font_t *font,
hb_codepoint_t gid,
@ -445,12 +444,15 @@ struct glyf_accelerator_t
#ifndef HB_NO_VAR
if (coords)
{
hb_glyf_scratch_t scratch;
return get_points (font,
gid,
points_aggregator_t (font, extents, nullptr, true),
coords,
scratch);
hb_glyf_scratch_t *scratch = acquire_scratch ();
if (unlikely (!scratch)) return false;
bool ret = get_points (font,
gid,
points_aggregator_t (font, extents, nullptr, true),
coords,
*scratch);
release_scratch (scratch);
return ret;
}
#endif
return glyph_for_gid (gid).get_extents_without_var_scaled (font, *this, extents);
@ -485,33 +487,20 @@ struct glyf_accelerator_t
}
bool
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session) const
get_path (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session, hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (!has_data ()) return false;
hb_glyf_scratch_t *scratch;
// Borrow the cached strach buffer.
{
scratch = cached_scratch.get_acquire ();
if (!scratch || unlikely (!cached_scratch.cmpexch (scratch, nullptr)))
{
scratch = (hb_glyf_scratch_t *) hb_calloc (1, sizeof (hb_glyf_scratch_t));
if (unlikely (!scratch))
return true;
}
}
hb_glyf_scratch_t *scratch = acquire_scratch ();
if (unlikely (!scratch)) return true;
bool ret = get_points (font, gid, glyf_impl::path_builder_t (font, draw_session),
hb_array (font->coords, font->num_coords),
*scratch);
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0),
*scratch,
gvar_cache);
// Put it back.
if (!cached_scratch.cmpexch (nullptr, scratch))
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
release_scratch (scratch);
return ret;
}
@ -519,12 +508,38 @@ struct glyf_accelerator_t
bool
get_path_at (hb_font_t *font, hb_codepoint_t gid, hb_draw_session_t &draw_session,
hb_array_t<const int> coords,
hb_glyf_scratch_t &scratch) const
hb_glyf_scratch_t &scratch,
hb_scalar_cache_t *gvar_cache = nullptr) const
{
if (!has_data ()) return false;
return get_points (font, gid, glyf_impl::path_builder_t (font, draw_session),
coords,
scratch);
scratch,
gvar_cache);
}
hb_glyf_scratch_t *acquire_scratch () const
{
if (!has_data ()) return nullptr;
hb_glyf_scratch_t *scratch = cached_scratch.get_acquire ();
if (!scratch || unlikely (!cached_scratch.cmpexch (scratch, nullptr)))
{
scratch = (hb_glyf_scratch_t *) hb_calloc (1, sizeof (hb_glyf_scratch_t));
if (unlikely (!scratch))
return nullptr;
}
return scratch;
}
void release_scratch (hb_glyf_scratch_t *scratch) const
{
if (!scratch)
return;
if (!cached_scratch.cmpexch (nullptr, scratch))
{
scratch->~hb_glyf_scratch_t ();
hb_free (scratch);
}
}
#ifndef HB_NO_VAR

View File

@ -74,7 +74,7 @@ struct ClassDef : public OT::ClassDef
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_prime_id;
class_def_link->position = link_position;
class_def_prime_vertex.add_parent (parent_id);
class_def_prime_vertex.add_parent (parent_id, false);
return true;
}
@ -117,7 +117,7 @@ struct ClassDef : public OT::ClassDef
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::ClassDef::min_size) return false;
hb_barrier ();
switch (u.format)
switch (u.format.v)
{
case 1: return ((ClassDefFormat1*)this)->sanitize (vertex);
case 2: return ((ClassDefFormat2*)this)->sanitize (vertex);

View File

@ -32,29 +32,27 @@
namespace graph {
struct CoverageFormat1 : public OT::Layout::Common::CoverageFormat1_3<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat1_3<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + glyphArray.get_size () - glyphArray.len.get_size ();
}
};
static bool sanitize (
const OT::Layout::Common::CoverageFormat1_3<OT::Layout::SmallTypes>* thiz,
graph_t::vertex_t& vertex
) {
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat1_3<OT::Layout::SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + thiz->glyphArray.get_size () - thiz->glyphArray.len.get_size ();
}
struct CoverageFormat2 : public OT::Layout::Common::CoverageFormat2_4<SmallTypes>
{
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat2_4<SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + rangeRecord.get_size () - rangeRecord.len.get_size ();
}
};
static bool sanitize (
const OT::Layout::Common::CoverageFormat2_4<OT::Layout::SmallTypes>* thiz,
graph_t::vertex_t& vertex
) {
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
constexpr unsigned min_size = OT::Layout::Common::CoverageFormat2_4<OT::Layout::SmallTypes>::min_size;
if (vertex_len < min_size) return false;
hb_barrier ();
return vertex_len >= min_size + thiz->rangeRecord.get_size () - thiz->rangeRecord.len.get_size ();
}
struct Coverage : public OT::Layout::Common::Coverage
{
@ -98,11 +96,33 @@ struct Coverage : public OT::Layout::Common::Coverage
coverage_link->width = SmallTypes::size;
coverage_link->objidx = coverage_prime_id;
coverage_link->position = link_position;
coverage_prime_vertex.add_parent (parent_id);
coverage_prime_vertex.add_parent (parent_id, false);
return (Coverage*) coverage_prime_vertex.obj.head;
}
// Filter an existing coverage table to glyphs at indices [start, end) and replace it with the filtered version.
static bool filter_coverage (gsubgpos_graph_context_t& c,
unsigned existing_coverage,
unsigned start, unsigned end) {
unsigned coverage_size = c.graph.vertices_[existing_coverage].table_size ();
auto& coverage_v = c.graph.vertices_[existing_coverage];
Coverage* coverage_table = (Coverage*) coverage_v.obj.head;
if (!coverage_table || !coverage_table->sanitize (coverage_v))
return false;
auto new_coverage =
+ hb_zip (coverage_table->iter (), hb_range ())
| hb_filter ([&] (hb_pair_t<unsigned, unsigned> p) {
return p.second >= start && p.second < end;
})
| hb_map_retains_sorting (hb_first)
;
return make_coverage (c, new_coverage, existing_coverage, coverage_size * 2 + 100);
}
// Replace the coverage table at dest obj with one covering 'glyphs'.
template<typename It>
static bool make_coverage (gsubgpos_graph_context_t& c,
It glyphs,
@ -141,10 +161,10 @@ struct Coverage : public OT::Layout::Common::Coverage
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < OT::Layout::Common::Coverage::min_size) return false;
hb_barrier ();
switch (u.format)
switch (u.format.v)
{
case 1: return ((CoverageFormat1*)this)->sanitize (vertex);
case 2: return ((CoverageFormat2*)this)->sanitize (vertex);
case 1: return graph::sanitize ((const OT::Layout::Common::CoverageFormat1_3<OT::Layout::SmallTypes>*) this, vertex);
case 2: return graph::sanitize ((const OT::Layout::Common::CoverageFormat2_4<OT::Layout::SmallTypes>*) this, vertex);
#ifndef HB_NO_BEYOND_64K
// Not currently supported
case 3:

View File

@ -50,6 +50,7 @@ struct graph_t
private:
unsigned incoming_edges_ = 0;
unsigned single_parent = (unsigned) -1;
bool has_incoming_virtual_edges_ = false;
hb_hashmap_t<unsigned, unsigned> parents;
public:
@ -66,6 +67,11 @@ struct graph_t
return parents.in_error ();
}
bool has_incoming_virtual_edges () const
{
return has_incoming_virtual_edges_;
}
bool link_positions_valid (unsigned num_objects, bool removed_nil)
{
hb_set_t assigned_bytes;
@ -121,7 +127,9 @@ struct graph_t
}
}
bool equals (const vertex_t& other,
bool equals (unsigned this_index,
unsigned other_index,
const vertex_t& other,
const graph_t& graph,
const graph_t& other_graph,
unsigned depth) const
@ -129,8 +137,10 @@ struct graph_t
if (!(as_bytes () == other.as_bytes ()))
{
DEBUG_MSG (SUBSET_REPACK, nullptr,
"vertex [%lu] bytes != [%lu] bytes, depth = %u",
"vertex %u [%lu bytes] != %u [%lu bytes], depth = %u",
this_index,
(unsigned long) table_size (),
other_index,
(unsigned long) other.table_size (),
depth);
@ -162,6 +172,7 @@ struct graph_t
hb_swap (a.single_parent, b.single_parent);
hb_swap (a.parents, b.parents);
hb_swap (a.incoming_edges_, b.incoming_edges_);
hb_swap (a.has_incoming_virtual_edges_, b.has_incoming_virtual_edges_);
hb_swap (a.start, b.start);
hb_swap (a.end, b.end);
hb_swap (a.priority, b.priority);
@ -207,13 +218,16 @@ struct graph_t
void reset_parents ()
{
incoming_edges_ = 0;
has_incoming_virtual_edges_ = false;
single_parent = (unsigned) -1;
parents.reset ();
}
void add_parent (unsigned parent_index)
void add_parent (unsigned parent_index, bool is_virtual)
{
assert (parent_index != (unsigned) -1);
has_incoming_virtual_edges_ |= is_virtual;
if (incoming_edges_ == 0)
{
single_parent = parent_index;
@ -408,7 +422,7 @@ struct graph_t
link_a.bias != link_b.bias)
return false;
if (!graph.vertices_[link_a.objidx].equals (
if (!graph.vertices_[link_a.objidx].equals (link_a.objidx, link_b.objidx,
other_graph.vertices_[link_b.objidx], graph, other_graph, depth + 1))
return false;
@ -456,8 +470,12 @@ struct graph_t
num_roots_for_space_.push (1);
bool removed_nil = false;
vertices_.alloc (objects.length);
vertices_scratch_.alloc (objects.length);
ordering_.resize (objects.length);
ordering_scratch_.alloc (objects.length);
unsigned count = objects.length;
unsigned order = objects.length;
unsigned skip = 0;
for (unsigned i = 0; i < count; i++)
{
// If this graph came from a serialization buffer object 0 is the
@ -465,6 +483,9 @@ struct graph_t
if (i == 0 && !objects.arrayZ[i])
{
removed_nil = true;
order--;
ordering_.resize(objects.length - 1);
skip++;
continue;
}
@ -474,6 +495,12 @@ struct graph_t
check_success (v->link_positions_valid (count, removed_nil));
// To start we set the ordering to match the provided objects
// list. Note: objects are provided to us in reverse order (ie.
// the last object is the root).
unsigned obj_idx = i - skip;
ordering_[--order] = obj_idx;
if (!removed_nil) continue;
// Fix indices to account for removed nil object.
for (auto& l : v->obj.all_links_writer ()) {
@ -490,17 +517,20 @@ struct graph_t
bool operator== (const graph_t& other) const
{
return root ().equals (other.root (), *this, other, 0);
return root ().equals (root_idx(), other.root_idx(), other.root (), *this, other, 0);
}
void print () const {
for (int i = vertices_.length - 1; i >= 0; i--)
for (unsigned id : ordering_)
{
const auto& v = vertices_[i];
printf("%d: %u [", i, (unsigned int)v.table_size());
const auto& v = vertices_[id];
printf("%u: %u [", id, (unsigned int)v.table_size());
for (const auto &l : v.obj.real_links) {
printf("%u, ", l.objidx);
}
for (const auto &l : v.obj.virtual_links) {
printf("v%u, ", l.objidx);
}
printf("]\n");
}
}
@ -516,6 +546,7 @@ struct graph_t
{
return !successful ||
vertices_.in_error () ||
ordering_.in_error() ||
num_roots_for_space_.in_error ();
}
@ -526,10 +557,10 @@ struct graph_t
unsigned root_idx () const
{
// Object graphs are in reverse order, the first object is at the end
// of the vector. Since the graph is topologically sorted it's safe to
// First element of ordering_ is the root.
// Since the graph is topologically sorted it's safe to
// assume the first object has no incoming edges.
return vertices_.length - 1;
return ordering_[0];
}
const hb_serialize_context_t::object_t& object (unsigned i) const
@ -556,7 +587,7 @@ struct graph_t
link->width = 2;
link->objidx = child_id;
link->position = (char*) offset - (char*) v.obj.head;
vertices_[child_id].add_parent (parent_id);
vertices_[child_id].add_parent (parent_id, false);
}
/*
@ -587,55 +618,51 @@ struct graph_t
hb_priority_queue_t<int64_t> queue;
queue.alloc (vertices_.length);
hb_vector_t<vertex_t> &sorted_graph = vertices_scratch_;
if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return;
hb_vector_t<unsigned> id_map;
if (unlikely (!check_success (id_map.resize (vertices_.length)))) return;
hb_vector_t<unsigned> &new_ordering = ordering_scratch_;
if (unlikely (!check_success (new_ordering.resize (vertices_.length)))) return;
hb_vector_t<unsigned> removed_edges;
if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return;
update_parents ();
queue.insert (root ().modified_distance (0), root_idx ());
int new_id = root_idx ();
unsigned order = 1;
unsigned pos = 0;
while (!queue.in_error () && !queue.is_empty ())
{
unsigned next_id = queue.pop_minimum().second;
sorted_graph[new_id] = std::move (vertices_[next_id]);
const vertex_t& next = sorted_graph[new_id];
if (unlikely (!check_success(new_id >= 0))) {
if (unlikely (!check_success(pos < new_ordering.length))) {
// We are out of ids. Which means we've visited a node more than once.
// This graph contains a cycle which is not allowed.
DEBUG_MSG (SUBSET_REPACK, nullptr, "Invalid graph. Contains cycle.");
return;
}
id_map[next_id] = new_id--;
new_ordering[pos++] = next_id;
const vertex_t& next = vertices_[next_id];
for (const auto& link : next.obj.all_links ()) {
removed_edges[link.objidx]++;
if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx]))
const auto& v = vertices_[link.objidx];
if (!(v.incoming_edges () - removed_edges[link.objidx]))
// Add the order that the links were encountered to the priority.
// This ensures that ties between priorities objects are broken in a consistent
// way. More specifically this is set up so that if a set of objects have the same
// distance they'll be added to the topological order in the order that they are
// referenced from the parent object.
queue.insert (vertices_[link.objidx].modified_distance (order++),
queue.insert (v.modified_distance (order++),
link.objidx);
}
}
check_success (!queue.in_error ());
check_success (!sorted_graph.in_error ());
check_success (!new_ordering.in_error ());
check_success (remap_all_obj_indices (id_map, &sorted_graph));
vertices_ = std::move (sorted_graph);
hb_swap (ordering_, new_ordering);
if (!check_success (new_id == -1))
if (!check_success (pos == vertices_.length)) {
print_orphaned_nodes ();
}
}
/*
@ -645,8 +672,8 @@ struct graph_t
*/
void find_space_roots (hb_set_t& visited, hb_set_t& roots)
{
int root_index = (int) root_idx ();
for (int i = root_index; i >= 0; i--)
unsigned root_index = root_idx ();
for (unsigned i : ordering_)
{
if (visited.has (i)) continue;
@ -829,7 +856,6 @@ struct graph_t
if (subgraph.in_error ())
return false;
unsigned original_root_idx = root_idx ();
hb_map_t index_map;
bool made_changes = false;
for (auto entry : subgraph.iter ())
@ -852,14 +878,6 @@ struct graph_t
if (!made_changes)
return false;
if (original_root_idx != root_idx ()
&& parents.has (original_root_idx))
{
// If the root idx has changed since parents was determined, update root idx in parents
parents.add (root_idx ());
parents.del (original_root_idx);
}
auto new_subgraph =
+ subgraph.keys ()
| hb_map([&] (uint32_t node_idx) {
@ -943,12 +961,14 @@ struct graph_t
/*
* Moves the child of old_parent_idx pointed to by old_offset to a new
* vertex at the new_offset.
*
* Returns the id of the child node that was moved.
*/
template<typename O>
void move_child (unsigned old_parent_idx,
const O* old_offset,
unsigned new_parent_idx,
const O* new_offset)
unsigned move_child (unsigned old_parent_idx,
const O* old_offset,
unsigned new_parent_idx,
const O* new_offset)
{
distance_invalid = true;
positions_invalid = true;
@ -965,10 +985,56 @@ struct graph_t
new_link->position = (const char*) new_offset - (const char*) new_v.obj.head;
auto& child = vertices_[child_id];
child.add_parent (new_parent_idx);
child.add_parent (new_parent_idx, false);
old_v.remove_real_link (child_id, old_offset);
child.remove_parent (old_parent_idx);
return child_id;
}
/*
* Moves all outgoing links in old parent that have
* a link position between [old_post_start, old_pos_end)
* to the new parent. Links are placed serially in the new
* parent starting at new_pos_start.
*/
template<typename O>
void move_children (unsigned old_parent_idx,
unsigned old_pos_start,
unsigned old_pos_end,
unsigned new_parent_idx,
unsigned new_pos_start)
{
distance_invalid = true;
positions_invalid = true;
auto& old_v = vertices_[old_parent_idx];
auto& new_v = vertices_[new_parent_idx];
hb_vector_t<hb_serialize_context_t::object_t::link_t> old_links;
for (const auto& l : old_v.obj.real_links)
{
if (l.position < old_pos_start || l.position >= old_pos_end)
{
old_links.push(l);
continue;
}
unsigned array_pos = l.position - old_pos_start;
unsigned child_id = l.objidx;
auto* new_link = new_v.obj.real_links.push ();
new_link->width = O::static_size;
new_link->objidx = child_id;
new_link->position = new_pos_start + array_pos;
auto& child = vertices_[child_id];
child.add_parent (new_parent_idx, false);
child.remove_parent (old_parent_idx);
}
old_v.obj.real_links = std::move (old_links);
}
/*
@ -1000,8 +1066,11 @@ struct graph_t
distance_invalid = true;
auto* clone = vertices_.push ();
unsigned clone_idx = vertices_.length - 1;
ordering_.push(clone_idx);
auto& child = vertices_[node_idx];
if (vertices_.in_error ()) {
if (vertices_.in_error () || ordering_.in_error()) {
return -1;
}
@ -1011,51 +1080,23 @@ struct graph_t
clone->space = child.space;
clone->reset_parents ();
unsigned clone_idx = vertices_.length - 2;
for (const auto& l : child.obj.real_links)
{
clone->obj.real_links.push (l);
vertices_[l.objidx].add_parent (clone_idx);
vertices_[l.objidx].add_parent (clone_idx, false);
}
for (const auto& l : child.obj.virtual_links)
{
clone->obj.virtual_links.push (l);
vertices_[l.objidx].add_parent (clone_idx);
vertices_[l.objidx].add_parent (clone_idx, true);
}
check_success (!clone->obj.real_links.in_error ());
check_success (!clone->obj.virtual_links.in_error ());
// The last object is the root of the graph, so swap back the root to the end.
// The root's obj idx does change, however since it's root nothing else refers to it.
// all other obj idx's will be unaffected.
hb_swap (vertices_[vertices_.length - 2], *clone);
// Since the root moved, update the parents arrays of all children on the root.
for (const auto& l : root ().obj.all_links ())
vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ());
return clone_idx;
}
/*
* Creates a copy of child and re-assigns the link from
* parent to the clone. The copy is a shallow copy, objects
* linked from child are not duplicated.
*
* Returns the index of the newly created duplicate.
*
* If the child_idx only has incoming edges from parent_idx, this
* will do nothing and return the original child_idx.
*/
unsigned duplicate_if_shared (unsigned parent_idx, unsigned child_idx)
{
unsigned new_idx = duplicate (parent_idx, child_idx);
if (new_idx == (unsigned) -1) return child_idx;
return new_idx;
}
/*
* Creates a copy of child and re-assigns the link from
* parent to the clone. The copy is a shallow copy, objects
@ -1073,10 +1114,15 @@ struct graph_t
const auto& child = vertices_[child_idx];
unsigned links_to_child = child.incoming_edges_from_parent(parent_idx);
if (child.incoming_edges () <= links_to_child)
if (child.incoming_edges () <= links_to_child || child.has_incoming_virtual_edges())
{
// Can't duplicate this node, doing so would orphan the original one as all remaining links
// to child are from parent.
//
// We don't allow duplication of nodes with incoming virtual edges because we don't track
// the number of virtual vs real incoming edges. As a result we can't tell if a node
// with virtual edges may end up orphaned by duplication (ie. where one copy is only pointed
// to by virtual edges).
DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %u => %u",
parent_idx, child_idx);
return -1;
@ -1091,12 +1137,15 @@ struct graph_t
if (parent_idx == clone_idx) parent_idx++;
auto& parent = vertices_[parent_idx];
unsigned count = 0;
unsigned num_real = parent.obj.real_links.length;
for (auto& l : parent.obj.all_links_writer ())
{
count++;
if (l.objidx != child_idx)
continue;
reassign_link (l, parent_idx, clone_idx);
reassign_link (l, parent_idx, clone_idx, count > num_real);
}
return clone_idx;
@ -1129,10 +1178,15 @@ struct graph_t
links_to_child += child.incoming_edges_from_parent(parent_idx);
}
if (child.incoming_edges () <= links_to_child)
if (child.incoming_edges () <= links_to_child || child.has_incoming_virtual_edges())
{
// Can't duplicate this node, doing so would orphan the original one as all remaining links
// to child are from parent.
//
// We don't allow duplication of nodes with incoming virtual edges because we don't track
// the number of virtual vs real incoming edges. As a result we can't tell if a node
// with virtual edges may end up orphaned by duplication (ie. where one copy is only pointed
// to by virtual edges).
DEBUG_MSG (SUBSET_REPACK, nullptr, " Not duplicating %u, ..., %u => %u", first_parent, last_parent, child_idx);
return -1;
}
@ -1146,12 +1200,15 @@ struct graph_t
// duplicate shifts the root node idx, so if parent_idx was root update it.
if (parent_idx == clone_idx) parent_idx++;
auto& parent = vertices_[parent_idx];
unsigned count = 0;
unsigned num_real = parent.obj.real_links.length;
for (auto& l : parent.obj.all_links_writer ())
{
count++;
if (l.objidx != child_idx)
continue;
reassign_link (l, parent_idx, clone_idx);
reassign_link (l, parent_idx, clone_idx, count > num_real);
}
}
@ -1168,7 +1225,10 @@ struct graph_t
distance_invalid = true;
auto* clone = vertices_.push ();
if (vertices_.in_error ()) {
unsigned clone_idx = vertices_.length - 1;
ordering_.push(clone_idx);
if (vertices_.in_error () || ordering_.in_error()) {
return -1;
}
@ -1177,20 +1237,37 @@ struct graph_t
clone->distance = 0;
clone->space = 0;
unsigned clone_idx = vertices_.length - 2;
// The last object is the root of the graph, so swap back the root to the end.
// The root's obj idx does change, however since it's root nothing else refers to it.
// all other obj idx's will be unaffected.
hb_swap (vertices_[vertices_.length - 2], *clone);
// Since the root moved, update the parents arrays of all children on the root.
for (const auto& l : root ().obj.all_links ())
vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ());
return clone_idx;
}
/*
* Creates a new child node and remap the old child to it.
*
* Returns the index of the newly created child.
*
*/
unsigned remap_child (unsigned parent_idx, unsigned old_child_idx)
{
unsigned new_child_idx = duplicate (old_child_idx);
if (new_child_idx == (unsigned) -1) return -1;
auto& parent = vertices_[parent_idx];
for (auto& l : parent.obj.real_links)
{
if (l.objidx != old_child_idx)
continue;
reassign_link (l, parent_idx, new_child_idx, false);
}
for (auto& l : parent.obj.virtual_links)
{
if (l.objidx != old_child_idx)
continue;
reassign_link (l, parent_idx, new_child_idx, true);
}
return new_child_idx;
}
/*
* Raises the sorting priority of all children.
*/
@ -1279,6 +1356,7 @@ struct graph_t
if (!DEBUG_ENABLED(SUBSET_REPACK)) return;
DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected.");
parents_invalid = true;
update_parents();
@ -1348,7 +1426,8 @@ struct graph_t
size_t total_size = 0;
unsigned count = vertices_.length;
for (unsigned i = 0; i < count; i++) {
size_t size = vertices_.arrayZ[i].obj.tail - vertices_.arrayZ[i].obj.head;
const auto& obj = vertices_.arrayZ[i].obj;
size_t size = obj.tail - obj.head;
total_size += size;
}
return total_size;
@ -1398,8 +1477,11 @@ struct graph_t
for (unsigned p = 0; p < count; p++)
{
for (auto& l : vertices_.arrayZ[p].obj.all_links ())
vertices_[l.objidx].add_parent (p);
for (auto& l : vertices_.arrayZ[p].obj.real_links)
vertices_[l.objidx].add_parent (p, false);
for (auto& l : vertices_.arrayZ[p].obj.virtual_links)
vertices_[l.objidx].add_parent (p, true);
}
for (unsigned i = 0; i < count; i++)
@ -1418,7 +1500,7 @@ struct graph_t
if (!positions_invalid) return;
unsigned current_pos = 0;
for (int i = root_idx (); i >= 0; i--)
for (unsigned i : ordering_)
{
auto& v = vertices_[i];
v.start = current_pos;
@ -1450,11 +1532,11 @@ struct graph_t
unsigned count = vertices_.length;
for (unsigned i = 0; i < count; i++)
vertices_.arrayZ[i].distance = hb_int_max (int64_t);
vertices_.tail ().distance = 0;
vertices_[root_idx ()].distance = 0;
hb_priority_queue_t<int64_t> queue;
queue.alloc (count);
queue.insert (0, vertices_.length - 1);
queue.insert (0, root_idx ());
hb_vector_t<bool> visited;
visited.resize (vertices_.length);
@ -1464,22 +1546,23 @@ struct graph_t
unsigned next_idx = queue.pop_minimum ().second;
if (visited[next_idx]) continue;
const auto& next = vertices_[next_idx];
int64_t next_distance = vertices_[next_idx].distance;
int64_t next_distance = next.distance;
visited[next_idx] = true;
for (const auto& link : next.obj.all_links ())
{
if (visited[link.objidx]) continue;
const auto& child = vertices_.arrayZ[link.objidx].obj;
auto& child_v = vertices_.arrayZ[link.objidx];
const auto& child = child_v.obj;
unsigned link_width = link.width ? link.width : 4; // treat virtual offsets as 32 bits wide
int64_t child_weight = (child.tail - child.head) +
((int64_t) 1 << (link_width * 8)) * (vertices_.arrayZ[link.objidx].space + 1);
((int64_t) 1 << (link_width * 8)) * (child_v.space + 1);
int64_t child_distance = next_distance + child_weight;
if (child_distance < vertices_.arrayZ[link.objidx].distance)
if (child_distance < child_v.distance)
{
vertices_.arrayZ[link.objidx].distance = child_distance;
child_v.distance = child_distance;
queue.insert (child_distance, link.objidx);
}
}
@ -1502,12 +1585,13 @@ struct graph_t
*/
void reassign_link (hb_serialize_context_t::object_t::link_t& link,
unsigned parent_idx,
unsigned new_idx)
unsigned new_idx,
bool is_virtual)
{
unsigned old_idx = link.objidx;
link.objidx = new_idx;
vertices_[old_idx].remove_parent (parent_idx);
vertices_[new_idx].add_parent (parent_idx);
vertices_[new_idx].add_parent (parent_idx, is_virtual);
}
/*
@ -1521,36 +1605,21 @@ struct graph_t
if (!id_map) return;
for (unsigned i : subgraph)
{
for (auto& link : vertices_[i].obj.all_links_writer ())
auto& obj = vertices_[i].obj;
unsigned num_real = obj.real_links.length;
unsigned count = 0;
for (auto& link : obj.all_links_writer ())
{
count++;
const uint32_t *v;
if (!id_map.has (link.objidx, &v)) continue;
if (only_wide && !(link.width == 4 && !link.is_signed)) continue;
if (only_wide && (link.is_signed || (link.width != 4 && link.width != 3))) continue;
reassign_link (link, i, *v);
reassign_link (link, i, *v, count > num_real);
}
}
}
/*
* Updates all objidx's in all links using the provided mapping.
*/
bool remap_all_obj_indices (const hb_vector_t<unsigned>& id_map,
hb_vector_t<vertex_t>* sorted_graph) const
{
unsigned count = sorted_graph->length;
for (unsigned i = 0; i < count; i++)
{
if (!(*sorted_graph)[i].remap_parents (id_map))
return false;
for (auto& link : sorted_graph->arrayZ[i].obj.all_links_writer ())
{
link.objidx = id_map[link.objidx];
}
}
return true;
}
/*
* Finds all nodes in targets that are reachable from start_idx, nodes in visited will be skipped.
* For this search the graph is treated as being undirected.
@ -1586,7 +1655,16 @@ struct graph_t
public:
// TODO(garretrieger): make private, will need to move most of offset overflow code into graph.
hb_vector_t<vertex_t> vertices_;
hb_vector_t<vertex_t> vertices_scratch_;
// Specifies the current topological ordering of this graph
//
// ordering_[pos] = obj index
//
// specifies that the 'pos'th spot is filled by the object
// given by obj index.
hb_vector_t<unsigned> ordering_;
hb_vector_t<unsigned> ordering_scratch_;
private:
bool parents_invalid;
bool distance_invalid;

View File

@ -41,6 +41,7 @@ struct gsubgpos_graph_context_t
unsigned lookup_list_index;
hb_hashmap_t<unsigned, graph::Lookup*> lookups;
hb_hashmap_t<unsigned, unsigned> subtable_to_extension;
hb_hashmap_t<unsigned, hb_vector_t<unsigned>> split_subtables;
HB_INTERNAL gsubgpos_graph_context_t (hb_tag_t table_tag_,
graph_t& graph_);

View File

@ -27,9 +27,11 @@
#include "graph.hh"
#include "../hb-ot-layout-gsubgpos.hh"
#include "../OT/Layout/GSUB/ExtensionSubst.hh"
#include "../OT/Layout/GSUB/SubstLookupSubTable.hh"
#include "gsubgpos-context.hh"
#include "pairpos-graph.hh"
#include "markbasepos-graph.hh"
#include "ligature-graph.hh"
#ifndef GRAPH_GSUBGPOS_GRAPH_HH
#define GRAPH_GSUBGPOS_GRAPH_HH
@ -85,6 +87,12 @@ struct Lookup : public OT::Lookup
return lookupType == extension_type (table_tag);
}
bool use_mark_filtering_set () const
{
unsigned flag = lookupFlag;
return flag & 0x0010u;
}
bool make_extension (gsubgpos_graph_context_t& c,
unsigned this_index)
{
@ -120,22 +128,18 @@ struct Lookup : public OT::Lookup
unsigned type = lookupType;
bool is_ext = is_extension (c.table_tag);
if (c.table_tag != HB_OT_TAG_GPOS)
if (c.table_tag != HB_OT_TAG_GPOS && c.table_tag != HB_OT_TAG_GSUB)
return true;
if (!is_ext &&
type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair &&
type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase)
if (!is_ext && !is_supported_gpos_type(type, c) && !is_supported_gsub_type(type, c))
return true;
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>> all_new_subtables;
for (unsigned i = 0; i < subTable.len; i++)
{
unsigned subtable_index = c.graph.index_for_offset (this_index, &subTable[i]);
unsigned parent_index = this_index;
if (is_ext) {
unsigned ext_subtable_index = subtable_index;
parent_index = ext_subtable_index;
ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>* extension =
(ExtensionFormat1<OT::Layout::GSUB_impl::ExtensionSubst>*)
c.graph.object (ext_subtable_index).head;
@ -144,26 +148,47 @@ struct Lookup : public OT::Lookup
subtable_index = extension->get_subtable_index (c.graph, ext_subtable_index);
type = extension->get_lookup_type ();
if (type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair
&& type != OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase)
if (!is_supported_gpos_type(type, c) && !is_supported_gsub_type(type, c))
continue;
}
hb_vector_t<unsigned> new_sub_tables;
switch (type)
hb_vector_t<unsigned>* split_result;
if (c.split_subtables.has (subtable_index, &split_result))
{
case 2:
new_sub_tables = split_subtable<PairPos> (c, parent_index, subtable_index); break;
case 4:
new_sub_tables = split_subtable<MarkBasePos> (c, parent_index, subtable_index); break;
default:
break;
if (split_result->length == 0)
continue;
all_new_subtables.push (hb_pair(i, *split_result));
}
else
{
hb_vector_t<unsigned> new_sub_tables;
if (c.table_tag == HB_OT_TAG_GPOS) {
switch (type)
{
case 2:
new_sub_tables = split_subtable<PairPos> (c, subtable_index); break;
case 4:
new_sub_tables = split_subtable<MarkBasePos> (c, subtable_index); break;
default:
break;
}
} else if (c.table_tag == HB_OT_TAG_GSUB) {
switch (type)
{
case 4:
new_sub_tables = split_subtable<graph::LigatureSubst> (c, subtable_index); break;
default:
break;
}
}
if (new_sub_tables.in_error ()) return false;
c.split_subtables.set (subtable_index, new_sub_tables);
if (new_sub_tables)
all_new_subtables.push (hb_pair (i, std::move (new_sub_tables)));
}
if (new_sub_tables.in_error ()) return false;
if (!new_sub_tables) continue;
hb_pair_t<unsigned, hb_vector_t<unsigned>>* entry = all_new_subtables.push ();
entry->first = i;
entry->second = std::move (new_sub_tables);
}
if (all_new_subtables) {
@ -175,30 +200,29 @@ struct Lookup : public OT::Lookup
template<typename T>
hb_vector_t<unsigned> split_subtable (gsubgpos_graph_context_t& c,
unsigned parent_idx,
unsigned objidx)
{
T* sub_table = (T*) c.graph.object (objidx).head;
if (!sub_table || !sub_table->sanitize (c.graph.vertices_[objidx]))
return hb_vector_t<unsigned> ();
return sub_table->split_subtables (c, parent_idx, objidx);
return sub_table->split_subtables (c, objidx);
}
bool add_sub_tables (gsubgpos_graph_context_t& c,
unsigned this_index,
unsigned type,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
const hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
{
bool is_ext = is_extension (c.table_tag);
auto& v = c.graph.vertices_[this_index];
auto* v = &c.graph.vertices_[this_index];
fix_existing_subtable_links (c, this_index, subtable_ids);
unsigned new_subtable_count = 0;
for (const auto& p : subtable_ids)
new_subtable_count += p.second.length;
size_t new_size = v.table_size ()
size_t new_size = v->table_size ()
+ new_subtable_count * OT::Offset16::static_size;
char* buffer = (char*) hb_calloc (1, new_size);
if (!buffer) return false;
@ -207,10 +231,13 @@ struct Lookup : public OT::Lookup
hb_free (buffer);
return false;
}
hb_memcpy (buffer, v.obj.head, v.table_size());
hb_memcpy (buffer, v->obj.head, v->table_size());
v.obj.head = buffer;
v.obj.tail = buffer + new_size;
if (use_mark_filtering_set ())
hb_memcpy (buffer + new_size - 2, v->obj.tail - 2, 2);
v->obj.head = buffer;
v->obj.tail = buffer + new_size;
Lookup* new_lookup = (Lookup*) buffer;
@ -226,21 +253,23 @@ struct Lookup : public OT::Lookup
if (is_ext)
{
unsigned ext_id = create_extension_subtable (c, subtable_id, type);
c.graph.vertices_[subtable_id].add_parent (ext_id);
c.graph.vertices_[subtable_id].add_parent (ext_id, false);
subtable_id = ext_id;
// the reference to v may have changed on adding a node, so reassign it.
v = &c.graph.vertices_[this_index];
}
auto* link = v.obj.real_links.push ();
auto* link = v->obj.real_links.push ();
link->width = 2;
link->objidx = subtable_id;
link->position = (char*) &new_lookup->subTable[offset_index++] -
(char*) new_lookup;
c.graph.vertices_[subtable_id].add_parent (this_index);
c.graph.vertices_[subtable_id].add_parent (this_index, false);
}
}
// Repacker sort order depends on link order, which we've messed up so resort it.
v.obj.real_links.qsort ();
v->obj.real_links.qsort ();
// The head location of the lookup has changed, invalidating the lookups map entry
// in the context. Update the map.
@ -250,17 +279,15 @@ struct Lookup : public OT::Lookup
void fix_existing_subtable_links (gsubgpos_graph_context_t& c,
unsigned this_index,
hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
const hb_vector_t<hb_pair_t<unsigned, hb_vector_t<unsigned>>>& subtable_ids)
{
auto& v = c.graph.vertices_[this_index];
Lookup* lookup = (Lookup*) v.obj.head;
unsigned shift = 0;
for (const auto& p : subtable_ids)
{
unsigned insert_index = p.first + shift;
unsigned pos_offset = p.second.length * OT::Offset16::static_size;
unsigned insert_offset = (char*) &lookup->subTable[insert_index] - (char*) lookup;
unsigned insert_offset = Lookup::min_size + insert_index * OT::Offset16::static_size;
shift += p.second.length;
for (auto& l : v.obj.all_links_writer ())
@ -326,7 +353,7 @@ struct Lookup : public OT::Lookup
// Make extension point at the subtable.
auto& ext_vertex = c.graph.vertices_[ext_index];
ext_vertex.add_parent (lookup_index);
ext_vertex.add_parent (lookup_index, false);
if (!existing_ext_index)
subtable_vertex.remap_parent (lookup_index, ext_index);
@ -334,6 +361,19 @@ struct Lookup : public OT::Lookup
}
private:
bool is_supported_gsub_type(unsigned type, gsubgpos_graph_context_t& c) const {
return (c.table_tag == HB_OT_TAG_GSUB) && (
type == OT::Layout::GSUB_impl::SubstLookupSubTable::Type::Ligature
);
}
bool is_supported_gpos_type(unsigned type, gsubgpos_graph_context_t& c) const {
return (c.table_tag == HB_OT_TAG_GPOS) && (
type == OT::Layout::GPOS_impl::PosLookupSubTable::Type::Pair ||
type == OT::Layout::GPOS_impl::PosLookupSubTable::Type::MarkBase
);
}
unsigned extension_type (hb_tag_t table_tag) const
{
switch (table_tag)

View File

@ -212,7 +212,6 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
hb_set_t visited;
@ -265,7 +264,7 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
this_index,
std::move (class_to_info),
c.graph.vertices_[mark_array_id].position_to_index_map (),
};
@ -478,12 +477,11 @@ struct MarkBasePosFormat1 : public OT::Layout::GPOS_impl::MarkBasePosFormat1_2<S
struct MarkBasePos : public OT::Layout::GPOS_impl::MarkBasePos
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
switch (u.format.v) {
case 1:
return ((MarkBasePosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
return ((MarkBasePosFormat1*)(&u.format1))->split_subtables (c, this_index);
#ifndef HB_NO_BEYOND_64K
case 2: HB_FALLTHROUGH;
// Don't split 24bit MarkBasePos's.
@ -496,10 +494,10 @@ struct MarkBasePos : public OT::Layout::GPOS_impl::MarkBasePos
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
if (vertex_len < u.format.v.get_size ()) return false;
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 1:
return ((MarkBasePosFormat1*)(&u.format1))->sanitize (vertex);
#ifndef HB_NO_BEYOND_64K

View File

@ -49,7 +49,6 @@ struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallType
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
hb_set_t visited;
@ -84,7 +83,7 @@ struct PairPosFormat1 : public OT::Layout::GPOS_impl::PairPosFormat1_3<SmallType
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
this_index,
};
return actuate_subtable_split<split_context_t> (split_context, split_points);
@ -207,7 +206,6 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
}
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
const unsigned base_size = OT::Layout::GPOS_impl::PairPosFormat2_4<SmallTypes>::min_size;
@ -291,7 +289,7 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
split_context_t split_context {
c,
this,
c.graph.duplicate_if_shared (parent_index, this_index),
this_index,
class1_record_size,
total_value_len,
value_1_len,
@ -423,7 +421,7 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
class_def_link->width = SmallTypes::size;
class_def_link->objidx = class_def_2_id;
class_def_link->position = 10;
graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id);
graph.vertices_[class_def_2_id].add_parent (pair_pos_prime_id, false);
graph.duplicate (pair_pos_prime_id, class_def_2_id);
return pair_pos_prime_id;
@ -607,14 +605,13 @@ struct PairPosFormat2 : public OT::Layout::GPOS_impl::PairPosFormat2_4<SmallType
struct PairPos : public OT::Layout::GPOS_impl::PairPos
{
hb_vector_t<unsigned> split_subtables (gsubgpos_graph_context_t& c,
unsigned parent_index,
unsigned this_index)
{
switch (u.format) {
switch (u.format.v) {
case 1:
return ((PairPosFormat1*)(&u.format1))->split_subtables (c, parent_index, this_index);
return ((PairPosFormat1*)(&u.format1))->split_subtables (c, this_index);
case 2:
return ((PairPosFormat2*)(&u.format2))->split_subtables (c, parent_index, this_index);
return ((PairPosFormat2*)(&u.format2))->split_subtables (c, this_index);
#ifndef HB_NO_BEYOND_64K
case 3: HB_FALLTHROUGH;
case 4: HB_FALLTHROUGH;
@ -628,10 +625,10 @@ struct PairPos : public OT::Layout::GPOS_impl::PairPos
bool sanitize (graph_t::vertex_t& vertex) const
{
int64_t vertex_len = vertex.obj.tail - vertex.obj.head;
if (vertex_len < u.format.get_size ()) return false;
if (vertex_len < u.format.v.get_size ()) return false;
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 1:
return ((PairPosFormat1*)(&u.format1))->sanitize (vertex);
case 2:

View File

@ -113,7 +113,7 @@ will_overflow (graph_t& graph,
hb_hashmap_t<overflow_record_t*, bool> record_set;
const auto& vertices = graph.vertices_;
for (int parent_idx = vertices.length - 1; parent_idx >= 0; parent_idx--)
for (unsigned parent_idx : graph.ordering_)
{
// Don't need to check virtual links for overflow
for (const auto& link : vertices.arrayZ[parent_idx].obj.real_links)
@ -172,14 +172,16 @@ void print_overflows (graph_t& graph,
template <typename O> inline void
serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link,
char* head,
unsigned size,
const hb_vector_t<unsigned>& id_map,
hb_serialize_context_t* c)
{
assert(link.position + link.width <= size);
OT::Offset<O>* offset = reinterpret_cast<OT::Offset<O>*> (head + link.position);
*offset = 0;
c->add_link (*offset,
// serializer has an extra nil object at the start of the
// object array. So all id's are +1 of what our id's are.
link.objidx + 1,
id_map[link.objidx],
(hb_serialize_context_t::whence_t) link.whence,
link.bias);
}
@ -187,6 +189,8 @@ serialize_link_of_type (const hb_serialize_context_t::object_t::link_t& link,
inline
void serialize_link (const hb_serialize_context_t::object_t::link_t& link,
char* head,
unsigned size,
const hb_vector_t<unsigned>& id_map,
hb_serialize_context_t* c)
{
switch (link.width)
@ -197,21 +201,21 @@ void serialize_link (const hb_serialize_context_t::object_t::link_t& link,
case 4:
if (link.is_signed)
{
serialize_link_of_type<OT::HBINT32> (link, head, c);
serialize_link_of_type<OT::HBINT32> (link, head, size, id_map, c);
} else {
serialize_link_of_type<OT::HBUINT32> (link, head, c);
serialize_link_of_type<OT::HBUINT32> (link, head, size, id_map, c);
}
return;
case 2:
if (link.is_signed)
{
serialize_link_of_type<OT::HBINT16> (link, head, c);
serialize_link_of_type<OT::HBINT16> (link, head, size, id_map, c);
} else {
serialize_link_of_type<OT::HBUINT16> (link, head, c);
serialize_link_of_type<OT::HBUINT16> (link, head, size, id_map, c);
}
return;
case 3:
serialize_link_of_type<OT::HBUINT24> (link, head, c);
serialize_link_of_type<OT::HBUINT24> (link, head, size, id_map, c);
return;
default:
// Unexpected link width.
@ -237,25 +241,36 @@ inline hb_blob_t* serialize (const graph_t& graph)
c.start_serialize<void> ();
const auto& vertices = graph.vertices_;
for (unsigned i = 0; i < vertices.length; i++) {
// Objects are placed in the serializer in reverse order since children need
// to be inserted before their parents.
// Maps from our obj id's to the id's used during this serialization.
hb_vector_t<unsigned> id_map;
id_map.resize(graph.ordering_.length);
for (int pos = graph.ordering_.length - 1; pos >= 0; pos--) {
unsigned i = graph.ordering_[pos];
c.push ();
size_t size = vertices[i].obj.tail - vertices[i].obj.head;
auto& v = vertices[i];
size_t size = v.obj.tail - v.obj.head;
char* start = c.allocate_size <char> (size);
if (!start) {
DEBUG_MSG (SUBSET_REPACK, nullptr, "Buffer out of space.");
return nullptr;
}
hb_memcpy (start, vertices[i].obj.head, size);
hb_memcpy (start, v.obj.head, size);
// Only real links needs to be serialized.
for (const auto& link : vertices[i].obj.real_links)
serialize_link (link, start, &c);
for (const auto& link : v.obj.real_links)
serialize_link (link, start, size, id_map, &c);
// All duplications are already encoded in the graph, so don't
// enable sharing during packing.
c.pop_pack (false);
id_map[i] = c.pop_pack (false);
}
c.end_serialize ();

View File

@ -49,7 +49,7 @@ hb_vector_t<unsigned> actuate_subtable_split (Context& split_context,
if (id == (unsigned) -1)
{
new_objects.reset ();
new_objects.allocated = -1; // mark error
new_objects.ensure_error ();
return new_objects;
}
new_objects.push (id);
@ -58,7 +58,7 @@ hb_vector_t<unsigned> actuate_subtable_split (Context& split_context,
if (!split_context.shrink (split_points[0]))
{
new_objects.reset ();
new_objects.allocated = -1; // mark error
new_objects.ensure_error ();
}
return new_objects;

View File

@ -47,8 +47,7 @@ using namespace OT;
struct ankr;
using hb_aat_class_cache_t = hb_cache_t<15, 8, 7>;
static_assert (sizeof (hb_aat_class_cache_t) == 256, "");
using hb_aat_class_cache_t = hb_ot_layout_mapping_cache_t;
struct hb_aat_scratch_t
{
@ -79,7 +78,10 @@ struct hb_aat_scratch_t
{
hb_bit_set_t *s = buffer_glyph_set.get_acquire ();
if (s && buffer_glyph_set.cmpexch (s, nullptr))
{
s->clear ();
return s;
}
s = (hb_bit_set_t *) hb_calloc (1, sizeof (hb_bit_set_t));
if (unlikely (!s))
@ -124,13 +126,14 @@ struct hb_aat_apply_context_t :
const OT::GDEF &gdef;
bool has_glyph_classes;
const hb_sorted_vector_t<hb_aat_map_t::range_flags_t> *range_flags = nullptr;
hb_mask_t subtable_flags = 0;
bool buffer_is_reversed = false;
// Caches
bool using_buffer_glyph_set = false;
hb_bit_set_t *buffer_glyph_set = nullptr;
const hb_bit_set_t *left_set = nullptr;
const hb_bit_set_t *right_set = nullptr;
const hb_bit_set_t *machine_glyph_set = nullptr;
const hb_bit_set_t *first_set = nullptr;
const hb_bit_set_t *second_set = nullptr;
hb_aat_class_cache_t *machine_class_cache = nullptr;
hb_mask_t subtable_flags = 0;
/* Unused. For debug tracing only. */
unsigned int lookup_index;
@ -146,6 +149,12 @@ struct hb_aat_apply_context_t :
void set_lookup_index (unsigned int i) { lookup_index = i; }
void reverse_buffer ()
{
buffer->reverse ();
buffer_is_reversed = !buffer_is_reversed;
}
void setup_buffer_glyph_set ()
{
using_buffer_glyph_set = buffer->len >= 4 && buffer_glyph_set;
@ -156,11 +165,11 @@ struct hb_aat_apply_context_t :
bool buffer_intersects_machine () const
{
if (likely (using_buffer_glyph_set))
return buffer_glyph_set->intersects (*machine_glyph_set);
return buffer_glyph_set->intersects (*first_set);
// Faster for shorter buffers.
for (unsigned i = 0; i < buffer->len; i++)
if (machine_glyph_set->has (buffer->info[i].codepoint))
if (first_set->has (buffer->info[i].codepoint))
return true;
return false;
}
@ -639,6 +648,23 @@ struct LookupFormat10
glyphs.add_range (firstGlyph, firstGlyph + glyphCount - 1);
}
template <typename set_t, typename filter_t>
void collect_glyphs_filtered (set_t &glyphs, const filter_t &filter) const
{
if (unlikely (!glyphCount)) return;
if (firstGlyph == DELETED_GLYPH) return;
const HBUINT8 *p = valueArrayZ.arrayZ;
for (unsigned i = 0; i < glyphCount; i++)
{
unsigned int v = 0;
unsigned int count = valueSize;
for (unsigned int j = 0; j < count; j++)
v = (v << 8) | *p++;
if (filter (v))
glyphs.add (firstGlyph + i);
}
}
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
@ -666,7 +692,7 @@ struct Lookup
{
const T* get_value (hb_codepoint_t glyph_id, unsigned int num_glyphs) const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); return u.format0.get_value (glyph_id, num_glyphs);
case 2: hb_barrier (); return u.format2.get_value (glyph_id);
case 4: hb_barrier (); return u.format4.get_value (glyph_id);
@ -678,7 +704,7 @@ struct Lookup
const typename T::type get_value_or_null (hb_codepoint_t glyph_id, unsigned int num_glyphs) const
{
switch (u.format) {
switch (u.format.v) {
/* Format 10 cannot return a pointer. */
case 10: hb_barrier (); return u.format10.get_value_or_null (glyph_id);
default:
@ -690,7 +716,7 @@ struct Lookup
template <typename set_t>
void collect_glyphs (set_t &glyphs, unsigned int num_glyphs) const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); u.format0.collect_glyphs (glyphs, num_glyphs); return;
case 2: hb_barrier (); u.format2.collect_glyphs (glyphs); return;
case 4: hb_barrier (); u.format4.collect_glyphs (glyphs); return;
@ -703,12 +729,13 @@ struct Lookup
template <typename set_t, typename filter_t>
void collect_glyphs_filtered (set_t &glyphs, unsigned num_glyphs, const filter_t &filter) const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); u.format0.collect_glyphs_filtered (glyphs, num_glyphs, filter); return;
case 2: hb_barrier (); u.format2.collect_glyphs_filtered (glyphs, filter); return;
case 4: hb_barrier (); u.format4.collect_glyphs_filtered (glyphs, filter); return;
case 6: hb_barrier (); u.format6.collect_glyphs_filtered (glyphs, filter); return;
case 8: hb_barrier (); u.format8.collect_glyphs_filtered (glyphs, filter); return;
case 10: hb_barrier (); u.format10.collect_glyphs_filtered (glyphs, filter); return;
default:return;
}
}
@ -724,9 +751,9 @@ struct Lookup
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); return_trace (u.format0.sanitize (c));
case 2: hb_barrier (); return_trace (u.format2.sanitize (c));
case 4: hb_barrier (); return_trace (u.format4.sanitize (c));
@ -739,9 +766,9 @@ struct Lookup
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); return_trace (u.format0.sanitize (c, base));
case 2: hb_barrier (); return_trace (u.format2.sanitize (c, base));
case 4: hb_barrier (); return_trace (u.format4.sanitize (c, base));
@ -754,7 +781,7 @@ struct Lookup
protected:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
LookupFormat0<T> format0;
LookupFormat2<T> format2;
LookupFormat4<T> format4;
@ -763,7 +790,7 @@ struct Lookup
LookupFormat10<T> format10;
} u;
public:
DEFINE_SIZE_UNION (2, format);
DEFINE_SIZE_UNION (2, format.v);
};
DECLARE_NULL_NAMESPACE_BYTES_TEMPLATE1 (AAT, Lookup, 2);
@ -838,11 +865,6 @@ struct StateTable
STATE_START_OF_LINE = 1,
};
template <typename set_t>
void collect_glyphs (set_t &glyphs, unsigned num_glyphs) const
{
(this+classTable).collect_glyphs (glyphs, num_glyphs);
}
template <typename set_t, typename table_t>
void collect_initial_glyphs (set_t &glyphs, unsigned num_glyphs, const table_t &table) const
{
@ -1082,6 +1104,8 @@ struct SubtableGlyphCoverage
for (unsigned i = 0; i < subtable_count; i++)
{
uint32_t offset = (uint32_t) subtableOffsets[i];
// A font file called SFNSDisplay.ttf has value 0xFFFFFFFF in the offsets.
// Just ignore it.
if (offset == 0 || offset == 0xFFFFFFFF)
continue;
if (unlikely (!subtableOffsets[i].sanitize (c, this, bytes)))
@ -1192,11 +1216,24 @@ struct StateTableDriver
int state = StateTableT::STATE_START_OF_TEXT;
// If there's only one range, we already checked the flag.
auto *last_range = ac->range_flags && (ac->range_flags->length > 1) ? &(*ac->range_flags)[0] : nullptr;
const bool start_state_safe_to_break_eot =
!c->table->is_actionable (machine.get_entry (StateTableT::STATE_START_OF_TEXT, CLASS_END_OF_TEXT));
for (buffer->idx = 0; buffer->successful;)
{
/* This block is copied in NoncontextualSubtable::apply. Keep in sync. */
if (last_range)
unsigned int klass = likely (buffer->idx < buffer->len) ?
machine.get_class (buffer->cur().codepoint, num_glyphs, ac->machine_class_cache) :
(unsigned) CLASS_END_OF_TEXT;
resume:
DEBUG_MSG (APPLY, nullptr, "c%u at %u", klass, buffer->idx);
const EntryT &entry = machine.get_entry (state, klass);
const int next_state = machine.new_state (entry.newState);
bool is_not_epsilon_transition = !(entry.flags & Flags::DontAdvance);
bool is_not_actionable = !c->table->is_actionable (entry);
if (unlikely (last_range))
{
/* This block is copied in NoncontextualSubtable::apply. Keep in sync. */
auto *range = last_range;
if (buffer->idx < buffer->len)
{
@ -1211,7 +1248,7 @@ struct StateTableDriver
}
if (!(range->flags & ac->subtable_flags))
{
if (buffer->idx == buffer->len || unlikely (!buffer->successful))
if (buffer->idx == buffer->len)
break;
state = StateTableT::STATE_START_OF_TEXT;
@ -1219,13 +1256,42 @@ struct StateTableDriver
continue;
}
}
else
{
// Fast path for when transitioning from start-state to start-state with
// no action and advancing. Do so as long as the class remains the same.
// This is common with runs of non-actionable glyphs.
unsigned int klass = likely (buffer->idx < buffer->len) ?
machine.get_class (buffer->cur().codepoint, num_glyphs, ac->machine_class_cache) :
(unsigned) CLASS_END_OF_TEXT;
DEBUG_MSG (APPLY, nullptr, "c%u at %u", klass, buffer->idx);
const EntryT &entry = machine.get_entry (state, klass);
const int next_state = machine.new_state (entry.newState);
bool is_null_transition = state == StateTableT::STATE_START_OF_TEXT &&
next_state == StateTableT::STATE_START_OF_TEXT &&
start_state_safe_to_break_eot &&
is_not_actionable &&
is_not_epsilon_transition &&
!last_range;
if (is_null_transition)
{
unsigned old_klass = klass;
do
{
c->transition (buffer, this, entry);
if (buffer->idx == buffer->len || !buffer->successful)
break;
(void) buffer->next_glyph ();
klass = likely (buffer->idx < buffer->len) ?
machine.get_class (buffer->cur().codepoint, num_glyphs, ac->machine_class_cache) :
(unsigned) CLASS_END_OF_TEXT;
} while (klass == old_klass);
if (buffer->idx == buffer->len || !buffer->successful)
break;
goto resume;
}
}
/* Conditions under which it's guaranteed safe-to-break before current glyph:
*
@ -1292,10 +1358,10 @@ struct StateTableDriver
state = next_state;
DEBUG_MSG (APPLY, nullptr, "s%d", state);
if (buffer->idx == buffer->len || unlikely (!buffer->successful))
if (buffer->idx == buffer->len)
break;
if (!(entry.flags & Flags::DontAdvance) || buffer->max_ops-- <= 0)
if (is_not_epsilon_transition || buffer->max_ops-- <= 0)
(void) buffer->next_glyph ();
}

View File

@ -120,12 +120,12 @@ struct KerxSubTableFormat0
}
template <typename set_t>
void collect_glyphs (set_t &left_set, set_t &right_set, unsigned num_glyphs) const
void collect_glyphs (set_t &first_set, set_t &second_set, unsigned num_glyphs) const
{
for (const KernPair& pair : pairs)
{
left_set.add (pair.left);
right_set.add (pair.right);
first_set.add (pair.left);
second_set.add (pair.right);
}
}
@ -140,7 +140,7 @@ struct KerxSubTableFormat0
int get_kerning (hb_codepoint_t left, hb_codepoint_t right) const
{
if (!(*c->left_set)[left] || !(*c->right_set)[right]) return 0;
if (!(*c->first_set)[left] || !(*c->second_set)[right]) return 0;
return table.get_kerning (left, right, c);
}
};
@ -396,10 +396,10 @@ struct KerxSubTableFormat1
}
template <typename set_t>
void collect_glyphs (set_t &left_set, set_t &right_set, unsigned num_glyphs) const
void collect_glyphs (set_t &first_set, set_t &second_set, unsigned num_glyphs) const
{
machine.collect_initial_glyphs (left_set, num_glyphs, *this);
//machine.collect_glyphs (right_set, num_glyphs); // right_set is unused for machine kerning
machine.collect_initial_glyphs (first_set, num_glyphs, *this);
//machine.collect_glyphs (second_set, num_glyphs); // second_set is unused for machine kerning
}
protected:
@ -451,10 +451,10 @@ struct KerxSubTableFormat2
}
template <typename set_t>
void collect_glyphs (set_t &left_set, set_t &right_set, unsigned num_glyphs) const
void collect_glyphs (set_t &first_set, set_t &second_set, unsigned num_glyphs) const
{
(this+leftClassTable).collect_glyphs (left_set, num_glyphs);
(this+rightClassTable).collect_glyphs (right_set, num_glyphs);
(this+leftClassTable).collect_glyphs (first_set, num_glyphs);
(this+rightClassTable).collect_glyphs (second_set, num_glyphs);
}
struct accelerator_t
@ -468,7 +468,7 @@ struct KerxSubTableFormat2
int get_kerning (hb_codepoint_t left, hb_codepoint_t right) const
{
if (!(*c->left_set)[left] || !(*c->right_set)[right]) return 0;
if (!(*c->first_set)[left] || !(*c->second_set)[right]) return 0;
return table.get_kerning (left, right, c);
}
};
@ -629,6 +629,8 @@ struct KerxSubTableFormat4
}
o.attach_type() = OT::Layout::GPOS_impl::ATTACH_TYPE_MARK;
o.attach_chain() = (int) mark - (int) buffer->idx;
if (c->buffer_is_reversed)
o.attach_chain() = -o.attach_chain();
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
}
@ -671,10 +673,10 @@ struct KerxSubTableFormat4
}
template <typename set_t>
void collect_glyphs (set_t &left_set, set_t &right_set, unsigned num_glyphs) const
void collect_glyphs (set_t &first_set, set_t &second_set, unsigned num_glyphs) const
{
machine.collect_initial_glyphs (left_set, num_glyphs, *this);
//machine.collect_glyphs (right_set, num_glyphs); // right_set is unused for machine kerning
machine.collect_initial_glyphs (first_set, num_glyphs, *this);
//machine.collect_glyphs (second_set, num_glyphs); // second_set is unused for machine kerning
}
protected:
@ -762,19 +764,19 @@ struct KerxSubTableFormat6
}
template <typename set_t>
void collect_glyphs (set_t &left_set, set_t &right_set, unsigned num_glyphs) const
void collect_glyphs (set_t &first_set, set_t &second_set, unsigned num_glyphs) const
{
if (is_long ())
{
const auto &t = u.l;
(this+t.rowIndexTable).collect_glyphs (left_set, num_glyphs);
(this+t.columnIndexTable).collect_glyphs (right_set, num_glyphs);
(this+t.rowIndexTable).collect_glyphs (first_set, num_glyphs);
(this+t.columnIndexTable).collect_glyphs (second_set, num_glyphs);
}
else
{
const auto &t = u.s;
(this+t.rowIndexTable).collect_glyphs (left_set, num_glyphs);
(this+t.columnIndexTable).collect_glyphs (right_set, num_glyphs);
(this+t.rowIndexTable).collect_glyphs (first_set, num_glyphs);
(this+t.columnIndexTable).collect_glyphs (second_set, num_glyphs);
}
}
@ -789,7 +791,7 @@ struct KerxSubTableFormat6
int get_kerning (hb_codepoint_t left, hb_codepoint_t right) const
{
if (!(*c->left_set)[left] || !(*c->right_set)[right]) return 0;
if (!(*c->first_set)[left] || !(*c->second_set)[right]) return 0;
return table.get_kerning (left, right, c);
}
};
@ -878,15 +880,15 @@ struct KerxSubTable
}
template <typename set_t>
void collect_glyphs (set_t &left_set, set_t &right_set, unsigned num_glyphs) const
void collect_glyphs (set_t &first_set, set_t &second_set, unsigned num_glyphs) const
{
unsigned int subtable_type = get_type ();
switch (subtable_type) {
case 0: u.format0.collect_glyphs (left_set, right_set, num_glyphs); return;
case 1: u.format1.collect_glyphs (left_set, right_set, num_glyphs); return;
case 2: u.format2.collect_glyphs (left_set, right_set, num_glyphs); return;
case 4: u.format4.collect_glyphs (left_set, right_set, num_glyphs); return;
case 6: u.format6.collect_glyphs (left_set, right_set, num_glyphs); return;
case 0: u.format0.collect_glyphs (first_set, second_set, num_glyphs); return;
case 1: u.format1.collect_glyphs (first_set, second_set, num_glyphs); return;
case 2: u.format2.collect_glyphs (first_set, second_set, num_glyphs); return;
case 4: u.format4.collect_glyphs (first_set, second_set, num_glyphs); return;
case 6: u.format6.collect_glyphs (first_set, second_set, num_glyphs); return;
default: return;
}
}
@ -923,8 +925,8 @@ struct KerxSubTable
struct kern_subtable_accelerator_data_t
{
hb_bit_set_t left_set;
hb_bit_set_t right_set;
hb_bit_set_t first_set;
hb_bit_set_t second_set;
mutable hb_aat_class_cache_t class_cache;
};
@ -1017,9 +1019,8 @@ struct KerxTable
if (HB_DIRECTION_IS_HORIZONTAL (c->buffer->props.direction) != st->u.header.is_horizontal ())
goto skip;
c->left_set = &subtable_accel.left_set;
c->right_set = &subtable_accel.right_set;
c->machine_glyph_set = &subtable_accel.left_set;
c->first_set = &subtable_accel.first_set;
c->second_set = &subtable_accel.second_set;
c->machine_class_cache = &subtable_accel.class_cache;
if (!c->buffer_intersects_machine ())
@ -1051,8 +1052,8 @@ struct KerxTable
}
}
if (reverse)
c->buffer->reverse ();
if (reverse != c->buffer_is_reversed)
c->reverse_buffer ();
{
/* See comment in sanitize() for conditional here. */
@ -1060,15 +1061,14 @@ struct KerxTable
ret |= st->dispatch (c);
}
if (reverse)
c->buffer->reverse ();
(void) c->buffer->message (c->font, "end subtable %u", c->lookup_index);
skip:
st = &StructAfter<SubTable> (*st);
c->set_lookup_index (c->lookup_index + 1);
}
if (c->buffer_is_reversed)
c->reverse_buffer ();
return ret;
}
@ -1133,7 +1133,7 @@ struct KerxTable
if (unlikely (accel_data.subtable_accels.in_error ()))
return accel_data;
st->collect_glyphs (subtable_accel.left_set, subtable_accel.right_set, num_glyphs);
st->collect_glyphs (subtable_accel.first_set, subtable_accel.second_set, num_glyphs);
subtable_accel.class_cache.clear ();
st = &StructAfter<SubTable> (*st);

View File

@ -487,7 +487,7 @@ struct LigatureSubtable
if (entry.flags & LigatureEntryT::SetComponent)
{
/* Never mark same index twice, in case DontAdvance was used... */
if (match_length && match_positions[(match_length - 1u) % ARRAY_LENGTH (match_positions)] == buffer->out_len)
if (unlikely (match_length && match_positions[(match_length - 1u) % ARRAY_LENGTH (match_positions)] == buffer->out_len))
match_length--;
match_positions[match_length++ % ARRAY_LENGTH (match_positions)] = buffer->out_len;
@ -640,7 +640,7 @@ struct NoncontextualSubtable
for (unsigned int i = 0; i < count; i++)
{
/* This block copied from StateTableDriver::drive. Keep in sync. */
if (last_range)
if (unlikely (last_range))
{
auto *range = last_range;
{
@ -1169,15 +1169,15 @@ struct Chain
hb_map ([subtable_flags] (const hb_aat_map_t::range_flags_t _) -> bool { return subtable_flags & (_.flags); })))
goto skip;
c->subtable_flags = subtable_flags;
c->machine_glyph_set = accel ? &accel->subtables[i].glyph_set : &Null(hb_bit_set_t);
c->machine_class_cache = accel ? &accel->subtables[i].class_cache : nullptr;
if (!(coverage & ChainSubtable<Types>::AllDirections) &&
HB_DIRECTION_IS_VERTICAL (c->buffer->props.direction) !=
bool (coverage & ChainSubtable<Types>::Vertical))
goto skip;
c->subtable_flags = subtable_flags;
c->first_set = accel ? &accel->subtables[i].glyph_set : &Null(hb_bit_set_t);
c->machine_class_cache = accel ? &accel->subtables[i].class_cache : nullptr;
if (!c->buffer_intersects_machine ())
{
(void) c->buffer->message (c->font, "skipped chainsubtable %u because no glyph matches", c->lookup_index);
@ -1219,22 +1219,21 @@ struct Chain
if (!c->buffer->message (c->font, "start chainsubtable %u", c->lookup_index))
goto skip;
if (reverse)
c->buffer->reverse ();
if (reverse != c->buffer_is_reversed)
c->reverse_buffer ();
subtable->apply (c);
if (reverse)
c->buffer->reverse ();
(void) c->buffer->message (c->font, "end chainsubtable %u", c->lookup_index);
if (unlikely (!c->buffer->successful)) return;
if (unlikely (!c->buffer->successful)) break;
skip:
subtable = &StructAfter<ChainSubtable<Types>> (*subtable);
c->set_lookup_index (c->lookup_index + 1);
}
if (c->buffer_is_reversed)
c->reverse_buffer ();
}
unsigned int get_size () const { return length; }

View File

@ -78,129 +78,246 @@
/*
* Big-endian integers.
* Fixed-endian integers / floats.
*/
/* Endian swap, used in Windows related backends */
static inline constexpr uint16_t hb_uint16_swap (uint16_t v)
{ return (v >> 8) | (v << 8); }
static inline constexpr uint32_t hb_uint32_swap (uint32_t v)
{ return (hb_uint16_swap (v) << 16) | hb_uint16_swap (v >> 16); }
#ifndef HB_FAST_INT_ACCESS
template <typename Type>
struct __attribute__((packed)) hb_packed_t { Type v; };
#ifndef HB_FAST_NUM_ACCESS
#if defined(__OPTIMIZE__) && \
defined(__BYTE_ORDER) && \
(__BYTE_ORDER == __BIG_ENDIAN || \
(__BYTE_ORDER == __LITTLE_ENDIAN && \
hb_has_builtin(__builtin_bswap16) && \
hb_has_builtin(__builtin_bswap32)))
#define HB_FAST_INT_ACCESS 1
hb_has_builtin(__builtin_bswap32) && \
hb_has_builtin(__builtin_bswap64)))
#define HB_FAST_NUM_ACCESS 1
#else
#define HB_FAST_INT_ACCESS 0
#endif
#define HB_FAST_NUM_ACCESS 0
#endif
template <typename Type, int Bytes = sizeof (Type)>
struct BEInt;
template <typename Type>
struct BEInt<Type, 1>
// https://github.com/harfbuzz/harfbuzz/issues/5456
#if defined(__GNUC__) && !defined(__clang__) && (__GNUC__ <= 12)
#undef HB_FAST_NUM_ACCESS
#define HB_FAST_NUM_ACCESS 0
#endif
#endif
template <bool BE, typename Type, int Bytes = sizeof (Type)>
struct HBInt;
template <bool BE, typename Type>
struct HBInt<BE, Type, 1>
{
public:
BEInt () = default;
constexpr BEInt (Type V) : v {uint8_t (V)} {}
HBInt () = default;
constexpr HBInt (Type V) : v {uint8_t (V)} {}
constexpr operator Type () const { return v; }
private: uint8_t v;
};
template <typename Type>
struct BEInt<Type, 2>
template <bool BE, typename Type>
struct HBInt<BE, Type, 2>
{
struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
public:
BEInt () = default;
HBInt () = default;
BEInt (Type V)
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
{ ((packed_uint16_t *) v)->v = __builtin_bswap16 (V); }
#else /* __BYTE_ORDER == __BIG_ENDIAN */
{ ((packed_uint16_t *) v)->v = V; }
#endif
HBInt (Type V)
#if HB_FAST_NUM_ACCESS
{
if (BE == (__BYTE_ORDER == __BIG_ENDIAN))
((hb_packed_t<uint16_t> *) v)->v = V;
else
((hb_packed_t<uint16_t> *) v)->v = __builtin_bswap16 (V);
}
#else
: v {uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
: v {BE ? uint8_t ((V >> 8) & 0xFF) : uint8_t ((V ) & 0xFF),
BE ? uint8_t ((V ) & 0xFF) : uint8_t ((V >> 8) & 0xFF)} {}
#endif
constexpr operator Type () const {
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
return __builtin_bswap16 (((packed_uint16_t *) v)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
return ((packed_uint16_t *) v)->v;
#endif
constexpr operator Type () const
{
#if HB_FAST_NUM_ACCESS
return (BE == (__BYTE_ORDER == __BIG_ENDIAN)) ?
((const hb_packed_t<uint16_t> *) v)->v
:
__builtin_bswap16 (((const hb_packed_t<uint16_t> *) v)->v)
;
#else
return (v[0] << 8)
+ (v[1] );
return (BE ? (v[0] << 8) : (v[0] ))
+ (BE ? (v[1] ) : (v[1] << 8));
#endif
}
private: uint8_t v[2];
};
template <typename Type>
struct BEInt<Type, 3>
template <bool BE, typename Type>
struct HBInt<BE, Type, 3>
{
static_assert (!std::is_signed<Type>::value, "");
public:
BEInt () = default;
constexpr BEInt (Type V) : v {uint8_t ((V >> 16) & 0xFF),
uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
HBInt () = default;
constexpr HBInt (Type V) : v {BE ? uint8_t ((V >> 16) & 0xFF) : uint8_t ((V >> 16) & 0xFF),
BE ? uint8_t ((V >> 8) & 0xFF) : uint8_t ((V >> 8) & 0xFF),
BE ? uint8_t ((V ) & 0xFF) : uint8_t ((V ) & 0xFF)} {}
constexpr operator Type () const { return (v[0] << 16)
+ (v[1] << 8)
+ (v[2] ); }
constexpr operator Type () const { return (BE ? (v[0] << 16) : (v[0] ))
+ (BE ? (v[1] << 8) : (v[1] << 8))
+ (BE ? (v[2] ) : (v[2] << 16)); }
private: uint8_t v[3];
};
template <typename Type>
struct BEInt<Type, 4>
template <bool BE, typename Type>
struct HBInt<BE, Type, 4>
{
struct __attribute__((packed)) packed_uint32_t { uint32_t v; };
template <bool, typename, int>
friend struct HBFloat;
public:
BEInt () = default;
HBInt () = default;
BEInt (Type V)
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
{ ((packed_uint32_t *) v)->v = __builtin_bswap32 (V); }
#else /* __BYTE_ORDER == __BIG_ENDIAN */
{ ((packed_uint32_t *) v)->v = V; }
#endif
HBInt (Type V)
#if HB_FAST_NUM_ACCESS
{
if (BE == (__BYTE_ORDER == __BIG_ENDIAN))
((hb_packed_t<uint32_t> *) v)->v = V;
else
((hb_packed_t<uint32_t> *) v)->v = __builtin_bswap32 (V);
}
#else
: v {uint8_t ((V >> 24) & 0xFF),
uint8_t ((V >> 16) & 0xFF),
uint8_t ((V >> 8) & 0xFF),
uint8_t ((V ) & 0xFF)} {}
: v {BE ? uint8_t ((V >> 24) & 0xFF) : uint8_t ((V ) & 0xFF),
BE ? uint8_t ((V >> 16) & 0xFF) : uint8_t ((V >> 8) & 0xFF),
BE ? uint8_t ((V >> 8) & 0xFF) : uint8_t ((V >> 16) & 0xFF),
BE ? uint8_t ((V ) & 0xFF) : uint8_t ((V >> 24) & 0xFF)} {}
#endif
constexpr operator Type () const {
#if HB_FAST_INT_ACCESS
#if __BYTE_ORDER == __LITTLE_ENDIAN
return __builtin_bswap32 (((packed_uint32_t *) v)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
return ((packed_uint32_t *) v)->v;
#endif
#if HB_FAST_NUM_ACCESS
return (BE == (__BYTE_ORDER == __BIG_ENDIAN)) ?
((const hb_packed_t<uint32_t> *) v)->v
:
__builtin_bswap32 (((const hb_packed_t<uint32_t> *) v)->v)
;
#else
return (v[0] << 24)
+ (v[1] << 16)
+ (v[2] << 8)
+ (v[3] );
return (BE ? (v[0] << 24) : (v[0] ))
+ (BE ? (v[1] << 16) : (v[1] << 8))
+ (BE ? (v[2] << 8) : (v[2] << 16))
+ (BE ? (v[3] ) : (v[3] << 24));
#endif
}
private: uint8_t v[4];
};
template <bool BE, typename Type>
struct HBInt<BE, Type, 8>
{
template <bool, typename, int>
friend struct HBFloat;
public:
HBInt () = default;
HBInt (Type V)
#if HB_FAST_NUM_ACCESS
{
if (BE == (__BYTE_ORDER == __BIG_ENDIAN))
((hb_packed_t<uint64_t> *) v)->v = V;
else
((hb_packed_t<uint64_t> *) v)->v = __builtin_bswap64 (V);
}
#else
: v {BE ? uint8_t ((V >> 56) & 0xFF) : uint8_t ((V ) & 0xFF),
BE ? uint8_t ((V >> 48) & 0xFF) : uint8_t ((V >> 8) & 0xFF),
BE ? uint8_t ((V >> 40) & 0xFF) : uint8_t ((V >> 16) & 0xFF),
BE ? uint8_t ((V >> 32) & 0xFF) : uint8_t ((V >> 24) & 0xFF),
BE ? uint8_t ((V >> 24) & 0xFF) : uint8_t ((V >> 32) & 0xFF),
BE ? uint8_t ((V >> 16) & 0xFF) : uint8_t ((V >> 40) & 0xFF),
BE ? uint8_t ((V >> 8) & 0xFF) : uint8_t ((V >> 48) & 0xFF),
BE ? uint8_t ((V ) & 0xFF) : uint8_t ((V >> 56) & 0xFF)} {}
#endif
constexpr operator Type () const {
#if HB_FAST_NUM_ACCESS
return (BE == (__BYTE_ORDER == __BIG_ENDIAN)) ?
((const hb_packed_t<uint64_t> *) v)->v
:
__builtin_bswap64 (((const hb_packed_t<uint64_t> *) v)->v)
;
#else
return (BE ? (uint64_t (v[0]) << 56) : (uint64_t (v[0]) ))
+ (BE ? (uint64_t (v[1]) << 48) : (uint64_t (v[1]) << 8))
+ (BE ? (uint64_t (v[2]) << 40) : (uint64_t (v[2]) << 16))
+ (BE ? (uint64_t (v[3]) << 32) : (uint64_t (v[3]) << 24))
+ (BE ? (uint64_t (v[4]) << 24) : (uint64_t (v[4]) << 32))
+ (BE ? (uint64_t (v[5]) << 16) : (uint64_t (v[5]) << 40))
+ (BE ? (uint64_t (v[6]) << 8) : (uint64_t (v[6]) << 48))
+ (BE ? (uint64_t (v[7]) ) : (uint64_t (v[7]) << 56));
#endif
}
private: uint8_t v[8];
};
/* Floats. */
template <bool BE, typename Type, int Bytes>
struct HBFloat
{
using IntType = typename std::conditional<Bytes == 4, uint32_t, uint64_t>::type;
public:
HBFloat () = default;
HBFloat (Type V)
{
#if HB_FAST_NUM_ACCESS
{
if (BE == (__BYTE_ORDER == __BIG_ENDIAN))
{
((hb_packed_t<Type> *) v)->v = V;
return;
}
}
#endif
union {
hb_packed_t<Type> f;
hb_packed_t<IntType> i;
} u = {{V}};
const HBInt<BE, IntType> I = u.i.v;
for (unsigned i = 0; i < Bytes; i++)
v[i] = I.v[i];
}
/* c++14 constexpr */ operator Type () const
{
#if HB_FAST_NUM_ACCESS
{
if (BE == (__BYTE_ORDER == __BIG_ENDIAN))
return ((const hb_packed_t<Type> *) v)->v;
}
#endif
HBInt<BE, IntType> I;
for (unsigned i = 0; i < Bytes; i++)
I.v[i] = v[i];
union {
hb_packed_t<IntType> i;
hb_packed_t<Type> f;
} u = {{I}};
return u.f.v;
}
private: uint8_t v[Bytes];
};
/* We want our rounding towards +infinity. */
static inline double
_hb_roundf (double x) { return floor (x + .5); }
@ -210,6 +327,27 @@ _hb_roundf (float x) { return floorf (x + .5f); }
#define roundf(x) _hb_roundf(x)
static inline void
hb_sincos (float rotation, float &s, float &c)
{
#ifdef HAVE_SINCOSF
sincosf (rotation, &s, &c);
#else
c = cosf (rotation);
s = sinf (rotation);
#endif
}
static inline void
hb_sincos (double rotation, double &s, double &c)
{
#ifdef HAVE_SINCOS
sincos (rotation, &s, &c);
#else
c = cos (rotation);
s = sin (rotation);
#endif
}
/* Encodes three unsigned integers in one 64-bit number. If the inputs have more than 21 bits,
* values will be truncated / overlap, and might not decode exactly. */
@ -734,6 +872,17 @@ HB_FUNCOBJ (hb_clamp);
* Bithacks.
*/
/* Return the number of 1 bits in a uint8_t; faster than hb_popcount() */
static inline unsigned
hb_popcount8 (uint8_t v)
{
static const uint8_t popcount4[16] = {
0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4
};
return popcount4[v & 0xF] + popcount4[v >> 4];
}
/* Return the number of 1 bits in v. */
template <typename T>
static inline unsigned int
@ -1070,6 +1219,7 @@ _hb_cmp_operator (const void *pkey, const void *pval)
}
template <typename V, typename K, typename ...Ts>
HB_HOT
static inline bool
hb_bsearch_impl (unsigned *pos, /* Out */
const K& key,

View File

@ -0,0 +1,105 @@
/*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Author(s): Behdad Esfahbod
*/
#ifndef HB_ALLOC_POOL_HH
#define HB_ALLOC_POOL_HH
#include "hb-vector.hh"
/* Memory pool for persistent small- to medium-sized allocations.
*
* Some AI musings on this, not necessarily true:
*
* This is a very simple implementation, but it's good enough for our
* purposes. It's not thread-safe. It's not very fast. It's not
* very memory efficient. It's not very cache efficient. It's not
* very anything efficient. But it's simple and it works. And it's
* good enough for our purposes. If you need something more
* sophisticated, use a real allocator. Or use a real language. */
struct hb_alloc_pool_t
{
unsigned ChunkSize = 65536 - 2 * sizeof (void *);
void *alloc (size_t size, unsigned alignment = 2 * sizeof (void *))
{
if (unlikely (chunks.in_error ())) return nullptr;
assert (alignment > 0);
assert (alignment <= 2 * sizeof (void *));
assert ((alignment & (alignment - 1)) == 0); /* power of two */
if (size > (ChunkSize) / 4)
{
/* Big chunk, allocate separately. */
hb_vector_t<char> chunk;
if (unlikely (!chunk.resize (size))) return nullptr;
void *ret = chunk.arrayZ;
chunks.push (std::move (chunk));
if (chunks.in_error ()) return nullptr;
if (chunks.length > 1)
{
// Bring back the previous last chunk to the end, so that
// we can continue to allocate from it.
hb_swap (chunks.arrayZ[chunks.length - 1], chunks.arrayZ[chunks.length - 2]);
}
return ret;
}
unsigned pad = (unsigned) ((alignment - ((uintptr_t) current_chunk.arrayZ & (alignment - 1))) & (alignment - 1));
// Small chunk, allocate from the last chunk.
if (current_chunk.length < pad + size)
{
chunks.push ();
if (unlikely (chunks.in_error ())) return nullptr;
hb_vector_t<char> &chunk = chunks.arrayZ[chunks.length - 1];
if (unlikely (!chunk.resize (ChunkSize))) return nullptr;
current_chunk = chunk;
pad = (unsigned) ((alignment - ((uintptr_t) current_chunk.arrayZ & (alignment - 1))) & (alignment - 1));
}
current_chunk += pad;
assert (current_chunk.length >= size);
void *ret = current_chunk.arrayZ;
current_chunk += size;
return ret;
}
void discard (void *p_, size_t size)
{
// Reclaim memory if we can.
char *p = (char *) p_;
if (current_chunk.arrayZ == p + size && current_chunk.backwards_length >= size)
current_chunk -= size;
}
private:
hb_vector_t<hb_vector_t<char>> chunks;
hb_array_t<char> current_chunk;
};
#endif /* HB_ALLOC_POOL_HH */

View File

@ -291,6 +291,13 @@ struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
&& (unsigned int) (arrayZ + length - (const char *) p) >= size;
}
template <unsigned P = sizeof (Type),
hb_enable_if (P == 1)>
bool check_end (const void *p) const
{
return (uintptr_t) (((const char *) p) - arrayZ) <= length;
}
/* Only call if you allocated the underlying array using hb_malloc() or similar. */
void fini ()
{ hb_free ((void *) arrayZ); arrayZ = nullptr; length = 0; }

View File

@ -40,7 +40,6 @@
* Atomic integers and pointers.
*/
/* We need external help for these */
#if defined(hb_atomic_int_impl_add) \
@ -80,27 +79,11 @@ _hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
#include <atomic>
#define HB_STL_ATOMIC_IMPL
#define _hb_memory_r_barrier() std::atomic_thread_fence(std::memory_order_acquire)
#define _hb_memory_w_barrier() std::atomic_thread_fence(std::memory_order_release)
#define hb_atomic_int_impl_add(AI, V) (reinterpret_cast<std::atomic<typename std::decay<decltype (*(AI))>::type> *> (AI)->fetch_add ((V), std::memory_order_acq_rel))
#define hb_atomic_int_impl_set_relaxed(AI, V) (reinterpret_cast<std::atomic<typename std::decay<decltype (*(AI))>::type> *> (AI)->store ((V), std::memory_order_relaxed))
#define hb_atomic_int_impl_set(AI, V) (reinterpret_cast<std::atomic<typename std::decay<decltype (*(AI))>::type> *> (AI)->store ((V), std::memory_order_release))
#define hb_atomic_int_impl_get_relaxed(AI) (reinterpret_cast<std::atomic<typename std::decay<decltype (*(AI))>::type> const *> (AI)->load (std::memory_order_relaxed))
#define hb_atomic_int_impl_get(AI) (reinterpret_cast<std::atomic<typename std::decay<decltype (*(AI))>::type> const *> (AI)->load (std::memory_order_acquire))
#define hb_atomic_ptr_impl_set_relaxed(P, V) (reinterpret_cast<std::atomic<void*> *> (P)->store ((V), std::memory_order_relaxed))
#define hb_atomic_ptr_impl_get_relaxed(P) (reinterpret_cast<std::atomic<void*> const *> (P)->load (std::memory_order_relaxed))
#define hb_atomic_ptr_impl_get(P) (reinterpret_cast<std::atomic<void*> *> (P)->load (std::memory_order_acquire))
static inline bool
_hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
{
const void *O = O_; // Need lvalue
return reinterpret_cast<std::atomic<const void*> *> (P)->compare_exchange_weak (O, N, std::memory_order_acq_rel, std::memory_order_relaxed);
}
#define hb_atomic_ptr_impl_cmpexch(P,O,N) _hb_atomic_ptr_impl_cmplexch ((const void **) (P), (O), (N))
#else /* defined(HB_NO_MT) */
#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
@ -159,6 +142,76 @@ inline T hb_atomic_int_impl_get (const T *AI) { T v = *AI; _hb_memory_r_barrie
inline void *hb_atomic_ptr_impl_get (void ** const P) { void *v = *P; _hb_memory_r_barrier (); return v; }
#endif
#ifdef HB_STL_ATOMIC_IMPL
template <typename T>
struct hb_atomic_t
{
hb_atomic_t () = default;
constexpr hb_atomic_t (T v) : v (v) {}
constexpr hb_atomic_t (const hb_atomic_t& o) : v (o.get_relaxed ()) {}
constexpr hb_atomic_t (hb_atomic_t&& o) : v (o.get_relaxed ()) { o.set_relaxed ({}); }
hb_atomic_t &operator= (const hb_atomic_t& o) { set_relaxed (o.get_relaxed ()); return *this; }
hb_atomic_t &operator= (hb_atomic_t&& o){ set_relaxed (o.get_relaxed ()); o.set_relaxed ({}); return *this; }
hb_atomic_t &operator= (T v_)
{
set_relaxed (v_);
return *this;
}
operator T () const { return get_relaxed (); }
void set_relaxed (T v_) { v.store (v_, std::memory_order_relaxed); }
void set_release (T v_) { v.store (v_, std::memory_order_release); }
T get_relaxed () const { return v.load (std::memory_order_relaxed); }
T get_acquire () const { return v.load (std::memory_order_acquire); }
T inc () { return v.fetch_add (1, std::memory_order_acq_rel); }
T dec () { return v.fetch_add (-1, std::memory_order_acq_rel); }
int operator++ (int) { return inc (); }
int operator-- (int) { return dec (); }
friend void swap (hb_atomic_t &a, hb_atomic_t &b) noexcept
{
T v = a.get_acquire ();
a.set_relaxed (b.get_acquire ());
b.set_relaxed (v);
}
std::atomic<T> v = 0;
};
template <typename T>
struct hb_atomic_t<T *>
{
hb_atomic_t () = default;
constexpr hb_atomic_t (T *v) : v (v) {}
hb_atomic_t (const hb_atomic_t &other) = delete;
void init (T *v_ = nullptr) { set_relaxed (v_); }
void set_relaxed (T *v_) { v.store (v_, std::memory_order_relaxed); }
T *get_relaxed () const { return v.load (std::memory_order_relaxed); }
T *get_acquire () const { return v.load (std::memory_order_acquire); }
bool cmpexch (T *old, T *new_) { return v.compare_exchange_weak (old, new_, std::memory_order_acq_rel, std::memory_order_relaxed); }
operator bool () const { return get_acquire () != nullptr; }
T *operator->() const { return get_acquire (); }
template <typename C>
operator C * () const
{
return get_acquire ();
}
friend void swap (hb_atomic_t &a, hb_atomic_t &b) noexcept
{
T *p = a.get_acquire ();
a.set_relaxed (b.get_acquire ());
b.set_relaxed (p);
}
std::atomic<T *> v = nullptr;
};
#else
template <typename T>
struct hb_atomic_t
@ -178,7 +231,6 @@ struct hb_atomic_t
int operator ++ (int) { return inc (); }
int operator -- (int) { return dec (); }
long operator |= (long v_) { set_relaxed (get_relaxed () | v_); return *this; }
T v = 0;
};
@ -194,7 +246,7 @@ struct hb_atomic_t<T*>
void set_relaxed (T* v_) { hb_atomic_ptr_impl_set_relaxed (&v, v_); }
T *get_relaxed () const { return (T *) hb_atomic_ptr_impl_get_relaxed (&v); }
T *get_acquire () const { return (T *) hb_atomic_ptr_impl_get ((void **) &v); }
bool cmpexch (const T *old, T *new_) { return hb_atomic_ptr_impl_cmpexch ((void **) &v, (void *) old, (void *) new_); }
bool cmpexch (T *old, T *new_) { return hb_atomic_ptr_impl_cmpexch ((void **) &v, (void *) old, (void *) new_); }
operator bool () const { return get_acquire () != nullptr; }
T * operator -> () const { return get_acquire (); }
@ -203,6 +255,8 @@ struct hb_atomic_t<T*>
T *v = nullptr;
};
#endif
static inline bool hb_barrier ()
{
_hb_compiler_memory_r_barrier ();

View File

@ -176,7 +176,7 @@ struct hb_inc_bimap_t
{
hb_codepoint_t count = get_population ();
hb_vector_t <hb_codepoint_t> work;
if (unlikely (!work.resize (count, false))) return;
if (unlikely (!work.resize_dirty (count))) return;
for (hb_codepoint_t rhs = 0; rhs < count; rhs++)
work.arrayZ[rhs] = back_map[rhs];

View File

@ -142,6 +142,7 @@ struct hb_bit_page_t
bool operator [] (hb_codepoint_t g) const { return get (g); }
bool operator () (hb_codepoint_t g) const { return get (g); }
bool has (hb_codepoint_t g) const { return get (g); }
void add_range (hb_codepoint_t a, hb_codepoint_t b)
{
@ -290,7 +291,7 @@ struct hb_bit_page_t
unsigned int j = m & ELT_MASK;
const elt_t vv = v[i] & ~((elt_t (1) << j) - 1);
for (const elt_t *p = &vv; i < len (); p = &v[++i])
for (const elt_t *p = &vv; i < len (); p = ((const elt_t *) &v[0]) + (++i))
if (*p)
{
*codepoint = i * ELT_BITS + elt_get_min (*p);
@ -346,6 +347,36 @@ struct hb_bit_page_t
return 0;
}
/*
* Iterator implementation.
*/
struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
{
static constexpr bool is_sorted_iterator = true;
iter_t (const hb_bit_page_t &s_ = Null (hb_bit_page_t), bool init = true) : s (&s_), v (INVALID)
{
if (init)
v = s->get_min ();
}
typedef hb_codepoint_t __item_t__;
hb_codepoint_t __item__ () const { return v; }
bool __more__ () const { return v != INVALID; }
void __next__ () {
s->next (&v);
}
void __prev__ () { s->previous (&v); }
iter_t end () const { return iter_t (*s, false); }
bool operator != (const iter_t& o) const
{ return v != o.v; }
protected:
const hb_bit_page_t *s;
hb_codepoint_t v;
};
iter_t iter () const { return iter_t (*this); }
operator iter_t () const { return iter (); }
static constexpr hb_codepoint_t INVALID = HB_SET_VALUE_INVALID;
typedef unsigned long long elt_t;

View File

@ -368,7 +368,7 @@ struct hb_bit_set_invertible_t
unsigned __len__ () const { return l; }
iter_t end () const { return iter_t (*s, false); }
bool operator != (const iter_t& o) const
{ return v != o.v || s != o.s; }
{ return v != o.v; }
protected:
const hb_bit_set_invertible_t *s;

View File

@ -91,10 +91,10 @@ struct hb_bit_set_t
if (pages.length < count && (unsigned) pages.allocated < count && count <= 2)
exact_size = true; // Most sets are small and local
if (unlikely (!pages.resize (count, clear, exact_size) ||
!page_map.resize (count, clear)))
if (unlikely (!pages.resize_full (count, clear, exact_size) ||
!page_map.resize_full (count, clear, false)))
{
pages.resize (page_map.length, clear, exact_size);
pages.resize_full (page_map.length, clear, exact_size);
successful = false;
return false;
}
@ -108,10 +108,11 @@ struct hb_bit_set_t
page_map.alloc (sz);
}
void reset ()
hb_bit_set_t& reset ()
{
successful = true;
clear ();
return *this;
}
void clear ()
@ -394,7 +395,7 @@ struct hb_bit_set_t
{
if (unlikely (!successful)) return;
unsigned int count = other.pages.length;
if (unlikely (!resize (count, false, exact_size)))
if (unlikely (!resize (count, false, exact_size)))
return;
population = other.population;
@ -922,7 +923,7 @@ struct hb_bit_set_t
unsigned __len__ () const { return l; }
iter_t end () const { return iter_t (*s, false); }
bool operator != (const iter_t& o) const
{ return s != o.s || v != o.v; }
{ return v != o.v; }
protected:
const hb_bit_set_t *s;

View File

@ -32,7 +32,7 @@
#include "hb.hh"
#line 36 "hb-buffer-deserialize-text-unicode.hh"
#line 33 "hb-buffer-deserialize-text-unicode.hh"
static const unsigned char _deserialize_text_unicode_trans_keys[] = {
0u, 0u, 43u, 102u, 48u, 102u, 48u, 124u, 48u, 57u, 62u, 124u, 48u, 124u, 60u, 117u,
85u, 117u, 85u, 117u, 0
@ -150,12 +150,12 @@ _hb_buffer_deserialize_text_unicode (hb_buffer_t *buffer,
hb_glyph_info_t info = {0};
const hb_glyph_position_t pos = {0};
#line 154 "hb-buffer-deserialize-text-unicode.hh"
#line 147 "hb-buffer-deserialize-text-unicode.hh"
{
cs = deserialize_text_unicode_start;
}
#line 159 "hb-buffer-deserialize-text-unicode.hh"
#line 150 "hb-buffer-deserialize-text-unicode.hh"
{
int _slen;
int _trans;
@ -215,7 +215,7 @@ _resume:
hb_memset (&info, 0, sizeof (info));
}
break;
#line 219 "hb-buffer-deserialize-text-unicode.hh"
#line 203 "hb-buffer-deserialize-text-unicode.hh"
}
_again:
@ -238,7 +238,7 @@ _again:
*end_ptr = p;
}
break;
#line 242 "hb-buffer-deserialize-text-unicode.hh"
#line 224 "hb-buffer-deserialize-text-unicode.hh"
}
}

View File

@ -427,7 +427,7 @@ _hb_buffer_serialize_unicode_text (hb_buffer_t *buffer,
* #HB_BUFFER_SERIALIZE_FLAG_NO_GLYPH_NAMES flag is set. Then,
* - If #HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS is not set, `=` then #hb_glyph_info_t.cluster.
* - If #HB_BUFFER_SERIALIZE_FLAG_NO_POSITIONS is not set, the #hb_glyph_position_t in the format:
* - If both #hb_glyph_position_t.x_offset and #hb_glyph_position_t.y_offset are not 0, `@x_offset,y_offset`. Then,
* - If #hb_glyph_position_t.x_offset and #hb_glyph_position_t.y_offset are not both 0, `@x_offset,y_offset`. Then,
* - `+x_advance`, then `,y_advance` if #hb_glyph_position_t.y_advance is not 0. Then,
* - If #HB_BUFFER_SERIALIZE_FLAG_GLYPH_EXTENTS is set, the #hb_glyph_extents_t in the format `<x_bearing,y_bearing,width,height>`
*

View File

@ -163,7 +163,7 @@ buffer_verify_unsafe_to_break (hb_buffer_t *buffer,
hb_buffer_append (fragment, text_buffer, text_start, text_end);
if (!hb_shape_full (font, fragment, features, num_features, shapers) ||
fragment->successful || fragment->shaping_failed)
fragment->successful)
{
hb_buffer_destroy (reconstruction);
hb_buffer_destroy (fragment);
@ -313,11 +313,11 @@ buffer_verify_unsafe_to_concat (hb_buffer_t *buffer,
* Shape the two fragment streams.
*/
if (!hb_shape_full (font, fragments[0], features, num_features, shapers) ||
!fragments[0]->successful || fragments[0]->shaping_failed)
!fragments[0]->successful)
goto out;
if (!hb_shape_full (font, fragments[1], features, num_features, shapers) ||
!fragments[1]->successful || fragments[1]->shaping_failed)
!fragments[1]->successful)
goto out;
if (!forward)

View File

@ -158,14 +158,15 @@ hb_segment_properties_overlay (hb_segment_properties_t *p,
bool
hb_buffer_t::enlarge (unsigned int size)
{
if (unlikely (!successful))
return false;
if (unlikely (size > max_len))
{
successful = false;
return false;
}
if (unlikely (!successful))
return false;
unsigned int new_allocated = allocated;
hb_glyph_position_t *new_pos = nullptr;
hb_glyph_info_t *new_info = nullptr;
@ -226,6 +227,13 @@ hb_buffer_t::shift_forward (unsigned int count)
assert (have_output);
if (unlikely (!ensure (len + count))) return false;
max_ops -= len - idx;
if (unlikely (max_ops < 0))
{
successful = false;
return false;
}
memmove (info + idx + count, info + idx, (len - idx) * sizeof (info[0]));
if (idx + count > len)
{
@ -297,7 +305,6 @@ hb_buffer_t::clear ()
props = default_props;
successful = true;
shaping_failed = false;
have_output = false;
have_positions = false;
@ -320,7 +327,6 @@ hb_buffer_t::enter ()
{
deallocate_var_all ();
serial = 0;
shaping_failed = false;
scratch_flags = HB_BUFFER_SCRATCH_FLAG_DEFAULT;
unsigned mul;
if (likely (!hb_unsigned_mul_overflows (len, HB_BUFFER_MAX_LEN_FACTOR, &mul)))
@ -339,7 +345,6 @@ hb_buffer_t::leave ()
max_ops = HB_BUFFER_MAX_OPS_DEFAULT;
deallocate_var_all ();
serial = 0;
// Intentionally not reseting shaping_failed, such that it can be inspected.
}
@ -520,7 +525,19 @@ hb_buffer_t::set_masks (hb_mask_t value,
hb_mask_t not_mask = ~mask;
value &= mask;
max_ops -= len;
if (unlikely (max_ops < 0))
successful = false;
unsigned int count = len;
if (cluster_start == 0 && cluster_end == (unsigned int) -1)
{
for (unsigned int i = 0; i < count; i++)
info[i].mask = (info[i].mask & not_mask) | value;
return;
}
for (unsigned int i = 0; i < count; i++)
if (cluster_start <= info[i].cluster && info[i].cluster < cluster_end)
info[i].mask = (info[i].mask & not_mask) | value;
@ -536,6 +553,10 @@ hb_buffer_t::merge_clusters_impl (unsigned int start,
return;
}
max_ops -= end - start;
if (unlikely (max_ops < 0))
successful = false;
unsigned int cluster = info[start].cluster;
for (unsigned int i = start + 1; i < end; i++)
@ -569,6 +590,10 @@ hb_buffer_t::merge_out_clusters (unsigned int start,
if (unlikely (end - start < 2))
return;
max_ops -= end - start;
if (unlikely (max_ops < 0))
successful = false;
unsigned int cluster = out_info[start].cluster;
for (unsigned int i = start + 1; i < end; i++)
@ -726,7 +751,6 @@ DEFINE_NULL_INSTANCE (hb_buffer_t) =
HB_SEGMENT_PROPERTIES_DEFAULT,
false, /* successful */
true, /* shaping_failed */
false, /* have_output */
true /* have_positions */

View File

@ -32,6 +32,7 @@
#include "hb.hh"
#include "hb-unicode.hh"
#include "hb-set-digest.hh"
static_assert ((sizeof (hb_glyph_info_t) == 20), "");
@ -44,14 +45,14 @@ HB_MARK_AS_FLAG_T (hb_buffer_diff_flags_t);
enum hb_buffer_scratch_flags_t {
HB_BUFFER_SCRATCH_FLAG_DEFAULT = 0x00000000u,
HB_BUFFER_SCRATCH_FLAG_HAS_NON_ASCII = 0x00000001u,
HB_BUFFER_SCRATCH_FLAG_HAS_FRACTION_SLASH = 0x00000001u,
HB_BUFFER_SCRATCH_FLAG_HAS_DEFAULT_IGNORABLES = 0x00000002u,
HB_BUFFER_SCRATCH_FLAG_HAS_SPACE_FALLBACK = 0x00000004u,
HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT = 0x00000008u,
HB_BUFFER_SCRATCH_FLAG_HAS_CGJ = 0x00000010u,
HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS = 0x00000020u,
HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE = 0x00000040u,
HB_BUFFER_SCRATCH_FLAG_HAS_VARIATION_SELECTOR_FALLBACK= 0x00000080u,
HB_BUFFER_SCRATCH_FLAG_HAS_BROKEN_SYLLABLE = 0x00000020u,
HB_BUFFER_SCRATCH_FLAG_HAS_VARIATION_SELECTOR_FALLBACK= 0x00000040u,
HB_BUFFER_SCRATCH_FLAG_HAS_CONTINUATIONS = 0x00000080u,
/* Reserved for shapers' internal use. */
HB_BUFFER_SCRATCH_FLAG_SHAPER0 = 0x01000000u,
@ -90,7 +91,6 @@ struct hb_buffer_t
hb_segment_properties_t props; /* Script, language, direction */
bool successful; /* Allocations successful */
bool shaping_failed; /* Shaping failure */
bool have_output; /* Whether we have an output buffer going on */
bool have_positions; /* Whether we have positions */
@ -110,6 +110,7 @@ struct hb_buffer_t
hb_codepoint_t context[2][CONTEXT_LENGTH];
unsigned int context_len[2];
hb_set_digest_t digest; /* Manually updated sometimes */
/*
* Managed by enter / leave
@ -200,6 +201,12 @@ struct hb_buffer_t
void collect_codepoints (set_t &d) const
{ d.clear (); d.add_array (&info[0].codepoint, len, sizeof (info[0])); }
void update_digest ()
{
digest = hb_set_digest_t ();
collect_codepoints (digest);
}
HB_INTERNAL void similar (const hb_buffer_t &src);
HB_INTERNAL void reset ();
HB_INTERNAL void clear ();
@ -346,7 +353,7 @@ struct hb_buffer_t
{
if (out_info != info || out_len != idx)
{
if (unlikely (!make_room_for (1, 1))) return false;
if (unlikely (!ensure (out_len + 1))) return false;
out_info[out_len] = info[idx];
}
out_len++;
@ -363,7 +370,7 @@ struct hb_buffer_t
{
if (out_info != info || out_len != idx)
{
if (unlikely (!make_room_for (n, n))) return false;
if (unlikely (!ensure (out_len + n))) return false;
memmove (out_info + out_len, info + idx, n * sizeof (out_info[0]));
}
out_len += n;
@ -404,22 +411,12 @@ struct hb_buffer_t
/* Adds glyph flags in mask to infos with clusters between start and end.
* The start index will be from out-buffer if from_out_buffer is true.
* If interior is true, then the cluster having the minimum value is skipped. */
void _set_glyph_flags (hb_mask_t mask,
unsigned start = 0,
unsigned end = (unsigned) -1,
bool interior = false,
bool from_out_buffer = false)
void _set_glyph_flags_impl (hb_mask_t mask,
unsigned start,
unsigned end,
bool interior,
bool from_out_buffer)
{
end = hb_min (end, len);
if (unlikely (end - start > 255))
return;
if (interior && !from_out_buffer && end - start < 2)
return;
scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS;
if (!from_out_buffer || !have_output)
{
if (!interior)
@ -456,6 +453,25 @@ struct hb_buffer_t
}
}
HB_ALWAYS_INLINE
void _set_glyph_flags (hb_mask_t mask,
unsigned start = 0,
unsigned end = (unsigned) -1,
bool interior = false,
bool from_out_buffer = false)
{
if (unlikely (end != (unsigned) -1 && end - start > 255))
return;
end = hb_min (end, len);
if (interior && !from_out_buffer && end - start < 2)
return;
_set_glyph_flags_impl (mask, start, end, interior, from_out_buffer);
}
void unsafe_to_break (unsigned int start = 0, unsigned int end = -1)
{
_set_glyph_flags (HB_GLYPH_FLAG_UNSAFE_TO_BREAK | HB_GLYPH_FLAG_UNSAFE_TO_CONCAT,
@ -606,6 +622,10 @@ struct hb_buffer_t
if (unlikely (start == end))
return;
max_ops -= end - start;
if (unlikely (max_ops < 0))
successful = false;
unsigned cluster_first = infos[start].cluster;
unsigned cluster_last = infos[end - 1].cluster;
@ -614,10 +634,7 @@ struct hb_buffer_t
{
for (unsigned int i = start; i < end; i++)
if (cluster != infos[i].cluster)
{
scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS;
infos[i].mask |= mask;
}
return;
}
@ -626,18 +643,12 @@ struct hb_buffer_t
if (cluster == cluster_first)
{
for (unsigned int i = end; start < i && infos[i - 1].cluster != cluster_first; i--)
{
scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS;
infos[i - 1].mask |= mask;
}
}
else /* cluster == cluster_last */
{
for (unsigned int i = start; i < end && infos[i].cluster != cluster_last; i++)
{
scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GLYPH_FLAGS;
infos[i].mask |= mask;
}
}
}
unsigned

View File

@ -64,17 +64,23 @@ template <unsigned int key_bits=16,
struct hb_cache_t
{
using item_t = typename std::conditional<thread_safe,
typename std::conditional<key_bits + value_bits - cache_bits <= 16,
hb_atomic_t<unsigned short>,
hb_atomic_t<unsigned int>>::type,
typename std::conditional<key_bits + value_bits - cache_bits <= 16,
unsigned short,
unsigned int>::type
typename std::conditional<key_bits + value_bits - cache_bits <= 8,
hb_atomic_t<unsigned char>,
typename std::conditional<key_bits + value_bits - cache_bits <= 16,
hb_atomic_t<unsigned short>,
hb_atomic_t<unsigned int>>::type>::type,
typename std::conditional<key_bits + value_bits - cache_bits <= 8,
unsigned char,
typename std::conditional<key_bits + value_bits - cache_bits <= 16,
unsigned short,
unsigned int>::type>::type
>::type;
static_assert ((key_bits >= cache_bits), "");
static_assert ((key_bits + value_bits <= cache_bits + 8 * sizeof (item_t)), "");
static constexpr unsigned MAX_VALUE = (1u << value_bits) - 1;
hb_cache_t () { clear (); }
void clear ()
@ -83,25 +89,32 @@ struct hb_cache_t
v = -1;
}
HB_HOT
bool get (unsigned int key, unsigned int *value) const
{
unsigned int k = key & ((1u<<cache_bits)-1);
unsigned int v = values[k];
if ((key_bits + value_bits - cache_bits == 8 * sizeof (item_t) && v == (unsigned int) -1) ||
if ((key_bits + value_bits - cache_bits == 8 * sizeof (item_t) && (item_t) v == (item_t) -1) ||
(v >> value_bits) != (key >> cache_bits))
return false;
*value = v & ((1u<<value_bits)-1);
return true;
}
bool set (unsigned int key, unsigned int value)
HB_HOT
void set (unsigned int key, unsigned int value)
{
if (unlikely ((key >> key_bits) || (value >> value_bits)))
return false; /* Overflows */
return; /* Overflows */
set_unchecked (key, value);
}
HB_HOT
void set_unchecked (unsigned int key, unsigned int value)
{
unsigned int k = key & ((1u<<cache_bits)-1);
unsigned int v = ((key>>cache_bits)<<value_bits) | value;
values[k] = v;
return true;
}
private:

View File

@ -72,12 +72,12 @@ struct cff2_cs_interp_env_t : cs_interp_env_t<ELEM, CFF2Subrs>
cff2_cs_interp_env_t (const hb_ubytes_t &str, ACC &acc, unsigned int fd,
const int *coords_=nullptr, unsigned int num_coords_=0)
: SUPER (str, acc.globalSubrs, acc.privateDicts[fd].localSubrs),
cached_scalars_vector (&acc.cached_scalars_vector)
region_count (0), cached_scalars_vector (&acc.cached_scalars_vector)
{
coords = coords_;
num_coords = num_coords_;
varStore = acc.varStore;
do_blend = num_coords && coords && varStore->size;
do_blend = num_coords && varStore->size;
set_ivs (acc.privateDicts[fd].ivs);
}

View File

@ -40,43 +40,6 @@
**/
/* hb_options_t */
hb_atomic_t<unsigned> _hb_options;
void
_hb_options_init ()
{
hb_options_union_t u;
u.i = 0;
u.opts.initialized = true;
const char *c = getenv ("HB_OPTIONS");
if (c)
{
while (*c)
{
const char *p = strchr (c, ':');
if (!p)
p = c + strlen (c);
#define OPTION(name, symbol) \
if (0 == strncmp (c, name, p - c) && strlen (name) == static_cast<size_t>(p - c)) do { u.opts.symbol = true; } while (0)
OPTION ("uniscribe-bug-compatible", uniscribe_bug_compatible);
#undef OPTION
c = *p ? p + 1 : p;
}
}
/* This is idempotent and threadsafe. */
_hb_options = u.i;
}
/* hb_tag_t */
/**
@ -545,8 +508,11 @@ hb_script_to_iso15924_tag (hb_script_t script)
* Fetches the #hb_direction_t of a script when it is
* set horizontally. All right-to-left scripts will return
* #HB_DIRECTION_RTL. All left-to-right scripts will return
* #HB_DIRECTION_LTR. Scripts that can be written either
* horizontally or vertically will return #HB_DIRECTION_INVALID.
* #HB_DIRECTION_LTR.
*
* Scripts that can be written either right-to-left or
* left-to-right will return #HB_DIRECTION_INVALID.
*
* Unknown scripts will return #HB_DIRECTION_LTR.
*
* Return value: The horizontal #hb_direction_t of @script
@ -628,6 +594,9 @@ hb_script_get_horizontal_direction (hb_script_t script)
/* Unicode-16.0 additions */
case HB_SCRIPT_GARAY:
/* Unicode-17.0 additions */
case HB_SCRIPT_SIDETIC:
return HB_DIRECTION_RTL;

View File

@ -38,7 +38,6 @@
#ifndef HB_EXPERIMENTAL_API
#define HB_NO_BEYOND_64K
#define HB_NO_CUBIC_GLYF
#define HB_NO_VAR_COMPOSITES
#endif
#ifdef HB_TINY
@ -91,7 +90,10 @@
#ifdef HB_MINI
#define HB_NO_AAT
#define HB_NO_LEGACY
#define HB_NO_BORING_EXPANSION
#define HB_NO_BEYOND_64K
#define HB_NO_CUBIC_GLYF
#define HB_NO_VAR_COMPOSITES
#define HB_NO_VAR_HVF
#endif
#ifdef __OPTIMIZE_SIZE__
@ -109,12 +111,6 @@
/* Closure of options. */
#ifdef HB_NO_BORING_EXPANSION
#define HB_NO_BEYOND_64K
#define HB_NO_CUBIC_GLYF
#define HB_NO_VAR_COMPOSITES
#endif
#ifdef HB_NO_VAR
#define HB_NO_VAR_COMPOSITES
#endif
@ -149,10 +145,6 @@
#define HB_NO_PAINT
#endif
#ifdef HB_NO_GETENV
#define HB_NO_UNISCRIBE_BUG_COMPATIBLE
#endif
#ifdef HB_NO_LEGACY
#define HB_NO_CMAP_LEGACY_SUBTABLES
#define HB_NO_FALLBACK_SHAPE

View File

@ -37,48 +37,6 @@
#endif
/*
* Global runtime options.
*/
struct hb_options_t
{
bool unused : 1; /* In-case sign bit is here. */
bool initialized : 1;
bool uniscribe_bug_compatible : 1;
};
union hb_options_union_t {
unsigned i;
hb_options_t opts;
};
static_assert ((sizeof (hb_atomic_t<unsigned>) >= sizeof (hb_options_union_t)), "");
HB_INTERNAL void
_hb_options_init ();
extern HB_INTERNAL hb_atomic_t<unsigned> _hb_options;
static inline hb_options_t
hb_options ()
{
#ifdef HB_NO_GETENV
return hb_options_t ();
#endif
/* Make a local copy, so we can access bitfield threadsafely. */
hb_options_union_t u;
u.i = _hb_options;
if (unlikely (!u.i))
{
_hb_options_init ();
u.i = _hb_options;
}
return u.opts;
}
/*
* Debug output (needs enabling at compile time.)
*/
@ -394,6 +352,10 @@ struct hb_no_trace_t {
#define HB_DEBUG_WASM (HB_DEBUG+0)
#endif
#ifndef HB_DEBUG_KBTS
#define HB_DEBUG_KBTS (HB_DEBUG+0)
#endif
/*
* With tracing.
*/
@ -484,7 +446,7 @@ struct hb_no_trace_t {
#ifndef HB_BUFFER_MESSAGE_MORE
#define HB_BUFFER_MESSAGE_MORE (HB_DEBUG+1)
#define HB_BUFFER_MESSAGE_MORE (HB_DEBUG+0)
#endif

View File

@ -287,7 +287,7 @@ typedef void (*hb_font_get_glyph_shape_func_t) (hb_font_t *font, void *font_data
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* Since: 7.0.0
* XDeprecated: REPLACEME: Use hb_font_draw_glyph_func_or_fail_t instead.
* Deprecated: 11.2.0: Use hb_font_draw_glyph_func_or_fail_t instead.
**/
typedef void (*hb_font_draw_glyph_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t glyph,
@ -308,7 +308,7 @@ typedef void (*hb_font_draw_glyph_func_t) (hb_font_t *font, void *font_data,
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* Since: 7.0.0
* XDeprecated: REPLACEME: Use hb_font_paint_glyph_or_fail_func_t instead.
* Deprecated: 11.2.0: Use hb_font_paint_glyph_or_fail_func_t instead.
*/
typedef hb_bool_t (*hb_font_paint_glyph_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t glyph,
@ -346,7 +346,7 @@ hb_font_funcs_set_glyph_shape_func (hb_font_funcs_t *ffuncs,
* Sets the implementation function for #hb_font_draw_glyph_func_t.
*
* Since: 7.0.0
* XDeprecated: REPLACEME: Use hb_font_funcs_set_draw_glyph_or_fail_func instead.
* Deprecated: 11.2.0: Use hb_font_funcs_set_draw_glyph_or_fail_func instead.
**/
HB_DEPRECATED_FOR (hb_font_funcs_set_draw_glyph_or_fail_func)
HB_EXTERN void
@ -364,7 +364,7 @@ hb_font_funcs_set_draw_glyph_func (hb_font_funcs_t *ffuncs,
* Sets the implementation function for #hb_font_paint_glyph_func_t.
*
* Since: 7.0.0
* XDeprecated: REPLACEME: Use hb_font_funcs_set_paint_glyph_or_fail_func() instead.
* Deprecated: 11.2.0: Use hb_font_funcs_set_paint_glyph_or_fail_func() instead.
*/
HB_DEPRECATED_FOR (hb_font_funcs_set_paint_glyph_or_fail_func)
HB_EXTERN void

View File

@ -63,14 +63,14 @@ hb_draw_quadratic_to_nil (hb_draw_funcs_t *dfuncs, void *draw_data,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
#define HB_ONE_THIRD 0.33333333f
#define HB_TWO_THIRD 0.66666666666666666666666667f
dfuncs->emit_cubic_to (draw_data, *st,
(st->current_x + 2.f * control_x) * HB_ONE_THIRD,
(st->current_y + 2.f * control_y) * HB_ONE_THIRD,
(to_x + 2.f * control_x) * HB_ONE_THIRD,
(to_y + 2.f * control_y) * HB_ONE_THIRD,
st->current_x + (control_x - st->current_x) * HB_TWO_THIRD,
st->current_y + (control_y - st->current_y) * HB_TWO_THIRD,
to_x + (control_x - to_x) * HB_TWO_THIRD,
to_y + (control_y - to_y) * HB_TWO_THIRD,
to_x, to_y);
#undef HB_ONE_THIRD
#undef HB_TWO_THIRD
}
static void
@ -467,7 +467,7 @@ hb_draw_extents_move_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_extents_t *extents = (hb_extents_t *) data;
hb_extents_t<> *extents = (hb_extents_t<> *) data;
extents->add_point (to_x, to_y);
}
@ -479,7 +479,7 @@ hb_draw_extents_line_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_extents_t *extents = (hb_extents_t *) data;
hb_extents_t<> *extents = (hb_extents_t<> *) data;
extents->add_point (to_x, to_y);
}
@ -492,7 +492,7 @@ hb_draw_extents_quadratic_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_extents_t *extents = (hb_extents_t *) data;
hb_extents_t<> *extents = (hb_extents_t<> *) data;
extents->add_point (control_x, control_y);
extents->add_point (to_x, to_y);
@ -507,7 +507,7 @@ hb_draw_extents_cubic_to (hb_draw_funcs_t *dfuncs HB_UNUSED,
float to_x, float to_y,
void *user_data HB_UNUSED)
{
hb_extents_t *extents = (hb_extents_t *) data;
hb_extents_t<> *extents = (hb_extents_t<> *) data;
extents->add_point (control1_x, control1_y);
extents->add_point (control2_x, control2_y);

View File

@ -169,8 +169,7 @@ _hb_face_builder_get_table_tags (const hb_face_t *face HB_UNUSED,
if (unlikely (start_offset >= population))
{
if (table_count)
*table_count = 0;
*table_count = 0;
return population;
}

View File

@ -84,8 +84,7 @@ hb_face_count (hb_blob_t *blob)
hb_sanitize_context_t c (blob);
const char *start = hb_blob_get_data (blob, nullptr);
auto *ot = reinterpret_cast<OT::OpenTypeFontFile *> (const_cast<char *> (start));
auto *ot = blob->as<OT::OpenTypeFontFile> ();
if (unlikely (!ot->sanitize (&c)))
return 0;
@ -329,7 +328,7 @@ hb_face_create_from_file_or_fail (const char *file_name,
return face;
}
static struct supported_face_loaders_t {
static const struct supported_face_loaders_t {
char name[16];
hb_face_t * (*from_file) (const char *font_file, unsigned face_index);
hb_face_t * (*from_blob) (hb_blob_t *blob, unsigned face_index);

View File

@ -246,7 +246,6 @@ hb_font_get_glyph_v_advance_nil (hb_font_t *font,
hb_codepoint_t glyph HB_UNUSED,
void *user_data HB_UNUSED)
{
/* TODO use font_extents.ascender+descender */
return -font->y_scale;
}
@ -352,6 +351,10 @@ hb_font_get_glyph_h_origin_default (hb_font_t *font,
hb_position_t *y,
void *user_data HB_UNUSED)
{
if (font->has_glyph_h_origins_func_set ())
{
return font->get_glyph_h_origins (1, &glyph, 0, x, 0, y, 0, false);
}
hb_bool_t ret = font->parent->get_glyph_h_origin (glyph, x, y);
if (ret)
font->parent_scale_position (x, y);
@ -366,7 +369,6 @@ hb_font_get_glyph_v_origin_nil (hb_font_t *font HB_UNUSED,
hb_position_t *y,
void *user_data HB_UNUSED)
{
*x = *y = 0;
return false;
}
@ -378,12 +380,100 @@ hb_font_get_glyph_v_origin_default (hb_font_t *font,
hb_position_t *y,
void *user_data HB_UNUSED)
{
if (font->has_glyph_v_origins_func_set ())
{
return font->get_glyph_v_origins (1, &glyph, 0, x, 0, y, 0, false);
}
hb_bool_t ret = font->parent->get_glyph_v_origin (glyph, x, y);
if (ret)
font->parent_scale_position (x, y);
return ret;
}
#define hb_font_get_glyph_h_origins_nil hb_font_get_glyph_h_origins_default
static hb_bool_t
hb_font_get_glyph_h_origins_default (hb_font_t *font HB_UNUSED,
void *font_data HB_UNUSED,
unsigned int count,
const hb_codepoint_t *first_glyph HB_UNUSED,
unsigned glyph_stride HB_UNUSED,
hb_position_t *first_x,
unsigned x_stride,
hb_position_t *first_y,
unsigned y_stride,
void *user_data HB_UNUSED)
{
if (font->has_glyph_h_origin_func_set ())
{
for (unsigned int i = 0; i < count; i++)
{
font->get_glyph_h_origin (*first_glyph, first_x, first_y, false);
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
return true;
}
hb_bool_t ret = font->parent->get_glyph_h_origins (count,
first_glyph, glyph_stride,
first_x, x_stride,
first_y, y_stride);
if (ret)
{
for (unsigned i = 0; i < count; i++)
{
font->parent_scale_position (first_x, first_y);
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
}
return ret;
}
#define hb_font_get_glyph_v_origins_nil hb_font_get_glyph_v_origins_default
static hb_bool_t
hb_font_get_glyph_v_origins_default (hb_font_t *font HB_UNUSED,
void *font_data HB_UNUSED,
unsigned int count,
const hb_codepoint_t *first_glyph HB_UNUSED,
unsigned glyph_stride HB_UNUSED,
hb_position_t *first_x,
unsigned x_stride,
hb_position_t *first_y,
unsigned y_stride,
void *user_data HB_UNUSED)
{
if (font->has_glyph_v_origin_func_set ())
{
for (unsigned int i = 0; i < count; i++)
{
font->get_glyph_v_origin (*first_glyph, first_x, first_y, false);
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
return true;
}
hb_bool_t ret = font->parent->get_glyph_v_origins (count,
first_glyph, glyph_stride,
first_x, x_stride,
first_y, y_stride);
if (ret)
{
for (unsigned i = 0; i < count; i++)
{
font->parent_scale_position (first_x, first_y);
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
}
return ret;
}
static hb_position_t
hb_font_get_glyph_h_kerning_nil (hb_font_t *font HB_UNUSED,
void *font_data HB_UNUSED,
@ -1256,6 +1346,77 @@ hb_font_get_glyph_v_origin (hb_font_t *font,
return font->get_glyph_v_origin (glyph, x, y);
}
/**
* hb_font_get_glyph_h_origins:
* @font: #hb_font_t to work upon
* @count: The number of glyph IDs in the sequence queried
* @first_glyph: The first glyph ID to query
* @glyph_stride: The stride between successive glyph IDs
* @first_x: (out): The first X coordinate of the origin retrieved
* @x_stride: The stride between successive X coordinates
* @first_y: (out): The first Y coordinate of the origin retrieved
* @y_stride: The stride between successive Y coordinates
*
* Fetches the (X,Y) coordinates of the origin for requested glyph IDs
* in the specified font, for horizontal text segments.
*
* Return value: `true` if data found, `false` otherwise
*
* Since: 11.3.0
**/
hb_bool_t
hb_font_get_glyph_h_origins (hb_font_t *font,
unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned int glyph_stride,
hb_position_t *first_x,
unsigned int x_stride,
hb_position_t *first_y,
unsigned int y_stride)
{
return font->get_glyph_h_origins (count,
first_glyph, glyph_stride,
first_x, x_stride,
first_y, y_stride);
}
/**
 * hb_font_get_glyph_v_origins:
 * @font: #hb_font_t to work upon
 * @count: The number of glyph IDs in the sequence queried
 * @first_glyph: The first glyph ID to query
 * @glyph_stride: The stride between successive glyph IDs
 * @first_x: (out): The first X coordinate of the origin retrieved
 * @x_stride: The stride between successive X coordinates
 * @first_y: (out): The first Y coordinate of the origin retrieved
 * @y_stride: The stride between successive Y coordinates
 *
 * Fetches the (X,Y) coordinates of the origin for requested glyph IDs
 * in the specified font, for vertical text segments.
 *
 * Return value: `true` if data found, `false` otherwise
 *
 * Since: 11.3.0
 **/
hb_bool_t
hb_font_get_glyph_v_origins (hb_font_t *font,
unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned int glyph_stride,
hb_position_t *first_x,
unsigned int x_stride,
hb_position_t *first_y,
unsigned int y_stride)
{
/* Thin public wrapper: delegates to hb_font_t::get_glyph_v_origins,
 * the batched (strided-array) counterpart of hb_font_get_glyph_v_origin. */
return font->get_glyph_v_origins (count,
first_glyph, glyph_stride,
first_x, x_stride,
first_y, y_stride);
}
/**
* hb_font_get_glyph_h_kerning:
* @font: #hb_font_t to work upon
@ -1443,7 +1604,7 @@ hb_font_get_glyph_shape (hb_font_t *font,
*
* Return value: `true` if glyph was drawn, `false` otherwise
*
* XSince: REPLACEME
* Since: 11.2.0
**/
hb_bool_t
hb_font_draw_glyph_or_fail (hb_font_t *font,
@ -1480,7 +1641,7 @@ hb_font_draw_glyph_or_fail (hb_font_t *font,
*
* Return value: `true` if glyph was painted, `false` otherwise
*
* XSince: REPLACEME
* Since: 11.2.0
*/
hb_bool_t
hb_font_paint_glyph_or_fail (hb_font_t *font,
@ -1883,6 +2044,7 @@ DEFINE_NULL_INSTANCE (hb_font_t) =
1000, /* x_scale */
1000, /* y_scale */
false, /* is_synthetic */
0.f, /* x_embolden */
0.f, /* y_embolden */
true, /* embolden_in_place */
@ -1900,6 +2062,7 @@ DEFINE_NULL_INSTANCE (hb_font_t) =
0, /* ptem */
HB_FONT_NO_VAR_NAMED_INSTANCE, /* instance_index */
false, /* has_nonzero_coords */
0, /* num_coords */
nullptr, /* coords */
nullptr, /* design_coords */
@ -1960,8 +2123,14 @@ hb_font_create (hb_face_t *face)
hb_font_set_funcs_using (font, nullptr);
#ifndef HB_NO_VAR
if (face && face->index >> 16)
hb_font_set_var_named_instance (font, (face->index >> 16) - 1);
// Initialize variations.
if (likely (face))
{
if (face->index >> 16)
hb_font_set_var_named_instance (font, (face->index >> 16) - 1);
else
hb_font_set_variations (font, nullptr, 0);
}
#endif
return font;
@ -1979,6 +2148,7 @@ _hb_font_adopt_var_coords (hb_font_t *font,
font->coords = coords;
font->design_coords = design_coords;
font->num_coords = coords_length;
font->has_nonzero_coords = hb_any (hb_array (coords, coords_length));
font->changed ();
font->serial_coords = font->serial;
@ -2393,7 +2563,7 @@ hb_font_set_funcs_data (hb_font_t *font,
font->changed ();
}
static struct supported_font_funcs_t {
static const struct supported_font_funcs_t {
char name[16];
void (*func) (hb_font_t *);
} supported_font_funcs[] =
@ -2450,6 +2620,9 @@ hb_bool_t
hb_font_set_funcs_using (hb_font_t *font,
const char *name)
{
if (unlikely (hb_object_is_immutable (font)))
return false;
bool retry = false;
if (!name || !*name)
@ -2704,12 +2877,12 @@ hb_font_get_ptem (hb_font_t *font)
*
* Return value: `true` if the font is synthetic, `false` otherwise.
*
* XSince: REPLACEME
* Since: 11.2.0
*/
hb_bool_t
hb_font_is_synthetic (hb_font_t *font)
{
return font->is_synthetic ();
return font->is_synthetic;
}
/**
@ -2858,12 +3031,6 @@ hb_font_set_variations (hb_font_t *font,
if (hb_object_is_immutable (font))
return;
if (!variations_length && font->instance_index == HB_FONT_NO_VAR_NAMED_INSTANCE)
{
hb_font_set_var_coords_normalized (font, nullptr, 0);
return;
}
const OT::fvar &fvar = *font->face->table.fvar;
auto axes = fvar.get_axes ();
const unsigned coords_length = axes.length;
@ -2970,7 +3137,6 @@ hb_font_set_variation (hb_font_t *font,
hb_ot_var_normalize_coords (font->face, coords_length, design_coords, normalized);
_hb_font_adopt_var_coords (font, normalized, design_coords, coords_length);
}
/**
@ -2991,11 +3157,16 @@ hb_font_set_variation (hb_font_t *font,
void
hb_font_set_var_coords_design (hb_font_t *font,
const float *coords,
unsigned int coords_length)
unsigned int input_coords_length)
{
if (hb_object_is_immutable (font))
return;
const OT::fvar &fvar = *font->face->table.fvar;
auto axes = fvar.get_axes ();
const unsigned coords_length = axes.length;
input_coords_length = hb_min (input_coords_length, coords_length);
int *normalized = coords_length ? (int *) hb_calloc (coords_length, sizeof (int)) : nullptr;
float *design_coords = coords_length ? (float *) hb_calloc (coords_length, sizeof (float)) : nullptr;
@ -3006,8 +3177,11 @@ hb_font_set_var_coords_design (hb_font_t *font,
return;
}
if (coords_length)
hb_memcpy (design_coords, coords, coords_length * sizeof (font->design_coords[0]));
if (input_coords_length)
hb_memcpy (design_coords, coords, input_coords_length * sizeof (font->design_coords[0]));
// Fill in the rest with default values
for (unsigned int i = input_coords_length; i < coords_length; i++)
design_coords[i] = axes[i].get_default ();
hb_ot_var_normalize_coords (font->face, coords_length, coords, normalized);
_hb_font_adopt_var_coords (font, normalized, design_coords, coords_length);
@ -3072,34 +3246,31 @@ hb_font_get_var_named_instance (hb_font_t *font)
void
hb_font_set_var_coords_normalized (hb_font_t *font,
const int *coords, /* 2.14 normalized */
unsigned int coords_length)
unsigned int input_coords_length)
{
if (hb_object_is_immutable (font))
return;
const OT::fvar &fvar = *font->face->table.fvar;
auto axes = fvar.get_axes ();
unsigned coords_length = axes.length;
input_coords_length = hb_min (input_coords_length, coords_length);
int *copy = coords_length ? (int *) hb_calloc (coords_length, sizeof (coords[0])) : nullptr;
int *unmapped = coords_length ? (int *) hb_calloc (coords_length, sizeof (coords[0])) : nullptr;
float *design_coords = coords_length ? (float *) hb_calloc (coords_length, sizeof (design_coords[0])) : nullptr;
if (unlikely (coords_length && !(copy && unmapped && design_coords)))
if (unlikely (coords_length && !(copy && design_coords)))
{
hb_free (copy);
hb_free (unmapped);
hb_free (design_coords);
return;
}
if (coords_length)
{
hb_memcpy (copy, coords, coords_length * sizeof (coords[0]));
hb_memcpy (unmapped, coords, coords_length * sizeof (coords[0]));
}
if (input_coords_length)
hb_memcpy (copy, coords, input_coords_length * sizeof (coords[0]));
/* Best effort design coords simulation */
font->face->table.avar->unmap_coords (unmapped, coords_length);
for (unsigned int i = 0; i < coords_length; ++i)
design_coords[i] = font->face->table.fvar->unnormalize_axis_value (i, unmapped[i]);
hb_free (unmapped);
design_coords[i] = NAN;
_hb_font_adopt_var_coords (font, copy, design_coords, coords_length);
}
@ -3112,8 +3283,8 @@ hb_font_set_var_coords_normalized (hb_font_t *font,
* Fetches the list of normalized variation coordinates currently
* set on a font.
*
* Note that this returned array may only contain values for some
* (or none) of the axes; omitted axes effectively have zero values.
* <note>Note that if no variation coordinates are set, this function may
* return %NULL.</note>
*
* Return value is valid as long as variation coordinates of the font
* are not modified.
@ -3140,9 +3311,12 @@ hb_font_get_var_coords_normalized (hb_font_t *font,
* Fetches the list of variation coordinates (in design-space units) currently
* set on a font.
*
* Note that this returned array may only contain values for some
* (or none) of the axes; omitted axes effectively have their default
* values.
* <note>Note that if no variation coordinates are set, this function may
* return %NULL.</note>
*
* <note>If variations have been set on the font using normalized coordinates
* (i.e. via hb_font_set_var_coords_normalized()), the design coordinates will
* have NaN (Not a Number) values.</note>
*
* Return value is valid as long as variation coordinates of the font
* are not modified.

View File

@ -97,7 +97,7 @@ hb_font_funcs_is_immutable (hb_font_funcs_t *ffuncs);
* @descender: The depth of typographic descenders.
* @line_gap: The suggested line-spacing gap.
*
* Font-wide extent values, measured in font units.
* Font-wide extent values, measured in scaled units.
*
* Note that typically @ascender is positive and @descender
* negative, in coordinate systems that grow up.
@ -332,7 +332,7 @@ typedef hb_font_get_glyph_advances_func_t hb_font_get_glyph_v_advances_func_t;
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in font units) of the
* This method should retrieve the (X,Y) coordinates (in scaled units) of the
* origin for a glyph. Each coordinate must be returned in an #hb_position_t
* output parameter.
*
@ -349,7 +349,7 @@ typedef hb_bool_t (*hb_font_get_glyph_origin_func_t) (hb_font_t *font, void *fon
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in font units) of the
* This method should retrieve the (X,Y) coordinates (in scaled units) of the
* origin for a glyph, for horizontal-direction text segments. Each
* coordinate must be returned in an #hb_position_t output parameter.
*
@ -361,13 +361,72 @@ typedef hb_font_get_glyph_origin_func_t hb_font_get_glyph_h_origin_func_t;
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in font units) of the
* This method should retrieve the (X,Y) coordinates (in scaled units) of the
* origin for a glyph, for vertical-direction text segments. Each coordinate
* must be returned in an #hb_position_t output parameter.
*
**/
typedef hb_font_get_glyph_origin_func_t hb_font_get_glyph_v_origin_func_t;
/**
* hb_font_get_glyph_origins_func_t:
* @font: #hb_font_t to work upon
* @font_data: @font user data pointer
* @first_glyph: The first glyph ID to query
* @count: number of glyphs to query
* @glyph_stride: The stride between successive glyph IDs
* @first_x: (out): The first origin X coordinate retrieved
* @x_stride: The stride between successive origin X coordinates
* @first_y: (out): The first origin Y coordinate retrieved
* @y_stride: The stride between successive origin Y coordinates
* @user_data: User data pointer passed by the caller
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in scaled units) of the
* origin for each requested glyph. Each coordinate value must be returned in
* an #hb_position_t in the two output parameters.
*
* Return value: `true` if data found, `false` otherwise
*
* Since: 11.3.0
**/
typedef hb_bool_t (*hb_font_get_glyph_origins_func_t) (hb_font_t *font, void *font_data,
unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned glyph_stride,
hb_position_t *first_x,
unsigned x_stride,
hb_position_t *first_y,
unsigned y_stride,
void *user_data);
/**
* hb_font_get_glyph_h_origins_func_t:
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in scaled units) of the
* origin for the requested glyphs, for horizontal-direction text segments. Each
* coordinate must be returned in the x/y #hb_position_t output parameters.
*
* Since: 11.3.0
**/
typedef hb_font_get_glyph_origins_func_t hb_font_get_glyph_h_origins_func_t;
/**
* hb_font_get_glyph_v_origins_func_t:
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in scaled units) of the
* origin for the requested glyphs, for vertical-direction text segments. Each
* coordinate must be returned in the x/y #hb_position_t output parameters.
*
* Since: 11.3.0
**/
typedef hb_font_get_glyph_origins_func_t hb_font_get_glyph_v_origins_func_t;
/**
* hb_font_get_glyph_kerning_func_t:
* @font: #hb_font_t to work upon
@ -428,7 +487,7 @@ typedef hb_bool_t (*hb_font_get_glyph_extents_func_t) (hb_font_t *font, void *fo
*
* A virtual method for the #hb_font_funcs_t of an #hb_font_t object.
*
* This method should retrieve the (X,Y) coordinates (in font units) for a
* This method should retrieve the (X,Y) coordinates (in scaled units) for a
* specified contour point in a glyph. Each coordinate must be returned as
* an #hb_position_t output parameter.
*
@ -498,7 +557,7 @@ typedef hb_bool_t (*hb_font_get_glyph_from_name_func_t) (hb_font_t *font, void *
*
* Return value: `true` if glyph was drawn, `false` otherwise
*
* XSince: REPLACEME
* Since: 11.2.0
**/
typedef hb_bool_t (*hb_font_draw_glyph_or_fail_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t glyph,
@ -520,7 +579,7 @@ typedef hb_bool_t (*hb_font_draw_glyph_or_fail_func_t) (hb_font_t *font, void *f
*
* Return value: `true` if glyph was painted, `false` otherwise
*
* XSince: REPLACEME
* Since: 11.2.0
*/
typedef hb_bool_t (*hb_font_paint_glyph_or_fail_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t glyph,
@ -707,6 +766,38 @@ hb_font_funcs_set_glyph_v_origin_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_v_origin_func_t func,
void *user_data, hb_destroy_func_t destroy);
/**
* hb_font_funcs_set_glyph_h_origins_func:
* @ffuncs: A font-function structure
* @func: (closure user_data) (destroy destroy) (scope notified): The callback function to assign
* @user_data: Data to pass to @func
* @destroy: (nullable): The function to call when @user_data is not needed anymore
*
* Sets the implementation function for #hb_font_get_glyph_h_origins_func_t.
*
* Since: 11.3.0
**/
HB_EXTERN void
hb_font_funcs_set_glyph_h_origins_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_h_origins_func_t func,
void *user_data, hb_destroy_func_t destroy);
/**
* hb_font_funcs_set_glyph_v_origins_func:
* @ffuncs: A font-function structure
* @func: (closure user_data) (destroy destroy) (scope notified): The callback function to assign
* @user_data: Data to pass to @func
* @destroy: (nullable): The function to call when @user_data is not needed anymore
*
* Sets the implementation function for #hb_font_get_glyph_v_origins_func_t.
*
* Since: 11.3.0
**/
HB_EXTERN void
hb_font_funcs_set_glyph_v_origins_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_v_origins_func_t func,
void *user_data, hb_destroy_func_t destroy);
/**
* hb_font_funcs_set_glyph_h_kerning_func:
* @ffuncs: A font-function structure
@ -796,7 +887,7 @@ hb_font_funcs_set_glyph_from_name_func (hb_font_funcs_t *ffuncs,
*
* Sets the implementation function for #hb_font_draw_glyph_or_fail_func_t.
*
* XSince: REPLACEME
* Since: 11.2.0
**/
HB_EXTERN void
hb_font_funcs_set_draw_glyph_or_fail_func (hb_font_funcs_t *ffuncs,
@ -812,7 +903,7 @@ hb_font_funcs_set_draw_glyph_or_fail_func (hb_font_funcs_t *ffuncs,
*
* Sets the implementation function for #hb_font_paint_glyph_or_fail_func_t.
*
* XSince: REPLACEME
* Since: 11.2.0
*/
HB_EXTERN void
hb_font_funcs_set_paint_glyph_or_fail_func (hb_font_funcs_t *ffuncs,
@ -876,6 +967,26 @@ hb_font_get_glyph_v_origin (hb_font_t *font,
hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y);
HB_EXTERN hb_bool_t
hb_font_get_glyph_h_origins (hb_font_t *font,
unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned glyph_stride,
hb_position_t *first_x,
unsigned x_stride,
hb_position_t *first_y,
unsigned y_stride);
HB_EXTERN hb_bool_t
hb_font_get_glyph_v_origins (hb_font_t *font,
unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned glyph_stride,
hb_position_t *first_x,
unsigned x_stride,
hb_position_t *first_y,
unsigned y_stride);
HB_EXTERN hb_position_t
hb_font_get_glyph_h_kerning (hb_font_t *font,
hb_codepoint_t left_glyph, hb_codepoint_t right_glyph);

View File

@ -55,6 +55,8 @@
HB_FONT_FUNC_IMPLEMENT (get_,glyph_v_advances) \
HB_FONT_FUNC_IMPLEMENT (get_,glyph_h_origin) \
HB_FONT_FUNC_IMPLEMENT (get_,glyph_v_origin) \
HB_FONT_FUNC_IMPLEMENT (get_,glyph_h_origins) \
HB_FONT_FUNC_IMPLEMENT (get_,glyph_v_origins) \
HB_FONT_FUNC_IMPLEMENT (get_,glyph_h_kerning) \
HB_IF_NOT_DEPRECATED (HB_FONT_FUNC_IMPLEMENT (get_,glyph_v_kerning)) \
HB_FONT_FUNC_IMPLEMENT (get_,glyph_extents) \
@ -118,6 +120,8 @@ struct hb_font_t
int32_t x_scale;
int32_t y_scale;
bool is_synthetic;
float x_embolden;
float y_embolden;
bool embolden_in_place;
@ -139,6 +143,7 @@ struct hb_font_t
/* Font variation coordinates. */
unsigned int instance_index;
bool has_nonzero_coords;
unsigned int num_coords;
int *coords;
float *design_coords;
@ -430,21 +435,127 @@ struct hb_font_t
}
hb_bool_t get_glyph_h_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
hb_position_t *x, hb_position_t *y,
bool synthetic = true)
{
*x = *y = 0;
return klass->get.f.glyph_h_origin (this, user_data,
glyph, x, y,
!klass->user_data ? nullptr : klass->user_data->glyph_h_origin);
bool ret = klass->get.f.glyph_h_origin (this, user_data,
glyph, x, y,
!klass->user_data ? nullptr : klass->user_data->glyph_h_origin);
if (synthetic && ret)
{
/* Slant is ignored as it does not affect glyph origin */
/* Embolden */
if (!embolden_in_place)
{
*x += x_scale < 0 ? -x_strength : x_strength;
*y += y_scale < 0 ? -y_strength : y_strength;
}
}
return ret;
}
hb_bool_t get_glyph_v_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
hb_position_t *x, hb_position_t *y,
bool synthetic = true)
{
*x = *y = 0;
return klass->get.f.glyph_v_origin (this, user_data,
glyph, x, y,
!klass->user_data ? nullptr : klass->user_data->glyph_v_origin);
bool ret = klass->get.f.glyph_v_origin (this, user_data,
glyph, x, y,
!klass->user_data ? nullptr : klass->user_data->glyph_v_origin);
if (synthetic && ret)
{
/* Slant is ignored as it does not affect glyph origin */
/* Embolden */
if (!embolden_in_place)
{
*x += x_scale < 0 ? -x_strength : x_strength;
*y += y_scale < 0 ? -y_strength : y_strength;
}
}
return ret;
}
/* Batched variant of get_glyph_h_origin: fetches horizontal-text origins
 * for @count glyphs read from a strided array, writing X/Y into strided
 * output arrays.  When @synthetic is true and the callback succeeds, each
 * origin is shifted by the synthetic-bold strength (sign-matched to the
 * scale) unless emboldening is done in place.  Returns the callback's
 * result. */
hb_bool_t get_glyph_h_origins (unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned int glyph_stride,
hb_position_t *first_x,
unsigned int x_stride,
hb_position_t *first_y,
unsigned int y_stride,
bool synthetic = true)
{
bool ret = klass->get.f.glyph_h_origins (this, user_data,
count,
first_glyph, glyph_stride,
first_x, x_stride, first_y, y_stride,
!klass->user_data ? nullptr : klass->user_data->glyph_h_origins);
if (synthetic && ret)
{
/* Hoisted loop-invariant shifts; negative scales flip the sign. */
hb_position_t x_shift = x_scale < 0 ? -x_strength : x_strength;
hb_position_t y_shift = y_scale < 0 ? -y_strength : y_strength;
for (unsigned i = 0; i < count; i++)
{
/* Slant is ignored as it does not affect glyph origin */
/* Embolden */
if (!embolden_in_place)
{
*first_x += x_shift;
*first_y += y_shift;
}
/* Advance to the next strided output slot. */
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
}
return ret;
}
/* Batched variant of get_glyph_v_origin; see get_glyph_h_origins above for
 * the strided-array contract and synthetic-bold shifting.
 * NOTE(review): this variant additionally gates the shift on is_synthetic,
 * while the h variant checks only the @synthetic argument — presumably
 * equivalent since the strengths are zero for non-synthetic fonts, but
 * confirm against upstream. */
hb_bool_t get_glyph_v_origins (unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned int glyph_stride,
hb_position_t *first_x,
unsigned int x_stride,
hb_position_t *first_y,
unsigned int y_stride,
bool synthetic = true)
{
bool ret = klass->get.f.glyph_v_origins (this, user_data,
count,
first_glyph, glyph_stride,
first_x, x_stride, first_y, y_stride,
!klass->user_data ? nullptr : klass->user_data->glyph_v_origins);
if (synthetic && is_synthetic && ret)
{
/* Hoisted loop-invariant shifts; negative scales flip the sign. */
hb_position_t x_shift = x_scale < 0 ? -x_strength : x_strength;
hb_position_t y_shift = y_scale < 0 ? -y_strength : y_strength;
for (unsigned i = 0; i < count; i++)
{
/* Slant is ignored as it does not affect glyph origin */
/* Embolden */
if (!embolden_in_place)
{
*first_x += x_shift;
*first_y += y_shift;
}
/* Advance to the next strided output slot. */
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
}
return ret;
}
hb_position_t get_glyph_h_kerning (hb_codepoint_t left_glyph,
@ -486,7 +597,7 @@ struct hb_font_t
extents,
!klass->user_data ? nullptr : klass->user_data->glyph_extents);
}
if (!is_synthetic () &&
if (!is_synthetic &&
klass->get.f.glyph_extents (this, user_data,
glyph,
extents,
@ -496,6 +607,7 @@ struct hb_font_t
/* Try getting extents from paint(), then draw(), *then* get_extents()
* and apply synthetic settings in the last case. */
#ifndef HB_NO_PAINT
hb_paint_extents_context_t paint_extents;
if (paint_glyph_or_fail (glyph,
hb_paint_extents_get_funcs (), &paint_extents,
@ -504,14 +616,17 @@ struct hb_font_t
*extents = paint_extents.get_extents ().to_glyph_extents ();
return true;
}
#endif
hb_extents_t draw_extents;
#ifndef HB_NO_DRAW
hb_extents_t<> draw_extents;
if (draw_glyph_or_fail (glyph,
hb_draw_extents_get_funcs (), &draw_extents))
{
*extents = draw_extents.to_glyph_extents ();
return true;
}
#endif
bool ret = klass->get.f.glyph_extents (this, user_data,
glyph,
@ -575,6 +690,7 @@ struct hb_font_t
hb_draw_funcs_t *draw_funcs, void *draw_data,
bool synthetic = true)
{
#ifndef HB_NO_DRAW
#ifndef HB_NO_OUTLINE
bool embolden = x_strength || y_strength;
bool slanted = slant_xy;
@ -603,7 +719,13 @@ struct hb_font_t
// Slant before embolden; produces nicer results.
if (slanted)
{
hb_position_t xo = 0, yo = 0;
get_glyph_h_origin (glyph, &xo, &yo, false);
outline.translate (-xo, -yo);
outline.slant (slant_xy);
outline.translate (xo, yo);
}
if (embolden)
{
@ -618,6 +740,8 @@ struct hb_font_t
return true;
#endif
#endif
return false;
}
bool paint_glyph_or_fail (hb_codepoint_t glyph,
@ -626,6 +750,7 @@ struct hb_font_t
hb_color_t foreground,
bool synthetic = true)
{
#ifndef HB_NO_PAINT
/* Slant */
if (synthetic && slant_xy)
hb_paint_push_transform (paint_funcs, paint_data,
@ -643,6 +768,8 @@ struct hb_font_t
hb_paint_pop_transform (paint_funcs, paint_data);
return ret;
#endif
return false;
}
/* A bit higher-level, and with fallback */
@ -704,6 +831,28 @@ struct hb_font_t
get_glyph_v_advances (count, first_glyph, glyph_stride, first_advance, advance_stride);
}
/* Adds (dx,dy) scaled by @mult to (*x,*y); @mult must be exactly -1 or +1,
 * selecting between adding and subtracting the offset. */
void apply_offset (hb_position_t *x, hb_position_t *y,
hb_position_t dx, hb_position_t dy,
signed mult)
{
assert (mult == -1 || mult == +1);
*x += dx * mult;
*y += dy * mult;
}
/* Adds (dx,dy) to (*x,*y). */
void add_offset (hb_position_t *x, hb_position_t *y,
hb_position_t dx, hb_position_t dy)
{
*x += dx;
*y += dy;
}
/* Subtracts (dx,dy) from (*x,*y). */
void subtract_offset (hb_position_t *x, hb_position_t *y,
hb_position_t dx, hb_position_t dy)
{
*x -= dx;
*y -= dy;
}
void guess_v_origin_minus_h_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
{
@ -714,6 +863,141 @@ struct hb_font_t
*y = extents.ascender;
}
/* Applies (@mult == +1) or removes (@mult == -1) horizontal glyph origins
 * on the buffer's glyph x/y offsets, processing glyphs in stack-sized
 * batches of 32.
 *
 * Fallback chain per batch: if the batched h-origins callback fails, try
 * the batched v-origins callback and convert each v-origin to an h-origin
 * (x -= h_advance/2, y -= ascender, using the font's horizontal ascender,
 * fetched lazily once); if that also fails, the origins are taken as
 * (0,0), leaving the offsets untouched. */
void apply_glyph_h_origins_with_fallback (hb_buffer_t *buf, int mult)
{
bool has_ascender = false;
hb_position_t ascender = 0;
/* Fixed-size scratch batch; bounds the stack usage per iteration. */
struct { hb_position_t x, y; } origins[32];
unsigned int offset = 0;
unsigned int count = buf->len;
while (offset < count)
{
unsigned n = hb_min (count - offset, ARRAY_LENGTH (origins));
if (!get_glyph_h_origins (n,
&buf->info[offset].codepoint, sizeof (hb_glyph_info_t),
&origins[0].x, sizeof (origins[0]),
&origins[0].y, sizeof (origins[0])))
{
if (get_glyph_v_origins (n,
&buf->info[offset].codepoint, sizeof (hb_glyph_info_t),
&origins[0].x, sizeof (origins[0]),
&origins[0].y, sizeof (origins[0])))
{
/* Fetch the horizontal ascender once, on first use. */
if (!has_ascender)
{
hb_font_extents_t extents;
get_h_extents_with_fallback (&extents);
ascender = extents.ascender;
has_ascender = true;
}
/* We got the v_origins, adjust them to h_origins. */
for (unsigned j = 0; j < n; j++)
{
hb_codepoint_t glyph = buf->info[offset + j].codepoint;
origins[j].x -= get_glyph_h_advance (glyph) / 2;
origins[j].y -= ascender;
}
}
else
{
/* Neither callback succeeded: treat origins as (0,0). */
for (unsigned j = 0; j < n; j++)
{
origins[j].x = 0;
origins[j].y = 0;
}
}
}
assert (mult == -1 || mult == +1);
if (mult == +1)
for (unsigned j = 0; j < n; j++)
{
hb_glyph_position_t *pos = &buf->pos[offset + j];
add_offset (&pos->x_offset, &pos->y_offset,
origins[j].x, origins[j].y);
}
else /* mult == -1 */
for (unsigned j = 0; j < n; j++)
{
hb_glyph_position_t *pos = &buf->pos[offset + j];
subtract_offset (&pos->x_offset, &pos->y_offset,
origins[j].x, origins[j].y);
}
offset += n;
}
}
/* Applies (@mult == +1) or removes (@mult == -1) vertical glyph origins
 * on the buffer's glyph x/y offsets, processing glyphs in stack-sized
 * batches of 32.
 *
 * Fallback chain per batch (mirror of the h variant): if the batched
 * v-origins callback fails, try the batched h-origins callback and
 * convert each h-origin to a v-origin (x += h_advance/2, y += ascender,
 * using the font's horizontal ascender, fetched lazily once); if that
 * also fails, the origins are taken as (0,0), leaving the offsets
 * untouched. */
void apply_glyph_v_origins_with_fallback (hb_buffer_t *buf, int mult)
{
bool has_ascender = false;
hb_position_t ascender = 0;
/* Fixed-size scratch batch; bounds the stack usage per iteration. */
struct { hb_position_t x, y; } origins[32];
unsigned int offset = 0;
unsigned int count = buf->len;
while (offset < count)
{
unsigned n = hb_min (count - offset, ARRAY_LENGTH (origins));
if (!get_glyph_v_origins (n,
&buf->info[offset].codepoint, sizeof (hb_glyph_info_t),
&origins[0].x, sizeof (origins[0]),
&origins[0].y, sizeof (origins[0])))
{
if (get_glyph_h_origins (n,
&buf->info[offset].codepoint, sizeof (hb_glyph_info_t),
&origins[0].x, sizeof (origins[0]),
&origins[0].y, sizeof (origins[0])))
{
/* Fetch the horizontal ascender once, on first use. */
if (!has_ascender)
{
hb_font_extents_t extents;
get_h_extents_with_fallback (&extents);
ascender = extents.ascender;
has_ascender = true;
}
/* We got the h_origins, adjust them to v_origins. */
for (unsigned j = 0; j < n; j++)
{
hb_codepoint_t glyph = buf->info[offset + j].codepoint;
origins[j].x += get_glyph_h_advance (glyph) / 2;
origins[j].y += ascender;
}
}
else
{
/* Neither callback succeeded: treat origins as (0,0). */
for (unsigned j = 0; j < n; j++)
{
origins[j].x = 0;
origins[j].y = 0;
}
}
}
assert (mult == -1 || mult == +1);
if (mult == +1)
for (unsigned j = 0; j < n; j++)
{
hb_glyph_position_t *pos = &buf->pos[offset + j];
add_offset (&pos->x_offset, &pos->y_offset,
origins[j].x, origins[j].y);
}
else /* mult == -1 */
for (unsigned j = 0; j < n; j++)
{
hb_glyph_position_t *pos = &buf->pos[offset + j];
subtract_offset (&pos->x_offset, &pos->y_offset,
origins[j].x, origins[j].y);
}
offset += n;
}
}
void get_glyph_h_origin_with_fallback (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
{
@ -722,7 +1006,7 @@ struct hb_font_t
{
hb_position_t dx, dy;
guess_v_origin_minus_h_origin (glyph, &dx, &dy);
*x -= dx; *y -= dy;
subtract_offset (x, y, dx, dy);
}
}
void get_glyph_v_origin_with_fallback (hb_codepoint_t glyph,
@ -733,7 +1017,7 @@ struct hb_font_t
{
hb_position_t dx, dy;
guess_v_origin_minus_h_origin (glyph, &dx, &dy);
*x += dx; *y += dy;
add_offset (x, y, dx, dy);
}
}
@ -747,68 +1031,38 @@ struct hb_font_t
get_glyph_v_origin_with_fallback (glyph, x, y);
}
void add_glyph_h_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
void add_glyph_h_origins (hb_buffer_t *buf)
{
hb_position_t origin_x, origin_y;
get_glyph_h_origin_with_fallback (glyph, &origin_x, &origin_y);
*x += origin_x;
*y += origin_y;
apply_glyph_h_origins_with_fallback (buf, +1);
}
void add_glyph_v_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
void add_glyph_v_origins (hb_buffer_t *buf)
{
hb_position_t origin_x, origin_y;
get_glyph_v_origin_with_fallback (glyph, &origin_x, &origin_y);
*x += origin_x;
*y += origin_y;
apply_glyph_v_origins_with_fallback (buf, +1);
}
void add_glyph_origin_for_direction (hb_codepoint_t glyph,
hb_direction_t direction,
hb_position_t *x, hb_position_t *y)
{
hb_position_t origin_x, origin_y;
get_glyph_origin_for_direction (glyph, direction, &origin_x, &origin_y);
*x += origin_x;
*y += origin_y;
add_offset (x, y, origin_x, origin_y);
}
void subtract_glyph_h_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
void subtract_glyph_h_origins (hb_buffer_t *buf)
{
hb_position_t origin_x, origin_y;
get_glyph_h_origin_with_fallback (glyph, &origin_x, &origin_y);
*x -= origin_x;
*y -= origin_y;
apply_glyph_h_origins_with_fallback (buf, -1);
}
void subtract_glyph_v_origin (hb_codepoint_t glyph,
hb_position_t *x, hb_position_t *y)
void subtract_glyph_v_origins (hb_buffer_t *buf)
{
hb_position_t origin_x, origin_y;
get_glyph_v_origin_with_fallback (glyph, &origin_x, &origin_y);
*x -= origin_x;
*y -= origin_y;
apply_glyph_v_origins_with_fallback (buf, -1);
}
void subtract_glyph_origin_for_direction (hb_codepoint_t glyph,
hb_direction_t direction,
hb_position_t *x, hb_position_t *y)
{
hb_position_t origin_x, origin_y;
get_glyph_origin_for_direction (glyph, direction, &origin_x, &origin_y);
*x -= origin_x;
*y -= origin_y;
subtract_offset (x, y, origin_x, origin_y);
}
void get_glyph_kerning_for_direction (hb_codepoint_t first_glyph, hb_codepoint_t second_glyph,
@ -890,11 +1144,6 @@ struct hb_font_t
return false;
}
bool is_synthetic () const
{
return x_embolden || y_embolden || slant;
}
void changed ()
{
float upem = face->get_upem ();
@ -906,6 +1155,8 @@ struct hb_font_t
bool y_neg = y_scale < 0;
y_mult = (y_neg ? -((int64_t) -y_scale << 16) : ((int64_t) y_scale << 16)) / upem;
is_synthetic = x_embolden || y_embolden || slant;
x_strength = roundf (abs (x_scale) * x_embolden);
y_strength = roundf (abs (y_scale) * y_embolden);

View File

@ -24,12 +24,12 @@
* Facebook Author(s): Behdad Esfahbod
*/
#ifndef HB_POOL_HH
#define HB_POOL_HH
#ifndef HB_FREE_POOL_HH
#define HB_FREE_POOL_HH
#include "hb.hh"
/* Memory pool for persistent allocation of small objects.
/* Memory pool for persistent alloc/free of small objects.
*
* Some AI musings on this, not necessarily true:
*
@ -41,10 +41,10 @@
* sophisticated, use a real allocator. Or use a real language. */
template <typename T, unsigned ChunkLen = 32>
struct hb_pool_t
struct hb_free_pool_t
{
hb_pool_t () : next (nullptr) {}
~hb_pool_t ()
hb_free_pool_t () : next (nullptr) {}
~hb_free_pool_t ()
{
next = nullptr;
@ -104,4 +104,4 @@ struct hb_pool_t
};
#endif /* HB_POOL_HH */
#endif /* HB_FREE_POOL_HH */

View File

@ -143,6 +143,9 @@ _hb_ft_font_destroy (void *data)
/* hb_font changed, update FT_Face. */
static void _hb_ft_hb_font_changed (hb_font_t *font, FT_Face ft_face)
{
if (unlikely (font->destroy != (hb_destroy_func_t) _hb_ft_font_destroy))
return;
hb_ft_font_t *ft_font = (hb_ft_font_t *) font->user_data;
float x_mult = 1.f, y_mult = 1.f;
@ -184,12 +187,14 @@ static void _hb_ft_hb_font_changed (hb_font_t *font, FT_Face ft_face)
FT_Set_Transform (ft_face, &matrix, nullptr);
ft_font->transform = true;
}
else
FT_Set_Transform (ft_face, nullptr, nullptr);
#if defined(HAVE_FT_GET_VAR_BLEND_COORDINATES) && !defined(HB_NO_VAR)
unsigned int num_coords;
const float *coords = hb_font_get_var_coords_design (font, &num_coords);
if (num_coords)
if (font->has_nonzero_coords)
{
unsigned int num_coords;
const float *coords = hb_font_get_var_coords_design (font, &num_coords);
FT_Fixed *ft_coords = (FT_Fixed *) hb_calloc (num_coords, sizeof (FT_Fixed));
if (ft_coords)
{
@ -199,6 +204,12 @@ static void _hb_ft_hb_font_changed (hb_font_t *font, FT_Face ft_face)
hb_free (ft_coords);
}
}
else if (font->num_coords)
{
// Some old versions of FreeType crash if we
// call this function on non-variable fonts.
FT_Set_Var_Design_Coordinates (ft_face, 0, nullptr);
}
#endif
}
@ -1093,6 +1104,10 @@ _hb_ft_reference_table (hb_face_t *face HB_UNUSED, hb_tag_t tag, void *user_data
FT_ULong length = 0;
FT_Error error;
/* In new FreeType, a tag value of 1 loads the SFNT table directory. Reject it. */
if (tag == 1)
return nullptr;
/* Note: FreeType like HarfBuzz uses the NONE tag for fetching the entire blob */
error = FT_Load_Sfnt_Table (ft_face, tag, 0, nullptr, &length);
@ -1366,7 +1381,7 @@ hb_ft_font_changed (hb_font_t *font)
for (unsigned int i = 0; i < mm_var->num_axis; ++i)
{
coords[i] = ft_coords[i] >>= 2;
coords[i] = (ft_coords[i] + 2) >> 2;
nonzero = nonzero || coords[i];
}
@ -1717,7 +1732,12 @@ hb_ft_font_set_funcs (hb_font_t *font)
ft_face->generic.finalizer = _release_blob;
// And the FT_Library to the blob
hb_blob_set_user_data (blob, &ft_library_key, ft_library, destroy_ft_library, true);
if (unlikely (!hb_blob_set_user_data (blob, &ft_library_key, ft_library, destroy_ft_library, true)))
{
DEBUG_MSG (FT, font, "hb_blob_set_user_data() failed");
FT_Done_Face (ft_face);
return;
}
_hb_ft_font_set_funcs (font, ft_face, true);
hb_ft_font_set_load_flags (font, FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING);

View File

@ -26,7 +26,10 @@
#include "hb.hh"
#include "hb-algs.hh"
template <typename Float = float>
struct hb_extents_t
{
hb_extents_t () {}
@ -35,7 +38,7 @@ struct hb_extents_t
ymin (hb_min (extents.y_bearing, extents.y_bearing + extents.height)),
xmax (hb_max (extents.x_bearing, extents.x_bearing + extents.width)),
ymax (hb_max (extents.y_bearing, extents.y_bearing + extents.height)) {}
hb_extents_t (float xmin, float ymin, float xmax, float ymax) :
hb_extents_t (Float xmin, Float ymin, Float xmax, Float ymax) :
xmin (xmin), ymin (ymin), xmax (xmax), ymax (ymax) {}
bool is_empty () const { return xmin >= xmax || ymin >= ymax; }
@ -69,7 +72,7 @@ struct hb_extents_t
}
void
add_point (float x, float y)
add_point (Float x, Float y)
{
if (unlikely (is_void ()))
{
@ -97,62 +100,69 @@ struct hb_extents_t
yneg ? y1 - y0 : y0 - y1};
}
float xmin = 0.f;
float ymin = 0.f;
float xmax = -1.f;
float ymax = -1.f;
Float xmin = 0;
Float ymin = 0;
Float xmax = -1;
Float ymax = -1;
};
template <typename Float = float>
struct hb_transform_t
{
hb_transform_t () {}
hb_transform_t (float xx, float yx,
float xy, float yy,
float x0, float y0) :
hb_transform_t (Float xx, Float yx,
Float xy, Float yy,
Float x0, Float y0) :
xx (xx), yx (yx), xy (xy), yy (yy), x0 (x0), y0 (y0) {}
bool is_identity () const
{
return xx == 1.f && yx == 0.f &&
xy == 0.f && yy == 1.f &&
x0 == 0.f && y0 == 0.f;
return xx == 1 && yx == 0 &&
xy == 0 && yy == 1 &&
x0 == 0 && y0 == 0;
}
bool is_translation () const
{
return xx == 1 && yx == 0 &&
xy == 0 && yy == 1;
}
void multiply (const hb_transform_t &o)
void multiply (const hb_transform_t &o, bool before=false)
{
/* Copied from cairo, with "o" being "a" there and "this" being "b" there. */
hb_transform_t r;
r.xx = o.xx * xx + o.yx * xy;
r.yx = o.xx * yx + o.yx * yy;
r.xy = o.xy * xx + o.yy * xy;
r.yy = o.xy * yx + o.yy * yy;
r.x0 = o.x0 * xx + o.y0 * xy + x0;
r.y0 = o.x0 * yx + o.y0 * yy + y0;
*this = r;
// Copied from cairo-matrix.c
const hb_transform_t &a = before ? o : *this;
const hb_transform_t &b = before ? *this : o;
*this = {
a.xx * b.xx + a.xy * b.yx,
a.yx * b.xx + a.yy * b.yx,
a.xx * b.xy + a.xy * b.yy,
a.yx * b.xy + a.yy * b.yy,
a.xx * b.x0 + a.xy * b.y0 + a.x0,
a.yx * b.x0 + a.yy * b.y0 + a.y0
};
}
void transform_distance (float &dx, float &dy) const
HB_ALWAYS_INLINE
void transform_distance (Float &dx, Float &dy) const
{
float new_x = xx * dx + xy * dy;
float new_y = yx * dx + yy * dy;
Float new_x = xx * dx + xy * dy;
Float new_y = yx * dx + yy * dy;
dx = new_x;
dy = new_y;
}
void transform_point (float &x, float &y) const
HB_ALWAYS_INLINE
void transform_point (Float &x, Float &y) const
{
transform_distance (x, y);
x += x0;
y += y0;
Float new_x = x0 + xx * x + xy * y;
Float new_y = y0 + yx * x + yy * y;
x = new_x;
y = new_y;
}
void transform_extents (hb_extents_t &extents) const
void transform_extents (hb_extents_t<Float> &extents) const
{
float quad_x[4], quad_y[4];
Float quad_x[4], quad_y[4];
quad_x[0] = extents.xmin;
quad_y[0] = extents.ymin;
@ -163,7 +173,7 @@ struct hb_transform_t
quad_x[3] = extents.xmax;
quad_y[3] = extents.ymax;
extents = hb_extents_t {};
extents = hb_extents_t<Float> {};
for (unsigned i = 0; i < 4; i++)
{
transform_point (quad_x[i], quad_y[i]);
@ -171,20 +181,36 @@ struct hb_transform_t
}
}
void transform (const hb_transform_t &o) { multiply (o); }
void transform (const hb_transform_t &o, bool before=false) { multiply (o, before); }
void translate (float x, float y)
static hb_transform_t translation (Float x, Float y)
{
if (x == 0.f && y == 0.f)
return;
return {1, 0, 0, 1, x, y};
}
void translate (Float x, Float y, bool before=false)
{
if (before)
{
x0 += x;
y0 += y;
}
else
{
if (x == 0 && y == 0)
return;
x0 += xx * x + xy * y;
y0 += yx * x + yy * y;
x0 += xx * x + xy * y;
y0 += yx * x + yy * y;
}
}
void scale (float scaleX, float scaleY)
static hb_transform_t scaling (Float scaleX, Float scaleY)
{
if (scaleX == 1.f && scaleY == 1.f)
return {scaleX, 0, 0, scaleY, 0, 0};
}
void scale (Float scaleX, Float scaleY)
{
if (scaleX == 1 && scaleY == 1)
return;
xx *= scaleX;
@ -192,52 +218,94 @@ struct hb_transform_t
xy *= scaleY;
yy *= scaleY;
}
void rotate (float rotation)
static hb_transform_t scaling_around_center (Float scaleX, Float scaleY, Float center_x, Float center_y)
{
if (rotation == 0.f)
return {scaleX, 0, 0, scaleY,
center_x ? (1 - scaleX) * center_x : 0,
center_y ? (1 - scaleY) * center_y : 0};
}
void scale_around_center (Float scaleX, Float scaleY, Float center_x, Float center_y)
{
if (scaleX == 1 && scaleY == 1)
return;
transform (scaling_around_center (scaleX, scaleY, center_x, center_y));
}
static hb_transform_t rotation (Float radians)
{
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L240
rotation = rotation * HB_PI;
float c;
float s;
#ifdef HAVE_SINCOSF
sincosf (rotation, &s, &c);
#else
c = cosf (rotation);
s = sinf (rotation);
#endif
auto other = hb_transform_t{c, s, -s, c, 0.f, 0.f};
transform (other);
Float c;
Float s;
hb_sincos (radians, s, c);
return {c, s, -s, c, 0, 0};
}
void skew (float skewX, float skewY)
void rotate (Float radians, bool before=false)
{
if (skewX == 0.f && skewY == 0.f)
if (radians == 0)
return;
// https://github.com/fonttools/fonttools/blob/f66ee05f71c8b57b5f519ee975e95edcd1466e14/Lib/fontTools/misc/transform.py#L255
skewX = skewX * HB_PI;
skewY = skewY * HB_PI;
auto other = hb_transform_t{1.f,
skewY ? tanf (skewY) : 0.f,
skewX ? tanf (skewX) : 0.f,
1.f,
0.f, 0.f};
transform (other);
transform (rotation (radians), before);
}
float xx = 1.f;
float yx = 0.f;
float xy = 0.f;
float yy = 1.f;
float x0 = 0.f;
float y0 = 0.f;
static hb_transform_t rotation_around_center (Float radians, Float center_x, Float center_y)
{
Float s, c;
hb_sincos (radians, s, c);
return {
c, s, -s, c,
(1 - c) * center_x + s * center_y,
-s * center_x + (1 - c) * center_y
};
}
void rotate_around_center (Float radians, Float center_x, Float center_y, bool before=false)
{
if (radians == 0)
return;
transform (rotation_around_center (radians, center_x, center_y), before);
}
static hb_transform_t skewing (Float skewX, Float skewY)
{
return {1, skewY ? tanf (skewY) : 0, skewX ? tanf (skewX) : 0, 1, 0, 0};
}
void skew (Float skewX, Float skewY)
{
if (skewX == 0 && skewY == 0)
return;
transform (skewing (skewX, skewY));
}
static hb_transform_t skewing_around_center (Float skewX, Float skewY, Float center_x, Float center_y)
{
skewX = skewX ? tanf (skewX) : 0;
skewY = skewY ? tanf (skewY) : 0;
return {
1, skewY, skewX, 1,
center_y ? -skewX * center_y : 0,
center_x ? -skewY * center_x : 0
};
}
void skew_around_center (Float skewX, Float skewY, Float center_x, Float center_y)
{
if (skewX == 0 && skewY == 0)
return;
transform (skewing_around_center (skewX, skewY, center_x, center_y));
}
Float xx = 1;
Float yx = 0;
Float xy = 0;
Float yy = 1;
Float x0 = 0;
Float y0 = 0;
};
#define HB_TRANSFORM_IDENTITY hb_transform_t{1.f, 0.f, 0.f, 1.f, 0.f, 0.f}
#define HB_TRANSFORM_IDENTITY {1, 0, 0, 1, 0, 0}
template <typename Float = float>
struct hb_bounds_t
{
enum status_t {
@ -247,7 +315,7 @@ struct hb_bounds_t
};
hb_bounds_t (status_t status = UNBOUNDED) : status (status) {}
hb_bounds_t (const hb_extents_t &extents) :
hb_bounds_t (const hb_extents_t<Float> &extents) :
status (extents.is_empty () ? EMPTY : BOUNDED), extents (extents) {}
void union_ (const hb_bounds_t &o)
@ -281,20 +349,21 @@ struct hb_bounds_t
}
status_t status;
hb_extents_t extents;
hb_extents_t<Float> extents;
};
template <typename Float = float>
struct hb_transform_decomposed_t
{
float translateX = 0;
float translateY = 0;
float rotation = 0; // in degrees, counter-clockwise
float scaleX = 1;
float scaleY = 1;
float skewX = 0; // in degrees, counter-clockwise
float skewY = 0; // in degrees, counter-clockwise
float tCenterX = 0;
float tCenterY = 0;
Float translateX = 0;
Float translateY = 0;
Float rotation = 0; // in radians, counter-clockwise
Float scaleX = 1;
Float scaleY = 1;
Float skewX = 0; // in radians, counter-clockwise
Float skewY = 0; // in radians, counter-clockwise
Float tCenterX = 0;
Float tCenterY = 0;
operator bool () const
{
@ -305,9 +374,9 @@ struct hb_transform_decomposed_t
tCenterX || tCenterY;
}
hb_transform_t to_transform () const
hb_transform_t<Float> to_transform () const
{
hb_transform_t t;
hb_transform_t<Float> t;
t.translate (translateX + tCenterX, translateY + tCenterY);
t.rotate (rotation);
t.scale (scaleX, scaleY);

View File

@ -772,8 +772,9 @@ struct hb_iota_iter_t :
template <typename S2 = S>
auto
inc (hb_type_identity<S2> s, hb_priority<1>)
-> hb_void_t<decltype (hb_invoke (std::forward<S2> (s), hb_declval<T&> ()))>
{ v = hb_invoke (std::forward<S2> (s), v); }
-> hb_void_t<decltype (hb_invoke (std::forward<hb_type_identity<S2>> (s),
hb_declval<T&> ()))>
{ v = hb_invoke (std::forward<hb_type_identity<S2>> (s), v); }
void
inc (S s, hb_priority<0>)
@ -972,7 +973,7 @@ struct
Proj&& f = hb_identity) const
{
for (auto it = hb_iter (c); it; ++it)
if (!hb_match (std::forward<Pred> (p), hb_get (std::forward<Proj> (f), *it)))
if (!hb_match (p, hb_get (f, *it)))
return false;
return true;
}
@ -989,7 +990,7 @@ struct
Proj&& f = hb_identity) const
{
for (auto it = hb_iter (c); it; ++it)
if (hb_match (std::forward<Pred> (p), hb_get (std::forward<Proj> (f), *it)))
if (hb_match (p, hb_get (f, *it)))
return true;
return false;
}
@ -1006,7 +1007,7 @@ struct
Proj&& f = hb_identity) const
{
for (auto it = hb_iter (c); it; ++it)
if (hb_match (std::forward<Pred> (p), hb_get (std::forward<Proj> (f), *it)))
if (hb_match (p, hb_get (f, *it)))
return false;
return true;
}

View File

@ -70,7 +70,7 @@ struct hb_kern_machine_t
continue;
}
skippy_iter.reset (idx);
skippy_iter.reset_fast (idx);
unsigned unsafe_to;
if (!skippy_iter.next (&unsafe_to))
{

View File

@ -29,20 +29,20 @@
#ifndef HB_BUFFER_MAX_LEN_FACTOR
#define HB_BUFFER_MAX_LEN_FACTOR 64
#define HB_BUFFER_MAX_LEN_FACTOR 256
#endif
#ifndef HB_BUFFER_MAX_LEN_MIN
#define HB_BUFFER_MAX_LEN_MIN 16384
#define HB_BUFFER_MAX_LEN_MIN 65536
#endif
#ifndef HB_BUFFER_MAX_LEN_DEFAULT
#define HB_BUFFER_MAX_LEN_DEFAULT 0x3FFFFFFF /* Shaping more than a billion chars? Let us know! */
#endif
#ifndef HB_BUFFER_MAX_OPS_FACTOR
#define HB_BUFFER_MAX_OPS_FACTOR 1024
#define HB_BUFFER_MAX_OPS_FACTOR 4096
#endif
#ifndef HB_BUFFER_MAX_OPS_MIN
#define HB_BUFFER_MAX_OPS_MIN 16384
#define HB_BUFFER_MAX_OPS_MIN 65536
#endif
#ifndef HB_BUFFER_MAX_OPS_DEFAULT
#define HB_BUFFER_MAX_OPS_DEFAULT 0x1FFFFFFF /* Shaping more than a billion operations? Let us know! */

View File

@ -66,13 +66,22 @@ static inline Type& StructAtOffsetUnaligned(void *P, unsigned int offset)
}
/* StructAfter<T>(X) returns the struct T& that is placed after X.
* Works with X of variable size also. X must implement get_size() */
template<typename Type, typename TObject>
static inline const Type& StructAfter(const TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
template<typename Type, typename TObject>
static inline Type& StructAfter(TObject &X)
{ return StructAtOffset<Type>(&X, X.get_size()); }
* Works with X of variable size also. X must implement get_size().
* Any extra arguments are forwarded to get_size, so for example
* it can work with UnsizedArrayOf<> as well. */
template <typename Type, typename TObject, typename ...Ts>
static inline auto StructAfter(const TObject &X, Ts... args) HB_AUTO_RETURN((
StructAtOffset<Type>(&X, X.get_size(std::forward<Ts> (args)...))
))
/* The is_const shenanigans is to avoid ambiguous overload with gcc-8.
* It disables this path when TObject is const.
* See: https://github.com/harfbuzz/harfbuzz/issues/5429 */
template <typename Type, typename TObject, typename ...Ts>
static inline auto StructAfter(TObject &X, Ts... args) HB_AUTO_RETURN((
sizeof(int[std::is_const<TObject>::value ? -1 : +1]) > 0 ?
StructAtOffset<Type>(&X, X.get_size(std::forward<Ts> (args)...))
: *reinterpret_cast<Type*> (0)
))
/*
@ -132,7 +141,6 @@ static inline Type& StructAfter(TObject &X)
DEFINE_SIZE_ARRAY(size, array)
/*
* Lazy loaders.
*

View File

@ -47,11 +47,11 @@ struct hb_hashmap_t
hb_hashmap_t () { init (); }
~hb_hashmap_t () { fini (); }
hb_hashmap_t (const hb_hashmap_t& o) : hb_hashmap_t ()
void _copy (const hb_hashmap_t& o)
{
if (unlikely (!o.mask)) return;
if (item_t::is_trivial)
if (hb_is_trivially_copy_assignable (item_t))
{
items = (item_t *) hb_malloc (sizeof (item_t) * (o.mask + 1));
if (unlikely (!items))
@ -70,8 +70,16 @@ struct hb_hashmap_t
alloc (o.population); hb_copy (o, *this);
}
hb_hashmap_t (const hb_hashmap_t& o) : hb_hashmap_t () { _copy (o); }
hb_hashmap_t& operator= (const hb_hashmap_t& o)
{
reset ();
if (!items) { _copy (o); return *this; }
alloc (o.population); hb_copy (o, *this); return *this;
}
hb_hashmap_t (hb_hashmap_t&& o) noexcept : hb_hashmap_t () { hb_swap (*this, o); }
hb_hashmap_t& operator= (const hb_hashmap_t& o) { reset (); alloc (o.population); hb_copy (o, *this); return *this; }
hb_hashmap_t& operator= (hb_hashmap_t&& o) noexcept { hb_swap (*this, o); return *this; }
hb_hashmap_t (std::initializer_list<hb_pair_t<K, V>> lst) : hb_hashmap_t ()
@ -130,10 +138,7 @@ struct hb_hashmap_t
uint32_t total_hash () const
{ return (hash * 31u) + hb_hash (value); }
static constexpr bool is_trivial = hb_is_trivially_constructible(K) &&
hb_is_trivially_destructible(K) &&
hb_is_trivially_constructible(V) &&
hb_is_trivially_destructible(V);
static constexpr bool is_trivially_constructible = (hb_is_trivially_constructible(K) && hb_is_trivially_constructible(V));
};
hb_object_header_t header;
@ -174,19 +179,19 @@ struct hb_hashmap_t
if (likely (items))
{
unsigned size = mask + 1;
if (!item_t::is_trivial)
for (unsigned i = 0; i < size; i++)
items[i].~item_t ();
for (unsigned i = 0; i < size; i++)
items[i].~item_t ();
hb_free (items);
items = nullptr;
}
population = occupancy = 0;
}
void reset ()
hb_hashmap_t& reset ()
{
successful = true;
clear ();
return *this;
}
bool in_error () const { return !successful; }
@ -197,7 +202,7 @@ struct hb_hashmap_t
if (new_population != 0 && (new_population + new_population / 2) < mask) return true;
unsigned int power = hb_bit_storage (hb_max ((unsigned) population, new_population) * 2 + 8);
unsigned int power = hb_bit_storage (hb_max (hb_max ((unsigned) population, new_population) * 2, 4u));
unsigned int new_size = 1u << power;
item_t *new_items = (item_t *) hb_malloc ((size_t) new_size * sizeof (item_t));
if (unlikely (!new_items))
@ -205,7 +210,7 @@ struct hb_hashmap_t
successful = false;
return false;
}
if (!item_t::is_trivial)
if (!item_t::is_trivially_constructible)
for (auto &_ : hb_iter (new_items, new_size))
new (&_) item_t ();
else
@ -231,9 +236,8 @@ struct hb_hashmap_t
std::move (old_items[i].value));
}
}
if (!item_t::is_trivial)
for (unsigned int i = 0; i < old_size; i++)
old_items[i].~item_t ();
for (unsigned int i = 0; i < old_size; i++)
old_items[i].~item_t ();
hb_free (old_items);
@ -335,7 +339,13 @@ struct hb_hashmap_t
bool has (const K &key, VV **vp = nullptr) const
{
if (!items) return false;
auto *item = fetch_item (key, hb_hash (key));
return has_with_hash (key, hb_hash (key), vp);
}
template <typename VV=V>
bool has_with_hash (const K &key, uint32_t hash, VV **vp = nullptr) const
{
if (!items) return false;
auto *item = fetch_item (key, hash);
if (item)
{
if (vp) *vp = std::addressof (item->value);
@ -481,10 +491,17 @@ struct hb_hashmap_t
/* Sink interface. */
hb_hashmap_t& operator << (const hb_pair_t<K, V>& v)
{ set (v.first, v.second); return *this; }
template <typename V2 = V,
hb_enable_if (!hb_is_trivially_copyable (V2))>
hb_hashmap_t& operator << (const hb_pair_t<K, V&&>& v)
{ set (v.first, std::move (v.second)); return *this; }
template <typename K2 = K,
hb_enable_if (!hb_is_trivially_copyable (K2))>
hb_hashmap_t& operator << (const hb_pair_t<K&&, V>& v)
{ set (std::move (v.first), v.second); return *this; }
template <typename K2 = K, typename V2 = V,
hb_enable_if (!hb_is_trivially_copyable (K2) &&
!hb_is_trivially_copyable (V2))>
hb_hashmap_t& operator << (const hb_pair_t<K&&, V&&>& v)
{ set (std::move (v.first), std::move (v.second)); return *this; }

View File

@ -31,7 +31,7 @@
#include "hb.hh"
#line 35 "hb-number-parser.hh"
#line 32 "hb-number-parser.hh"
static const unsigned char _double_parser_trans_keys[] = {
0u, 0u, 43u, 57u, 46u, 57u, 48u, 57u, 43u, 57u, 48u, 57u, 48u, 101u, 48u, 57u,
46u, 101u, 0
@ -135,12 +135,12 @@ strtod_rl (const char *p, const char **end_ptr /* IN/OUT */)
int cs;
#line 139 "hb-number-parser.hh"
#line 132 "hb-number-parser.hh"
{
cs = double_parser_start;
}
#line 144 "hb-number-parser.hh"
#line 135 "hb-number-parser.hh"
{
int _slen;
int _trans;
@ -198,7 +198,7 @@ _resume:
exp_overflow = true;
}
break;
#line 202 "hb-number-parser.hh"
#line 187 "hb-number-parser.hh"
}
_again:

View File

@ -465,11 +465,11 @@ struct OpenTypeFontFile
Typ1Tag = HB_TAG ('t','y','p','1') /* Obsolete Apple Type1 font in SFNT container */
};
hb_tag_t get_tag () const { return u.tag; }
hb_tag_t get_tag () const { return u.tag.v; }
unsigned int get_face_count () const
{
switch (u.tag) {
switch (u.tag.v) {
case CFFTag: /* All the non-collection tags */
case TrueTag:
case Typ1Tag:
@ -483,7 +483,7 @@ struct OpenTypeFontFile
{
if (base_offset)
*base_offset = 0;
switch (u.tag) {
switch (u.tag.v) {
/* Note: for non-collection SFNT data we ignore index. This is because
* Apple dfont container is a container of SFNT's. So each SFNT is a
* non-TTC, but the index is more than zero. */
@ -512,9 +512,9 @@ struct OpenTypeFontFile
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (unlikely (!u.tag.sanitize (c))) return_trace (false);
if (unlikely (!u.tag.v.sanitize (c))) return_trace (false);
hb_barrier ();
switch (u.tag) {
switch (u.tag.v) {
case CFFTag: /* All the non-collection tags */
case TrueTag:
case Typ1Tag:
@ -527,13 +527,13 @@ struct OpenTypeFontFile
protected:
union {
Tag tag; /* 4-byte identifier. */
struct { Tag v; } tag; /* 4-byte identifier. */
OpenTypeFontFace fontFace;
TTCHeader ttcHeader;
ResourceForkHeader rfHeader;
} u;
public:
DEFINE_SIZE_UNION (4, tag);
DEFINE_SIZE_UNION (4, tag.v);
};

View File

@ -54,35 +54,41 @@ namespace OT {
*/
/* Integer types in big-endian order and no alignment requirement */
template <typename Type,
template <bool BE,
typename Type,
unsigned int Size = sizeof (Type)>
struct IntType
struct NumType
{
typedef Type type;
IntType () = default;
explicit constexpr IntType (Type V) : v {V} {}
IntType& operator = (Type i) { v = i; return *this; }
/* For reason we define cast out operator for signed/unsigned, instead of Type, see:
* https://github.com/harfbuzz/harfbuzz/pull/2875/commits/09836013995cab2b9f07577a179ad7b024130467 */
operator typename std::conditional<std::is_signed<Type>::value, signed, unsigned>::type () const { return v; }
typedef typename std::conditional<std::is_integral<Type>::value && sizeof (Type) <= sizeof(int),
typename std::conditional<std::is_signed<Type>::value, signed, unsigned>::type,
Type>::type WideType;
bool operator == (const IntType &o) const { return (Type) v == (Type) o.v; }
bool operator != (const IntType &o) const { return !(*this == o); }
NumType () = default;
explicit constexpr NumType (Type V) : v {V} {}
NumType& operator = (Type V) { v = V; return *this; }
IntType& operator += (unsigned count) { *this = *this + count; return *this; }
IntType& operator -= (unsigned count) { *this = *this - count; return *this; }
IntType& operator ++ () { *this += 1; return *this; }
IntType& operator -- () { *this -= 1; return *this; }
IntType operator ++ (int) { IntType c (*this); ++*this; return c; }
IntType operator -- (int) { IntType c (*this); --*this; return c; }
operator WideType () const { return v; }
HB_INTERNAL static int cmp (const IntType *a, const IntType *b)
bool operator == (const NumType &o) const { return (Type) v == (Type) o.v; }
bool operator != (const NumType &o) const { return !(*this == o); }
NumType& operator += (WideType count) { *this = *this + count; return *this; }
NumType& operator -= (WideType count) { *this = *this - count; return *this; }
NumType& operator ++ () { *this += 1; return *this; }
NumType& operator -- () { *this -= 1; return *this; }
NumType operator ++ (int) { NumType c (*this); ++*this; return c; }
NumType operator -- (int) { NumType c (*this); --*this; return c; }
uint32_t hash () const { return hb_array ((const char *) &v, sizeof (v)).hash (); }
HB_INTERNAL static int cmp (const NumType *a, const NumType *b)
{ return b->cmp (*a); }
HB_INTERNAL static int cmp (const void *a, const void *b)
{
IntType *pa = (IntType *) a;
IntType *pb = (IntType *) b;
NumType *pa = (NumType *) a;
NumType *pb = (NumType *) b;
return pb->cmp (*pa);
}
@ -99,20 +105,36 @@ struct IntType
return_trace (c->check_struct (this));
}
protected:
BEInt<Type, Size> v;
typename std::conditional<std::is_integral<Type>::value,
HBInt<BE, Type, Size>,
HBFloat<BE, Type, Size>>::type v;
public:
DEFINE_SIZE_STATIC (Size);
};
typedef IntType<uint8_t> HBUINT8; /* 8-bit unsigned integer. */
typedef IntType<int8_t> HBINT8; /* 8-bit signed integer. */
typedef IntType<uint16_t> HBUINT16; /* 16-bit unsigned integer. */
typedef IntType<int16_t> HBINT16; /* 16-bit signed integer. */
typedef IntType<uint32_t> HBUINT32; /* 32-bit unsigned integer. */
typedef IntType<int32_t> HBINT32; /* 32-bit signed integer. */
typedef NumType<true, uint8_t> HBUINT8; /* 8-bit big-endian unsigned integer. */
typedef NumType<true, int8_t> HBINT8; /* 8-bit big-endian signed integer. */
typedef NumType<true, uint16_t> HBUINT16; /* 16-bit big-endian unsigned integer. */
typedef NumType<true, int16_t> HBINT16; /* 16-bit big-endian signed integer. */
typedef NumType<true, uint32_t> HBUINT32; /* 32-bit big-endian unsigned integer. */
typedef NumType<true, int32_t> HBINT32; /* 32-bit big-endian signed integer. */
typedef NumType<true, uint64_t> HBUINT64; /* 64-bit big-endian unsigned integer. */
typedef NumType<true, int64_t> HBINT64; /* 64-bit big-endian signed integer. */
/* Note: we cannot defined a signed HBINT24 because there's no corresponding C type.
* Works for unsigned, but not signed, since we rely on compiler for sign-extension. */
typedef IntType<uint32_t, 3> HBUINT24; /* 24-bit unsigned integer. */
typedef NumType<true, uint32_t, 3> HBUINT24; /* 24-bit big-endian unsigned integer. */
typedef NumType<false, uint16_t> HBUINT16LE; /* 16-bit little-endian unsigned integer. */
typedef NumType<false, int16_t> HBINT16LE; /* 16-bit little-endian signed integer. */
typedef NumType<false, uint32_t> HBUINT32LE; /* 32-bit little-endian unsigned integer. */
typedef NumType<false, int32_t> HBINT32LE; /* 32-bit little-endian signed integer. */
typedef NumType<false, uint64_t> HBUINT64LE; /* 64-bit little-endian unsigned integer. */
typedef NumType<false, int64_t> HBINT64LE; /* 64-bit little-endian signed integer. */
typedef NumType<true, float> HBFLOAT32BE; /* 32-bit little-endian floating point number. */
typedef NumType<true, double> HBFLOAT64BE; /* 64-bit little-endian floating point number. */
typedef NumType<false, float> HBFLOAT32LE; /* 32-bit little-endian floating point number. */
typedef NumType<false, double> HBFLOAT64LE; /* 64-bit little-endian floating point number. */
/* 15-bit unsigned number; top bit used for extension. */
struct HBUINT15 : HBUINT16
@ -218,7 +240,7 @@ typedef HBUINT16 UFWORD;
template <typename Type, unsigned fraction_bits>
struct HBFixed : Type
{
static constexpr float shift = (float) (1 << fraction_bits);
static constexpr float mult = 1.f / (1 << fraction_bits);
static_assert (Type::static_size * 8 > fraction_bits, "");
operator signed () const = delete;
@ -226,8 +248,8 @@ struct HBFixed : Type
explicit operator float () const { return to_float (); }
typename Type::type to_int () const { return Type::v; }
void set_int (typename Type::type i ) { Type::v = i; }
float to_float (float offset = 0) const { return ((int32_t) Type::v + offset) / shift; }
void set_float (float f) { Type::v = roundf (f * shift); }
float to_float (float offset = 0) const { return ((int32_t) Type::v + offset) * mult; }
void set_float (float f) { Type::v = roundf (f / mult); }
public:
DEFINE_SIZE_STATIC (Type::static_size);
};
@ -504,16 +526,9 @@ struct OffsetTo : Offset<OffsetType, has_null>
return_trace (sanitize_shallow (c, base) &&
hb_barrier () &&
(this->is_null () ||
c->dispatch (StructAtOffset<Type> (base, *this), std::forward<Ts> (ds)...) ||
neuter (c)));
c->dispatch (StructAtOffset<Type> (base, *this), std::forward<Ts> (ds)...)));
}
/* Set the offset to Null */
bool neuter (hb_sanitize_context_t *c) const
{
if (!has_null) return false;
return c->try_set (this, 0);
}
DEFINE_SIZE_STATIC (sizeof (OffsetType));
};
/* Partial specializations. */
@ -1481,8 +1496,8 @@ struct TupleValues
VALUE_RUN_COUNT_MASK = 0x3F
};
static unsigned compile (hb_array_t<const int> values, /* IN */
hb_array_t<unsigned char> encoded_bytes /* OUT */)
static unsigned compile_unsafe (hb_array_t<const int> values, /* IN */
unsigned char *encoded_bytes /* OUT */)
{
unsigned num_values = values.length;
unsigned encoded_len = 0;
@ -1491,24 +1506,23 @@ struct TupleValues
{
int val = values.arrayZ[i];
if (val == 0)
encoded_len += encode_value_run_as_zeroes (i, encoded_bytes.sub_array (encoded_len), values);
else if (val >= -128 && val <= 127)
encoded_len += encode_value_run_as_bytes (i, encoded_bytes.sub_array (encoded_len), values);
else if (val >= -32768 && val <= 32767)
encoded_len += encode_value_run_as_words (i, encoded_bytes.sub_array (encoded_len), values);
encoded_len += encode_value_run_as_zeroes (i, encoded_bytes + encoded_len, values);
else if ((int8_t) val == val)
encoded_len += encode_value_run_as_bytes (i, encoded_bytes + encoded_len, values);
else if ((int16_t) val == val)
encoded_len += encode_value_run_as_words (i, encoded_bytes + encoded_len, values);
else
encoded_len += encode_value_run_as_longs (i, encoded_bytes.sub_array (encoded_len), values);
encoded_len += encode_value_run_as_longs (i, encoded_bytes + encoded_len, values);
}
return encoded_len;
}
static unsigned encode_value_run_as_zeroes (unsigned& i,
hb_array_t<unsigned char> encoded_bytes,
unsigned char *it,
hb_array_t<const int> values)
{
unsigned num_values = values.length;
unsigned run_length = 0;
auto it = encoded_bytes.iter ();
unsigned encoded_len = 0;
while (i < num_values && values.arrayZ[i] == 0)
{
@ -1532,7 +1546,7 @@ struct TupleValues
}
static unsigned encode_value_run_as_bytes (unsigned &i,
hb_array_t<unsigned char> encoded_bytes,
unsigned char *it,
hb_array_t<const int> values)
{
unsigned start = i;
@ -1540,7 +1554,7 @@ struct TupleValues
while (i < num_values)
{
int val = values.arrayZ[i];
if (val > 127 || val < -128)
if ((int8_t) val != val)
break;
/* from fonttools: if there're 2 or more zeros in a sequence,
@ -1553,7 +1567,6 @@ struct TupleValues
unsigned run_length = i - start;
unsigned encoded_len = 0;
auto it = encoded_bytes.iter ();
while (run_length >= 64)
{
@ -1561,10 +1574,9 @@ struct TupleValues
encoded_len++;
for (unsigned j = 0; j < 64; j++)
{
*it++ = static_cast<char> (values.arrayZ[start + j]);
encoded_len++;
}
it[j] = static_cast<char> (values.arrayZ[start + j]);
it += 64;
encoded_len += 64;
start += 64;
run_length -= 64;
@ -1575,18 +1587,16 @@ struct TupleValues
*it++ = (VALUES_ARE_BYTES | (run_length - 1));
encoded_len++;
while (start < i)
{
*it++ = static_cast<char> (values.arrayZ[start++]);
encoded_len++;
}
for (unsigned j = 0; j < run_length; j++)
it[j] = static_cast<char> (values.arrayZ[start + j]);
encoded_len += run_length;
}
return encoded_len;
}
static unsigned encode_value_run_as_words (unsigned &i,
hb_array_t<unsigned char> encoded_bytes,
unsigned char *it,
hb_array_t<const int> values)
{
unsigned start = i;
@ -1595,22 +1605,24 @@ struct TupleValues
{
int val = values.arrayZ[i];
/* start a new run for a single zero value*/
if ((int16_t) val != val)
break;
/* start a new run for a single zero value. */
if (val == 0) break;
/* from fonttools: continue word-encoded run if there's only one
/* From fonttools: continue word-encoded run if there's only one
* single value in the range [-128, 127] because it is more compact.
* Only start a new run when there're 2 continuous such values. */
if (val >= -128 && val <= 127 &&
if ((int8_t) val == val &&
i + 1 < num_values &&
values.arrayZ[i+1] >= -128 && values.arrayZ[i+1] <= 127)
(int8_t) values.arrayZ[i+1] == values.arrayZ[i+1])
break;
i++;
}
unsigned run_length = i - start;
auto it = encoded_bytes.iter ();
unsigned encoded_len = 0;
while (run_length >= 64)
{
@ -1647,7 +1659,7 @@ struct TupleValues
}
static unsigned encode_value_run_as_longs (unsigned &i,
hb_array_t<unsigned char> encoded_bytes,
unsigned char *it,
hb_array_t<const int> values)
{
unsigned start = i;
@ -1656,14 +1668,13 @@ struct TupleValues
{
int val = values.arrayZ[i];
if (val >= -32768 && val <= 32767)
if ((int16_t) val == val)
break;
i++;
}
unsigned run_length = i - start;
auto it = encoded_bytes.iter ();
unsigned encoded_len = 0;
while (run_length >= 64)
{
@ -1704,10 +1715,14 @@ struct TupleValues
}
template <typename T>
#ifndef HB_OPTIMIZE_SIZE
HB_ALWAYS_INLINE
#endif
static bool decompile (const HBUINT8 *&p /* IN/OUT */,
hb_vector_t<T> &values /* IN/OUT */,
const HBUINT8 *end,
bool consume_all = false)
bool consume_all = false,
unsigned start = 0)
{
unsigned i = 0;
unsigned count = consume_all ? UINT_MAX : values.length;
@ -1720,19 +1735,24 @@ struct TupleValues
unsigned run_count = (control & VALUE_RUN_COUNT_MASK) + 1;
if (consume_all)
{
if (unlikely (!values.resize (values.length + run_count, false)))
if (unlikely (!values.resize_dirty (values.length + run_count)))
return false;
}
unsigned stop = i + run_count;
if (unlikely (stop > count)) return false;
unsigned skip = i < start ? hb_min (start - i, run_count) : 0;
i += skip;
if ((control & VALUES_SIZE_MASK) == VALUES_ARE_ZEROS)
{
for (; i < stop; i++)
values.arrayZ[i] = 0;
hb_memset (&values.arrayZ[i], 0, (stop - i) * sizeof (T));
i = stop;
}
else if ((control & VALUES_SIZE_MASK) == VALUES_ARE_WORDS)
{
if (unlikely (p + run_count * HBINT16::static_size > end)) return false;
p += skip * HBINT16::static_size;
#ifndef HB_OPTIMIZE_SIZE
for (; i + 3 < stop; i += 4)
{
@ -1755,6 +1775,7 @@ struct TupleValues
else if ((control & VALUES_SIZE_MASK) == VALUES_ARE_LONGS)
{
if (unlikely (p + run_count * HBINT32::static_size > end)) return false;
p += skip * HBINT32::static_size;
for (; i < stop; i++)
{
values.arrayZ[i] = * (const HBINT32 *) p;
@ -1764,6 +1785,7 @@ struct TupleValues
else if ((control & VALUES_SIZE_MASK) == VALUES_ARE_BYTES)
{
if (unlikely (p + run_count > end)) return false;
p += skip * HBINT8::static_size;
#ifndef HB_OPTIMIZE_SIZE
for (; i + 3 < stop; i += 4)
{
@ -1784,7 +1806,7 @@ struct TupleValues
{
iter_t (const unsigned char *p_, unsigned len_)
: p (p_), endp (p_ + len_)
{ if (ensure_run ()) read_value (); }
{ if (likely (ensure_run ())) read_value (); }
private:
const unsigned char *p;
@ -1793,10 +1815,14 @@ struct TupleValues
signed run_count = 0;
unsigned width = 0;
HB_ALWAYS_INLINE
bool ensure_run ()
{
if (likely (run_count > 0)) return true;
return _ensure_run ();
}
bool _ensure_run ()
{
if (unlikely (p >= endp))
{
run_count = 0;
@ -1886,10 +1912,15 @@ struct TupleValues
signed run_count = 0;
unsigned width = 0;
HB_ALWAYS_INLINE
bool ensure_run ()
{
if (run_count > 0) return true;
if (likely (run_count > 0)) return true;
return _ensure_run ();
}
bool _ensure_run ()
{
if (unlikely (p >= end))
{
run_count = 0;
@ -2013,7 +2044,10 @@ struct TupleValues
}
#ifndef HB_OPTIMIZE_SIZE
if (scale == 1.0f)
// The following branch is supposed to speed things up by avoiding
// the multiplication in _add_to<> if scale is 1.0f.
// But in practice it seems to bloat the code and slow things down.
if (false && scale == 1.0f)
_add_to<false> (out);
else
#endif
@ -2038,6 +2072,23 @@ struct TupleList : CFF2Index
};
// Alignment
template <unsigned int alignment>
struct Align
{
unsigned get_size (const void *base) const
{
unsigned offset = (const char *) this - (const char *) base;
return (alignment - offset) & (alignment - 1);
}
public:
DEFINE_SIZE_MIN (0);
};
} /* namespace OT */

View File

@ -79,7 +79,7 @@ struct Dict : UnsizedByteStr
{
TRACE_SERIALIZE (this);
for (unsigned int i = 0; i < dictval.get_count (); i++)
if (unlikely (!opszr.serialize (c, dictval[i], std::forward<Ts> (ds)...)))
if (unlikely (!opszr.serialize (c, dictval[i], ds...)))
return_trace (false);
return_trace (true);

View File

@ -30,396 +30,396 @@
#include "hb.hh"
#endif
_S(".notdef")
_S("space")
_S("exclam")
_S("quotedbl")
_S("numbersign")
_S("dollar")
_S("percent")
_S("ampersand")
_S("quoteright")
_S("parenleft")
_S("parenright")
_S("asterisk")
_S("plus")
_S("comma")
_S("hyphen")
_S("period")
_S("slash")
_S("zero")
_S("one")
_S("two")
_S("three")
_S("four")
_S("five")
_S("six")
_S("seven")
_S("eight")
_S("nine")
_S("colon")
_S("semicolon")
_S("less")
_S("equal")
_S("greater")
_S("question")
_S("at")
_S("A")
_S("B")
_S("C")
_S("D")
_S("E")
_S("F")
_S("G")
_S("H")
_S("I")
_S("J")
_S("K")
_S("L")
_S("M")
_S("N")
_S("O")
_S("P")
_S("Q")
_S("R")
_S("S")
_S("T")
_S("U")
_S("V")
_S("W")
_S("X")
_S("Y")
_S("Z")
_S("bracketleft")
_S("backslash")
_S("bracketright")
_S("asciicircum")
_S("underscore")
_S("quoteleft")
_S("a")
_S("b")
_S("c")
_S("d")
_S("e")
_S("f")
_S("g")
_S("h")
_S("i")
_S("j")
_S("k")
_S("l")
_S("m")
_S("n")
_S("o")
_S("p")
_S("q")
_S("r")
_S("s")
_S("t")
_S("u")
_S("v")
_S("w")
_S("x")
_S("y")
_S("z")
_S("braceleft")
_S("bar")
_S("braceright")
_S("asciitilde")
_S("exclamdown")
_S("cent")
_S("sterling")
_S("fraction")
_S("yen")
_S("florin")
_S("section")
_S("currency")
_S("quotesingle")
_S("quotedblleft")
_S("guillemotleft")
_S("guilsinglleft")
_S("guilsinglright")
_S("fi")
_S("fl")
_S("endash")
_S("dagger")
_S("daggerdbl")
_S("periodcentered")
_S("paragraph")
_S("bullet")
_S("quotesinglbase")
_S("quotedblbase")
_S("quotedblright")
_S("guillemotright")
_S("ellipsis")
_S("perthousand")
_S("questiondown")
_S("grave")
_S("acute")
_S("circumflex")
_S("tilde")
_S("macron")
_S("breve")
_S("dotaccent")
_S("dieresis")
_S("ring")
_S("cedilla")
_S("hungarumlaut")
_S("ogonek")
_S("caron")
_S("emdash")
_S("AE")
_S("ordfeminine")
_S("Lslash")
_S("Oslash")
_S("OE")
_S("ordmasculine")
_S("ae")
_S("dotlessi")
_S("lslash")
_S("oslash")
_S("oe")
_S("germandbls")
_S("onesuperior")
_S("logicalnot")
_S("mu")
_S("trademark")
_S("Eth")
_S("onehalf")
_S("plusminus")
_S("Thorn")
_S("onequarter")
_S("divide")
_S("brokenbar")
_S("degree")
_S("thorn")
_S("threequarters")
_S("twosuperior")
_S("registered")
_S("minus")
_S("eth")
_S("multiply")
_S("threesuperior")
_S("copyright")
_S("Aacute")
_S("Acircumflex")
_S("Adieresis")
_S("Agrave")
_S("Aring")
_S("Atilde")
_S("Ccedilla")
_S("Eacute")
_S("Ecircumflex")
_S("Edieresis")
_S("Egrave")
_S("Iacute")
_S("Icircumflex")
_S("Idieresis")
_S("Igrave")
_S("Ntilde")
_S("Oacute")
_S("Ocircumflex")
_S("Odieresis")
_S("Ograve")
_S("Otilde")
_S("Scaron")
_S("Uacute")
_S("Ucircumflex")
_S("Udieresis")
_S("Ugrave")
_S("Yacute")
_S("Ydieresis")
_S("Zcaron")
_S("aacute")
_S("acircumflex")
_S("adieresis")
_S("agrave")
_S("aring")
_S("atilde")
_S("ccedilla")
_S("eacute")
_S("ecircumflex")
_S("edieresis")
_S("egrave")
_S("iacute")
_S("icircumflex")
_S("idieresis")
_S("igrave")
_S("ntilde")
_S("oacute")
_S("ocircumflex")
_S("odieresis")
_S("ograve")
_S("otilde")
_S("scaron")
_S("uacute")
_S("ucircumflex")
_S("udieresis")
_S("ugrave")
_S("yacute")
_S("ydieresis")
_S("zcaron")
_S("exclamsmall")
_S("Hungarumlautsmall")
_S("dollaroldstyle")
_S("dollarsuperior")
_S("ampersandsmall")
_S("Acutesmall")
_S("parenleftsuperior")
_S("parenrightsuperior")
_S("twodotenleader")
_S("onedotenleader")
_S("zerooldstyle")
_S("oneoldstyle")
_S("twooldstyle")
_S("threeoldstyle")
_S("fouroldstyle")
_S("fiveoldstyle")
_S("sixoldstyle")
_S("sevenoldstyle")
_S("eightoldstyle")
_S("nineoldstyle")
_S("commasuperior")
_S("threequartersemdash")
_S("periodsuperior")
_S("questionsmall")
_S("asuperior")
_S("bsuperior")
_S("centsuperior")
_S("dsuperior")
_S("esuperior")
_S("isuperior")
_S("lsuperior")
_S("msuperior")
_S("nsuperior")
_S("osuperior")
_S("rsuperior")
_S("ssuperior")
_S("tsuperior")
_S("ff")
_S("ffi")
_S("ffl")
_S("parenleftinferior")
_S("parenrightinferior")
_S("Circumflexsmall")
_S("hyphensuperior")
_S("Gravesmall")
_S("Asmall")
_S("Bsmall")
_S("Csmall")
_S("Dsmall")
_S("Esmall")
_S("Fsmall")
_S("Gsmall")
_S("Hsmall")
_S("Ismall")
_S("Jsmall")
_S("Ksmall")
_S("Lsmall")
_S("Msmall")
_S("Nsmall")
_S("Osmall")
_S("Psmall")
_S("Qsmall")
_S("Rsmall")
_S("Ssmall")
_S("Tsmall")
_S("Usmall")
_S("Vsmall")
_S("Wsmall")
_S("Xsmall")
_S("Ysmall")
_S("Zsmall")
_S("colonmonetary")
_S("onefitted")
_S("rupiah")
_S("Tildesmall")
_S("exclamdownsmall")
_S("centoldstyle")
_S("Lslashsmall")
_S("Scaronsmall")
_S("Zcaronsmall")
_S("Dieresissmall")
_S("Brevesmall")
_S("Caronsmall")
_S("Dotaccentsmall")
_S("Macronsmall")
_S("figuredash")
_S("hypheninferior")
_S("Ogoneksmall")
_S("Ringsmall")
_S("Cedillasmall")
_S("questiondownsmall")
_S("oneeighth")
_S("threeeighths")
_S("fiveeighths")
_S("seveneighths")
_S("onethird")
_S("twothirds")
_S("zerosuperior")
_S("foursuperior")
_S("fivesuperior")
_S("sixsuperior")
_S("sevensuperior")
_S("eightsuperior")
_S("ninesuperior")
_S("zeroinferior")
_S("oneinferior")
_S("twoinferior")
_S("threeinferior")
_S("fourinferior")
_S("fiveinferior")
_S("sixinferior")
_S("seveninferior")
_S("eightinferior")
_S("nineinferior")
_S("centinferior")
_S("dollarinferior")
_S("periodinferior")
_S("commainferior")
_S("Agravesmall")
_S("Aacutesmall")
_S("Acircumflexsmall")
_S("Atildesmall")
_S("Adieresissmall")
_S("Aringsmall")
_S("AEsmall")
_S("Ccedillasmall")
_S("Egravesmall")
_S("Eacutesmall")
_S("Ecircumflexsmall")
_S("Edieresissmall")
_S("Igravesmall")
_S("Iacutesmall")
_S("Icircumflexsmall")
_S("Idieresissmall")
_S("Ethsmall")
_S("Ntildesmall")
_S("Ogravesmall")
_S("Oacutesmall")
_S("Ocircumflexsmall")
_S("Otildesmall")
_S("Odieresissmall")
_S("OEsmall")
_S("Oslashsmall")
_S("Ugravesmall")
_S("Uacutesmall")
_S("Ucircumflexsmall")
_S("Udieresissmall")
_S("Yacutesmall")
_S("Thornsmall")
_S("Ydieresissmall")
_S("001.000")
_S("001.001")
_S("001.002")
_S("001.003")
_S("Black")
_S("Bold")
_S("Book")
_S("Light")
_S("Medium")
_S("Regular")
_S("Roman")
_S("Semibold")
HB_STR(".notdef")
HB_STR("space")
HB_STR("exclam")
HB_STR("quotedbl")
HB_STR("numbersign")
HB_STR("dollar")
HB_STR("percent")
HB_STR("ampersand")
HB_STR("quoteright")
HB_STR("parenleft")
HB_STR("parenright")
HB_STR("asterisk")
HB_STR("plus")
HB_STR("comma")
HB_STR("hyphen")
HB_STR("period")
HB_STR("slash")
HB_STR("zero")
HB_STR("one")
HB_STR("two")
HB_STR("three")
HB_STR("four")
HB_STR("five")
HB_STR("six")
HB_STR("seven")
HB_STR("eight")
HB_STR("nine")
HB_STR("colon")
HB_STR("semicolon")
HB_STR("less")
HB_STR("equal")
HB_STR("greater")
HB_STR("question")
HB_STR("at")
HB_STR("A")
HB_STR("B")
HB_STR("C")
HB_STR("D")
HB_STR("E")
HB_STR("F")
HB_STR("G")
HB_STR("H")
HB_STR("I")
HB_STR("J")
HB_STR("K")
HB_STR("L")
HB_STR("M")
HB_STR("N")
HB_STR("O")
HB_STR("P")
HB_STR("Q")
HB_STR("R")
HB_STR("S")
HB_STR("T")
HB_STR("U")
HB_STR("V")
HB_STR("W")
HB_STR("X")
HB_STR("Y")
HB_STR("Z")
HB_STR("bracketleft")
HB_STR("backslash")
HB_STR("bracketright")
HB_STR("asciicircum")
HB_STR("underscore")
HB_STR("quoteleft")
HB_STR("a")
HB_STR("b")
HB_STR("c")
HB_STR("d")
HB_STR("e")
HB_STR("f")
HB_STR("g")
HB_STR("h")
HB_STR("i")
HB_STR("j")
HB_STR("k")
HB_STR("l")
HB_STR("m")
HB_STR("n")
HB_STR("o")
HB_STR("p")
HB_STR("q")
HB_STR("r")
HB_STR("s")
HB_STR("t")
HB_STR("u")
HB_STR("v")
HB_STR("w")
HB_STR("x")
HB_STR("y")
HB_STR("z")
HB_STR("braceleft")
HB_STR("bar")
HB_STR("braceright")
HB_STR("asciitilde")
HB_STR("exclamdown")
HB_STR("cent")
HB_STR("sterling")
HB_STR("fraction")
HB_STR("yen")
HB_STR("florin")
HB_STR("section")
HB_STR("currency")
HB_STR("quotesingle")
HB_STR("quotedblleft")
HB_STR("guillemotleft")
HB_STR("guilsinglleft")
HB_STR("guilsinglright")
HB_STR("fi")
HB_STR("fl")
HB_STR("endash")
HB_STR("dagger")
HB_STR("daggerdbl")
HB_STR("periodcentered")
HB_STR("paragraph")
HB_STR("bullet")
HB_STR("quotesinglbase")
HB_STR("quotedblbase")
HB_STR("quotedblright")
HB_STR("guillemotright")
HB_STR("ellipsis")
HB_STR("perthousand")
HB_STR("questiondown")
HB_STR("grave")
HB_STR("acute")
HB_STR("circumflex")
HB_STR("tilde")
HB_STR("macron")
HB_STR("breve")
HB_STR("dotaccent")
HB_STR("dieresis")
HB_STR("ring")
HB_STR("cedilla")
HB_STR("hungarumlaut")
HB_STR("ogonek")
HB_STR("caron")
HB_STR("emdash")
HB_STR("AE")
HB_STR("ordfeminine")
HB_STR("Lslash")
HB_STR("Oslash")
HB_STR("OE")
HB_STR("ordmasculine")
HB_STR("ae")
HB_STR("dotlessi")
HB_STR("lslash")
HB_STR("oslash")
HB_STR("oe")
HB_STR("germandbls")
HB_STR("onesuperior")
HB_STR("logicalnot")
HB_STR("mu")
HB_STR("trademark")
HB_STR("Eth")
HB_STR("onehalf")
HB_STR("plusminus")
HB_STR("Thorn")
HB_STR("onequarter")
HB_STR("divide")
HB_STR("brokenbar")
HB_STR("degree")
HB_STR("thorn")
HB_STR("threequarters")
HB_STR("twosuperior")
HB_STR("registered")
HB_STR("minus")
HB_STR("eth")
HB_STR("multiply")
HB_STR("threesuperior")
HB_STR("copyright")
HB_STR("Aacute")
HB_STR("Acircumflex")
HB_STR("Adieresis")
HB_STR("Agrave")
HB_STR("Aring")
HB_STR("Atilde")
HB_STR("Ccedilla")
HB_STR("Eacute")
HB_STR("Ecircumflex")
HB_STR("Edieresis")
HB_STR("Egrave")
HB_STR("Iacute")
HB_STR("Icircumflex")
HB_STR("Idieresis")
HB_STR("Igrave")
HB_STR("Ntilde")
HB_STR("Oacute")
HB_STR("Ocircumflex")
HB_STR("Odieresis")
HB_STR("Ograve")
HB_STR("Otilde")
HB_STR("Scaron")
HB_STR("Uacute")
HB_STR("Ucircumflex")
HB_STR("Udieresis")
HB_STR("Ugrave")
HB_STR("Yacute")
HB_STR("Ydieresis")
HB_STR("Zcaron")
HB_STR("aacute")
HB_STR("acircumflex")
HB_STR("adieresis")
HB_STR("agrave")
HB_STR("aring")
HB_STR("atilde")
HB_STR("ccedilla")
HB_STR("eacute")
HB_STR("ecircumflex")
HB_STR("edieresis")
HB_STR("egrave")
HB_STR("iacute")
HB_STR("icircumflex")
HB_STR("idieresis")
HB_STR("igrave")
HB_STR("ntilde")
HB_STR("oacute")
HB_STR("ocircumflex")
HB_STR("odieresis")
HB_STR("ograve")
HB_STR("otilde")
HB_STR("scaron")
HB_STR("uacute")
HB_STR("ucircumflex")
HB_STR("udieresis")
HB_STR("ugrave")
HB_STR("yacute")
HB_STR("ydieresis")
HB_STR("zcaron")
HB_STR("exclamsmall")
HB_STR("Hungarumlautsmall")
HB_STR("dollaroldstyle")
HB_STR("dollarsuperior")
HB_STR("ampersandsmall")
HB_STR("Acutesmall")
HB_STR("parenleftsuperior")
HB_STR("parenrightsuperior")
HB_STR("twodotenleader")
HB_STR("onedotenleader")
HB_STR("zerooldstyle")
HB_STR("oneoldstyle")
HB_STR("twooldstyle")
HB_STR("threeoldstyle")
HB_STR("fouroldstyle")
HB_STR("fiveoldstyle")
HB_STR("sixoldstyle")
HB_STR("sevenoldstyle")
HB_STR("eightoldstyle")
HB_STR("nineoldstyle")
HB_STR("commasuperior")
HB_STR("threequartersemdash")
HB_STR("periodsuperior")
HB_STR("questionsmall")
HB_STR("asuperior")
HB_STR("bsuperior")
HB_STR("centsuperior")
HB_STR("dsuperior")
HB_STR("esuperior")
HB_STR("isuperior")
HB_STR("lsuperior")
HB_STR("msuperior")
HB_STR("nsuperior")
HB_STR("osuperior")
HB_STR("rsuperior")
HB_STR("ssuperior")
HB_STR("tsuperior")
HB_STR("ff")
HB_STR("ffi")
HB_STR("ffl")
HB_STR("parenleftinferior")
HB_STR("parenrightinferior")
HB_STR("Circumflexsmall")
HB_STR("hyphensuperior")
HB_STR("Gravesmall")
HB_STR("Asmall")
HB_STR("Bsmall")
HB_STR("Csmall")
HB_STR("Dsmall")
HB_STR("Esmall")
HB_STR("Fsmall")
HB_STR("Gsmall")
HB_STR("Hsmall")
HB_STR("Ismall")
HB_STR("Jsmall")
HB_STR("Ksmall")
HB_STR("Lsmall")
HB_STR("Msmall")
HB_STR("Nsmall")
HB_STR("Osmall")
HB_STR("Psmall")
HB_STR("Qsmall")
HB_STR("Rsmall")
HB_STR("Ssmall")
HB_STR("Tsmall")
HB_STR("Usmall")
HB_STR("Vsmall")
HB_STR("Wsmall")
HB_STR("Xsmall")
HB_STR("Ysmall")
HB_STR("Zsmall")
HB_STR("colonmonetary")
HB_STR("onefitted")
HB_STR("rupiah")
HB_STR("Tildesmall")
HB_STR("exclamdownsmall")
HB_STR("centoldstyle")
HB_STR("Lslashsmall")
HB_STR("Scaronsmall")
HB_STR("Zcaronsmall")
HB_STR("Dieresissmall")
HB_STR("Brevesmall")
HB_STR("Caronsmall")
HB_STR("Dotaccentsmall")
HB_STR("Macronsmall")
HB_STR("figuredash")
HB_STR("hypheninferior")
HB_STR("Ogoneksmall")
HB_STR("Ringsmall")
HB_STR("Cedillasmall")
HB_STR("questiondownsmall")
HB_STR("oneeighth")
HB_STR("threeeighths")
HB_STR("fiveeighths")
HB_STR("seveneighths")
HB_STR("onethird")
HB_STR("twothirds")
HB_STR("zerosuperior")
HB_STR("foursuperior")
HB_STR("fivesuperior")
HB_STR("sixsuperior")
HB_STR("sevensuperior")
HB_STR("eightsuperior")
HB_STR("ninesuperior")
HB_STR("zeroinferior")
HB_STR("oneinferior")
HB_STR("twoinferior")
HB_STR("threeinferior")
HB_STR("fourinferior")
HB_STR("fiveinferior")
HB_STR("sixinferior")
HB_STR("seveninferior")
HB_STR("eightinferior")
HB_STR("nineinferior")
HB_STR("centinferior")
HB_STR("dollarinferior")
HB_STR("periodinferior")
HB_STR("commainferior")
HB_STR("Agravesmall")
HB_STR("Aacutesmall")
HB_STR("Acircumflexsmall")
HB_STR("Atildesmall")
HB_STR("Adieresissmall")
HB_STR("Aringsmall")
HB_STR("AEsmall")
HB_STR("Ccedillasmall")
HB_STR("Egravesmall")
HB_STR("Eacutesmall")
HB_STR("Ecircumflexsmall")
HB_STR("Edieresissmall")
HB_STR("Igravesmall")
HB_STR("Iacutesmall")
HB_STR("Icircumflexsmall")
HB_STR("Idieresissmall")
HB_STR("Ethsmall")
HB_STR("Ntildesmall")
HB_STR("Ogravesmall")
HB_STR("Oacutesmall")
HB_STR("Ocircumflexsmall")
HB_STR("Otildesmall")
HB_STR("Odieresissmall")
HB_STR("OEsmall")
HB_STR("Oslashsmall")
HB_STR("Ugravesmall")
HB_STR("Uacutesmall")
HB_STR("Ucircumflexsmall")
HB_STR("Udieresissmall")
HB_STR("Yacutesmall")
HB_STR("Thornsmall")
HB_STR("Ydieresissmall")
HB_STR("001.000")
HB_STR("001.001")
HB_STR("001.002")
HB_STR("001.003")
HB_STR("Black")
HB_STR("Bold")
HB_STR("Book")
HB_STR("Light")
HB_STR("Medium")
HB_STR("Regular")
HB_STR("Roman")
HB_STR("Semibold")
#endif /* HB_OT_CFF1_STD_STR_HH */

View File

@ -326,7 +326,7 @@ struct Charset0
void collect_glyph_to_sid_map (glyph_to_sid_map_t *mapping, unsigned int num_glyphs) const
{
mapping->resize (num_glyphs, false);
mapping->resize_dirty (num_glyphs);
for (hb_codepoint_t gid = 1; gid < num_glyphs; gid++)
mapping->arrayZ[gid] = {sids[gid - 1], gid};
}
@ -426,7 +426,7 @@ struct Charset1_2 {
void collect_glyph_to_sid_map (glyph_to_sid_map_t *mapping, unsigned int num_glyphs) const
{
mapping->resize (num_glyphs, false);
mapping->resize_dirty (num_glyphs);
hb_codepoint_t gid = 1;
if (gid >= num_glyphs)
return;

View File

@ -202,7 +202,11 @@ struct cff2_cs_opset_path_t : cff2_cs_opset_t<cff2_cs_opset_path_t, cff2_path_pa
bool OT::cff2::accelerator_t::get_path (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session) const
{
return get_path_at (font, glyph, draw_session, hb_array (font->coords, font->num_coords));
return get_path_at (font,
glyph,
draw_session,
hb_array (font->coords,
font->has_nonzero_coords ? font->num_coords : 0));
}
bool OT::cff2::accelerator_t::get_path_at (hb_font_t *font, hb_codepoint_t glyph, hb_draw_session_t &draw_session, hb_array_t<const int> coords) const

View File

@ -501,10 +501,6 @@ struct CmapSubtableFormat4
this->length = c->length () - table_initpos;
if ((long long) this->length != (long long) c->length () - table_initpos)
{
// Length overflowed. Discard the current object before setting the error condition, otherwise
// discard is a noop which prevents the higher level code from reverting the serializer to the
// pre-error state in cmap4 overflow handling code.
c->pop_discard ();
c->err (HB_SERIALIZE_ERROR_INT_OVERFLOW);
return;
}
@ -701,16 +697,7 @@ struct CmapSubtableFormat4
hb_barrier ();
if (unlikely (!c->check_range (this, length)))
{
/* Some broken fonts have too long of a "length" value.
* If that is the case, just change the value to truncate
* the subtable at the end of the blob. */
uint16_t new_length = (uint16_t) hb_min ((uintptr_t) 65535,
(uintptr_t) (c->end -
(char *) this));
if (!c->try_set (&length, new_length))
return_trace (false);
}
return_trace (false);
return_trace (16 + 4 * (unsigned int) segCountX2 <= length);
}
@ -1500,7 +1487,7 @@ struct CmapSubtable
bool get_glyph (hb_codepoint_t codepoint,
hb_codepoint_t *glyph) const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); return u.format0 .get_glyph (codepoint, glyph);
case 4: hb_barrier (); return u.format4 .get_glyph (codepoint, glyph);
case 6: hb_barrier (); return u.format6 .get_glyph (codepoint, glyph);
@ -1513,7 +1500,7 @@ struct CmapSubtable
}
void collect_unicodes (hb_set_t *out, unsigned int num_glyphs = UINT_MAX) const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); u.format0 .collect_unicodes (out); return;
case 4: hb_barrier (); u.format4 .collect_unicodes (out); return;
case 6: hb_barrier (); u.format6 .collect_unicodes (out); return;
@ -1529,7 +1516,7 @@ struct CmapSubtable
hb_map_t *mapping, /* OUT */
unsigned num_glyphs = UINT_MAX) const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); u.format0 .collect_mapping (unicodes, mapping); return;
case 4: hb_barrier (); u.format4 .collect_mapping (unicodes, mapping); return;
case 6: hb_barrier (); u.format6 .collect_mapping (unicodes, mapping); return;
@ -1543,7 +1530,7 @@ struct CmapSubtable
unsigned get_language () const
{
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); return u.format0 .get_language ();
case 4: hb_barrier (); return u.format4 .get_language ();
case 6: hb_barrier (); return u.format6 .get_language ();
@ -1574,9 +1561,9 @@ struct CmapSubtable
bool sanitize (hb_sanitize_context_t *c) const
{
TRACE_SANITIZE (this);
if (!u.format.sanitize (c)) return_trace (false);
if (!u.format.v.sanitize (c)) return_trace (false);
hb_barrier ();
switch (u.format) {
switch (u.format.v) {
case 0: hb_barrier (); return_trace (u.format0 .sanitize (c));
case 4: hb_barrier (); return_trace (u.format4 .sanitize (c));
case 6: hb_barrier (); return_trace (u.format6 .sanitize (c));
@ -1590,7 +1577,7 @@ struct CmapSubtable
public:
union {
HBUINT16 format; /* Format identifier */
struct { HBUINT16 v; } format; /* Format identifier */
CmapSubtableFormat0 format0;
CmapSubtableFormat4 format4;
CmapSubtableFormat6 format6;
@ -1600,7 +1587,7 @@ struct CmapSubtable
CmapSubtableFormat14 format14;
} u;
public:
DEFINE_SIZE_UNION (2, format);
DEFINE_SIZE_UNION (2, format.v);
};
@ -1646,7 +1633,7 @@ struct EncodingRecord
CmapSubtable *cmapsubtable = c->push<CmapSubtable> ();
unsigned origin_length = c->length ();
cmapsubtable->serialize (c, it, format, plan, &(base+subtable));
if (c->length () - origin_length > 0) *objidx = c->pop_pack ();
if (c->length () - origin_length > 0 && !c->in_error()) *objidx = c->pop_pack ();
else c->pop_discard ();
}
@ -1683,6 +1670,10 @@ struct SubtableUnicodesCache {
{
SubtableUnicodesCache* cache =
(SubtableUnicodesCache*) hb_malloc (sizeof(SubtableUnicodesCache));
if (unlikely (!cache))
return nullptr;
new (cache) SubtableUnicodesCache (source_table);
return cache;
}
@ -1776,6 +1767,10 @@ struct cmap
;
SubtableUnicodesCache* cache = SubtableUnicodesCache::create(source_table);
if (unlikely (!cache))
return nullptr;
for (const EncodingRecord& _ : it)
cache->set_for(&_); // populate the cache for this encoding record.
@ -1810,7 +1805,7 @@ struct cmap
if (c->in_error ())
return false;
unsigned format = (base+_.subtable).u.format;
unsigned format = (base+_.subtable).u.format.v;
if (format != 4 && format != 12 && format != 14) continue;
const hb_set_t* unicodes_set = unicodes_cache->set_for (&_, local_unicodes_cache);
@ -1912,7 +1907,7 @@ struct cmap
+ hb_iter (encodingRecord)
| hb_map (&EncodingRecord::subtable)
| hb_map (hb_add (this))
| hb_filter ([&] (const CmapSubtable& _) { return _.u.format == 14; })
| hb_filter ([&] (const CmapSubtable& _) { return _.u.format.v == 14; })
| hb_apply ([=] (const CmapSubtable& _) { _.u.format14.closure_glyphs (unicodes, glyphset); })
;
}
@ -1937,7 +1932,7 @@ struct cmap
for (const EncodingRecord& _ : encodingrec_iter)
{
unsigned format = (this + _.subtable).u.format;
unsigned format = (this + _.subtable).u.format.v;
if (format == 12) has_format12 = true;
const EncodingRecord *table = std::addressof (_);
@ -2025,7 +2020,7 @@ struct cmap
this->subtable_uvs = &Null (CmapSubtableFormat14);
{
const CmapSubtable *st = table->find_subtable (0, 5);
if (st && st->u.format == 14)
if (st && st->u.format.v == 14)
subtable_uvs = &st->u.format14;
}
@ -2069,7 +2064,7 @@ struct cmap
else
#endif
{
switch (subtable->u.format) {
switch (subtable->u.format.v) {
/* Accelerate format 4 and format 12. */
default:
this->get_glyph_funcZ = get_glyph_from<CmapSubtable>;
@ -2276,7 +2271,7 @@ struct cmap
(_.platformID == 0 && _.encodingID == 4) ||
(_.platformID == 3 && _.encodingID == 1) ||
(_.platformID == 3 && _.encodingID == 10) ||
(cmap + _.subtable).u.format == 14;
(cmap + _.subtable).u.format.v == 14;
}
protected:

View File

@ -37,6 +37,7 @@
#include "hb-ot-cmap-table.hh"
#include "hb-ot-glyf-table.hh"
#include "hb-ot-var-gvar-table.hh"
#include "hb-ot-cff2-table.hh"
#include "hb-ot-cff1-table.hh"
#include "hb-ot-hmtx-table.hh"
@ -64,18 +65,22 @@
using hb_ot_font_advance_cache_t = hb_cache_t<24, 16>;
static_assert (sizeof (hb_ot_font_advance_cache_t) == 1024, "");
using hb_ot_font_origin_cache_t = hb_cache_t<20, 20>;
static_assert (sizeof (hb_ot_font_origin_cache_t) == 1024, "");
struct hb_ot_font_t
{
const hb_ot_face_t *ot_face;
/* h_advance caching */
mutable hb_atomic_t<int> cached_serial;
mutable hb_atomic_t<int> cached_coords_serial;
struct advance_cache_t
struct direction_cache_t
{
mutable hb_atomic_t<hb_ot_font_advance_cache_t *> advance_cache;
mutable hb_atomic_t<OT::ItemVariationStore::cache_t *> varStore_cache;
mutable hb_atomic_t<OT::hb_scalar_cache_t *> varStore_cache;
~advance_cache_t ()
~direction_cache_t ()
{
clear ();
}
@ -116,7 +121,7 @@ struct hb_ot_font_t
goto retry;
}
OT::ItemVariationStore::cache_t *acquire_varStore_cache (const OT::ItemVariationStore &varStore) const
OT::hb_scalar_cache_t *acquire_varStore_cache (const OT::ItemVariationStore &varStore) const
{
retry:
auto *cache = varStore_cache.get_acquire ();
@ -127,7 +132,7 @@ struct hb_ot_font_t
else
goto retry;
}
void release_varStore_cache (OT::ItemVariationStore::cache_t *cache) const
void release_varStore_cache (OT::hb_scalar_cache_t *cache) const
{
if (!cache)
return;
@ -154,17 +159,157 @@ struct hb_ot_font_t
} h, v;
struct origin_cache_t
{
mutable hb_atomic_t<hb_ot_font_origin_cache_t *> origin_cache;
mutable hb_atomic_t<OT::hb_scalar_cache_t *> varStore_cache;
~origin_cache_t ()
{
clear ();
}
hb_ot_font_origin_cache_t *acquire_origin_cache () const
{
retry:
auto *cache = origin_cache.get_acquire ();
if (!cache)
{
cache = (hb_ot_font_origin_cache_t *) hb_malloc (sizeof (hb_ot_font_origin_cache_t));
if (!cache)
return nullptr;
new (cache) hb_ot_font_origin_cache_t;
return cache;
}
if (origin_cache.cmpexch (cache, nullptr))
return cache;
else
goto retry;
}
void release_origin_cache (hb_ot_font_origin_cache_t *cache) const
{
if (!cache)
return;
if (!origin_cache.cmpexch (nullptr, cache))
hb_free (cache);
}
void clear_origin_cache () const
{
retry:
auto *cache = origin_cache.get_acquire ();
if (!cache)
return;
if (origin_cache.cmpexch (cache, nullptr))
hb_free (cache);
else
goto retry;
}
OT::hb_scalar_cache_t *acquire_varStore_cache (const OT::ItemVariationStore &varStore) const
{
retry:
auto *cache = varStore_cache.get_acquire ();
if (!cache)
return varStore.create_cache ();
if (varStore_cache.cmpexch (cache, nullptr))
return cache;
else
goto retry;
}
void release_varStore_cache (OT::hb_scalar_cache_t *cache) const
{
if (!cache)
return;
if (!varStore_cache.cmpexch (nullptr, cache))
OT::ItemVariationStore::destroy_cache (cache);
}
void clear_varStore_cache () const
{
retry:
auto *cache = varStore_cache.get_acquire ();
if (!cache)
return;
if (varStore_cache.cmpexch (cache, nullptr))
OT::ItemVariationStore::destroy_cache (cache);
else
goto retry;
}
void clear () const
{
clear_origin_cache ();
clear_varStore_cache ();
}
} v_origin;
struct draw_cache_t
{
mutable hb_atomic_t<OT::hb_scalar_cache_t *> gvar_cache;
~draw_cache_t ()
{
clear ();
}
OT::hb_scalar_cache_t *acquire_gvar_cache (const OT::gvar_accelerator_t &gvar) const
{
retry:
auto *cache = gvar_cache.get_acquire ();
if (!cache)
return gvar.create_cache ();
if (gvar_cache.cmpexch (cache, nullptr))
return cache;
else
goto retry;
}
void release_gvar_cache (OT::hb_scalar_cache_t *cache) const
{
if (!cache)
return;
if (!gvar_cache.cmpexch (nullptr, cache))
OT::gvar_accelerator_t::destroy_cache (cache);
}
void clear_gvar_cache () const
{
retry:
auto *cache = gvar_cache.get_acquire ();
if (!cache)
return;
if (gvar_cache.cmpexch (cache, nullptr))
OT::gvar_accelerator_t::destroy_cache (cache);
else
goto retry;
}
void clear () const
{
clear_gvar_cache ();
}
} draw;
void check_serial (hb_font_t *font) const
{
int font_serial = font->serial_coords.get_acquire ();
if (cached_serial.get_acquire () != font_serial)
{
/* These caches are dependent on scale and synthetic settings.
* Any change to the font invalidates them. */
v_origin.clear ();
if (cached_coords_serial.get_acquire () == font_serial)
return;
cached_serial.set_release (font_serial);
}
h.clear ();
v.clear ();
int font_serial_coords = font->serial_coords.get_acquire ();
if (cached_coords_serial.get_acquire () != font_serial_coords)
{
/* These caches are independent of scale or synthetic settings.
* Just variation changes will invalidate them. */
h.clear ();
v.clear ();
draw.clear ();
cached_coords_serial.set_release (font_serial);
cached_coords_serial.set_release (font_serial_coords);
}
}
};
@ -242,37 +387,59 @@ hb_ot_get_glyph_h_advances (hb_font_t* font, void* font_data,
unsigned advance_stride,
void *user_data HB_UNUSED)
{
// Duplicated in v_advances. Ugly. Keep in sync'ish.
const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data;
const hb_ot_face_t *ot_face = ot_font->ot_face;
const OT::hmtx_accelerator_t &hmtx = *ot_face->hmtx;
ot_font->check_serial (font);
const OT::HVAR &HVAR = *hmtx.var_table;
const OT::ItemVariationStore &varStore = &HVAR + HVAR.varStore;
OT::ItemVariationStore::cache_t *varStore_cache = ot_font->h.acquire_varStore_cache (varStore);
hb_ot_font_advance_cache_t *advance_cache = nullptr;
bool use_cache = font->num_coords;
if (use_cache)
{
advance_cache = ot_font->h.acquire_advance_cache ();
if (!advance_cache)
use_cache = false;
}
if (!use_cache)
if (unlikely (!hmtx.has_data ()))
{
hb_position_t advance = font->face->get_upem () / 2;
advance = font->em_scale_x (advance);
for (unsigned int i = 0; i < count; i++)
{
*first_advance = font->em_scale_x (hmtx.get_advance_with_var_unscaled (*first_glyph, font, varStore_cache));
*first_advance = advance;
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
return;
}
#ifndef HB_NO_VAR
if (!font->has_nonzero_coords)
{
fallback:
#else
{
#endif
// Just plain htmx data. No need to cache.
for (unsigned int i = 0; i < count; i++)
{
*first_advance = font->em_scale_x (hmtx.get_advance_without_var_unscaled (*first_glyph));
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
return;
}
else
{ /* Use cache. */
#ifndef HB_NO_VAR
/* has_nonzero_coords. */
ot_font->check_serial (font);
hb_ot_font_advance_cache_t *advance_cache = ot_font->h.acquire_advance_cache ();
if (!advance_cache)
{
// malloc failure. Just use the fallback non-variable path.
goto fallback;
}
/* If HVAR is present, use it.*/
const OT::HVAR &HVAR = *hmtx.var_table;
if (HVAR.has_data ())
{
const OT::ItemVariationStore &varStore = &HVAR + HVAR.varStore;
OT::hb_scalar_cache_t *varStore_cache = ot_font->h.acquire_varStore_cache (varStore);
for (unsigned int i = 0; i < count; i++)
{
hb_position_t v;
@ -289,10 +456,49 @@ hb_ot_get_glyph_h_advances (hb_font_t* font, void* font_data,
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
ot_font->h.release_varStore_cache (varStore_cache);
ot_font->h.release_advance_cache (advance_cache);
return;
}
ot_font->h.release_varStore_cache (varStore_cache);
const auto &gvar = *ot_face->gvar;
if (gvar.has_data ())
{
const auto &glyf = *ot_face->glyf;
auto *scratch = glyf.acquire_scratch ();
if (unlikely (!scratch))
{
ot_font->h.release_advance_cache (advance_cache);
goto fallback;
}
OT::hb_scalar_cache_t *gvar_cache = ot_font->draw.acquire_gvar_cache (gvar);
for (unsigned int i = 0; i < count; i++)
{
hb_position_t v;
unsigned cv;
if (advance_cache->get (*first_glyph, &cv))
v = cv;
else
{
v = glyf.get_advance_with_var_unscaled (*first_glyph, font, false, *scratch, gvar_cache);
advance_cache->set (*first_glyph, v);
}
*first_advance = font->em_scale_x (v);
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
ot_font->draw.release_gvar_cache (gvar_cache);
glyf.release_scratch (scratch);
ot_font->h.release_advance_cache (advance_cache);
return;
}
ot_font->h.release_advance_cache (advance_cache);
// No HVAR or GVAR. Just use the fallback non-variable path.
goto fallback;
#endif
}
#ifndef HB_NO_VERTICAL
@ -305,99 +511,290 @@ hb_ot_get_glyph_v_advances (hb_font_t* font, void* font_data,
unsigned advance_stride,
void *user_data HB_UNUSED)
{
// Duplicated from h_advances. Ugly. Keep in sync'ish.
const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data;
const hb_ot_face_t *ot_face = ot_font->ot_face;
const OT::vmtx_accelerator_t &vmtx = *ot_face->vmtx;
if (vmtx.has_data ())
if (unlikely (!vmtx.has_data ()))
{
hb_font_extents_t font_extents;
font->get_h_extents_with_fallback (&font_extents);
hb_position_t advance = font_extents.descender - font_extents.ascender;
for (unsigned int i = 0; i < count; i++)
{
*first_advance = advance;
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
return;
}
#ifndef HB_NO_VAR
if (!font->has_nonzero_coords)
{
fallback:
#else
{
#endif
// Just plain vtmx data. No need to cache.
for (unsigned int i = 0; i < count; i++)
{
*first_advance = font->em_scale_y (- (int) vmtx.get_advance_without_var_unscaled (*first_glyph));
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
return;
}
#ifndef HB_NO_VAR
/* has_nonzero_coords. */
ot_font->check_serial (font);
hb_ot_font_advance_cache_t *advance_cache = ot_font->v.acquire_advance_cache ();
if (!advance_cache)
{
// malloc failure. Just use the fallback non-variable path.
goto fallback;
}
/* If VVAR is present, use it.*/
const OT::VVAR &VVAR = *vmtx.var_table;
if (VVAR.has_data ())
{
ot_font->check_serial (font);
const OT::VVAR &VVAR = *vmtx.var_table;
const OT::ItemVariationStore &varStore = &VVAR + VVAR.varStore;
OT::ItemVariationStore::cache_t *varStore_cache = ot_font->v.acquire_varStore_cache (varStore);
// TODO Use advance_cache.
OT::hb_scalar_cache_t *varStore_cache = ot_font->v.acquire_varStore_cache (varStore);
for (unsigned int i = 0; i < count; i++)
{
*first_advance = font->em_scale_y (-(int) vmtx.get_advance_with_var_unscaled (*first_glyph, font, varStore_cache));
hb_position_t v;
unsigned cv;
if (advance_cache->get (*first_glyph, &cv))
v = cv;
else
{
v = vmtx.get_advance_with_var_unscaled (*first_glyph, font, varStore_cache);
advance_cache->set (*first_glyph, v);
}
*first_advance = font->em_scale_y (- (int) v);
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
ot_font->v.release_varStore_cache (varStore_cache);
ot_font->v.release_advance_cache (advance_cache);
return;
}
else
const auto &gvar = *ot_face->gvar;
if (gvar.has_data ())
{
hb_font_extents_t font_extents;
font->get_h_extents_with_fallback (&font_extents);
hb_position_t advance = -(font_extents.ascender - font_extents.descender);
const auto &glyf = *ot_face->glyf;
auto *scratch = glyf.acquire_scratch ();
if (unlikely (!scratch))
{
ot_font->v.release_advance_cache (advance_cache);
goto fallback;
}
OT::hb_scalar_cache_t *gvar_cache = ot_font->draw.acquire_gvar_cache (gvar);
for (unsigned int i = 0; i < count; i++)
{
*first_advance = advance;
hb_position_t v;
unsigned cv;
if (advance_cache->get (*first_glyph, &cv))
v = cv;
else
{
v = glyf.get_advance_with_var_unscaled (*first_glyph, font, true, *scratch, gvar_cache);
advance_cache->set (*first_glyph, v);
}
*first_advance = font->em_scale_y (- (int) v);
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_advance = &StructAtOffsetUnaligned<hb_position_t> (first_advance, advance_stride);
}
ot_font->draw.release_gvar_cache (gvar_cache);
glyf.release_scratch (scratch);
ot_font->v.release_advance_cache (advance_cache);
return;
}
ot_font->v.release_advance_cache (advance_cache);
// No VVAR or GVAR. Just use the fallback non-variable path.
goto fallback;
#endif
}
#endif
#ifndef HB_NO_VERTICAL
HB_HOT
static hb_bool_t
hb_ot_get_glyph_v_origin (hb_font_t *font,
void *font_data,
hb_codepoint_t glyph,
hb_position_t *x,
hb_position_t *y,
void *user_data HB_UNUSED)
hb_ot_get_glyph_v_origins (hb_font_t *font,
void *font_data,
unsigned int count,
const hb_codepoint_t *first_glyph,
unsigned glyph_stride,
hb_position_t *first_x,
unsigned x_stride,
hb_position_t *first_y,
unsigned y_stride,
void *user_data HB_UNUSED)
{
const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data;
const hb_ot_face_t *ot_face = ot_font->ot_face;
*x = font->get_glyph_h_advance (glyph) / 2;
const OT::VORG &VORG = *ot_face->VORG;
if (VORG.has_data ())
/* First, set all the x values to half the advance width. */
font->get_glyph_h_advances (count,
first_glyph, glyph_stride,
first_x, x_stride);
for (unsigned i = 0; i < count; i++)
{
float delta = 0;
*first_x /= 2;
first_x = &StructAtOffsetUnaligned<hb_position_t> (first_x, x_stride);
}
/* The vertical origin business is messy...
*
* We allocate the cache, then have various code paths that use the cache.
* Each one is responsible to free it before returning.
*/
hb_ot_font_origin_cache_t *origin_cache = ot_font->v_origin.acquire_origin_cache ();
/* If there is VORG, always use it. It uses VVAR for variations if necessary. */
const OT::VORG &VORG = *ot_face->VORG;
if (origin_cache && VORG.has_data ())
{
#ifndef HB_NO_VAR
const OT::vmtx_accelerator_t &vmtx = *ot_face->vmtx;
const OT::VVAR &VVAR = *vmtx.var_table;
if (font->num_coords)
VVAR.get_vorg_delta_unscaled (glyph,
font->coords, font->num_coords,
&delta);
if (!font->has_nonzero_coords)
#endif
{
for (unsigned i = 0; i < count; i++)
{
hb_position_t origin;
unsigned cv;
if (origin_cache->get (*first_glyph, &cv))
origin = font->y_scale < 0 ? -static_cast<hb_position_t>(cv) : static_cast<hb_position_t>(cv);
else
{
origin = font->em_scalef_y (VORG.get_y_origin (*first_glyph));
origin_cache->set (*first_glyph, font->y_scale < 0 ? -origin : origin);
}
*y = font->em_scalef_y (VORG.get_y_origin (glyph) + delta);
*first_y = origin;
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
}
#ifndef HB_NO_VAR
else
{
const OT::VVAR &VVAR = *ot_face->vmtx->var_table;
const auto &varStore = &VVAR + VVAR.varStore;
auto *varStore_cache = ot_font->v_origin.acquire_varStore_cache (varStore);
for (unsigned i = 0; i < count; i++)
{
hb_position_t origin;
unsigned cv;
if (origin_cache->get (*first_glyph, &cv))
origin = font->y_scale < 0 ? -static_cast<hb_position_t>(cv) : static_cast<hb_position_t>(cv);
else
{
origin = font->em_scalef_y (VORG.get_y_origin (*first_glyph) +
VVAR.get_vorg_delta_unscaled (*first_glyph,
font->coords, font->num_coords,
varStore_cache));
origin_cache->set (*first_glyph, font->y_scale < 0 ? -origin : origin);
}
*first_y = origin;
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
ot_font->v_origin.release_varStore_cache (varStore_cache);
}
#endif
ot_font->v_origin.release_origin_cache (origin_cache);
return true;
}
hb_glyph_extents_t extents = {0};
if (hb_font_get_glyph_extents (font, glyph, &extents))
/* If and only if `vmtx` is present and it's a `glyf` font,
* we use the top phantom point, deduced from vmtx,glyf[,gvar]. */
const auto &vmtx = *ot_face->vmtx;
const auto &glyf = *ot_face->glyf;
if (origin_cache && vmtx.has_data() && glyf.has_data ())
{
const OT::vmtx_accelerator_t &vmtx = *ot_face->vmtx;
int tsb = 0;
if (vmtx.get_leading_bearing_with_var_unscaled (font, glyph, &tsb))
auto *scratch = glyf.acquire_scratch ();
if (unlikely (!scratch))
{
*y = extents.y_bearing + font->em_scale_y (tsb);
return true;
ot_font->v_origin.release_origin_cache (origin_cache);
return false;
}
OT::hb_scalar_cache_t *gvar_cache = font->has_nonzero_coords ?
ot_font->draw.acquire_gvar_cache (*ot_face->gvar) :
nullptr;
for (unsigned i = 0; i < count; i++)
{
hb_position_t origin;
unsigned cv;
if (origin_cache->get (*first_glyph, &cv))
origin = font->y_scale < 0 ? -static_cast<hb_position_t>(cv) : static_cast<hb_position_t>(cv);
else
{
origin = font->em_scalef_y (glyf.get_v_origin_with_var_unscaled (*first_glyph, font, *scratch, gvar_cache));
origin_cache->set (*first_glyph, font->y_scale < 0 ? -origin : origin);
}
*first_y = origin;
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
hb_font_extents_t font_extents;
font->get_h_extents_with_fallback (&font_extents);
hb_position_t advance = font_extents.ascender - font_extents.descender;
hb_position_t diff = advance - -extents.height;
*y = extents.y_bearing + (diff >> 1);
if (gvar_cache)
ot_font->draw.release_gvar_cache (gvar_cache);
glyf.release_scratch (scratch);
ot_font->v_origin.release_origin_cache (origin_cache);
return true;
}
hb_font_extents_t font_extents;
font->get_h_extents_with_fallback (&font_extents);
*y = font_extents.ascender;
/* Otherwise, use glyph extents to center the glyph vertically.
* If getting glyph extents failed, just use the font ascender. */
if (origin_cache && font->has_glyph_extents_func ())
{
hb_font_extents_t font_extents;
font->get_h_extents_with_fallback (&font_extents);
hb_position_t font_advance = font_extents.ascender - font_extents.descender;
for (unsigned i = 0; i < count; i++)
{
hb_position_t origin;
unsigned cv;
if (origin_cache->get (*first_glyph, &cv))
origin = font->y_scale < 0 ? -static_cast<hb_position_t>(cv) : static_cast<hb_position_t>(cv);
else
{
hb_glyph_extents_t extents = {0};
if (likely (font->get_glyph_extents (*first_glyph, &extents)))
origin = extents.y_bearing + ((font_advance - -extents.height) >> 1);
else
origin = font_extents.ascender;
origin_cache->set (*first_glyph, font->y_scale < 0 ? -origin : origin);
}
*first_y = origin;
first_glyph = &StructAtOffsetUnaligned<hb_codepoint_t> (first_glyph, glyph_stride);
first_y = &StructAtOffsetUnaligned<hb_position_t> (first_y, y_stride);
}
}
ot_font->v_origin.release_origin_cache (origin_cache);
return true;
}
#endif
@ -498,17 +895,33 @@ hb_ot_draw_glyph_or_fail (hb_font_t *font,
hb_draw_funcs_t *draw_funcs, void *draw_data,
void *user_data)
{
const hb_ot_font_t *ot_font = (const hb_ot_font_t *) font_data;
hb_draw_session_t draw_session {draw_funcs, draw_data};
bool ret = false;
OT::hb_scalar_cache_t *gvar_cache = nullptr;
if (font->num_coords)
{
ot_font->check_serial (font);
gvar_cache = ot_font->draw.acquire_gvar_cache (*ot_font->ot_face->gvar);
}
#ifndef HB_NO_VAR_COMPOSITES
if (font->face->table.VARC->get_path (font, glyph, draw_session)) return true;
if (font->face->table.VARC->get_path (font, glyph, draw_session)) { ret = true; goto done; }
#endif
// Keep the following in synch with VARC::get_path_at()
if (font->face->table.glyf->get_path (font, glyph, draw_session)) return true;
if (font->face->table.glyf->get_path (font, glyph, draw_session, gvar_cache)) { ret = true; goto done; }
#ifndef HB_NO_CFF
if (font->face->table.cff2->get_path (font, glyph, draw_session)) return true;
if (font->face->table.cff1->get_path (font, glyph, draw_session)) return true;
if (font->face->table.cff2->get_path (font, glyph, draw_session)) { ret = true; goto done; }
if (font->face->table.cff1->get_path (font, glyph, draw_session)) { ret = true; goto done; }
#endif
return false;
done:
ot_font->draw.release_gvar_cache (gvar_cache);
return ret;
}
#endif
@ -548,12 +961,11 @@ static struct hb_ot_font_funcs_lazy_loader_t : hb_font_funcs_lazy_loader_t<hb_ot
hb_font_funcs_set_font_h_extents_func (funcs, hb_ot_get_font_h_extents, nullptr, nullptr);
hb_font_funcs_set_glyph_h_advances_func (funcs, hb_ot_get_glyph_h_advances, nullptr, nullptr);
//hb_font_funcs_set_glyph_h_origin_func (funcs, hb_ot_get_glyph_h_origin, nullptr, nullptr);
#ifndef HB_NO_VERTICAL
hb_font_funcs_set_font_v_extents_func (funcs, hb_ot_get_font_v_extents, nullptr, nullptr);
hb_font_funcs_set_glyph_v_advances_func (funcs, hb_ot_get_glyph_v_advances, nullptr, nullptr);
hb_font_funcs_set_glyph_v_origin_func (funcs, hb_ot_get_glyph_v_origin, nullptr, nullptr);
hb_font_funcs_set_glyph_v_origins_func (funcs, hb_ot_get_glyph_v_origins, nullptr, nullptr);
#endif
#ifndef HB_NO_DRAW

View File

@ -45,16 +45,6 @@
#define HB_OT_TAG_vmtx HB_TAG('v','m','t','x')
HB_INTERNAL bool
_glyf_get_leading_bearing_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical, int *lsb);
HB_INTERNAL unsigned
_glyf_get_advance_with_var_unscaled (hb_font_t *font, hb_codepoint_t glyph, bool is_vertical);
HB_INTERNAL bool
_glyf_get_leading_bearing_without_var_unscaled (hb_face_t *face, hb_codepoint_t gid, bool is_vertical, int *lsb);
namespace OT {
@ -237,7 +227,7 @@ struct hmtxvmtx
auto it =
+ hb_iter (c->plan->new_to_old_gid_list)
| hb_map ([c, &_mtx, mtx_map] (hb_codepoint_pair_t _)
| hb_map ([&_mtx, mtx_map] (hb_codepoint_pair_t _)
{
hb_codepoint_t new_gid = _.first;
hb_codepoint_t old_gid = _.second;
@ -246,8 +236,7 @@ struct hmtxvmtx
if (!mtx_map->has (new_gid, &v))
{
int lsb = 0;
if (!_mtx.get_leading_bearing_without_var_unscaled (old_gid, &lsb))
(void) _glyf_get_leading_bearing_without_var_unscaled (c->plan->source, old_gid, !T::is_horizontal, &lsb);
_mtx.get_leading_bearing_without_var_unscaled (old_gid, &lsb);
return hb_pair (_mtx.get_advance_without_var_unscaled (old_gid), +lsb);
}
return *v;
@ -326,49 +315,23 @@ struct hmtxvmtx
bool has_data () const { return (bool) num_bearings; }
bool get_leading_bearing_without_var_unscaled (hb_codepoint_t glyph,
void get_leading_bearing_without_var_unscaled (hb_codepoint_t glyph,
int *lsb) const
{
if (glyph < num_long_metrics)
{
*lsb = table->longMetricZ[glyph].sb;
return true;
return;
}
if (unlikely (glyph >= num_bearings))
return false;
{
*lsb = 0;
return;
}
const FWORD *bearings = (const FWORD *) &table->longMetricZ[num_long_metrics];
*lsb = bearings[glyph - num_long_metrics];
return true;
}
bool get_leading_bearing_with_var_unscaled (hb_font_t *font,
hb_codepoint_t glyph,
int *lsb) const
{
if (!font->num_coords)
return get_leading_bearing_without_var_unscaled (glyph, lsb);
#ifndef HB_NO_VAR
float delta;
if (var_table->get_lsb_delta_unscaled (glyph, font->coords, font->num_coords, &delta) &&
get_leading_bearing_without_var_unscaled (glyph, lsb))
{
*lsb += roundf (delta);
return true;
}
// If there's no vmtx data, the phantom points from glyf table are not accurate,
// so we cannot take the next path.
bool is_vertical = T::tableTag == HB_OT_TAG_vmtx;
if (is_vertical && !has_data ())
return false;
return _glyf_get_leading_bearing_with_var_unscaled (font, glyph, is_vertical, lsb);
#else
return false;
#endif
}
unsigned int get_advance_without_var_unscaled (hb_codepoint_t glyph) const
@ -402,27 +365,17 @@ struct hmtxvmtx
return advances[hb_min (glyph - num_bearings, num_advances - num_bearings - 1)];
}
unsigned get_advance_with_var_unscaled (hb_codepoint_t glyph,
hb_font_t *font,
ItemVariationStore::cache_t *store_cache = nullptr) const
#ifndef HB_NO_VAR
unsigned get_advance_with_var_unscaled (hb_codepoint_t glyph,
hb_font_t *font,
hb_scalar_cache_t *store_cache = nullptr) const
{
unsigned int advance = get_advance_without_var_unscaled (glyph);
#ifndef HB_NO_VAR
if (unlikely (glyph >= num_bearings) || !font->num_coords)
return advance;
if (var_table.get_length ())
return advance + roundf (var_table->get_advance_delta_unscaled (glyph,
font->coords, font->num_coords,
store_cache));
unsigned glyf_advance = _glyf_get_advance_with_var_unscaled (font, glyph, T::tableTag == HB_OT_TAG_vmtx);
return glyf_advance ? glyf_advance : advance;
#else
return advance;
#endif
return hb_max(0.0f, advance + roundf (var_table->get_advance_delta_unscaled (glyph,
font->coords, font->num_coords,
store_cache)));
}
#endif
protected:
// 0 <= num_long_metrics <= num_bearings <= num_advances <= num_glyphs

Some files were not shown because too many files have changed in this diff Show More