diff --git a/src/hb-repacker.hh b/src/hb-repacker.hh index d536e003e..7339eeb2c 100644 --- a/src/hb-repacker.hh +++ b/src/hb-repacker.hh @@ -45,24 +45,57 @@ struct graph_t vertex_t () : distance (0), space (0), - incoming_edges (0), + parents (), start (0), end (0), priority(0) {} - void fini () { obj.fini (); } + void fini () { + obj.fini (); + parents.fini (); + } hb_serialize_context_t::object_t obj; int64_t distance; int64_t space; - unsigned incoming_edges; + hb_vector_t parents; unsigned start; unsigned end; unsigned priority; bool is_shared () const { - return incoming_edges > 1; + return parents.length > 1; + } + + unsigned incoming_edges () const + { + return parents.length; + } + + void remove_parent (unsigned parent_index) + { + for (unsigned i = 0; i < parents.length; i++) + { + if (parents[i] != parent_index) continue; + parents.remove (i); + break; + } + } + + void remap_parents (const hb_vector_t& id_map) + { + for (unsigned i = 0; i < parents.length; i++) + parents[i] = id_map[parents[i]]; + } + + void remap_parent (unsigned old_index, unsigned new_index) + { + for (unsigned i = 0; i < parents.length; i++) + { + if (parents[i] == old_index) + parents[i] = new_index; + } } bool is_leaf () const @@ -100,33 +133,6 @@ struct graph_t unsigned child; }; - struct clone_buffer_t - { - clone_buffer_t () : head (nullptr), tail (nullptr) {} - - bool copy (const hb_serialize_context_t::object_t& object) - { - fini (); - unsigned size = object.tail - object.head; - head = (char*) hb_malloc (size); - if (!head) return false; - - memcpy (head, object.head, size); - tail = head + size; - return true; - } - - char* head; - char* tail; - - void fini () - { - if (!head) return; - hb_free (head); - head = nullptr; - } - }; - /* * A topological sorting of an object graph. 
Ordered * in reverse serialization order (first object in the @@ -135,11 +141,12 @@ struct graph_t * serializer */ graph_t (const hb_vector_t& objects) - : edge_count_invalid (true), + : parents_invalid (true), distance_invalid (true), positions_invalid (true), successful (true) { + num_roots_for_space_.push (1); bool removed_nil = false; for (unsigned i = 0; i < objects.length; i++) { @@ -166,12 +173,13 @@ struct graph_t ~graph_t () { vertices_.fini_deep (); - clone_buffers_.fini_deep (); } bool in_error () const { - return !successful || vertices_.in_error () || clone_buffers_.in_error (); + return !successful || + vertices_.in_error () || + num_roots_for_space_.in_error (); } const vertex_t& root () const @@ -232,12 +240,13 @@ struct graph_t hb_vector_t queue; hb_vector_t sorted_graph; + if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return; hb_vector_t id_map; if (unlikely (!check_success (id_map.resize (vertices_.length)))) return; hb_vector_t removed_edges; if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return; - update_incoming_edge_count (); + update_parents (); queue.push (root_idx ()); int new_id = vertices_.length - 1; @@ -248,12 +257,12 @@ struct graph_t queue.remove (0); vertex_t& next = vertices_[next_id]; - sorted_graph.push (next); + sorted_graph[new_id] = next; id_map[next_id] = new_id--; for (const auto& link : next.obj.links) { removed_edges[link.objidx]++; - if (!(vertices_[link.objidx].incoming_edges - removed_edges[link.objidx])) + if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx])) queue.push (link.objidx); } } @@ -261,14 +270,11 @@ struct graph_t check_success (!queue.in_error ()); check_success (!sorted_graph.in_error ()); if (!check_success (new_id == -1)) - DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected."); + print_orphaned_nodes (); - remap_obj_indices (id_map, &sorted_graph); + remap_all_obj_indices (id_map, &sorted_graph); - sorted_graph.as_array ().reverse (); - - vertices_.fini_deep (); - vertices_ = sorted_graph; + vertices_.swap (sorted_graph); sorted_graph.fini_deep (); } @@ -289,12 +295,13 @@ struct graph_t hb_priority_queue_t queue; hb_vector_t sorted_graph; + if (unlikely (!check_success (sorted_graph.resize (vertices_.length)))) return; hb_vector_t id_map; if (unlikely (!check_success (id_map.resize (vertices_.length)))) return; hb_vector_t removed_edges; if (unlikely (!check_success (removed_edges.resize (vertices_.length)))) return; - update_incoming_edge_count (); + update_parents (); queue.insert (root ().modified_distance (0), root_idx ()); int new_id = root_idx (); @@ -304,12 +311,12 @@ struct graph_t unsigned next_id = queue.pop_minimum().second; vertex_t& next = vertices_[next_id]; - sorted_graph.push (next); + sorted_graph[new_id] = next; id_map[next_id] = new_id--; for (const auto& link : next.obj.links) { removed_edges[link.objidx]++; - if (!(vertices_[link.objidx].incoming_edges - removed_edges[link.objidx])) + if (!(vertices_[link.objidx].incoming_edges () - removed_edges[link.objidx])) // Add the order that the links were encountered to the priority. // This ensures that ties between priorities objects are broken in a consistent // way. 
More specifically this is set up so that if a set of objects have the same @@ -323,61 +330,89 @@ struct graph_t check_success (!queue.in_error ()); check_success (!sorted_graph.in_error ()); if (!check_success (new_id == -1)) - DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected."); + print_orphaned_nodes (); - remap_obj_indices (id_map, &sorted_graph); + remap_all_obj_indices (id_map, &sorted_graph); - sorted_graph.as_array ().reverse (); - - vertices_.fini_deep (); - vertices_ = sorted_graph; + vertices_.swap (sorted_graph); sorted_graph.fini_deep (); } /* - * Finds any links using 32 bits and isolates the subgraphs they point too. + * Assign unique space numbers to each connected subgraph of 32 bit offset(s). */ - bool isolate_32bit_links () + bool assign_32bit_spaces () { - bool made_changes = false; - hb_set_t target_links; unsigned root_index = root_idx (); - int64_t next_space = 0; + hb_set_t visited; + hb_set_t roots; for (unsigned i = 0; i <= root_index; i++) { - if (i == root_index && root_idx () > i) - // root index may have moved due to graph modifications. - i = root_idx (); - for (auto& l : vertices_[i].obj.links) { if (l.width == 4 && !l.is_signed) { - isolate_subgraph (l.objidx); - vertices_[l.objidx].space = next_space++; - distance_invalid = true; - made_changes = true; + roots.add (l.objidx); + find_subgraph (l.objidx, visited); } } } - return made_changes; + + // Mark everything not in the subgraphs of 32 bit roots as visited. + // This prevents 32 bit subgraphs from being connected via nodes not in the 32 bit subgraphs. + visited.invert (); + + if (!roots) return false; + + while (roots) + { + unsigned next = HB_SET_VALUE_INVALID; + if (!roots.next (&next)) break; + + hb_set_t connected_roots; + find_connected_nodes (next, roots, visited, connected_roots); + isolate_subgraph (connected_roots); + + unsigned next_space = this->next_space (); + num_roots_for_space_.push (0); + for (unsigned root : connected_roots) + { + DEBUG_MSG (SUBSET_REPACK, nullptr, "Subgraph %u gets space %u", root, next_space); + vertices_[root].space = next_space; + num_roots_for_space_[next_space] = num_roots_for_space_[next_space] + 1; + distance_invalid = true; + positions_invalid = true; + } + + // TODO(grieger): special case for GSUB/GPOS use extension promotions to move 16 bit space + // into the 32 bit space as needed, instead of using isolation. + } + + return true; } /* * Isolates the subgraph of nodes reachable from root. Any links to nodes in the subgraph * that originate from outside of the subgraph will be removed by duplicating the linked to * object. + * + * Indices stored in roots will be updated if any of the roots are duplicated to new indices. 
*/ - bool isolate_subgraph (unsigned root_idx) + bool isolate_subgraph (hb_set_t& roots) { - update_incoming_edge_count (); + update_parents (); hb_hashmap_t subgraph; // incoming edges to root_idx should be all 32 bit in length so we don't need to de-dup these // set the subgraph incoming edge count to match all of root_idx's incoming edges - subgraph.set (root_idx, vertices_[root_idx].incoming_edges); - find_subgraph (root_idx, subgraph); + hb_set_t parents; + for (unsigned root_idx : roots) + { + subgraph.set (root_idx, wide_parents (root_idx, parents)); + find_subgraph (root_idx, subgraph); + } + unsigned original_root_idx = root_idx (); hb_hashmap_t index_map; bool made_changes = false; for (auto entry : subgraph.iter ()) @@ -385,7 +420,7 @@ struct graph_t const auto& node = vertices_[entry.first]; unsigned subgraph_incoming_edges = entry.second; - if (subgraph_incoming_edges < node.incoming_edges) + if (subgraph_incoming_edges < node.incoming_edges ()) { // Only de-dup objects with incoming links from outside the subgraph. made_changes = true; @@ -396,6 +431,14 @@ struct graph_t if (!made_changes) return false; + if (original_root_idx != root_idx () + && parents.has (original_root_idx)) + { + // If the root idx has changed since parents was determined, update root idx in parents + parents.add (root_idx ()); + parents.del (original_root_idx); + } + auto new_subgraph = + subgraph.keys () | hb_map([&] (unsigned node_idx) { @@ -403,7 +446,21 @@ struct graph_t return node_idx; }) ; + remap_obj_indices (index_map, new_subgraph); + remap_obj_indices (index_map, parents.iter (), true); + + // Update roots set with new indices as needed. + unsigned next = HB_SET_VALUE_INVALID; + while (roots.next (&next)) + { + if (index_map.has (next)) + { + roots.del (next); + roots.add (index_map[next]); + } + } + return true; } @@ -421,6 +478,14 @@ struct graph_t } } + void find_subgraph (unsigned node_idx, hb_set_t& subgraph) + { + if (subgraph.has (node_idx)) return; + subgraph.add (node_idx); + for (const auto& link : vertices_[node_idx].obj.links) + find_subgraph (link.objidx, subgraph); + } + /* * duplicates all nodes in the subgraph reachable from node_idx. Does not re-assign * links. index_map is updated with mappings from old id to new id. If a duplication has already @@ -447,23 +512,21 @@ struct graph_t auto* clone = vertices_.push (); auto& child = vertices_[node_idx]; - clone_buffer_t* buffer = clone_buffers_.push (); - if (vertices_.in_error () - || clone_buffers_.in_error () - || !check_success (buffer->copy (child.obj))) { + if (vertices_.in_error ()) { return -1; } - clone->obj.head = buffer->head; - clone->obj.tail = buffer->tail; + clone->obj.head = child.obj.head; + clone->obj.tail = child.obj.tail; clone->distance = child.distance; clone->space = child.space; - clone->incoming_edges = 0; + clone->parents.reset (); + unsigned clone_idx = vertices_.length - 2; for (const auto& l : child.obj.links) { clone->obj.links.push (l); - vertices_[l.objidx].incoming_edges++; + vertices_[l.objidx].parents.push (clone_idx); } check_success (!clone->obj.links.in_error ()); @@ -472,10 +535,14 @@ struct graph_t // The root's obj idx does change, however since it's root nothing else refers to it. // all other obj idx's will be unaffected. 
vertex_t root = vertices_[vertices_.length - 2]; - vertices_[vertices_.length - 2] = *clone; + vertices_[clone_idx] = *clone; vertices_[vertices_.length - 1] = root; - return vertices_.length - 2; + // Since the root moved, update the parents arrays of all children on the root. + for (const auto& l : root.obj.links) + vertices_[l.objidx].remap_parent (root_idx () - 1, root_idx ()); + + return clone_idx; } /* @@ -485,7 +552,7 @@ struct graph_t */ bool duplicate (unsigned parent_idx, unsigned child_idx) { - update_incoming_edge_count (); + update_parents (); unsigned links_to_child = 0; for (const auto& l : vertices_[parent_idx].obj.links) @@ -493,7 +560,7 @@ struct graph_t if (l.objidx == child_idx) links_to_child++; } - if (vertices_[child_idx].incoming_edges <= links_to_child) + if (vertices_[child_idx].incoming_edges () <= links_to_child) { // Can't duplicate this node, doing so would orphan the original one as all remaining links // to child are from parent. @@ -509,19 +576,15 @@ struct graph_t if (clone_idx == (unsigned) -1) return false; // duplicate shifts the root node idx, so if parent_idx was root update it. if (parent_idx == clone_idx) parent_idx++; - auto& clone = vertices_[clone_idx]; - auto& child = vertices_[child_idx]; auto& parent = vertices_[parent_idx]; for (unsigned i = 0; i < parent.obj.links.length; i++) { auto& l = parent.obj.links[i]; - if (l.objidx == child_idx) - { - l.objidx = clone_idx; - clone.incoming_edges++; - child.incoming_edges--; - } + if (l.objidx != child_idx) + continue; + + reassign_link (l, parent_idx, clone_idx); } return true; @@ -571,52 +634,134 @@ struct graph_t return overflows->length; } + void print_orphaned_nodes () + { + if (!DEBUG_ENABLED(SUBSET_REPACK)) return; + + DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected."); + parents_invalid = true; + update_parents(); + + for (unsigned i = 0; i < root_idx (); i++) + { + const auto& v = vertices_[i]; + if (!v.parents) + DEBUG_MSG (SUBSET_REPACK, nullptr, "Node %u is orphaned.", i); + } + } + void print_overflows (const hb_vector_t& overflows) { if (!DEBUG_ENABLED(SUBSET_REPACK)) return; - update_incoming_edge_count (); + update_parents (); for (const auto& o : overflows) { const auto& parent = vertices_[o.parent]; const auto& child = vertices_[o.child]; DEBUG_MSG (SUBSET_REPACK, nullptr, - " overflow from %d (%d in, %d out) => %d (%d in, %d out)", + " overflow from " + "%4d (%4d in, %4d out, space %2d) => " + "%4d (%4d in, %4d out, space %2d)", o.parent, - parent.incoming_edges, + parent.incoming_edges (), parent.obj.links.length, + space_for (o.parent), o.child, - child.incoming_edges, - child.obj.links.length); + child.incoming_edges (), + child.obj.links.length, + space_for (o.child)); } } + unsigned num_roots_for_space (unsigned space) const + { + return num_roots_for_space_[space]; + } + + unsigned next_space () const + { + return num_roots_for_space_.length; + } + + void move_to_new_space (unsigned index) + { + auto& node = vertices_[index]; + num_roots_for_space_.push (1); + num_roots_for_space_[node.space] = num_roots_for_space_[node.space] - 1; + node.space = num_roots_for_space_.length - 1; + } + + unsigned space_for (unsigned index, unsigned* root = nullptr) const + { + const auto& node = vertices_[index]; + if (node.space) + { + if (root != nullptr) + *root = index; + return node.space; + } + + if (!node.parents) + { + if (root) + *root = index; + return 0; + } + + return space_for (node.parents[0], root); + } + void err_other_error () { this->successful = false; } 
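// Illustrative sketch only (not part of the patch): space_for () above walks the
// first-parent chain until it reaches either a node with a non-zero space (a 32 bit
// subgraph root) or a parentless node (the table root, which stays in space 0).
// An equivalent iterative form, assuming the same vertices_ layout, would be:
//
//   unsigned space_for_iterative (unsigned index, unsigned* root = nullptr) const
//   {
//     for (;;)
//     {
//       const auto& node = vertices_[index];
//       if (node.space || !node.parents)
//       {
//         if (root) *root = index;
//         return node.space;  // 0 when we fell through to the parentless root
//       }
//       index = node.parents[0];
//     }
//   }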
private: + /* + * Returns the numbers of incoming edges that are 32bits wide. + */ + unsigned wide_parents (unsigned node_idx, hb_set_t& parents) const + { + unsigned count = 0; + hb_set_t visited; + for (unsigned p : vertices_[node_idx].parents) + { + if (visited.has (p)) continue; + visited.add (p); + + for (const auto& l : vertices_[p].obj.links) + { + if (l.objidx == node_idx && l.width == 4 && !l.is_signed) + { + count++; + parents.add (p); + } + } + } + return count; + } + bool check_success (bool success) { return this->successful && (success || (err_other_error (), false)); } /* * Creates a map from objid to # of incoming edges. */ - void update_incoming_edge_count () + void update_parents () { - if (!edge_count_invalid) return; + if (!parents_invalid) return; for (unsigned i = 0; i < vertices_.length; i++) - vertices_[i].incoming_edges = 0; + vertices_[i].parents.reset (); - for (const vertex_t& v : vertices_) + for (unsigned p = 0; p < vertices_.length; p++) { - for (auto& l : v.obj.links) + for (auto& l : vertices_[p].obj.links) { - vertices_[l.objidx].incoming_edges++; + vertices_[l.objidx].parents.push (p); } } - edge_count_invalid = false; + parents_invalid = false; } /* @@ -667,19 +812,20 @@ struct graph_t hb_priority_queue_t queue; queue.insert (0, vertices_.length - 1); - hb_set_t visited; + hb_vector_t visited; + visited.resize (vertices_.length); while (!queue.in_error () && !queue.is_empty ()) { unsigned next_idx = queue.pop_minimum ().second; - if (visited.has (next_idx)) continue; + if (visited[next_idx]) continue; const auto& next = vertices_[next_idx]; int64_t next_distance = vertices_[next_idx].distance; - visited.add (next_idx); + visited[next_idx] = true; for (const auto& link : next.obj.links) { - if (visited.has (link.objidx)) continue; + if (visited[link.objidx]) continue; const auto& child = vertices_[link.objidx].obj; int64_t child_weight = (child.tail - child.head) + @@ -697,7 +843,7 @@ struct graph_t check_success (!queue.in_error ()); if (!check_success (queue.is_empty ())) { - DEBUG_MSG (SUBSET_REPACK, nullptr, "Graph is not fully connected."); + print_orphaned_nodes (); return; } @@ -746,12 +892,27 @@ struct graph_t } } + /* + * Updates a link in the graph to point to a different object. Corrects the + * parents vector on the previous and new child nodes. + */ + void reassign_link (hb_serialize_context_t::object_t::link_t& link, + unsigned parent_idx, + unsigned new_idx) + { + unsigned old_idx = link.objidx; + link.objidx = new_idx; + vertices_[old_idx].remove_parent (parent_idx); + vertices_[new_idx].parents.push (parent_idx); + } + /* * Updates all objidx's in all links using the provided mapping. Corrects incoming edge counts. */ template void remap_obj_indices (const hb_hashmap_t& id_map, - Iterator subgraph) + Iterator subgraph, + bool only_wide = false) { if (!id_map) return; for (unsigned i : subgraph) @@ -760,9 +921,9 @@ struct graph_t { auto& link = vertices_[i].obj.links[j]; if (!id_map.has (link.objidx)) continue; - vertices_[link.objidx].incoming_edges--; - link.objidx = id_map[link.objidx]; - vertices_[link.objidx].incoming_edges++; + if (only_wide && !(link.width == 4 && !link.is_signed)) continue; + + reassign_link (link, i, id_map[link.objidx]); } } } @@ -770,11 +931,12 @@ struct graph_t /* * Updates all objidx's in all links using the provided mapping. 
*/ - void remap_obj_indices (const hb_vector_t& id_map, - hb_vector_t* sorted_graph) const + void remap_all_obj_indices (const hb_vector_t& id_map, + hb_vector_t* sorted_graph) const { for (unsigned i = 0; i < sorted_graph->length; i++) { + (*sorted_graph)[i].remap_parents (id_map); for (unsigned j = 0; j < (*sorted_graph)[i].obj.links.length; j++) { auto& link = (*sorted_graph)[i].obj.links[j]; @@ -829,17 +991,74 @@ struct graph_t } } + /* + * Finds all nodes in targets that are reachable from start_idx, nodes in visited will be skipped. + * For this search the graph is treated as being undirected. + * + * Connected targets will be added to connected and removed from targets. All visited nodes + * will be added to visited. + */ + void find_connected_nodes (unsigned start_idx, + hb_set_t& targets, + hb_set_t& visited, + hb_set_t& connected) + { + if (visited.has (start_idx)) return; + visited.add (start_idx); + + if (targets.has (start_idx)) + { + targets.del (start_idx); + connected.add (start_idx); + } + + const auto& v = vertices_[start_idx]; + + // Graph is treated as undirected so search children and parents of start_idx + for (const auto& l : v.obj.links) + find_connected_nodes (l.objidx, targets, visited, connected); + + for (unsigned p : v.parents) + find_connected_nodes (p, targets, visited, connected); + } + public: // TODO(garretrieger): make private, will need to move most of offset overflow code into graph. hb_vector_t vertices_; private: - hb_vector_t clone_buffers_; - bool edge_count_invalid; + bool parents_invalid; bool distance_invalid; bool positions_invalid; bool successful; + hb_vector_t num_roots_for_space_; }; +static bool _try_isolating_subgraphs (const hb_vector_t& overflows, + graph_t& sorted_graph) +{ + for (int i = overflows.length - 1; i >= 0; i--) + { + const graph_t::overflow_record_t& r = overflows[i]; + unsigned root = 0; + unsigned space = sorted_graph.space_for (r.parent, &root); + if (!space) continue; + if (sorted_graph.num_roots_for_space (space) <= 1) continue; + + DEBUG_MSG (SUBSET_REPACK, nullptr, "Overflow in space %d moving subgraph %d to space %d.", + space, + root, + sorted_graph.next_space ()); + + hb_set_t roots; + roots.add (root); + sorted_graph.isolate_subgraph (roots); + for (unsigned new_root : roots) + sorted_graph.move_to_new_space (new_root); + return true; + } + return false; +} + static bool _process_overflows (const hb_vector_t& overflows, hb_set_t& priority_bumped_parents, graph_t& sorted_graph) @@ -921,11 +1140,9 @@ hb_resolve_overflows (const hb_vector_t& pac || table_tag == HB_OT_TAG_GSUB) && sorted_graph.will_overflow ()) { - if (sorted_graph.isolate_32bit_links ()) - { - DEBUG_MSG (SUBSET_REPACK, nullptr, "Isolated extension sub tables."); + DEBUG_MSG (SUBSET_REPACK, nullptr, "Assigning spaces to 32 bit subgraphs."); + if (sorted_graph.assign_32bit_spaces ()) sorted_graph.sort_shortest_distance (); - } } unsigned round = 0; @@ -938,10 +1155,14 @@ hb_resolve_overflows (const hb_vector_t& pac sorted_graph.print_overflows (overflows); hb_set_t priority_bumped_parents; - if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph)) + + if (!_try_isolating_subgraphs (overflows, sorted_graph)) { - DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :("); - break; + if (!_process_overflows (overflows, priority_bumped_parents, sorted_graph)) + { + DEBUG_MSG (SUBSET_REPACK, nullptr, "No resolution available :("); + break; + } } sorted_graph.sort_shortest_distance (); diff --git a/src/hb-vector.hh b/src/hb-vector.hh 
index 110d457ca..95b5295ba 100644 --- a/src/hb-vector.hh +++ b/src/hb-vector.hh @@ -87,6 +87,21 @@ struct hb_vector_t resize (0); } + void swap (hb_vector_t& other) + { + int allocated_copy = allocated; + unsigned int length_copy = length; + Type *arrayZ_copy = arrayZ; + + allocated = other.allocated; + length = other.length; + arrayZ = other.arrayZ; + + other.allocated = allocated_copy; + other.length = length_copy; + other.arrayZ = arrayZ_copy; + } + hb_vector_t& operator = (const hb_vector_t &o) { reset (); diff --git a/src/test-repacker.cc b/src/test-repacker.cc index 32228f7a7..aa4872535 100644 --- a/src/test-repacker.cc +++ b/src/test-repacker.cc @@ -64,6 +64,39 @@ static void add_wide_offset (unsigned id, c->add_link (*offset, id); } +static void run_resolve_overflow_test (const char* name, + hb_serialize_context_t& overflowing, + hb_serialize_context_t& expected, + unsigned num_iterations = 0) +{ + printf (">>> Testing overflowing resolution for %s\n", + name); + + graph_t graph (overflowing.object_graph ()); + + unsigned buffer_size = overflowing.end - overflowing.start; + void* out_buffer = malloc (buffer_size); + hb_serialize_context_t out (out_buffer, buffer_size); + + assert (overflowing.offset_overflow ()); + hb_resolve_overflows (overflowing.object_graph (), HB_TAG ('G', 'S', 'U', 'B'), &out, num_iterations); + assert (!out.offset_overflow ()); + hb_bytes_t result = out.copy_bytes (); + + assert (!expected.offset_overflow ()); + hb_bytes_t expected_result = expected.copy_bytes (); + + assert (result.length == expected_result.length); + for (unsigned i = 0; i < expected_result.length; i++) + { + assert (result[i] == expected_result[i]); + } + + result.fini (); + expected_result.fini (); + free (out_buffer); +} + static void populate_serializer_simple (hb_serialize_context_t* c) { @@ -75,7 +108,7 @@ populate_serializer_simple (hb_serialize_context_t* c) start_object ("abc", 3, c); add_offset (obj_2, c); add_offset (obj_1, c); - c->pop_pack (); + c->pop_pack (false); c->end_serialize(); } @@ -94,7 +127,7 @@ populate_serializer_with_overflow (hb_serialize_context_t* c) add_offset (obj_3, c); add_offset (obj_2, c); add_offset (obj_1, c); - c->pop_pack (); + c->pop_pack (false); c->end_serialize(); } @@ -129,16 +162,16 @@ populate_serializer_with_isolation_overflow (hb_serialize_context_t* c) start_object (large_string.c_str(), 60000, c); add_offset (obj_4, c); - unsigned obj_3 = c->pop_pack (); + unsigned obj_3 = c->pop_pack (false); start_object (large_string.c_str(), 10000, c); add_offset (obj_4, c); - unsigned obj_2 = c->pop_pack (); + unsigned obj_2 = c->pop_pack (false); start_object ("1", 1, c); add_wide_offset (obj_3, c); add_offset (obj_2, c); - c->pop_pack (); + c->pop_pack (false); c->end_serialize(); } @@ -153,29 +186,38 @@ populate_serializer_with_isolation_overflow_complex (hb_serialize_context_t* c) start_object ("e", 1, c); add_offset (obj_f, c); - unsigned obj_e = c->pop_pack (); + unsigned obj_e = c->pop_pack (false); - start_object ("cc", 2, c); + start_object ("c", 1, c); add_offset (obj_e, c); - unsigned obj_c = c->pop_pack (); + unsigned obj_c = c->pop_pack (false); start_object ("d", 1, c); add_offset (obj_e, c); - unsigned obj_d = c->pop_pack (); + unsigned obj_d = c->pop_pack (false); + + start_object (large_string.c_str(), 60000, c); + add_offset (obj_d, c); + unsigned obj_h = c->pop_pack (false); start_object (large_string.c_str(), 60000, c); add_offset (obj_c, c); - add_offset (obj_d, c); - unsigned obj_b = c->pop_pack (); + add_offset (obj_h, c); + 
unsigned obj_b = c->pop_pack (false); start_object (large_string.c_str(), 10000, c); add_offset (obj_d, c); - unsigned obj_g = c->pop_pack (); + unsigned obj_g = c->pop_pack (false); + + start_object (large_string.c_str(), 11000, c); + add_offset (obj_d, c); + unsigned obj_i = c->pop_pack (false); start_object ("a", 1, c); add_wide_offset (obj_b, c); add_offset (obj_g, c); - c->pop_pack (); + add_offset (obj_i, c); + c->pop_pack (false); c->end_serialize(); } @@ -186,45 +228,58 @@ populate_serializer_with_isolation_overflow_complex_expected (hb_serialize_conte std::string large_string(70000, 'a'); c->start_serialize (); - // 32 bit subgraph + + // space 1 + unsigned obj_f_prime = add_object ("f", 1, c); start_object ("e", 1, c); add_offset (obj_f_prime, c); - unsigned obj_e_prime = c->pop_pack (); - - start_object ("cc", 2, c); - add_offset (obj_e_prime, c); - unsigned obj_c = c->pop_pack (); + unsigned obj_e_prime = c->pop_pack (false); start_object ("d", 1, c); add_offset (obj_e_prime, c); - unsigned obj_d_prime = c->pop_pack (); + unsigned obj_d_prime = c->pop_pack (false); + + start_object (large_string.c_str(), 60000, c); + add_offset (obj_d_prime, c); + unsigned obj_h = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e_prime, c); + unsigned obj_c = c->pop_pack (false); start_object (large_string.c_str(), 60000, c); add_offset (obj_c, c); - add_offset (obj_d_prime, c); - unsigned obj_b = c->pop_pack (); + add_offset (obj_h, c); + unsigned obj_b = c->pop_pack (false); + + // space 0 - // 16 bit subgraph unsigned obj_f = add_object ("f", 1, c); start_object ("e", 1, c); add_offset (obj_f, c); - unsigned obj_e = c->pop_pack (); + unsigned obj_e = c->pop_pack (false); + start_object ("d", 1, c); add_offset (obj_e, c); - unsigned obj_d = c->pop_pack (); + unsigned obj_d = c->pop_pack (false); + + start_object (large_string.c_str(), 11000, c); + add_offset (obj_d, c); + unsigned obj_i = c->pop_pack (false); start_object (large_string.c_str(), 10000, c); add_offset (obj_d, c); - unsigned obj_g = c->pop_pack (); + unsigned obj_g = c->pop_pack (false); start_object ("a", 1, c); add_wide_offset (obj_b, c); add_offset (obj_g, c); - c->pop_pack (); + add_offset (obj_i, c); + c->pop_pack (false); c->end_serialize(); } @@ -255,6 +310,368 @@ populate_serializer_with_isolation_overflow_spaces (hb_serialize_context_t* c) c->end_serialize(); } +static void +populate_serializer_spaces (hb_serialize_context_t* c, bool with_overflow) +{ + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_i; + + if (with_overflow) + obj_i = add_object ("i", 1, c); + + // Space 2 + unsigned obj_h = add_object ("h", 1, c); + + start_object (large_string.c_str(), 30000, c); + add_offset (obj_h, c); + unsigned obj_e = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_e, c); + unsigned obj_b = c->pop_pack (false); + + // Space 1 + if (!with_overflow) + obj_i = add_object ("i", 1, c); + + start_object (large_string.c_str(), 30000, c); + add_offset (obj_i, c); + unsigned obj_g = c->pop_pack (false); + + start_object (large_string.c_str(), 30000, c); + add_offset (obj_i, c); + unsigned obj_f = c->pop_pack (false); + + start_object ("d", 1, c); + add_offset (obj_g, c); + unsigned obj_d = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_f, c); + unsigned obj_c = c->pop_pack (false); + + start_object ("a", 1, c); + add_wide_offset (obj_b, c); + add_wide_offset (obj_c, c); + add_wide_offset (obj_d, c); + c->pop_pack (false); + + 
c->end_serialize(); +} + +static void +populate_serializer_spaces_16bit_connection (hb_serialize_context_t* c) +{ + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_g = add_object ("g", 1, c); + unsigned obj_h = add_object ("h", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_g, c); + unsigned obj_e = c->pop_pack (false); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_h, c); + unsigned obj_f = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + start_object ("d", 1, c); + add_offset (obj_f, c); + unsigned obj_d = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_e, c); + add_offset (obj_h, c); + unsigned obj_b = c->pop_pack (false); + + start_object ("a", 1, c); + add_offset (obj_b, c); + add_wide_offset (obj_c, c); + add_wide_offset (obj_d, c); + c->pop_pack (false); + + c->end_serialize(); +} + +static void +populate_serializer_spaces_16bit_connection_expected (hb_serialize_context_t* c) +{ + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_g_prime = add_object ("g", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_g_prime, c); + unsigned obj_e_prime = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e_prime, c); + unsigned obj_c = c->pop_pack (false); + + unsigned obj_h_prime = add_object ("h", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_h_prime, c); + unsigned obj_f = c->pop_pack (false); + + start_object ("d", 1, c); + add_offset (obj_f, c); + unsigned obj_d = c->pop_pack (false); + + unsigned obj_g = add_object ("g", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_g, c); + unsigned obj_e = c->pop_pack (false); + + unsigned obj_h = add_object ("h", 1, c); + + start_object ("b", 1, c); + add_offset (obj_e, c); + add_offset (obj_h, c); + unsigned obj_b = c->pop_pack (false); + + start_object ("a", 1, c); + add_offset (obj_b, c); + add_wide_offset (obj_c, c); + add_wide_offset (obj_d, c); + c->pop_pack (false); + + c->end_serialize (); +} + +static void +populate_serializer_short_and_wide_subgraph_root (hb_serialize_context_t* c) +{ + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_e = add_object ("e", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_c, c); + unsigned obj_d = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_c, c); + add_offset (obj_e, c); + unsigned obj_b = c->pop_pack (false); + + start_object ("a", 1, c); + add_offset (obj_b, c); + add_wide_offset (obj_c, c); + add_wide_offset (obj_d, c); + c->pop_pack (false); + + c->end_serialize(); +} + +static void +populate_serializer_short_and_wide_subgraph_root_expected (hb_serialize_context_t* c) +{ + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_e_prime = add_object ("e", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_e_prime, c); + unsigned obj_c_prime = c->pop_pack (false); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_c_prime, c); + unsigned obj_d = c->pop_pack (false); + + unsigned obj_e = add_object ("e", 1, c); + + start_object (large_string.c_str (), 40000, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + + 
start_object ("b", 1, c); + add_offset (obj_c, c); + add_offset (obj_e, c); + unsigned obj_b = c->pop_pack (false); + + start_object ("a", 1, c); + add_offset (obj_b, c); + add_wide_offset (obj_c_prime, c); + add_wide_offset (obj_d, c); + c->pop_pack (false); + + c->end_serialize(); +} + +static void +populate_serializer_with_split_spaces (hb_serialize_context_t* c) +{ + // Overflow needs to be resolved by splitting the single space + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_f = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f, c); + unsigned obj_d = c->pop_pack (false); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f, c); + unsigned obj_e = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_d, c); + unsigned obj_b = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + start_object ("a", 1, c); + add_wide_offset (obj_b, c); + add_wide_offset (obj_c, c); + c->pop_pack (false); + + c->end_serialize(); +} + +static void +populate_serializer_with_split_spaces_2 (hb_serialize_context_t* c) +{ + // Overflow needs to be resolved by splitting the single space + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_f = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f, c); + unsigned obj_d = c->pop_pack (false); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f, c); + unsigned obj_e = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_d, c); + unsigned obj_b = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + start_object ("a", 1, c); + add_offset (obj_b, c); + add_wide_offset (obj_b, c); + add_wide_offset (obj_c, c); + c->pop_pack (false); + + c->end_serialize(); +} + +static void +populate_serializer_with_split_spaces_expected (hb_serialize_context_t* c) +{ + // Overflow needs to be resolved by splitting the single space + + std::string large_string(70000, 'a'); + c->start_serialize (); + + unsigned obj_f_prime = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f_prime, c); + unsigned obj_d = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_d, c); + unsigned obj_b = c->pop_pack (false); + + unsigned obj_f = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f, c); + unsigned obj_e = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + start_object ("a", 1, c); + add_wide_offset (obj_b, c); + add_wide_offset (obj_c, c); + c->pop_pack (false); + + c->end_serialize(); +} + +static void +populate_serializer_with_split_spaces_expected_2 (hb_serialize_context_t* c) +{ + // Overflow needs to be resolved by splitting the single space + + std::string large_string(70000, 'a'); + c->start_serialize (); + + // Space 2 + + unsigned obj_f_double_prime = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f_double_prime, c); + unsigned obj_d_prime = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_d_prime, c); + unsigned obj_b_prime = c->pop_pack (false); + + // Space 1 + + unsigned obj_f_prime = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f_prime, c); + unsigned 
obj_e = c->pop_pack (false); + + start_object ("c", 1, c); + add_offset (obj_e, c); + unsigned obj_c = c->pop_pack (false); + + // Space 0 + + unsigned obj_f = add_object ("f", 1, c); + + start_object (large_string.c_str(), 40000, c); + add_offset (obj_f, c); + unsigned obj_d = c->pop_pack (false); + + start_object ("b", 1, c); + add_offset (obj_d, c); + unsigned obj_b = c->pop_pack (false); + + // Root + start_object ("a", 1, c); + add_offset (obj_b, c); + add_wide_offset (obj_b_prime, c); + add_wide_offset (obj_c, c); + c->pop_pack (false); + + c->end_serialize(); +} + static void populate_serializer_complex_1 (hb_serialize_context_t* c) { @@ -270,7 +687,7 @@ populate_serializer_complex_1 (hb_serialize_context_t* c) start_object ("abc", 3, c); add_offset (obj_2, c); add_offset (obj_4, c); - c->pop_pack (); + c->pop_pack (false); c->end_serialize(); } @@ -296,7 +713,7 @@ populate_serializer_complex_2 (hb_serialize_context_t* c) add_offset (obj_2, c); add_offset (obj_4, c); add_offset (obj_5, c); - c->pop_pack (); + c->pop_pack (false); c->end_serialize(); } @@ -326,7 +743,7 @@ populate_serializer_complex_3 (hb_serialize_context_t* c) add_offset (obj_2, c); add_offset (obj_4, c); add_offset (obj_5, c); - c->pop_pack (); + c->pop_pack (false); c->end_serialize(); } @@ -608,6 +1025,24 @@ static void test_resolve_overflows_via_duplication () free (out_buffer); } +static void test_resolve_overflows_via_space_assignment () +{ + size_t buffer_size = 160000; + void* buffer = malloc (buffer_size); + hb_serialize_context_t c (buffer, buffer_size); + populate_serializer_spaces (&c, true); + + void* expected_buffer = malloc (buffer_size); + hb_serialize_context_t e (expected_buffer, buffer_size); + populate_serializer_spaces (&e, false); + + run_resolve_overflow_test ("test_resolve_overflows_via_space_assignment", + c, + e); + + free (buffer); + free (expected_buffer); +} static void test_resolve_overflows_via_isolation () { @@ -638,31 +1073,54 @@ static void test_resolve_overflows_via_isolation_with_recursive_duplication () void* buffer = malloc (buffer_size); hb_serialize_context_t c (buffer, buffer_size); populate_serializer_with_isolation_overflow_complex (&c); - graph_t graph (c.object_graph ()); - - void* out_buffer = malloc (buffer_size); - hb_serialize_context_t out (out_buffer, buffer_size); - - assert (c.offset_overflow ()); - hb_resolve_overflows (c.object_graph (), HB_TAG ('G', 'S', 'U', 'B'), &out, 0); - assert (!out.offset_overflow ()); - hb_bytes_t result = out.copy_bytes (); void* expected_buffer = malloc (buffer_size); hb_serialize_context_t e (expected_buffer, buffer_size); - assert (!e.offset_overflow ()); populate_serializer_with_isolation_overflow_complex_expected (&e); - hb_bytes_t expected_result = e.copy_bytes (); - assert (result.length == expected_result.length); - for (unsigned i = 0; i < result.length; i++) - assert (result[i] == expected_result[i]); - - result.fini (); - expected_result.fini (); + run_resolve_overflow_test ("test_resolve_overflows_via_isolation_with_recursive_duplication", + c, + e); + free (buffer); + free (expected_buffer); +} + +static void test_resolve_overflows_via_isolating_16bit_space () +{ + size_t buffer_size = 160000; + void* buffer = malloc (buffer_size); + hb_serialize_context_t c (buffer, buffer_size); + populate_serializer_spaces_16bit_connection (&c); + + void* expected_buffer = malloc (buffer_size); + hb_serialize_context_t e (expected_buffer, buffer_size); + populate_serializer_spaces_16bit_connection_expected (&e); + + 
run_resolve_overflow_test ("test_resolve_overflows_via_isolating_16bit_space", + c, + e); + + free (buffer); + free (expected_buffer); +} + +static void test_resolve_overflows_via_isolating_16bit_space_2 () +{ + size_t buffer_size = 160000; + void* buffer = malloc (buffer_size); + hb_serialize_context_t c (buffer, buffer_size); + populate_serializer_short_and_wide_subgraph_root (&c); + + void* expected_buffer = malloc (buffer_size); + hb_serialize_context_t e (expected_buffer, buffer_size); + populate_serializer_short_and_wide_subgraph_root_expected (&e); + + run_resolve_overflow_test ("test_resolve_overflows_via_isolating_16bit_space_2", + c, + e); + free (buffer); free (expected_buffer); - free (out_buffer); } static void test_resolve_overflows_via_isolation_spaces () @@ -690,6 +1148,47 @@ static void test_resolve_overflows_via_isolation_spaces () free (out_buffer); } +static void test_resolve_overflows_via_splitting_spaces () +{ + size_t buffer_size = 160000; + void* buffer = malloc (buffer_size); + hb_serialize_context_t c (buffer, buffer_size); + populate_serializer_with_split_spaces (&c); + + void* expected_buffer = malloc (buffer_size); + hb_serialize_context_t e (expected_buffer, buffer_size); + populate_serializer_with_split_spaces_expected (&e); + + run_resolve_overflow_test ("test_resolve_overflows_via_splitting_spaces", + c, + e, + 1); + + free (buffer); + free (expected_buffer); + +} + +static void test_resolve_overflows_via_splitting_spaces_2 () +{ + size_t buffer_size = 160000; + void* buffer = malloc (buffer_size); + hb_serialize_context_t c (buffer, buffer_size); + populate_serializer_with_split_spaces_2 (&c); + + void* expected_buffer = malloc (buffer_size); + hb_serialize_context_t e (expected_buffer, buffer_size); + populate_serializer_with_split_spaces_expected_2 (&e); + + run_resolve_overflow_test ("test_resolve_overflows_via_splitting_spaces_2", + c, + e, + 1); + free (buffer); + free (expected_buffer); + +} + // TODO(garretrieger): update will_overflow tests to check the overflows array. // TODO(garretrieger): add tests for priority raising. 
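Each of the new tests follows the same pattern: build the overflowing graph, build the graph expected after resolution, and hand both to the run_resolve_overflow_test harness added near the top of this file. A hypothetical additional test (the populate_serializer_my_case* builders are invented here purely for illustration) would look like:

static void test_resolve_overflows_my_case ()
{
  size_t buffer_size = 160000;
  void* buffer = malloc (buffer_size);
  hb_serialize_context_t c (buffer, buffer_size);
  populate_serializer_my_case (&c);             // hypothetical builder of an overflowing graph

  void* expected_buffer = malloc (buffer_size);
  hb_serialize_context_t e (expected_buffer, buffer_size);
  populate_serializer_my_case_expected (&e);    // hypothetical builder of the resolved graph

  run_resolve_overflow_test ("test_resolve_overflows_my_case", c, e);

  free (buffer);
  free (expected_buffer);
}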
@@ -705,9 +1204,14 @@ main (int argc, char **argv)
   test_will_overflow_3 ();
   test_resolve_overflows_via_sort ();
   test_resolve_overflows_via_duplication ();
+  test_resolve_overflows_via_space_assignment ();
   test_resolve_overflows_via_isolation ();
   test_resolve_overflows_via_isolation_with_recursive_duplication ();
   test_resolve_overflows_via_isolation_spaces ();
+  test_resolve_overflows_via_isolating_16bit_space ();
+  test_resolve_overflows_via_isolating_16bit_space_2 ();
+  test_resolve_overflows_via_splitting_spaces ();
+  test_resolve_overflows_via_splitting_spaces_2 ();
   test_duplicate_leaf ();
   test_duplicate_interior ();
 }
diff --git a/test/subset/data/fonts/Harmattan-Regular.ttf b/test/subset/data/fonts/Harmattan-Regular.ttf
new file mode 100644
index 000000000..0100cf485
Binary files /dev/null and b/test/subset/data/fonts/Harmattan-Regular.ttf differ
diff --git a/test/subset/data/repack_tests/Makefile.sources b/test/subset/data/repack_tests/Makefile.sources
index a9ffc81fb..9e85174b3 100644
--- a/test/subset/data/repack_tests/Makefile.sources
+++ b/test/subset/data/repack_tests/Makefile.sources
@@ -3,10 +3,11 @@ TESTS = \
 	prioritization.tests \
 	table_duplication.tests \
 	isolation.tests \
+	advanced_prioritization.tests \
+	space_splitting.tests \
 	$(NULL)
 
 XFAIL_TESTS = \
-	advanced_prioritization.tests \
 	$(NULL)
 
 DISABLED_TESTS = \
diff --git a/test/subset/data/repack_tests/space_splitting.tests b/test/subset/data/repack_tests/space_splitting.tests
new file mode 100644
index 000000000..0c1f1793f
--- /dev/null
+++ b/test/subset/data/repack_tests/space_splitting.tests
@@ -0,0 +1,2 @@
+Harmattan-Regular.ttf
+*
diff --git a/test/subset/meson.build b/test/subset/meson.build
index 51d0944d3..9a5377c9e 100644
--- a/test/subset/meson.build
+++ b/test/subset/meson.build
@@ -47,6 +47,8 @@ repack_tests = [
   'prioritization',
   'table_duplication',
   'isolation',
+  'advanced_prioritization',
+  'space_splitting',
 ]
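To put the pieces above in context, here is a simplified sketch of how hb_resolve_overflows drives the new machinery after this patch. It is paraphrased from the hb-repacker.hh hunks above, not copied verbatim; names such as packed and max_rounds are stand-ins. Spaces are assigned up front for GSUB/GPOS, and each resolution round first tries splitting an overflowing space before falling back to duplication and priority bumping.

// Simplified control flow only; see the hb-repacker.hh hunks above for the real code.
graph_t sorted_graph (packed);
sorted_graph.sort_shortest_distance ();

if ((table_tag == HB_OT_TAG_GPOS || table_tag == HB_OT_TAG_GSUB)
    && sorted_graph.will_overflow ())
{
  // New in this patch: give each connected subgraph reachable only through
  // 32 bit offsets its own space before attempting anything else.
  if (sorted_graph.assign_32bit_spaces ())
    sorted_graph.sort_shortest_distance ();
}

unsigned round = 0;
hb_vector_t<graph_t::overflow_record_t> overflows;
while (!sorted_graph.in_error ()
       && sorted_graph.will_overflow (&overflows)
       && round++ < max_rounds)                 // max_rounds: stand-in for the existing round limit
{
  sorted_graph.print_overflows (overflows);
  hb_set_t priority_bumped_parents;

  // New in this patch: prefer splitting an overflowing space that still has
  // several roots; only fall back to duplication / priority bumping.
  if (!_try_isolating_subgraphs (overflows, sorted_graph)
      && !_process_overflows (overflows, priority_bumped_parents, sorted_graph))
    break;                                      // no resolution available

  sorted_graph.sort_shortest_distance ();
}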