tests: Replace clock counting with scanned bytes in linear-time test

This removes the dependency on CLOCKS_PER_SEC that prevented this test
from running properly on some platforms, as well as the inherent
flakiness of time measurements.

Since later commits have introduced g_bytesScanned (and before that,
g_parseAttempts), we can use that value as a proxy for parse time
instead of clock().
This commit is contained in:
Snild Dolkow 2024-02-07 13:00:45 +01:00
parent fe0177cd3f
commit dc8499f295

View file

@@ -5202,13 +5202,7 @@ START_TEST(test_nested_entity_suspend) {
END_TEST
/* Regression test for quadratic parsing on large tokens */
START_TEST(test_big_tokens_take_linear_time) {
const char *const too_slow_failure_message
= "Compared to the baseline runtime of the first test, this test has a "
"slowdown of more than <max_slowdown>. "
"Please keep increasing the value by 1 until it reliably passes the "
"test on your hardware and open a bug sharing that number with us. "
"Thanks in advance!";
START_TEST(test_big_tokens_scale_linearly) {
const struct {
const char *pre;
const char *post;
@@ -5220,65 +5214,57 @@ START_TEST(test_big_tokens_take_linear_time) {
{"<e><", "/></e>"}, // big elem name, used to be O(N²)
};
const int num_cases = sizeof(text) / sizeof(text[0]);
// For the test we need a <max_slowdown> value that is:
// (1) big enough that the test passes reliably (avoiding flaky tests), and
// (2) small enough that the test actually catches regressions.
const int max_slowdown = 15;
char aaaaaa[4096];
const int fillsize = (int)sizeof(aaaaaa);
const int fillcount = 100;
const unsigned approx_bytes = fillsize * fillcount; // ignore pre/post.
const unsigned max_factor = 4;
const unsigned max_scanned = max_factor * approx_bytes;
memset(aaaaaa, 'a', fillsize);
if (! g_reparseDeferralEnabledDefault) {
return; // heuristic is disabled; we would get O(n^2) and fail.
}
#if ! defined(__linux__)
if (CLOCKS_PER_SEC < 100000) {
// Skip this test if clock() doesn't have reasonably good resolution.
// This workaround is primarily targeting Windows and FreeBSD, since
// XSI requires the value to be 1.000.000 (10x the condition here), and
// we want to be very sure that at least one platform in CI can catch
// regressions (through a failing test).
return;
}
#endif
clock_t baseline = 0;
for (int i = 0; i < num_cases; ++i) {
XML_Parser parser = XML_ParserCreate(NULL);
assert_true(parser != NULL);
enum XML_Status status;
set_subtest("max_slowdown=%d text=\"%saaaaaa%s\"", max_slowdown,
text[i].pre, text[i].post);
const clock_t start = clock();
set_subtest("text=\"%saaaaaa%s\"", text[i].pre, text[i].post);
// parse the start text
g_bytesScanned = 0;
status = _XML_Parse_SINGLE_BYTES(parser, text[i].pre,
(int)strlen(text[i].pre), XML_FALSE);
if (status != XML_STATUS_OK) {
xml_failure(parser);
}
// parse lots of 'a', failing the test early if it takes too long
unsigned past_max_count = 0;
for (int f = 0; f < fillcount; ++f) {
status = _XML_Parse_SINGLE_BYTES(parser, aaaaaa, fillsize, XML_FALSE);
if (status != XML_STATUS_OK) {
xml_failure(parser);
}
// i == 0 means we're still calculating the baseline value
if (i > 0) {
const clock_t now = clock();
const clock_t clocks_so_far = now - start;
const int slowdown = clocks_so_far / baseline;
if (slowdown >= max_slowdown) {
fprintf(
stderr,
"fill#%d: clocks_so_far=%d baseline=%d slowdown=%d max_slowdown=%d\n",
f, (int)clocks_so_far, (int)baseline, slowdown, max_slowdown);
fail(too_slow_failure_message);
}
if (g_bytesScanned > max_scanned) {
// We're not done, and have already passed the limit -- the test will
// definitely fail. This block allows us to save time by failing early.
const unsigned pushed
= (unsigned)strlen(text[i].pre) + (f + 1) * fillsize;
fprintf(
stderr,
"after %d/%d loops: pushed=%u scanned=%u (factor ~%.2f) max_scanned: %u (factor ~%u)\n",
f + 1, fillcount, pushed, g_bytesScanned,
g_bytesScanned / (double)pushed, max_scanned, max_factor);
past_max_count++;
// We are failing, but allow a few log prints first. If we don't reach
// a count of five, the test will fail after the loop instead.
assert_true(past_max_count < 5);
}
}
// parse the end text
status = _XML_Parse_SINGLE_BYTES(parser, text[i].post,
(int)strlen(text[i].post), XML_TRUE);
@@ -5286,18 +5272,14 @@ START_TEST(test_big_tokens_take_linear_time) {
xml_failure(parser);
}
// how long did it take in total?
const clock_t end = clock();
const clock_t taken = end - start;
if (i == 0) {
assert_true(taken > 0); // just to make sure we don't div-by-0 later
baseline = taken;
}
const int slowdown = taken / baseline;
if (slowdown >= max_slowdown) {
fprintf(stderr, "taken=%d baseline=%d slowdown=%d max_slowdown=%d\n",
(int)taken, (int)baseline, slowdown, max_slowdown);
fail(too_slow_failure_message);
assert_true(g_bytesScanned > approx_bytes); // or the counter isn't working
if (g_bytesScanned > max_scanned) {
fprintf(
stderr,
"after all input: scanned=%u (factor ~%.2f) max_scanned: %u (factor ~%u)\n",
g_bytesScanned, g_bytesScanned / (double)approx_bytes, max_scanned,
max_factor);
fail("scanned too many bytes");
}
XML_ParserFree(parser);
@@ -6055,7 +6037,7 @@ make_basic_test_case(Suite *s) {
tcase_add_test__ifdef_xml_dtd(tc_basic,
test_pool_integrity_with_unfinished_attr);
tcase_add_test__if_xml_ge(tc_basic, test_nested_entity_suspend);
tcase_add_test(tc_basic, test_big_tokens_take_linear_time);
tcase_add_test(tc_basic, test_big_tokens_scale_linearly);
tcase_add_test(tc_basic, test_set_reparse_deferral);
tcase_add_test(tc_basic, test_reparse_deferral_is_inherited);
tcase_add_test(tc_basic, test_set_reparse_deferral_on_null_parser);