This reverts commit 9c2174350cc0ae0f6bad126e15fe1f9f044117ab.
Nico analyzed this and found that it does not really help, and
I agree with his analysis.
By the time this kicks in and data is actively thrown away,
performance simply goes down the drain because the same data
gets reloaded over and over again, to the point of making
virtually no progress at all. The previous behavior of enforcing
the memory limit by dynamically shrinking the window size at
least allowed some progress to be made, even if the end result
wouldn't be optimal.
And that's the whole point of this memory-limiting feature:
allowing some progress to be made when resources are too limited
to let the repack go unbounded.
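
To make the trade-off concrete, below is a minimal, self-contained
C sketch of the window-shrinking strategy this revert restores. It
is not git's builtin-pack-objects.c: the names (window_slot,
evict_slot, memory_limit, WINDOW) and the fixed object sizes are
invented for illustration only.

/*
 * Sketch: when the delta search window grows past the memory
 * limit, shrink the window by evicting its oldest (tail) entries,
 * so later objects still get some delta candidates instead of
 * having their data thrown away and reloaded for every comparison.
 */
#include <stdio.h>
#include <stdlib.h>

struct window_slot {
	void *data;   /* unpacked object data kept for delta attempts */
	size_t size;  /* memory attributed to this slot */
};

/* Free one slot and report how much memory that released. */
static size_t evict_slot(struct window_slot *slot)
{
	size_t freed = slot->size;
	free(slot->data);
	slot->data = NULL;
	slot->size = 0;
	return freed;
}

int main(void)
{
	enum { WINDOW = 10 };
	const size_t memory_limit = 64 * 1024;  /* illustrative cap */
	struct window_slot window[WINDOW] = { { NULL, 0 } };
	size_t mem_usage = 0;
	unsigned count = 0, idx = 0;

	/* Feed objects of varying size through the sliding window. */
	for (unsigned obj = 0; obj < 100; obj++) {
		size_t size = 1024 * (1 + obj % 16);

		/*
		 * Enforce the limit by shrinking the window: drop the
		 * oldest entries until the new object fits again.  This
		 * mirrors the tail-eviction loop kept by the hunks below.
		 */
		while (mem_usage + size > memory_limit && count > 1) {
			unsigned tail = (idx + WINDOW - count) % WINDOW;
			mem_usage -= evict_slot(&window[tail]);
			count--;
		}

		/* Admit the new object into the current slot. */
		if (window[idx].data)
			mem_usage -= evict_slot(&window[idx]);
		window[idx].data = malloc(size);
		window[idx].size = size;
		mem_usage += size;
		if (count < WINDOW)
			count++;
		idx = (idx + 1) % WINDOW;
	}

	printf("window entries kept: %u, memory in use: %zu bytes\n",
	       count, mem_usage);

	/* Release whatever the window still holds. */
	for (unsigned i = 0; i < WINDOW; i++)
		if (window[i].data)
			evict_slot(&window[i]);
	return 0;
}

Freeing each candidate's data immediately after a delta attempt, as
the reverted patch did, forces the same objects to be re-unpacked
for every later comparison; shrinking the window instead degrades
delta quality gracefully while still letting the repack advance.
The hunks below restore the single free_unpacked() helper and keep
the tail-eviction loop accordingly.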
return m;
}
-static unsigned long free_unpacked_data(struct unpacked *n)
+static unsigned long free_unpacked(struct unpacked *n)
{
unsigned long freed_mem = sizeof_delta_index(n->index);
free_delta_index(n->index);
n->index = NULL;
if (n->data) {
	freed_mem += n->entry->size;
	free(n->data);
	n->data = NULL;
}
- return freed_mem;
-}
-
-static unsigned long free_unpacked(struct unpacked *n)
-{
- unsigned long freed_mem = free_unpacked_data(n);
n->entry = NULL;
n->depth = 0;
return freed_mem;
while (window_memory_limit &&
       mem_usage > window_memory_limit &&
       count > 1) {
uint32_t tail = (idx + window - count) % window;
- mem_usage -= free_unpacked_data(array + tail);
+ mem_usage -= free_unpacked(array + tail);
count--;
}
if (!m->entry)
break;
ret = try_delta(n, m, max_depth, &mem_usage);
- if (window_memory_limit &&
- mem_usage > window_memory_limit)
- mem_usage -= free_unpacked_data(m);
if (ret < 0)
break;
else if (ret > 0)