author    Adam Borowski <kilobyte@angband.pl>    2010-08-19 14:50:43 +0200
committer Adam Borowski <kilobyte@angband.pl>    2011-06-08 18:30:23 +0200
commit    8492ceb405c0aa17d47ba55e48ac8891f4a582d6 (patch)
tree      ca236afb58cb30761112b772158082d0035ca143 /crawl-ref/source/package.cc
parent    bb8cc240470638dd85c4e5709df77b4aa4e3deeb (diff)
Try to fight fragmentation by choosing the best-fitting block.
Unlike my attempt a year ago, this version actually tries a block that fits, rather than one that's just below what we need (which meant a tiny new fragment was created in most cases, making things worse :p).
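
For context, here is a minimal sketch of the two strategies the message contrasts, first fit versus best fit, over a map of offset to size like package.cc's free_blocks. It is an illustration only; pick_first_fit and pick_best_fit are made-up names, not functions from the commit.

    #include <cstdint>
    #include <map>

    typedef uint32_t len_t;                // assumed, mirroring package.h
    typedef std::map<len_t, len_t> fb_t;   // offset -> free bytes, like free_blocks

    // First fit: take the first block that is big enough (the old behaviour).
    // It tends to carve pieces off large blocks even when a snug one exists.
    fb_t::iterator pick_first_fit(fb_t &free_blocks, len_t need)
    {
        for (fb_t::iterator bl = free_blocks.begin(); bl != free_blocks.end(); bl++)
            if (bl->second >= need)
                return bl;
        return free_blocks.end();
    }

    // Best fit: take the smallest block that still fits, so the leftover
    // fragment is as small as possible (the idea behind this commit).
    fb_t::iterator pick_best_fit(fb_t &free_blocks, len_t need)
    {
        fb_t::iterator best = free_blocks.end();
        len_t best_size = (len_t)-1;
        for (fb_t::iterator bl = free_blocks.begin(); bl != free_blocks.end(); bl++)
            if (bl->second >= need && bl->second < best_size)
                best = bl, best_size = bl->second;
        return best;
    }
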
Diffstat (limited to 'crawl-ref/source/package.cc')
-rw-r--r--  crawl-ref/source/package.cc | 51
1 file changed, 31 insertions(+), 20 deletions(-)
diff --git a/crawl-ref/source/package.cc b/crawl-ref/source/package.cc
index 4a068c8e9f..357ea68d97 100644
--- a/crawl-ref/source/package.cc
+++ b/crawl-ref/source/package.cc
@@ -9,9 +9,6 @@ Guarantees:
   the exact state it had at the last commit().
 
 Caveats/issues:
-* Benchmarked on random chunks of size 2^random2(X) * frandom(1),
-  naive reusing of the first slab of free space produces less waste than
-  best fit. I don't fully understand why, but for now, let's use that.
 * Unless DO_FSYNC is defined, crashes that put down the operating system
   may break the consistency guarantee.
 * A commit() will break readers who read a chunk that was deleted or
@@ -291,25 +288,40 @@ len_t package::extend_block(len_t at, len_t size, len_t by)
     return by;
 }
 
-len_t package::alloc_block()
+len_t package::alloc_block(len_t &size)
 {
-    for(fb_t::iterator bl = free_blocks.begin(); bl!=free_blocks.end(); bl++)
+    fb_t::iterator bl, best_big, best_small;
+    len_t bb_size = (len_t)-1, bs_size = 0;
+    for(bl = free_blocks.begin(); bl!=free_blocks.end(); bl++)
     {
-        // don't reuse very small blocks
-        if (bl->second < 16)
-            continue;
-
-        len_t at = bl->first;
-        len_t free = bl->second;
-        dprintf("found a block for reuse at %u size %u\n", at, free);
-        free_blocks.erase(bl);
-        free_blocks[at + sizeof(block_header)] = free - sizeof(block_header);
-
-        return at;
+        if (bl->second < bb_size && bl->second >= size + sizeof(block_header))
+            best_big = bl, bb_size = bl->second;
+        // don't reuse very small blocks unless they're big enough
+        else if (bl->second >= 16 && bl->second > bs_size)
+            best_small = bl, bs_size = bl->second;
+    }
+    if (bb_size != (len_t)-1)
+        bl = best_big;
+    else if (bs_size != 0)
+        bl = best_small;
+    else
+    {
+        len_t at = file_len;
+        file_len += sizeof(block_header) + size;
+        return at;
     }
-    len_t at = file_len;
-    file_len += sizeof(block_header);
+    len_t at = bl->first;
+    len_t free = bl->second;
+    dprintf("found a block for reuse at %u size %u\n", at, free);
+    free_blocks.erase(bl);
+    free -= sizeof(block_header);
+    if (size > free)
+        size = free;
+    if ((free -= size))
+        free_blocks[at + sizeof(block_header) + size] = free;
+
     return at;
 }
@@ -686,11 +698,10 @@ void chunk_writer::raw_write(const void *data, len_t len)
 {
     while (len > 0)
     {
-        int space = pkg->extend_block(cur_block, block_len, len);
+        len_t space = pkg->extend_block(cur_block, block_len, len);
         if (!space)
         {
-            len_t next_block = pkg->alloc_block();
-            space = pkg->extend_block(next_block, 0, len);
+            len_t next_block = pkg->alloc_block(space = len);
             ASSERT(space > 0);
             if (cur_block)
                 finish_block(next_block);
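
To make the new contract concrete (an illustrative walk-through, not part of the commit): alloc_block() now takes the wanted size by reference and may shrink it when no free block is large enough, so the caller loops until everything is written, exactly as raw_write() does. For instance, assuming sizeof(block_header) == 8 and free blocks of 40, 64 and 100 bytes: a request for 50 bytes picks the 64-byte block (the smallest one that holds 50 + 8), leaving a 6-byte fragment, while a request for 200 bytes fits nowhere, so the largest block is reused and size comes back as 92, with the rest going into a fresh block on the next iteration.

A sketch of such a caller; write_all() and write_at() are hypothetical names, not functions from package.cc:

    // Hypothetical caller, modelled on chunk_writer::raw_write() above.
    void write_all(package *pkg, const char *data, len_t len)
    {
        while (len > 0)
        {
            len_t space = len;                     // ask for all we still need
            len_t block = pkg->alloc_block(space); // space may shrink to what fits
            ASSERT(space > 0);
            write_at(block, data, space);          // hypothetical raw file write
            data += space;
            len  -= space;
        }
    }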