
Commit 476502d

[GC] Avoid leaving fragmented slabs behind us when refilling the thread bins.
1 parent 4223fb6 commit 476502d

3 files changed: +25, -10 lines

sdlib/d/gc/arena.d

Lines changed: 3 additions & 2 deletions
@@ -126,6 +126,7 @@ public:
 		ubyte sizeClass,
 		void** top,
 		void** bottom,
+		void** requested,
 		size_t slotSize,
 	) shared {
 		// TODO: in contracts
@@ -134,8 +135,8 @@ public:
 		import d.gc.slab;
 		assert(slotSize == binInfos[sizeClass].slotSize, "Invalid slot size!");
 
-		return bins[sizeClass]
-			.batchAllocate(&filler, emap, sizeClass, top, bottom, slotSize);
+		return bins[sizeClass].batchAllocate(&filler, emap, sizeClass, top,
+		                                     bottom, requested, slotSize);
 	}
 
 	uint batchFree(ref CachedExtentMap emap, const(void*)[] worklist,

sdlib/d/gc/bin.d

Lines changed: 17 additions & 1 deletion
@@ -27,6 +27,7 @@ struct Bin {
 		ubyte sizeClass,
 		void** top,
 		void** bottom,
+		void** requested,
 		size_t slotSize,
 	) shared {
 		import d.gc.sizeclass;
@@ -38,6 +39,9 @@ struct Bin {
 		assert(bottom < top, "Invalid stack boundaries!");
 		assert((top - bottom) < uint.max, "Invalid stack size!");
 
+		assert(bottom < requested && requested <= top,
+		       "Invalid requested slot count!");
+
 		/**
 		 * When we run out of slab with free space, we allocate a fresh slab.
 		 * However, while we do so, another thread may have returned slabs to
@@ -112,8 +116,20 @@ struct Bin {
 			assert(freshSlab is null);
 			assert(progressed);
 
-			freshSlab = filler.allocSlab(emap, sizeClass);
+			/**
+			 * We want to avoid leaving partially filled slabs behind us,
+			 * so, before allocating a new slab, we make sure that it'll
+			 * either be required to meet the requested number of slot,
+			 * or that it'll be completely filled.
+			 */
+			assert(bottom <= insert && insert < top, "Invalid insertion point!");
+			auto navailable = top - insert;
 			auto nslots = binInfos[sizeClass].nslots;
+			if (requested <= insert && navailable <= nslots) {
+				return insert;
+			}
+
+			freshSlab = filler.allocSlab(emap, sizeClass);
 			assert(freshSlab is null || freshSlab.nfree == nslots);
 
 			progressed = false;
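
The condition in the new hunk is easier to see with plain slot counts in place of the Bin's void** stack pointers. The sketch below is illustrative only (the helper name shouldAllocFreshSlab and the 16-slots-per-slab figure are made up for the example, not part of the commit): a fresh slab is allocated only when it is still needed to reach the requested count, or when the remaining capacity guarantees it would be drained completely.

// Illustrative model of the check above: slot counts stand in for the
// pointer arithmetic done on the Bin's stack of free slots.
bool shouldAllocFreshSlab(size_t filled, size_t requested,
                          size_t navailable, size_t nslots) {
	// Stop refilling once the hard request is met and a fresh slab could
	// not be consumed entirely, as it would be left partially filled.
	if (filled >= requested && navailable <= nslots) {
		return false;
	}

	return true;
}

unittest {
	// Request of 5 already met, 6 slots of room left, 16 slots per slab:
	// a new slab would end up mostly empty, so the refill stops here.
	assert(!shouldAllocFreshSlab(5, 5, 6, 16));

	// Request not met yet: a fresh slab is still required.
	assert(shouldAllocFreshSlab(3, 5, 6, 16));

	// Room for 20 slots: a 16-slot slab would be drained completely,
	// so allocating it cannot leave fragmentation behind.
	assert(shouldAllocFreshSlab(5, 5, 20, 16));
}

Either way, the refill never strands a freshly allocated slab in a partially filled state, which is exactly the fragmentation the commit message refers to.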

sdlib/d/gc/tbin.d

Lines changed: 5 additions & 7 deletions
@@ -78,16 +78,14 @@
 		auto nfill = state.getFill(nmax);
 		assert(nfill > 0);
 
-		/**
-		 * TODO: We should pass available in addition to nfill to batchAllocSmall.
-		 * This would ensure batchAllocSmall has some wiggle room to provide
-		 * as many slots as possible without allocating new slabs.
-		 */
 		auto insert = _head - nfill;
 		assert(available <= insert);
 
-		auto filled =
-			arena.batchAllocSmall(emap, sizeClass, _head, insert, slotSize);
+		auto requested = insert + (nfill >> 1) + 1;
+		assert(insert < requested && requested <= _head);
+
+		auto filled = arena.batchAllocSmall(emap, sizeClass, _head, insert,
+		                                    requested, slotSize);
 		state.onRefill();
 
 		/**
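
The requested pointer computed here sits (nfill >> 1) + 1 slots above insert, so the refill treats only slightly more than half of the desired nfill slots as a hard requirement; the remainder is filled opportunistically from slabs that already exist, which is the wiggle room the removed TODO asked for. A small sketch of that arithmetic (the helper name requiredSlots is illustrative, not part of the commit):

// Illustrative only: the hard requirement derived from requested above.
// Out of the nfill slots the refill would like, only (nfill >> 1) + 1
// must be delivered; the rest is best effort.
size_t requiredSlots(size_t nfill) {
	return (nfill >> 1) + 1;
}

unittest {
	assert(requiredSlots(8) == 5);   // want 8 slots, insist on 5
	assert(requiredSlots(1) == 1);   // a refill always demands at least one slot
	assert(requiredSlots(15) == 8);
}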
