@@ -36,6 +36,62 @@ MiniHeapID GetMiniHeapID(const MiniHeapT *mh) {
   return runtime<PageSize>().heap().miniheapIDFor(mh);
 }
 
+template <size_t PageSize>
+std::array<uint64_t, kNumBins> GlobalHeap<PageSize>::initMeshEventBudget() {
+  std::array<uint64_t, kNumBins> budgets{};
+  constexpr uint64_t minBudget = PageSize;
+  constexpr uint64_t maxBudget = static_cast<uint64_t>(PageSize) * 64ULL;
+  for (size_t i = 0; i < kNumBins; i++) {
+    const uint64_t objSize = static_cast<uint64_t>(SizeMap::ByteSizeForClass(static_cast<int32_t>(i)));
+    uint64_t budget = objSize * 32;
+    if (budget < minBudget) {
+      budget = minBudget;
+    } else if (budget > maxBudget) {
+      budget = maxBudget;
+    }
+    budgets[i] = budget;
+  }
+  return budgets;
+}
+
+template <size_t PageSize>
+struct MeshScratch {
+  MergeSetArray<PageSize> &mergeSets;
+  SplitArray<PageSize> &left;
+  SplitArray<PageSize> &right;
+};
+
+template <size_t PageSize>
+static MeshScratch<PageSize> getMeshScratch() {
+  static MergeSetArray<PageSize> *MergeSetsPtr = []() {
+    void *ptr =
+        mmap(nullptr, sizeof(MergeSetArray<PageSize>), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    hard_assert(ptr != MAP_FAILED);
+    return new (ptr) MergeSetArray<PageSize>();
+  }();
+  static MergeSetArray<PageSize> &MergeSets = *MergeSetsPtr;
+
+  static SplitArray<PageSize> *LeftPtr = []() {
+    void *ptr = mmap(nullptr, sizeof(SplitArray<PageSize>), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    hard_assert(ptr != MAP_FAILED);
+    return new (ptr) SplitArray<PageSize>();
+  }();
+  static SplitArray<PageSize> &Left = *LeftPtr;
+
+  static SplitArray<PageSize> *RightPtr = []() {
+    void *ptr = mmap(nullptr, sizeof(SplitArray<PageSize>), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    hard_assert(ptr != MAP_FAILED);
+    return new (ptr) SplitArray<PageSize>();
+  }();
+  static SplitArray<PageSize> &Right = *RightPtr;
+
+  d_assert((reinterpret_cast<uintptr_t>(&MergeSets) & (getPageSize() - 1)) == 0);
+  d_assert((reinterpret_cast<uintptr_t>(&Left) & (getPageSize() - 1)) == 0);
+  d_assert((reinterpret_cast<uintptr_t>(&Right) & (getPageSize() - 1)) == 0);
+
+  return {MergeSets, Left, Right};
+}
+
 template <size_t PageSize>
 void *GlobalHeap<PageSize>::malloc(size_t sz) {
 #ifndef NDEBUG
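
The hunk above adds initMeshEventBudget, which clamps objSize * 32 into [PageSize, PageSize * 64] for every size class. A standalone sketch of that clamping, assuming a 4 KiB page and a few illustrative object sizes (neither the page size nor the object sizes are taken from this change):

    // Illustration only: reproduces the clamping done by initMeshEventBudget
    // for an assumed 4 KiB page and three sample object sizes.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint64_t kPage = 4096;        // assumed PageSize
      constexpr uint64_t kMinBudget = kPage;  // floor: one page of freed bytes
      constexpr uint64_t kMaxBudget = kPage * 64;
      for (uint64_t objSize : {16u, 256u, 16384u}) {
        const uint64_t budget = std::clamp(objSize * 32, kMinBudget, kMaxBudget);
        std::printf("objSize=%8llu -> budget=%8llu bytes\n",
                    static_cast<unsigned long long>(objSize),
                    static_cast<unsigned long long>(budget));
      }
      return 0;
    }

Under those assumptions a 16-byte class is rounded up to one page's worth of frees (4096 bytes) before a mesh pass is considered, a 256-byte class keeps its natural 8192-byte budget, and very large classes are capped at 64 pages' worth.
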
@@ -88,9 +144,14 @@ void GlobalHeap<PageSize>::freeFor(MiniHeapT *mh, void *ptr, size_t startEpoch)
 
   d_assert(mh->maxCount() > 1);
 
+  const size_t objectSize = mh->objectSize();
+  const size_t spanBytes = mh->spanSize();
+
   auto freelistId = mh->freelistId();
   auto isAttached = mh->isAttached();
   auto sizeClass = mh->sizeClass();
+  bool transitionedToPartial = false;
+  bool becameEmpty = false;
 
   // try to avoid storing to this cacheline; the branch is worth it to avoid
   // multi-threaded contention
@@ -151,6 +212,9 @@ void GlobalHeap<PageSize>::freeFor(MiniHeapT *mh, void *ptr, size_t startEpoch)
     isAttached = mh->isAttached();
 
     if (!isAttached && (remaining == 0 || freelistId == list::Full)) {
+      becameEmpty = remaining == 0;
+      transitionedToPartial = remaining > 0 && freelistId == list::Full;
+      shouldMesh = true;
       // this may free the miniheap -- we can't safely access it after
       // this point.
       postFreeLocked(mh, sizeClass, remaining);
@@ -173,6 +237,7 @@ void GlobalHeap<PageSize>::freeFor(MiniHeapT *mh, void *ptr, size_t startEpoch)
       // the exact crossing, the next free will catch it.
       if (isBelowPartialThreshold(remaining, mh->maxCount())) {
         tryPushPendingPartial(mh, sizeClass);
+        transitionedToPartial = true;
       }
       shouldMesh = true;
     } else {
@@ -238,21 +303,67 @@ void GlobalHeap<PageSize>::freeFor(MiniHeapT *mh, void *ptr, size_t startEpoch)
         remaining = mh->inUseCount();
         postFreeLocked(mh, sizeClass, remaining);
         // Note: flushBinLocked deferred to next mesh cycle (requires arena lock)
+        becameEmpty = true;
+        shouldMesh = true;
       }
     } else {
       shouldMesh = !isAttached;
     }
   }
 
-  if (shouldMesh) {
-    // Sample maybeMesh calls using pointer address bits to avoid overhead of
-    // calling clock_gettime on every free. Check ~1 in 4096 frees.
-    // Use bits 12-23 (above page offset, below typical allocation patterns).
-    constexpr uintptr_t kMeshSampleMask = 0xFFF000;
-    if (unlikely((reinterpret_cast<uintptr_t>(ptr) & kMeshSampleMask) == 0)) {
-      maybeMesh();
+  if (shouldMesh && !isAttached && sizeClass >= 0) {
+    size_t delta = objectSize;
+    if (transitionedToPartial || becameEmpty) {
+      delta += spanBytes;
+    }
+    _meshTrigger.add(static_cast<size_t>(sizeClass), delta);
+    maybeMesh(sizeClass);
+  }
+}
+
+template <size_t PageSize>
+void GlobalHeap<PageSize>::processMeshRequest(size_t sizeClass) {
+  d_assert(sizeClass < kNumBins);
+  auto scratch = getMeshScratch<PageSize>();
+  const uint64_t budget = _meshTrigger.adjustedBudget(sizeClass);
+
+  size_t meshCount = 0;
+  bool aboveThreshold = false;
+
+  {
+    // Lock ordering: size-class lock -> arena lock -> epoch lock
+    // This matches meshAllSizeClassesLocked (called via AllLocksGuard in mallctl)
+    lock_guard<mutex> sizeLock(_miniheapLocks[sizeClass]);
+    lock_guard<mutex> arenaLock(_arenaLock);
+
+    // if we have freed but not reset meshed mappings, this will reset
+    // them to the identity mapping, ensuring we don't blow past our VMA
+    // limit (which is why we set the force flag to true)
+    Super::scavenge(true);
+
+    if (Super::aboveMeshThreshold()) {
+      aboveThreshold = true;
+    } else {
+      // Acquire epoch lock last to match lock ordering in meshAllSizeClassesLocked
+      lock_guard<EpochLock> epochLock(_meshEpoch);
+
+      drainPendingPartialLocked(sizeClass);
+      flushBinLocked(sizeClass);
+      meshCount = meshSizeClassLocked(sizeClass, scratch.mergeSets, scratch.left, scratch.right);
+      madvise(&scratch.left, sizeof(scratch.left), MADV_DONTNEED);
+      madvise(&scratch.right, sizeof(scratch.right), MADV_DONTNEED);
+      madvise(&scratch.mergeSets, sizeof(scratch.mergeSets), MADV_DONTNEED);
+      Super::scavenge(true);
     }
   }
+
+  if (!aboveThreshold) {
+    _lastMeshEffective.store(meshCount > 0 ? 1 : 0, std::memory_order_release);
+    _stats.meshCount += meshCount;
+  }
+
+  _meshTrigger.onMeshComplete(sizeClass, meshCount > 0, budget);
+  _lastMesh.store(time::now(), std::memory_order_release);
 }
 
 template <size_t PageSize>
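
_meshTrigger itself is not part of this diff; the code above relies only on an add / adjustedBudget / onMeshComplete interface. A minimal sketch of one plausible shape (a per-size-class byte accumulator seeded from initMeshEventBudget, with exponential back-off on unproductive passes) follows; every name here and the back-off policy are assumptions, not the project's implementation.

    // Sketch only: a byte accumulator per size class. How maybeMesh consults the
    // trigger, and how the real budget is adjusted, are not shown in this diff.
    #include <algorithm>
    #include <array>
    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    template <size_t NumBins>
    class MeshTriggerSketch {
    public:
      explicit MeshTriggerSketch(const std::array<uint64_t, NumBins> &budgets) {
        for (size_t i = 0; i < NumBins; i++) {
          _baseBudget[i] = budgets[i];
          _budget[i] = budgets[i];
          _freedBytes[i].store(0, std::memory_order_relaxed);
        }
      }

      // freeFor adds the freed object's bytes (plus the span size on an
      // empty/partial transition) for that object's size class.
      void add(size_t sizeClass, size_t bytes) {
        _freedBytes[sizeClass].fetch_add(bytes, std::memory_order_relaxed);
      }

      // A mesh pass is worth attempting once the accumulated bytes cross the
      // current budget for this class.
      bool shouldMesh(size_t sizeClass) const {
        return _freedBytes[sizeClass].load(std::memory_order_relaxed) >= _budget[sizeClass];
      }

      uint64_t adjustedBudget(size_t sizeClass) const {
        return _budget[sizeClass];
      }

      // After a pass: reset the accumulator; if nothing meshed, back off by
      // doubling the budget up to an arbitrary cap, otherwise restore the base.
      void onMeshComplete(size_t sizeClass, bool meshedSomething, uint64_t budget) {
        _freedBytes[sizeClass].store(0, std::memory_order_relaxed);
        _budget[sizeClass] = meshedSomething
                                 ? _baseBudget[sizeClass]
                                 : std::min<uint64_t>(budget * 2, _baseBudget[sizeClass] * 64);
      }

    private:
      std::array<std::atomic<uint64_t>, NumBins> _freedBytes;
      std::array<uint64_t, NumBins> _baseBudget;
      std::array<uint64_t, NumBins> _budget;
    };
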
@@ -428,34 +539,7 @@ size_t GlobalHeap<PageSize>::meshSizeClassLocked(size_t sizeClass, MergeSetArray
 
 template <size_t PageSize>
 void GlobalHeap<PageSize>::meshAllSizeClassesLocked() {
-  static MergeSetArray<PageSize> *MergeSetsPtr = []() {
-    void *ptr =
-        mmap(nullptr, sizeof(MergeSetArray<PageSize>), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    hard_assert(ptr != MAP_FAILED);
-    return new (ptr) MergeSetArray<PageSize>();
-  }();
-  static MergeSetArray<PageSize> &MergeSets = *MergeSetsPtr;
-  // static_assert(sizeof(MergeSets) == sizeof(void *) * 2 * 4096, "array too big");
-  d_assert((reinterpret_cast<uintptr_t>(&MergeSets) & (getPageSize() - 1)) == 0);
-
-  static SplitArray<PageSize> *LeftPtr = []() {
-    void *ptr = mmap(nullptr, sizeof(SplitArray<PageSize>), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    hard_assert(ptr != MAP_FAILED);
-    return new (ptr) SplitArray<PageSize>();
-  }();
-  static SplitArray<PageSize> &Left = *LeftPtr;
-
-  static SplitArray<PageSize> *RightPtr = []() {
-    void *ptr = mmap(nullptr, sizeof(SplitArray<PageSize>), PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-    hard_assert(ptr != MAP_FAILED);
-    return new (ptr) SplitArray<PageSize>();
-  }();
-  static SplitArray<PageSize> &Right = *RightPtr;
-
-  // static_assert(sizeof(Left) == sizeof(void *) * 16384, "array too big");
-  // static_assert(sizeof(Right) == sizeof(void *) * 16384, "array too big");
-  d_assert((reinterpret_cast<uintptr_t>(&Left) & (getPageSize() - 1)) == 0);
-  d_assert((reinterpret_cast<uintptr_t>(&Right) & (getPageSize() - 1)) == 0);
+  auto scratch = getMeshScratch<PageSize>();
 
   // if we have freed but not reset meshed mappings, this will reset
   // them to the identity mapping, ensuring we don't blow past our VMA
@@ -483,12 +567,12 @@ void GlobalHeap<PageSize>::meshAllSizeClassesLocked() {
   size_t totalMeshCount = 0;
 
   for (size_t sizeClass = 0; sizeClass < kNumBins; sizeClass++) {
-    totalMeshCount += meshSizeClassLocked(sizeClass, MergeSets, Left, Right);
+    totalMeshCount += meshSizeClassLocked(sizeClass, scratch.mergeSets, scratch.left, scratch.right);
   }
 
-  madvise(&Left, sizeof(Left), MADV_DONTNEED);
-  madvise(&Right, sizeof(Right), MADV_DONTNEED);
-  madvise(&MergeSets, sizeof(MergeSets), MADV_DONTNEED);
+  madvise(&scratch.left, sizeof(scratch.left), MADV_DONTNEED);
+  madvise(&scratch.right, sizeof(scratch.right), MADV_DONTNEED);
+  madvise(&scratch.mergeSets, sizeof(scratch.mergeSets), MADV_DONTNEED);
 
   _lastMeshEffective = totalMeshCount > 256;
   _stats.meshCount += totalMeshCount;
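
With this change, both meshAllSizeClassesLocked and processMeshRequest obtain the same lazily mmap'd, page-aligned scratch arrays from getMeshScratch and release their physical pages with MADV_DONTNEED after a pass. A standalone sketch of that lifecycle, with a placeholder Scratch type standing in for the real MergeSetArray/SplitArray:

    // Sketch (POSIX mmap/madvise): map anonymous, page-aligned memory once,
    // reuse the mapping across passes, and give the physical pages back between
    // uses. The Scratch struct is a placeholder, not the real SplitArray.
    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>
    #include <new>

    struct Scratch {
      void *slots[16384];
    };

    int main() {
      void *raw = mmap(nullptr, sizeof(Scratch), PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(raw != MAP_FAILED);
      Scratch *scratch = new (raw) Scratch();  // mmap returns page-aligned memory

      scratch->slots[0] = raw;  // ... a mesh pass fills the scratch arrays ...

      // Hand the physical pages back to the OS; the virtual mapping (and the
      // pointer) stays valid, and the pages read back as zero when next touched.
      madvise(scratch, sizeof(*scratch), MADV_DONTNEED);
      return 0;
    }

Because the pointers inside getMeshScratch are function-local statics, the arrays are created once per PageSize instantiation and shared by every caller, which is why both call sites can madvise them away when a pass finishes.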