Qt 6.x
The Qt SDK
Loading...
Searching...
No Matches
qv4mm.cpp
Go to the documentation of this file.
1// Copyright (C) 2021 The Qt Company Ltd.
2// SPDX-License-Identifier: LicenseRef-Qt-Commercial OR LGPL-3.0-only OR GPL-2.0-only OR GPL-3.0-only
3
4#include "qv4engine_p.h"
5#include "qv4object_p.h"
6#include "qv4mm_p.h"
9#include <QtCore/qalgorithms.h>
10#include <QtCore/private/qnumeric_p.h>
11#include <QtCore/qloggingcategory.h>
12#include <private/qv4alloca_p.h>
13#include <qqmlengine.h>
14#include "PageReservation.h"
15#include "PageAllocation.h"
16
17#include <QElapsedTimer>
18#include <QMap>
19#include <QScopedValueRollback>
20
21#include <iostream>
22#include <cstdlib>
23#include <algorithm>
24#include "qv4profiling_p.h"
25#include "qv4mapobject_p.h"
26#include "qv4setobject_p.h"
27
28//#define MM_STATS
29
30#if !defined(MM_STATS) && !defined(QT_NO_DEBUG)
31#define MM_STATS
32#endif
33
34#if MM_DEBUG
35#define DEBUG qDebug() << "MM:"
36#else
37#define DEBUG if (1) ; else qDebug() << "MM:"
38#endif
39
40#ifdef V4_USE_VALGRIND
41#include <valgrind/valgrind.h>
42#include <valgrind/memcheck.h>
43#endif
44
45#ifdef V4_USE_HEAPTRACK
46#include <heaptrack_api.h>
47#endif
48
49#if OS(QNX)
50#include <sys/storage.h> // __tls()
51#endif
52
53#if USE(PTHREADS) && HAVE(PTHREAD_NP_H)
54#include <pthread_np.h>
55#endif
56
57Q_LOGGING_CATEGORY(lcGcStats, "qt.qml.gc.statistics")
59Q_LOGGING_CATEGORY(lcGcAllocatorStats, "qt.qml.gc.allocatorStats")
60Q_DECLARE_LOGGING_CATEGORY(lcGcAllocatorStats)
61
62using namespace WTF;
63
65
66namespace QV4 {
67
68enum {
70 GCOverallocation = 200 /* Max overallocation by the GC in % */
71};
72
74 enum {
75#ifdef Q_OS_RTEMS
76 NumChunks = sizeof(quint64),
77#else
78 NumChunks = 8*sizeof(quint64),
79#endif
80 SegmentSize = NumChunks*Chunk::ChunkSize,
81 };
82
84 {
85 size += Chunk::ChunkSize; // make sure we can get enough 64k alignment memory
86 if (size < SegmentSize)
87 size = SegmentSize;
88
89 pageReservation = PageReservation::reserve(size, OSAllocator::JSGCHeapPages);
90 base = reinterpret_cast<Chunk *>((reinterpret_cast<quintptr>(pageReservation.base()) + Chunk::ChunkSize - 1) & ~(Chunk::ChunkSize - 1));
91 nChunks = NumChunks;
92 availableBytes = size - (reinterpret_cast<quintptr>(base) - reinterpret_cast<quintptr>(pageReservation.base()));
93 if (availableBytes < SegmentSize)
94 --nChunks;
95 }
97 qSwap(pageReservation, other.pageReservation);
98 qSwap(base, other.base);
99 qSwap(allocatedMap, other.allocatedMap);
100 qSwap(availableBytes, other.availableBytes);
101 qSwap(nChunks, other.nChunks);
102 }
103
105 if (base)
106 pageReservation.deallocate();
107 }
108
109 void setBit(size_t index) {
110 Q_ASSERT(index < nChunks);
111 quint64 bit = static_cast<quint64>(1) << index;
112// qDebug() << " setBit" << hex << index << (index & (Bits - 1)) << bit;
113 allocatedMap |= bit;
114 }
115 void clearBit(size_t index) {
116 Q_ASSERT(index < nChunks);
117 quint64 bit = static_cast<quint64>(1) << index;
118// qDebug() << " setBit" << hex << index << (index & (Bits - 1)) << bit;
119 allocatedMap &= ~bit;
120 }
121 bool testBit(size_t index) const {
122 Q_ASSERT(index < nChunks);
123 quint64 bit = static_cast<quint64>(1) << index;
124 return (allocatedMap & bit);
125 }
126
127 Chunk *allocate(size_t size);
    Chunk *allocate(size_t size);
    // Returns the chunks covering [chunk, chunk + size) to this segment:
    // clears their bitmap bits and decommits the pages.
    void free(Chunk *chunk, size_t size) {
        DEBUG << "freeing chunk" << chunk;
        size_t index = static_cast<size_t>(chunk - base);
        size_t end = qMin(static_cast<size_t>(NumChunks), index + (size - 1)/Chunk::ChunkSize + 1);
        while (index < end) {
            // NOTE(review): a line appears to be missing from this excerpt
            // before clearBit (likely 'Q_ASSERT(testBit(index));') — confirm
            // against the full source.
            clearBit(index);
            ++index;
        }

        // Round the decommit range up to whole OS pages.
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1);
#if !defined(Q_OS_LINUX) && !defined(Q_OS_WIN)
        // Linux and Windows zero out pages that have been decommitted and get committed again.
        // unfortunately that's not true on other OSes (e.g. BSD based ones), so zero out the
        // memory before decommit, so that we can be sure that all chunks we allocate will be
        // zero initialized.
        memset(chunk, 0, size);
#endif
        pageReservation.decommit(chunk, size);
    }
149
150 bool contains(Chunk *c) const {
151 return c >= base && c < base + nChunks;
152 }
153
154 PageReservation pageReservation;
155 Chunk *base = nullptr;
156 quint64 allocatedMap = 0;
157 size_t availableBytes = 0;
158 uint nChunks = 0;
159};
160
161Chunk *MemorySegment::allocate(size_t size)
162{
163 if (!allocatedMap && size >= SegmentSize) {
164 // chunk allocated for one huge allocation
165 Q_ASSERT(availableBytes >= size);
166 pageReservation.commit(base, size);
167 allocatedMap = ~static_cast<quint64>(0);
168 return base;
169 }
170 size_t requiredChunks = (size + sizeof(Chunk) - 1)/sizeof(Chunk);
171 uint sequence = 0;
172 Chunk *candidate = nullptr;
173 for (uint i = 0; i < nChunks; ++i) {
174 if (!testBit(i)) {
175 if (!candidate)
176 candidate = base + i;
177 ++sequence;
178 } else {
179 candidate = nullptr;
180 sequence = 0;
181 }
182 if (sequence == requiredChunks) {
183 pageReservation.commit(candidate, size);
184 for (uint i = 0; i < requiredChunks; ++i)
185 setBit(candidate - base + i);
186 DEBUG << "allocated chunk " << candidate << Qt::hex << size;
187
188 return candidate;
189 }
190 }
191 return nullptr;
192}
193
196
197 size_t requiredChunkSize(size_t size) {
198 size += Chunk::HeaderSize; // space required for the Chunk header
199 size_t pageSize = WTF::pageSize();
200 size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
201 if (size < Chunk::ChunkSize)
202 size = Chunk::ChunkSize;
203 return size;
204 }
205
206 Chunk *allocate(size_t size = 0);
207 void free(Chunk *chunk, size_t size = 0);
208
209 std::vector<MemorySegment> memorySegments;
210};
211
212Chunk *ChunkAllocator::allocate(size_t size)
213{
214 size = requiredChunkSize(size);
215 for (auto &m : memorySegments) {
216 if (~m.allocatedMap) {
217 Chunk *c = m.allocate(size);
218 if (c)
219 return c;
220 }
221 }
222
223 // allocate a new segment
224 memorySegments.push_back(MemorySegment(size));
225 Chunk *c = memorySegments.back().allocate(size);
226 Q_ASSERT(c);
227 return c;
228}
229
230void ChunkAllocator::free(Chunk *chunk, size_t size)
231{
232 size = requiredChunkSize(size);
233 for (auto &m : memorySegments) {
234 if (m.contains(chunk)) {
235 m.free(chunk, size);
236 return;
237 }
238 }
239 Q_ASSERT(false);
240}
241
242#ifdef DUMP_SWEEP
245 while (s.length() < 64)
246 s.prepend(QChar::fromLatin1('0'));
247 return s;
248}
249#define SDUMP qDebug
250#else
252#define SDUMP if (1) ; else qDebug
253#endif
254
255// Stores a classname -> freed count mapping.
257Q_GLOBAL_STATIC(MMStatsHash, freedObjectStatsGlobal)
258
259// This indirection avoids sticking QHash code in each of the call sites, which
260// shaves off some instructions in the case that it's unused.
261static void increaseFreedCountForClass(const char *className)
262{
263 (*freedObjectStatsGlobal())[className]++;
264}
265
266//bool Chunk::sweep(ClassDestroyStatsCallback classCountPtr)
// Sweeps one chunk: runs destructors for every allocated-but-unmarked object,
// clears their bitmap bits, and rebuilds the object bitmap from the black
// (mark) bits. Returns true when the chunk still contains live objects.
bool Chunk::sweep(ExecutionEngine *engine)
{
    bool hasUsedSlots = false;
    SDUMP() << "sweeping chunk" << this;
    HeapItem *o = realBase();
    bool lastSlotFree = false;
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        // An allocated slot whose black bit is clear was not reached during
        // the mark phase, so it must be freed.
        quintptr toFree = objectBitmap[i] ^ blackBitmap[i];
        Q_ASSERT((toFree & objectBitmap[i]) == toFree); // check all black objects are marked as being used
        quintptr e = extendsBitmap[i];
        SDUMP() << " index=" << i;
        SDUMP() << " toFree =" << binary(toFree);
        SDUMP() << " black =" << binary(blackBitmap[i]);
        SDUMP() << " object =" << binary(objectBitmap[i]);
        SDUMP() << " extends =" << binary(e);
        if (lastSlotFree)
            e &= (e + 1); // clear all lowest extent bits
        while (toFree) {
            // NOTE(review): the declaration of 'index' (presumably
            // 'uint index = qCountTrailingZeroBits(toFree);') appears to be
            // missing from this excerpt — confirm against the full source.
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << " index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            const VTable *v = b->internalClass->vtable;
//            if (Q_UNLIKELY(classCountPtr))
//                classCountPtr(v->className);
            if (v->destroy) {
                v->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, qPopulationCount((objectBitmap[i] | extendsBitmap[i])
                - (blackBitmap[i] | e)) * Chunk::SlotSize,
                Profiling::SmallItem);
        // Only marked objects survive: the new object bitmap is the mark bitmap.
        objectBitmap[i] = blackBitmap[i];
        hasUsedSlots |= (blackBitmap[i] != 0);
        extendsBitmap[i] = e;
        // Remember whether the topmost slot of this word is free so extent
        // bits spilling into the next word can be cleared there.
        lastSlotFree = !((objectBitmap[i]|extendsBitmap[i]) >> (sizeof(quintptr)*8 - 1));
        SDUMP() << " new extends =" << binary(e);
        SDUMP() << " lastSlotFree" << lastSlotFree;
        Q_ASSERT((objectBitmap[i] & extendsBitmap[i]) == 0);
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
    return hasUsedSlots;
}
328
// Unconditionally destroys every object in this chunk (shutdown path: no
// mark bits are consulted) and clears the object bitmap.
void Chunk::freeAll(ExecutionEngine *engine)
{
    // DEBUG << "sweeping chunk" << this << (*freeList);
    HeapItem *o = realBase();
    for (uint i = 0; i < Chunk::EntriesInBitmap; ++i) {
        quintptr toFree = objectBitmap[i];
        quintptr e = extendsBitmap[i];
        // DEBUG << hex << " index=" << i << toFree;
        while (toFree) {
            // NOTE(review): the declaration of 'index' (presumably
            // 'uint index = qCountTrailingZeroBits(toFree);') appears to be
            // missing from this excerpt — confirm against the full source.
            quintptr bit = (static_cast<quintptr>(1) << index);

            toFree ^= bit; // mask out freed slot
            // DEBUG << " index" << hex << index << toFree;

            // remove all extends slots that have been freed
            // this is a bit of bit trickery.
            quintptr mask = (bit << 1) - 1; // create a mask of 1's to the right of and up to the current bit
            quintptr objmask = e | mask; // or'ing mask with e gives all ones until the end of the current object
            quintptr result = objmask + 1;
            Q_ASSERT(qCountTrailingZeroBits(result) - index != 0); // ensure we freed something
            result |= mask; // ensure we don't clear stuff to the right of the current object
            e &= result;

            HeapItem *itemToFree = o + index;
            Heap::Base *b = *itemToFree;
            if (b->internalClass->vtable->destroy) {
                b->internalClass->vtable->destroy(b);
                b->_checkIsDestroyed();
            }
#ifdef V4_USE_HEAPTRACK
            heaptrack_report_free(itemToFree);
#endif
        }
        Q_V4_PROFILE_DEALLOC(engine, (qPopulationCount(objectBitmap[i]|extendsBitmap[i])
                - qPopulationCount(e)) * Chunk::SlotSize, Profiling::SmallItem);
        objectBitmap[i] = 0;
        extendsBitmap[i] = e;
        o += Chunk::Bits;
    }
    // DEBUG << "swept chunk" << this << "freed" << slotsFreed << "slots.";
}
371
372void Chunk::resetBlackBits()
373{
374 memset(blackBitmap, 0, sizeof(blackBitmap));
375}
376
// Scans the object/extends bitmaps for runs of free slots and links each run
// into the size-binned free lists 'bins' (runs >= nBins-1 slots all land in
// the last bin).
void Chunk::sortIntoBins(HeapItem **bins, uint nBins)
{
//    qDebug() << "sortIntoBins:";
    HeapItem *base = realBase();
#if QT_POINTER_SIZE == 8
    const int start = 0;
#else
    const int start = 1;
#endif
#ifndef QT_NO_DEBUG
    uint freeSlots = 0;
    uint allocatedSlots = 0;
#endif
    for (int i = start; i < EntriesInBitmap; ++i) {
        quintptr usedSlots = (objectBitmap[i]|extendsBitmap[i]);
#if QT_POINTER_SIZE == 8
        if (!i)
            // Treat the slots occupied by the chunk header as used.
            usedSlots |= (static_cast<quintptr>(1) << (HeaderSize/SlotSize)) - 1;
#endif
#ifndef QT_NO_DEBUG
        allocatedSlots += qPopulationCount(usedSlots);
//        qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
        while (1) {
            // Trailing-zero count of (x + 1) is the index of the lowest
            // clear bit of x, i.e. the first free slot in this word.
            uint index = qCountTrailingZeroBits(usedSlots + 1);
            if (index == Bits)
                break;
            uint freeStart = i*Bits + index;
            usedSlots &= ~((static_cast<quintptr>(1) << index) - 1);
            // Extend the free run across subsequent all-free bitmap words.
            while (!usedSlots) {
                if (++i < EntriesInBitmap) {
                    usedSlots = (objectBitmap[i]|extendsBitmap[i]);
                } else {
                    Q_ASSERT(i == EntriesInBitmap);
                    // Overflows to 0 when counting trailing zeroes above in next iteration.
                    // Then, all the bits are zeroes and we break.
                    usedSlots = std::numeric_limits<quintptr>::max();
                    break;
                }
#ifndef QT_NO_DEBUG
                allocatedSlots += qPopulationCount(usedSlots);
//                qDebug() << hex << " i=" << i << "used=" << usedSlots;
#endif
            }
            HeapItem *freeItem = base + freeStart;

            // First used slot after the run marks the end of the free range.
            index = qCountTrailingZeroBits(usedSlots);
            usedSlots |= (quintptr(1) << index) - 1;
            uint freeEnd = i*Bits + index;
            uint nSlots = freeEnd - freeStart;
#ifndef QT_NO_DEBUG
//            qDebug() << hex << " got free slots from" << freeStart << "to" << freeEnd << "n=" << nSlots << "usedSlots=" << usedSlots;
            freeSlots += nSlots;
#endif
            Q_ASSERT(freeEnd > freeStart && freeEnd <= NumSlots);
            freeItem->freeData.availableSlots = nSlots;
            uint bin = qMin(nBins - 1, nSlots);
            freeItem->freeData.next = bins[bin];
            bins[bin] = freeItem;
        }
    }
#ifndef QT_NO_DEBUG
    Q_ASSERT(freeSlots + allocatedSlots == (EntriesInBitmap - start) * 8 * sizeof(quintptr));
#endif
}
442
// Allocates 'size' bytes (a multiple of the slot size) from the block heap.
// Tries, in order: the exact-size free bin, bump allocation from the current
// chunk, a fit from the large-runs bin, splitting a bigger bin entry, and
// finally — only when 'forceAllocation' is set — a brand new chunk.
// Returns nullptr when forceAllocation is false and nothing fits.
HeapItem *BlockAllocator::allocate(size_t size, bool forceAllocation) {
    Q_ASSERT((size % Chunk::SlotSize) == 0);
    size_t slotsRequired = size >> Chunk::SlotSizeShift;

    if (allocationStats)
        ++allocationStats[binForSlots(slotsRequired)];

    HeapItem **last;

    HeapItem *m;

    // 1) Exact-size bin hit.
    if (slotsRequired < NumBins - 1) {
        m = freeBins[slotsRequired];
        if (m) {
            freeBins[slotsRequired] = m->freeData.next;
            goto done;
        }
    }

    // 2) Bump-allocate from the tail of the current chunk.
    if (nFree >= slotsRequired) {
        // use bump allocation
        Q_ASSERT(nextFree);
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
        goto done;
    }

    // DEBUG << "No matching bin found for item" << size << bin;
    // search last bin for a large enough item
    last = &freeBins[NumBins - 1];
    while ((m = *last)) {
        if (m->freeData.availableSlots >= slotsRequired) {
            *last = m->freeData.next; // take it out of the list

            size_t remainingSlots = m->freeData.availableSlots - slotsRequired;
            // DEBUG << "found large free slots of size" << m->freeData.availableSlots << m << "remaining" << remainingSlots;
            if (remainingSlots == 0)
                goto done;

            // Keep the unused tail: either as the new bump-allocation area
            // (if larger than the current one) or re-binned by size.
            HeapItem *remainder = m + slotsRequired;
            if (remainingSlots > nFree) {
                if (nFree) {
                    size_t bin = binForSlots(nFree);
                    nextFree->freeData.next = freeBins[bin];
                    nextFree->freeData.availableSlots = nFree;
                    freeBins[bin] = nextFree;
                }
                nextFree = remainder;
                nFree = remainingSlots;
            } else {
                remainder->freeData.availableSlots = remainingSlots;
                size_t binForRemainder = binForSlots(remainingSlots);
                remainder->freeData.next = freeBins[binForRemainder];
                freeBins[binForRemainder] = remainder;
            }
            goto done;
        }
        last = &m->freeData.next;
    }

    // 4) Split an entry from the next larger bin that has one.
    if (slotsRequired < NumBins - 1) {
        // check if we can split up another slot
        for (size_t i = slotsRequired + 1; i < NumBins - 1; ++i) {
            m = freeBins[i];
            if (m) {
                freeBins[i] = m->freeData.next; // take it out of the list
//                qDebug() << "got item" << slotsRequired << "from slot" << i;
                size_t remainingSlots = i - slotsRequired;
                Q_ASSERT(remainingSlots < NumBins - 1);
                HeapItem *remainder = m + slotsRequired;
                remainder->freeData.availableSlots = remainingSlots;
                remainder->freeData.next = freeBins[remainingSlots];
                freeBins[remainingSlots] = remainder;
                goto done;
            }
        }
    }

    // 5) Nothing fit: allocate a fresh chunk, or give up if not forced.
    if (!m) {
        if (!forceAllocation)
            return nullptr;
        if (nFree) {
            // Save any remaining slots of the current chunk
            // for later, smaller allocations.
            size_t bin = binForSlots(nFree);
            nextFree->freeData.next = freeBins[bin];
            nextFree->freeData.availableSlots = nFree;
            freeBins[bin] = nextFree;
        }
        Chunk *newChunk = chunkAllocator->allocate();
        Q_V4_PROFILE_ALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
        chunks.push_back(newChunk);
        nextFree = newChunk->first();
        nFree = Chunk::AvailableSlots;
        m = nextFree;
        nextFree += slotsRequired;
        nFree -= slotsRequired;
    }

done:
    m->setAllocatedSlots(slotsRequired);
    Q_V4_PROFILE_ALLOC(engine, slotsRequired * Chunk::SlotSize, Profiling::SmallItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(m, slotsRequired * Chunk::SlotSize);
#endif
    // DEBUG << " " << hex << m->chunk() << m->chunk()->objectBitmap[0] << m->chunk()->extendsBitmap[0] << (m - m->chunk()->realBase());
    return m;
}
552
553void BlockAllocator::sweep()
554{
555 nextFree = nullptr;
556 nFree = 0;
557 memset(freeBins, 0, sizeof(freeBins));
558
559// qDebug() << "BlockAlloc: sweep";
560 usedSlotsAfterLastSweep = 0;
561
562 auto firstEmptyChunk = std::partition(chunks.begin(), chunks.end(), [this](Chunk *c) {
563 return c->sweep(engine);
564 });
565
566 std::for_each(chunks.begin(), firstEmptyChunk, [this](Chunk *c) {
567 c->sortIntoBins(freeBins, NumBins);
568 usedSlotsAfterLastSweep += c->nUsedSlots();
569 });
570
571 // only free the chunks at the end to avoid that the sweep() calls indirectly
572 // access freed memory
573 std::for_each(firstEmptyChunk, chunks.end(), [this](Chunk *c) {
574 Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
575 chunkAllocator->free(c);
576 });
577
578 chunks.erase(firstEmptyChunk, chunks.end());
579}
580
581void BlockAllocator::freeAll()
582{
583 for (auto c : chunks)
584 c->freeAll(engine);
585 for (auto c : chunks) {
586 Q_V4_PROFILE_DEALLOC(engine, Chunk::DataSize, Profiling::HeapPage);
587 chunkAllocator->free(c);
588 }
589}
590
591void BlockAllocator::resetBlackBits()
592{
593 for (auto c : chunks)
594 c->resetBlackBits();
595}
596
// Allocates one large item. Items of at least half a segment get a dedicated
// MemorySegment of their own; smaller ones go through the shared ChunkAllocator.
HeapItem *HugeItemAllocator::allocate(size_t size) {
    MemorySegment *m = nullptr;
    Chunk *c = nullptr;
    if (size >= MemorySegment::SegmentSize/2) {
        // too large to handle through the ChunkAllocator, let's get our own memory segment
        size += Chunk::HeaderSize; // space required for the Chunk header
        size_t pageSize = WTF::pageSize();
        size = (size + pageSize - 1) & ~(pageSize - 1); // align to page sizes
        // Ownership of this segment is recorded in the HugeChunk below and
        // released again in freeHugeChunk().
        m = new MemorySegment(size);
        c = m->allocate(size);
    } else {
        c = chunkAllocator->allocate(size);
    }
    Q_ASSERT(c);
    chunks.push_back(HugeChunk{m, c, size});
    // A huge chunk holds exactly one object: mark its first slot allocated.
    Chunk::setBit(c->objectBitmap, c->first() - c->realBase());
    Q_V4_PROFILE_ALLOC(engine, size, Profiling::LargeItem);
#ifdef V4_USE_HEAPTRACK
    heaptrack_report_alloc(c, size);
#endif
    return c->first();
}
619
620static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
621{
622 HeapItem *itemToFree = c.chunk->first();
623 Heap::Base *b = *itemToFree;
624 const VTable *v = b->internalClass->vtable;
625 if (Q_UNLIKELY(classCountPtr))
626 classCountPtr(v->className);
627
628 if (v->destroy) {
629 v->destroy(b);
630 b->_checkIsDestroyed();
631 }
632 if (c.segment) {
633 // own memory segment
634 c.segment->free(c.chunk, c.size);
635 delete c.segment;
636 } else {
637 chunkAllocator->free(c.chunk, c.size);
638 }
639#ifdef V4_USE_HEAPTRACK
640 heaptrack_report_free(c.chunk);
641#endif
642}
643
644void HugeItemAllocator::sweep(ClassDestroyStatsCallback classCountPtr)
645{
646 auto isBlack = [this, classCountPtr] (const HugeChunk &c) {
647 bool b = c.chunk->first()->isBlack();
648 Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
649 if (!b) {
650 Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
651 freeHugeChunk(chunkAllocator, c, classCountPtr);
652 }
653 return !b;
654 };
655
656 auto newEnd = std::remove_if(chunks.begin(), chunks.end(), isBlack);
657 chunks.erase(newEnd, chunks.end());
658}
659
660void HugeItemAllocator::resetBlackBits()
661{
662 for (auto c : chunks)
663 Chunk::clearBit(c.chunk->blackBitmap, c.chunk->first() - c.chunk->realBase());
664}
665
666void HugeItemAllocator::freeAll()
667{
668 for (auto &c : chunks) {
669 Q_V4_PROFILE_DEALLOC(engine, c.size, Profiling::LargeItem);
670 freeHugeChunk(chunkAllocator, c, nullptr);
671 }
672}
673
674
// Sets up the GC: one shared chunk allocator feeding the block, internal-class
// and huge-item allocators, plus the persistent/weak value storages.
MemoryManager::MemoryManager(ExecutionEngine *engine)
    : engine(engine)
    , chunkAllocator(new ChunkAllocator)
    , blockAllocator(chunkAllocator, engine)
    , icAllocator(chunkAllocator, engine)
    , hugeItemAllocator(chunkAllocator, engine)
    , m_persistentValues(new PersistentValueStorage(engine))
    , m_weakValues(new PersistentValueStorage(engine))
    , unmanagedHeapSizeGCLimit(MinUnmanagedHeapSizeGCLimit)
    , aggressiveGC(!qEnvironmentVariableIsEmpty("QV4_MM_AGGRESSIVE_GC"))
    , gcStats(lcGcStats().isDebugEnabled())
    , gcCollectorStats(lcGcAllocatorStats().isDebugEnabled())
{
#ifdef V4_USE_VALGRIND
    VALGRIND_CREATE_MEMPOOL(this, 0, true);
#endif
    memset(statistics.allocations, 0, sizeof(statistics.allocations));
    if (gcStats)
    // NOTE(review): the statement guarded by this 'if' appears to be missing
    // from this excerpt (presumably enabling per-bin allocation statistics)
    // — confirm against the full source.
}
695
// Allocates a Heap::String slot. 'unmanagedSize' is the string's out-of-heap
// payload size; it is added to unmanagedHeapSize, which feeds the GC trigger.
Heap::Base *MemoryManager::allocString(std::size_t unmanagedSize)
{
    const size_t stringSize = align(sizeof(Heap::String));
#ifdef MM_STATS
    // NOTE(review): the MM_STATS bookkeeping lines appear to be missing from
    // this excerpt — confirm against the full source.
#endif
    unmanagedHeapSize += unmanagedSize;

    HeapItem *m = allocate(&blockAllocator, stringSize);
    memset(m, 0, stringSize);
    if (gcBlocked) {
        // If the gc is running right now, it will not have a chance to mark the newly created item
        // and may therefore sweep it right away.
        // Protect the new object from the current GC run to avoid this.
        m->as<Heap::Base>()->setMarkBit();
    }

    return *m;
}
716
718{
719#ifdef MM_STATS
722#endif
723
726
728 memset(m, 0, size);
729 if (gcBlocked) {
730 // If the gc is running right now, it will not have a chance to mark the newly created item
731 // and may therefore sweep it right away.
732 // Protect the new object from the current GC run to avoid this.
733 m->as<Heap::Base>()->setMarkBit();
734 }
735
736 return *m;
737}
738
// Allocates an object together with its member-data array. When the members
// fit inline, a plain object is allocated; otherwise object + MemberData are
// carved out of one block allocation (or the huge-item allocator for very
// large member arrays).
Heap::Object *MemoryManager::allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
{
    uint size = (vtable->nInlineProperties + vtable->inlinePropertyOffset)*sizeof(Value);
    Q_ASSERT(!(size % sizeof(HeapItem)));

    Heap::Object *o;
    if (nMembers <= vtable->nInlineProperties) {
        o = static_cast<Heap::Object *>(allocData(size));
    } else {
        // Allocate both in one go through the block allocator
        nMembers -= vtable->nInlineProperties;
        std::size_t memberSize = align(sizeof(Heap::MemberData) + (nMembers - 1)*sizeof(Value));
        size_t totalSize = size + memberSize;
        Heap::MemberData *m;
        if (totalSize > Chunk::DataSize) {
            o = static_cast<Heap::Object *>(allocData(size));
            m = hugeItemAllocator.allocate(memberSize)->as<Heap::MemberData>();
        } else {
            HeapItem *mh = reinterpret_cast<HeapItem *>(allocData(totalSize));
            Heap::Base *b = *mh;
            o = static_cast<Heap::Object *>(b);
            // The MemberData lives right behind the object inside the same
            // allocation; mark its slot as a separate object in the bitmaps.
            mh += (size >> Chunk::SlotSizeShift);
            m = mh->as<Heap::MemberData>();
            Chunk *c = mh->chunk();
            size_t index = mh - c->realBase();
            Chunk::setBit(c->objectBitmap, index);
            Chunk::clearBit(c->extendsBitmap, index);
        }
        o->memberData.set(engine, m);
        // NOTE(review): a line appears to be missing here (presumably setting
        // m's internal class before the assert below) — confirm against the
        // full source.
        Q_ASSERT(o->memberData->internalClass);
        m->values.alloc = static_cast<uint>((memberSize - sizeof(Heap::MemberData) + sizeof(Value))/sizeof(Value));
        m->values.size = o->memberData->values.alloc;
        m->init();
//        qDebug() << "  got" << o->memberData << o->memberData->size;
    }
//    qDebug() << "allocating object with memberData" << o << o->memberData.operator->();
    return o;
}
778
780
782 : m_engine(engine)
783{
784 m_base = (Heap::Base **)engine->gcStack->base();
785 m_top = m_base;
786 const size_t size = engine->maxGCStackSize() / sizeof(Heap::Base);
787 m_hardLimit = m_base + size;
788 m_softLimit = m_base + size * 3 / 4;
789}
790
// Pops entries until the mark stack is empty, letting each object push its
// children via its vtable's markObjects().
void MarkStack::drain()
{
    while (m_top > m_base) {
        Heap::Base *h = pop();
        // NOTE(review): a line appears to be missing from this excerpt
        // between pop() and the assert — confirm against the full source.
        Q_ASSERT(h); // at this point we should only have Heap::Base objects in this area on the stack. If not, weird things might happen.
        h->internalClass->vtable->markObjects(h, this);
    }
}
800
// Marks everything directly reachable: engine internals, the JS stack,
// persistent values, and those weak QObject wrappers whose QObject must stay
// alive for ownership reasons.
void MemoryManager::collectRoots(MarkStack *markStack)
{
    engine->markObjects(markStack);

//    qDebug() << "   mark stack after engine->mark" << (engine->jsStackTop - markBase);

    collectFromJSStack(markStack);

//    qDebug() << "   mark stack after js stack collect" << (engine->jsStackTop - markBase);
    m_persistentValues->mark(markStack);

//    qDebug() << "   mark stack after persistants" << (engine->jsStackTop - markBase);

    // Preserve QObject ownership rules within JavaScript: A parent with c++ ownership
    // keeps all of its children alive in JavaScript.

    // Do this _after_ collectFromStack to ensure that processing the weak
    // managed objects in the loop down there doesn't make then end up as leftovers
    // on the stack and thus always get collected.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>();
        if (!qobjectWrapper)
            continue;
        QObject *qobject = qobjectWrapper->object();
        if (!qobject)
            continue;
        bool keepAlive = QQmlData::keepAliveDuringGarbageCollection(qobject);

        if (!keepAlive) {
            if (QObject *parent = qobject->parent()) {
                // Walk up to the root of the QObject parent chain.
                while (parent->parent())
                    parent = parent->parent();

                // NOTE(review): a line appears to be missing here (presumably
                // re-evaluating keepAlive for the root parent) — confirm
                // against the full source.
            }
        }

        if (keepAlive)
            qobjectWrapper->mark(markStack);
    }
}
842
843void MemoryManager::mark()
844{
845 markStackSize = 0;
846 MarkStack markStack(engine);
847 collectRoots(&markStack);
848 // dtor of MarkStack drains
849}
850
// The sweep phase: notifies dying QObject wrappers, prunes weak maps/sets,
// resets dead weak values to undefined, compacts bookkeeping lists, and
// finally sweeps the allocators (skipped on the final shutdown sweep, where
// freeAll() is used instead).
void MemoryManager::sweep(bool lastSweep, ClassDestroyStatsCallback classCountPtr)
{
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        // we need to call destroyObject on qobjectwrappers now, so that they can emit the destroyed
        // signal before we start sweeping the heap
        if (QObjectWrapper *qobjectWrapper = (*it).as<QObjectWrapper>())
            qobjectWrapper->destroyObject(lastSweep);
    }

    // remove objects from weak maps and sets
    Heap::MapObject *map = weakMaps;
    Heap::MapObject **lastMap = &weakMaps;
    while (map) {
        if (map->isMarked()) {
            map->removeUnmarkedKeys();
            // Keep marked maps linked; unmarked ones drop out of the chain.
            *lastMap = map;
            lastMap = &map->nextWeakMap;
        }
        map = map->nextWeakMap;
    }

    Heap::SetObject *set = weakSets;
    Heap::SetObject **lastSet = &weakSets;
    while (set) {
        if (set->isMarked()) {
            set->removeUnmarkedKeys();
            *lastSet = set;
            lastSet = &set->nextWeakSet;
        }
        set = set->nextWeakSet;
    }

    // onDestruction handlers may have accessed other QObject wrappers and reset their value, so ensure
    // that they are all set to undefined.
    for (PersistentValueStorage::Iterator it = m_weakValues->begin(); it != m_weakValues->end(); ++it) {
        Managed *m = (*it).managed();
        if (!m || m->markBit())
            continue;
        (*it) = Value::undefinedValue();
    }

    // Now it is time to free QV4::QObjectWrapper Value, we must check the Value's tag to make sure its object has been destroyed
    const int pendingCount = m_pendingFreedObjectWrapperValue.size();
    if (pendingCount) {
        QVector<Value *> remainingWeakQObjectWrappers;
        remainingWeakQObjectWrappers.reserve(pendingCount);
        for (int i = 0; i < pendingCount; ++i) {
            // NOTE(review): two lines appear to be missing from this excerpt —
            // the declaration of 'v' (the i-th pending Value*) and the then-
            // branch releasing it — confirm against the full source.
            if (v->isUndefined() || v->isEmpty())
            else
                remainingWeakQObjectWrappers.append(v);
        }
        m_pendingFreedObjectWrapperValue = remainingWeakQObjectWrappers;
    }

    if (MultiplyWrappedQObjectMap *multiplyWrappedQObjects = engine->m_multiplyWrappedQObjects) {
        for (MultiplyWrappedQObjectMap::Iterator it = multiplyWrappedQObjects->begin(); it != multiplyWrappedQObjects->end();) {
            if (it.value().isNullOrUndefined())
                it = multiplyWrappedQObjects->erase(it);
            else
                ++it;
        }
    }


    if (!lastSweep) {
        // NOTE(review): a line appears to be missing from this excerpt at the
        // start of this branch — confirm against the full source.
        blockAllocator.sweep(/*classCountPtr*/);
        hugeItemAllocator.sweep(classCountPtr);
        icAllocator.sweep(/*classCountPtr*/);
    }
}
927
928bool MemoryManager::shouldRunGC() const
929{
930 size_t total = blockAllocator.totalSlots() + icAllocator.totalSlots();
931 if (total > MinSlotsGCLimit && usedSlotsAfterLastFullSweep * GCOverallocation < total * 100)
932 return true;
933 return false;
934}
935
936static size_t dumpBins(BlockAllocator *b, const char *title)
937{
938 const QLoggingCategory &stats = lcGcAllocatorStats();
939 size_t totalSlotMem = 0;
940 if (title)
941 qDebug(stats) << "Slot map for" << title << "allocator:";
942 for (uint i = 0; i < BlockAllocator::NumBins; ++i) {
943 uint nEntries = 0;
944 HeapItem *h = b->freeBins[i];
945 while (h) {
946 ++nEntries;
947 totalSlotMem += h->freeData.availableSlots;
948 h = h->freeData.next;
949 }
950 if (title)
951 qDebug(stats) << " number of entries in slot" << i << ":" << nEntries;
952 }
953 SDUMP() << " large slot map";
954 HeapItem *h = b->freeBins[BlockAllocator::NumBins - 1];
955 while (h) {
956 SDUMP() << " " << Qt::hex << (quintptr(h)/32) << h->freeData.availableSlots;
957 h = h->freeData.next;
958 }
959
960 if (title)
961 qDebug(stats) << " total mem in bins" << totalSlotMem*Chunk::SlotSize;
962 return totalSlotMem*Chunk::SlotSize;
963}
964
966{
967 if (gcBlocked) {
968// qDebug() << "Not running GC.";
969 return;
970 }
971
972 QScopedValueRollback<bool> gcBlocker(gcBlocked, true);
973// qDebug() << "runGC";
974
975 if (gcStats) {
976 statistics.maxReservedMem = qMax(statistics.maxReservedMem, getAllocatedMem());
977 statistics.maxAllocatedMem = qMax(statistics.maxAllocatedMem, getUsedMem() + getLargeItemsMem());
978 }
979
980 if (!gcCollectorStats) {
981 mark();
982 sweep();
983 } else {
984 bool triggeredByUnmanagedHeap = (unmanagedHeapSize > unmanagedHeapSizeGCLimit);
985 size_t oldUnmanagedSize = unmanagedHeapSize;
986
987 const size_t totalMem = getAllocatedMem();
988 const size_t usedBefore = getUsedMem();
989 const size_t largeItemsBefore = getLargeItemsMem();
990
991 const QLoggingCategory &stats = lcGcAllocatorStats();
992 qDebug(stats) << "========== GC ==========";
993#ifdef MM_STATS
994 qDebug(stats) << " Triggered by alloc request of" << lastAllocRequestedSlots << "slots.";
995 qDebug(stats) << " Allocations since last GC" << allocationCount;
996 allocationCount = 0;
997#endif
998 size_t oldChunks = blockAllocator.chunks.size();
999 qDebug(stats) << "Allocated" << totalMem << "bytes in" << oldChunks << "chunks";
1000 qDebug(stats) << "Fragmented memory before GC" << (totalMem - usedBefore);
1001 dumpBins(&blockAllocator, "Block");
1002 dumpBins(&icAllocator, "InternalClass");
1003
1005 t.start();
1006 mark();
1007 qint64 markTime = t.nsecsElapsed()/1000;
1008 t.restart();
1009 sweep(false, increaseFreedCountForClass);
1010 const size_t usedAfter = getUsedMem();
1011 const size_t largeItemsAfter = getLargeItemsMem();
1012 qint64 sweepTime = t.nsecsElapsed()/1000;
1013
1014 if (triggeredByUnmanagedHeap) {
1015 qDebug(stats) << "triggered by unmanaged heap:";
1016 qDebug(stats) << " old unmanaged heap size:" << oldUnmanagedSize;
1017 qDebug(stats) << " new unmanaged heap:" << unmanagedHeapSize;
1018 qDebug(stats) << " unmanaged heap limit:" << unmanagedHeapSizeGCLimit;
1019 }
1020 size_t memInBins = dumpBins(&blockAllocator, "Block")
1021 + dumpBins(&icAllocator, "InternalClasss");
1022 qDebug(stats) << "Marked object in" << markTime << "us.";
1023 qDebug(stats) << " " << markStackSize << "objects marked";
1024 qDebug(stats) << "Sweeped object in" << sweepTime << "us.";
1025
1026 // sort our object types by number of freed instances
1027 MMStatsHash freedObjectStats;
1028 std::swap(freedObjectStats, *freedObjectStatsGlobal());
1029 typedef std::pair<const char*, int> ObjectStatInfo;
1030 std::vector<ObjectStatInfo> freedObjectsSorted;
1031 freedObjectsSorted.reserve(freedObjectStats.size());
1032 for (auto it = freedObjectStats.constBegin(); it != freedObjectStats.constEnd(); ++it) {
1033 freedObjectsSorted.push_back(std::make_pair(it.key(), it.value()));
1034 }
1035 std::sort(freedObjectsSorted.begin(), freedObjectsSorted.end(), [](const ObjectStatInfo &a, const ObjectStatInfo &b) {
1036 return a.second > b.second && strcmp(a.first, b.first) < 0;
1037 });
1038
1039 qDebug(stats) << "Used memory before GC:" << usedBefore;
1040 qDebug(stats) << "Used memory after GC:" << usedAfter;
1041 qDebug(stats) << "Freed up bytes :" << (usedBefore - usedAfter);
1042 qDebug(stats) << "Freed up chunks :" << (oldChunks - blockAllocator.chunks.size());
1044 - memInBins - usedAfter;
1045 if (lost)
1046 qDebug(stats) << "!!!!!!!!!!!!!!!!!!!!! LOST MEM:" << lost << "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!";
1047 if (largeItemsBefore || largeItemsAfter) {
1048 qDebug(stats) << "Large item memory before GC:" << largeItemsBefore;
1049 qDebug(stats) << "Large item memory after GC:" << largeItemsAfter;
1050 qDebug(stats) << "Large item memory freed up:" << (largeItemsBefore - largeItemsAfter);
1051 }
1052
1053 for (auto it = freedObjectsSorted.cbegin(); it != freedObjectsSorted.cend(); ++it) {
1054 qDebug(stats).noquote() << QString::fromLatin1("Freed JS type: %1 (%2 instances)").arg(QString::fromLatin1(it->first), QString::number(it->second));
1055 }
1056
1057 qDebug(stats) << "======== End GC ========";
1058 }
1059
1060 if (gcStats)
1061 statistics.maxUsedMem = qMax(statistics.maxUsedMem, getUsedMem() + getLargeItemsMem());
1062
1063 if (aggressiveGC) {
1064 // ensure we don't 'loose' any memory
1066 == blockAllocator.usedMem() + dumpBins(&blockAllocator, nullptr));
1068 == icAllocator.usedMem() + dumpBins(&icAllocator, nullptr));
1069 }
1070
1072
1073 // reset all black bits
1077}
1078
1080{
1082}
1083
1085{
1087}
1088
1090{
1091 return hugeItemAllocator.usedMem();
1092}
1093
1095{
1096 map->nextWeakMap = weakMaps;
1097 weakMaps = map;
1098}
1099
1101{
1102 set->nextWeakSet = weakSets;
1103 weakSets = set;
1104}
1105
1107{
1108 delete m_persistentValues;
1109
1110 dumpStats();
1111
1112 sweep(/*lastSweep*/true);
1116
1117 delete m_weakValues;
1118#ifdef V4_USE_VALGRIND
1119 VALGRIND_DESTROY_MEMPOOL(this);
1120#endif
1121 delete chunkAllocator;
1122}
1123
1124
1126{
1127 if (!gcStats)
1128 return;
1129
1130 const QLoggingCategory &stats = lcGcStats();
1131 qDebug(stats) << "Qml GC memory allocation statistics:";
1132 qDebug(stats) << "Total memory allocated:" << statistics.maxReservedMem;
1133 qDebug(stats) << "Max memory used before a GC run:" << statistics.maxAllocatedMem;
1134 qDebug(stats) << "Max memory used after a GC run:" << statistics.maxUsedMem;
1135 qDebug(stats) << "Requests for different item sizes:";
1136 for (int i = 1; i < BlockAllocator::NumBins - 1; ++i)
1137 qDebug(stats) << " <" << (i << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[i];
1138 qDebug(stats) << " >=" << ((BlockAllocator::NumBins - 1) << Chunk::SlotSizeShift) << " bytes: " << statistics.allocations[BlockAllocator::NumBins - 1];
1139}
1140
// Marks every live Managed referenced from the JS value stack.
void MemoryManager::collectFromJSStack(MarkStack *markStack) const
{
    // NOTE(review): the declarations of 'v' and 'top' (presumably the JS
    // stack base and top pointers from the engine) appear to be missing from
    // this excerpt — confirm against the full source.
    while (v < top) {
        Managed *m = v->managed();
        if (m) {
            Q_ASSERT(m->inUse());
            // Skip pointers to already freed objects, they are bogus as well
            m->mark(markStack);
        }
        ++v;
    }
}
1155
1156} // namespace QV4
1157
static constexpr QChar fromLatin1(char c) noexcept
Converts the Latin-1 character c to its equivalent QChar.
Definition qchar.h:461
\inmodule QtCore
\inmodule QtCore
Definition qhash.h:818
qsizetype size() const noexcept
Returns the number of items in the hash.
Definition qhash.h:925
const_iterator constEnd() const noexcept
Returns a const \l{STL-style iterators}{STL-style iterator} pointing to the imaginary item after the ...
Definition qhash.h:1209
const_iterator constBegin() const noexcept
Returns a const \l{STL-style iterators}{STL-style iterator} pointing to the first item in the hash.
Definition qhash.h:1205
Definition qlist.h:74
void reserve(qsizetype size)
Definition qlist.h:746
void append(parameter_type t)
Definition qlist.h:441
\inmodule QtCore
\inmodule QtCore
Definition qobject.h:90
QObject * parent() const
Returns a pointer to the parent object.
Definition qobject.h:311
static bool keepAliveDuringGarbageCollection(const QObject *object)
Definition qqmldata_p.h:233
iterator begin()
Definition qset.h:136
iterator end()
Definition qset.h:140
const_iterator cend() const noexcept
Definition qset.h:142
iterator erase(const_iterator i)
Definition qset.h:145
const_iterator cbegin() const noexcept
Definition qset.h:138
\macro QT_RESTRICTED_CAST_FROM_ASCII
Definition qstring.h:127
static QString fromLatin1(QByteArrayView ba)
This is an overloaded member function, provided for convenience. It differs from the above function o...
Definition qstring.cpp:5710
QString arg(qlonglong a, int fieldwidth=0, int base=10, QChar fillChar=u' ') const
Definition qstring.cpp:8606
static QString number(int, int base=10)
This is an overloaded member function, provided for convenience. It differs from the above function o...
Definition qstring.cpp:7822
struct QV4::MemoryManager::@638 statistics
Heap::Base * allocString(std::size_t unmanagedSize)
expects size to be aligned
Definition qv4mm.cpp:696
QVector< Value * > m_pendingFreedObjectWrapperValue
Definition qv4mm_p.h:299
Heap::SetObject * weakSets
Definition qv4mm_p.h:301
QV4::ExecutionEngine * engine
Definition qv4mm_p.h:292
Heap::Object * allocObjectWithMemberData(const QV4::VTable *vtable, uint nMembers)
Definition qv4mm.cpp:739
size_t getLargeItemsMem() const
Definition qv4mm.cpp:1089
size_t lastAllocRequestedSlots
Definition qv4mm_p.h:313
ObjectType::Data * allocate(Args &&... args)
Definition qv4mm_p.h:199
ChunkAllocator * chunkAllocator
Definition qv4mm_p.h:293
PersistentValueStorage * m_persistentValues
Definition qv4mm_p.h:297
PersistentValueStorage * m_weakValues
Definition qv4mm_p.h:298
std::size_t unmanagedHeapSize
Definition qv4mm_p.h:303
void registerWeakMap(Heap::MapObject *map)
Definition qv4mm.cpp:1094
void dumpStats() const
Definition qv4mm.cpp:1125
std::size_t usedSlotsAfterLastFullSweep
Definition qv4mm_p.h:305
BlockAllocator blockAllocator
Definition qv4mm_p.h:294
HugeItemAllocator hugeItemAllocator
Definition qv4mm_p.h:296
size_t getUsedMem() const
Definition qv4mm.cpp:1079
Heap::Base * allocData(std::size_t size)
Definition qv4mm.cpp:717
size_t getAllocatedMem() const
Definition qv4mm.cpp:1084
void registerWeakSet(Heap::SetObject *set)
Definition qv4mm.cpp:1100
Heap::MapObject * weakMaps
Definition qv4mm_p.h:300
std::size_t unmanagedHeapSizeGCLimit
Definition qv4mm_p.h:304
static constexpr std::size_t align(std::size_t size)
Definition qv4mm_p.h:117
BlockAllocator icAllocator
Definition qv4mm_p.h:295
QHash< QObjectBiPointer, QV4::WeakValue >::Iterator Iterator
QMap< QString, QString > map
[6]
qSwap(pi, e)
double e
QSet< QString >::iterator it
Combined button and popup list for selecting options.
\qmltype Particle \inqmlmodule QtQuick.Particles
void(* ClassDestroyStatsCallback)(const char *)
Definition qv4mmdefs_p.h:28
static void increaseFreedCountForClass(const char *className)
Definition qv4mm.cpp:261
static size_t dumpBins(BlockAllocator *b, const char *title)
Definition qv4mm.cpp:936
static uint markStackSize
Definition qv4mm.cpp:779
static void freeHugeChunk(ChunkAllocator *chunkAllocator, const HugeItemAllocator::HugeChunk &c, ClassDestroyStatsCallback classCountPtr)
Definition qv4mm.cpp:620
@ MinSlotsGCLimit
Definition qv4mm.cpp:69
@ GCOverallocation
Definition qv4mm.cpp:70
QTextStream & hex(QTextStream &stream)
Calls QTextStream::setIntegerBase(16) on stream and returns stream.
constexpr uint qCountTrailingZeroBits(quint32 v) noexcept
Q_DECL_CONST_FUNCTION QT_POPCOUNT_CONSTEXPR uint qPopulationCount(quint32 v) noexcept
#define Q_UNLIKELY(x)
static bool testBit(long bit, const long *field)
#define Q_GLOBAL_STATIC(TYPE, NAME,...)
#define qDebug
[1]
Definition qlogging.h:160
#define Q_LOGGING_CATEGORY(name,...)
#define Q_DECLARE_LOGGING_CATEGORY(name)
constexpr const T & qMin(const T &a, const T &b)
Definition qminmax.h:40
constexpr const T & qMax(const T &a, const T &b)
Definition qminmax.h:42
GLsizei GLsizei GLenum void * binary
GLboolean GLboolean GLboolean b
GLsizei const GLfloat * v
[13]
const GLfloat * m
GLboolean GLboolean GLboolean GLboolean a
[7]
GLenum GLuint GLintptr GLsizeiptr size
[1]
GLuint index
[2]
GLuint GLuint end
GLdouble GLdouble GLdouble GLdouble top
GLuint start
GLint GLint GLint GLint GLint GLint GLint GLbitfield mask
GLfloat n
GLfloat GLfloat GLfloat GLfloat h
const GLubyte * c
GLdouble GLdouble t
Definition qopenglext.h:243
GLuint64EXT * result
[6]
GLdouble s
[6]
Definition qopenglext.h:235
static constexpr qint64 HeaderSize
#define Q_ASSERT(cond)
Definition qrandom.cpp:47
Q_CORE_EXPORT bool qEnvironmentVariableIsEmpty(const char *varName) noexcept
size_t quintptr
Definition qtypes.h:72
unsigned long long quint64
Definition qtypes.h:56
unsigned int uint
Definition qtypes.h:29
long long qint64
Definition qtypes.h:55
#define SDUMP
Definition qv4mm.cpp:252
#define DEBUG
Definition qv4mm.cpp:37
#define Q_V4_PROFILE_DEALLOC(engine, size, type)
#define Q_V4_PROFILE_ALLOC(engine, size, type)
const char className[16]
[1]
Definition qwizard.cpp:100
QFuture< QSet< QChar > > set
[10]
QString title
[35]
ba setBit(0, true)
QSharedPointer< T > other(t)
[5]
QJSEngine engine
[0]
size_t usedMem() const
Definition qv4mm_p.h:56
uint * allocationStats
Definition qv4mm_p.h:75
size_t usedSlotsAfterLastSweep
Definition qv4mm_p.h:70
std::vector< Chunk * > chunks
Definition qv4mm_p.h:74
void resetBlackBits()
Definition qv4mm.cpp:591
size_t totalSlots() const
Definition qv4mm_p.h:49
size_t allocatedMem() const
Definition qv4mm_p.h:53
void free(Chunk *chunk, size_t size=0)
Definition qv4mm.cpp:230
size_t requiredChunkSize(size_t size)
Definition qv4mm.cpp:197
std::vector< MemorySegment > memorySegments
Definition qv4mm.cpp:209
static void setBit(quintptr *bitmap, size_t index)
Definition qv4mmdefs_p.h:89
HeapItem * first()
static void clearBit(quintptr *bitmap, size_t index)
Definition qv4mmdefs_p.h:95
IdentifierTable * identifierTable
Heap::InternalClass * internalClasses(InternalClassType icType)
WTF::PageAllocation * gcStack
void markObjects(MarkStack *markStack)
int maxGCStackSize() const
MultiplyWrappedQObjectMap * m_multiplyWrappedQObjects
struct QV4::HeapItem::@641::@645 freeData
HeapItem * next
size_t availableSlots
Chunk * chunk() const
size_t usedMem() const
Definition qv4mm_p.h:88
HeapItem * allocate(size_t size)
Definition qv4mm.cpp:597
void sweep(ClassDestroyStatsCallback classCountPtr)
Definition qv4mm.cpp:644
MarkStack(ExecutionEngine *engine)
Definition qv4mm.cpp:781
ExecutionEngine * engine() const
void free(Chunk *chunk, size_t size)
Definition qv4mm.cpp:128
MemorySegment(size_t size)
Definition qv4mm.cpp:83
PageReservation pageReservation
Definition qv4mm.cpp:154
void setBit(size_t index)
Definition qv4mm.cpp:109
bool testBit(size_t index) const
Definition qv4mm.cpp:121
void clearBit(size_t index)
Definition qv4mm.cpp:115
bool contains(Chunk *c) const
Definition qv4mm.cpp:150
MemorySegment(MemorySegment &&other)
Definition qv4mm.cpp:96
static void free(Value *v)
void mark(MarkStack *markStack)
quint16 inlinePropertyOffset
Definition qv4vtable_p.h:62
quint16 nInlineProperties
Definition qv4vtable_p.h:63
Destroy destroy
Definition qv4vtable_p.h:75
QML_NEARLY_ALWAYS_INLINE ManagedPtr managed() const
Definition qv4value_p.h:75
static constexpr Value undefinedValue()
Definition qv4value_p.h:191
IUIAutomationTreeWalker __RPC__deref_out_opt IUIAutomationElement ** parent