TraceAndSweepImpl.mesa
Copyright © 1985, 1986 by Xerox Corporation. All rights reserved.
Russ Atkinson (RRA) May 7, 1986 7:43:04 pm PDT
This module implements a trace-and-sweep garbage collector for Cedar ("DoTraceAndSweepCollection"). It uses auxiliary storage for a reference stack. To allow subsequent incremental collection, DoTraceAndSweepCollection restores reference counts to correct values. No collectible storage is allocated while DoTraceAndSweepCollection is active, nor are any REFs changed in counted storage during that time. Client processes that attempt to do so are suspended until after DoTraceAndSweepCollection finishes.
DIRECTORY
Allocator USING [bsiEscape, ExtendedHeader, HeaderP, LastAddress, logPagesPerQuantum, NHeaderP, NormalHeader, QuantumIndex, wordsPerQuantum],
AllocatorOps USING [AddressToQuantumIndex, BlockSize, EnterAndCallBack, IsValidRef, NHPToREF, quantumMap, REFToNHP, Reset, TAndSFreeObject],
Basics USING [BITAND, CARD, DoubleShiftLeft, LowHalf],
Collector USING [EstablishTAndSProc],
DebuggerSwap USING [CallDebugger],
LoadState USING [Acquire, ConfigID, EnumerateAllModules, GlobalFrameToType, local, ModuleIndex, ModuleToGlobalFrame, Release],
PrincOps USING [GlobalFrameHandle, logWordsPerPage, wordsPerPage],
RCMap USING [nullIndex],
RCMicrocodeOps USING [ASSIGNREF, OnZ, RCOverflowOccurred, RCUnderflowOccurred, ZCTFull],
RTFrameHeapSnapshot USING [AcquireFrameHeapSnapshot, MapUncountedBodies, ReleaseFrameHeapSnapshot],
RTTypesBasicPrivate USING [MapRefs, MapTiRcmx, NumberPackageRefs],
SafeStorage USING [GetReferentType, nullType, ReclaimCollectibleObjects, Type],
VM USING [AddressForPageNumber, Free, PageNumberForAddress, SimpleAllocate],
ZCT USING [EnterAndCallBack, EnterRCOvAndCallBack, InnerHandleRCOverflow, InnerHandleRCUnderflow, RCOvReset, zct, zctBlockWords];
TraceAndSweepImpl: PROGRAM
IMPORTS AllocatorOps, Basics, Collector, DebuggerSwap, LoadState, RCMicrocodeOps, RTFrameHeapSnapshot, RTTypesBasicPrivate, SafeStorage, VM, ZCT
= BEGIN OPEN Allocator;
CARD: TYPE = Basics.CARD;
Type: TYPE = SafeStorage.Type;
nullType: Type = SafeStorage.nullType;
-- Variables
-- All collector state lives in this single pre-allocated record, since no
-- collectible storage may be allocated while the collector is running.
-- NOTE(review): the plain-text listing dropped the "←" from several default
-- initializers (e.g. "BOOLFALSE"); they are restored here.
global: REF GlobalRep ← NEW[GlobalRep];
GlobalRep: TYPE = RECORD [
objectsSeen: CARD ← 0, -- objects visited while clearing ref counts
objectsReclaimed: CARD ← 0, -- objects freed during the sweep
objectsKept: CARD ← 0, -- objects retained during the sweep
nGlobalFrames: CARD ← 0,
nRCGlobalFrames: CARD ← 0, -- global frames that contain counted REFs
nLocalFrames: CARD ← 0,
nRetainedObjects: CARD ← 0,
-- number of objects retained only because REFs appear on the stack
nRefsPushed: CARD ← 0, -- total refs pushed on the auxiliary ref stack
suspect: LONG POINTER ← LOOPHOLE[LONG[-1]],
suspectRef: LONG POINTER ← NIL, -- a LOOPHOLE'd REF
-- support for WhoPointsTo and FindCircularGarbage
destructiveTestEnabled: BOOL ← FALSE,
-- destructiveTestEnabled TRUE => do a second TandS without the stack scan, then punt
findingCircularGarbage: BOOL ← FALSE,
-- findingCircularGarbage TRUE => remember stuff found, don't reclaim it
forgiving: BOOL ← TRUE,
-- IF TRUE, then logs finalizable objects with counts that are too small
-- IF FALSE, then crashes for finalizable objects with counts that are too small
suspectRefFoundInLocalFrame: BOOL ← FALSE,
circularStructureFound: BOOL ← FALSE,
forgivingLog: ForgivingLog ← NIL, -- allocated lazily from VM when first needed
forgivingLogCount: INT ← 0,
forgivingLogIndex: [0..ForgivingLogEntries) ← 0, -- wraps; log is circular
refStackChain: RefStack ← NIL, -- chain of stack nodes holding pushed refs
refStackFree: RefStack ← NIL, -- free list of empty stack nodes
gfhToSuspectRef: PrincOps.GlobalFrameHandle ← NIL,
nextRpaIndex: CARDINAL ← 0,
rpa: ARRAY [0..maxNRPIndex] OF LONG POINTER ← ALL[NIL]
];
maxNRPIndex: CARDINAL = 20;
SuspectSeen: SIGNAL = CODE;
-- ForgivingLog declarations
-- Circular log of finalizable objects whose reference counts were found
-- too small (see visitOneObject); kept in raw VM pages.
ForgivingLogPages: NAT = 4;
ForgivingLog: TYPE = LONG POINTER TO ForgivingLogRep;
ForgivingLogRep: TYPE = ARRAY [0..ForgivingLogEntries) OF ForgivingLogEntry;
-- One entry: the object's header pointer plus a snapshot of its header.
ForgivingLogEntry: TYPE = RECORD [
nhp: Allocator.NHeaderP,
header: Allocator.NormalHeader
];
ForgivingLogEntries: NAT = (ForgivingLogPages*PrincOps.wordsPerPage)/SIZE[ForgivingLogEntry];
-- Auxiliary stack of LOOPHOLE'd REFs awaiting tracing; nodes are raw VM
-- pages chained through next (see PushRef/PopRef/FreeRefStacks).
RefStackRep: TYPE = RECORD [
next: RefStack ← NIL,
size: CARDINAL ← 0,
max: CARDINAL ← RefStackMax,
refs: SEQUENCE COMPUTED CARDINAL OF LONG POINTER
];
RefStack: TYPE = LONG POINTER TO RefStackRep;
RefStackPages: NAT = 4;
RefStackMax: NAT = (PrincOps.wordsPerPage*RefStackPages - (SIZE[RefStackRep])) / (SIZE[LONG POINTER]);
-- Leave room for the header info
-- Referer bookkeeping for WhoPointsTo: direct referers of the suspect...
APNodes: TYPE = ARRAY [0..maxNReferers) OF NHeaderP;
suspectReferers: REF APNodes ← NEW[APNodes ← ALL[NIL]];
maxNReferers: NAT = 10;
nSuspectReferers: NAT ← 0;
-- ...and referers seen on earlier iterations, used to detect cycles.
AIPNodes: TYPE = ARRAY [0..maxNInnerReferers) OF NHeaderP;
innerReferers: REF AIPNodes ← NEW[AIPNodes ← ALL[NIL]];
maxNInnerReferers: NAT = 50;
nInnerReferers: NAT ← 0;
-- PROCS
WhoPointsTo: PROC [nhp: NHeaderP, cycleLimit: NAT ← 20] RETURNS [ referers: REF APNodes, gfh: PrincOps.GlobalFrameHandle, foundInLocalFrame, foundInCircularStructure: BOOL, cycles: NAT ← 0] = {
-- Look up the referer chain. Stop if a gfh found, or if foundInLocalFrame, or if a loop is found, or if more than 1 referer is found, or if no referers are found (changed conserv scan)
-- Each iteration sets global.suspectRef and runs a full trace-and-sweep;
-- the trace procs (TraceLocalFrame, TraceGlobalFrame, TraceRefsInObject)
-- record who refers to the suspect in the globals tested below.
FOR i: NAT IN [0..maxNReferers) DO suspectReferers[i] ← NIL ENDLOOP;
suspectReferers[0] ← nhp;
nSuspectReferers ← 1;
FOR i: NAT IN [0..maxNInnerReferers) DO innerReferers[i] ← NIL ENDLOOP;
nInnerReferers ← 0;
global.gfhToSuspectRef ← NIL;
global.suspectRefFoundInLocalFrame ← FALSE;
global.circularStructureFound ← FALSE;
UNTIL global.gfhToSuspectRef # NIL
OR global.circularStructureFound
OR global.suspectRefFoundInLocalFrame
OR nSuspectReferers > 1
OR nSuspectReferers = 0
OR cycles = cycleLimit
DO
-- Chase one level up the chain: the single referer found so far
-- becomes the new suspect.
global.suspectRef ← LOOPHOLE[AllocatorOps.NHPToREF[suspectReferers[0]], LONG POINTER];
FOR i: NAT IN [0..maxNReferers) DO suspectReferers[i] ← NIL ENDLOOP;
nSuspectReferers ← 0;
SafeStorage.ReclaimCollectibleObjects[suspendMe: TRUE, traceAndSweep: TRUE];
cycles ← cycles + 1;
ENDLOOP;
global.suspectRef ← NIL;
RETURN[
referers: suspectReferers,
gfh: global.gfhToSuspectRef,
foundInLocalFrame: global.suspectRefFoundInLocalFrame,
foundInCircularStructure: global.circularStructureFound,
cycles: cycles];
};
FindCircularGarbage: PROC = {
-- This guy first does a standard incremental collection. Then it goes thru the motions of a TandS, but does not reclaim any garbage. Instead, it leaves the marked bit set.
-- Objects still unmarked afterwards are garbage held only by cycles.
SafeStorage.ReclaimCollectibleObjects[suspendMe: TRUE];
global.findingCircularGarbage ← TRUE;
SafeStorage.ReclaimCollectibleObjects[suspendMe: TRUE, traceAndSweep: TRUE];
global.findingCircularGarbage ← FALSE;
};
ActuallyDoIt: PROC [ignoreStack: BOOL ← FALSE] = {
-- Core of the collection: clear every object's ref-count state, mark and
-- count from the frame heap and all global frames, trace transitively,
-- then sweep.  ignoreStack = TRUE skips the local-frame scan (used only by
-- the destructive test).
-- NOTE(review): the listing had lost the "←" in the parameter default and
-- scattered the rp/wp masking statement; both restored here.
clearRC: PROC [nhp: NHeaderP] = {
-- Reset the ref-counting bookkeeping in one object header.
global.objectsSeen ← global.objectsSeen + 1;
nhp.inZCT ← FALSE;
nhp.maybeOnStack ← FALSE;
-- Leave the f flag and the type field alone
nhp.refCount ← 0;
nhp.rcOverflowed ← FALSE;
};
-- Initialize the statistics
global.objectsSeen ← 0;
global.objectsKept ← global.objectsReclaimed ← 0;
global.nRCGlobalFrames ← global.nGlobalFrames ← global.nLocalFrames ← 0;
global.nRetainedObjects ← 0;
global.nextRpaIndex ← 0;
global.nRefsPushed ← 0;
-- Clear all free lists
ZCT.zct.bsiToFreeList ← ALL[NIL];
AllocatorOps.Reset[];
-- Clear the zero-count table
ZCT.zct.markingDecrements ← FALSE;
-- NOTE(review): reconstructed from a garbled listing — rounds rp down to a
-- zctBlockWords boundary and resets wp to the same place; confirm against
-- an original listing if available.
ZCT.zct.rp ← ZCT.zct.wp ← ZCT.zct.rp - Basics.BITAND[
Basics.LowHalf[LOOPHOLE[ZCT.zct.rp, CARD]],
ZCT.zctBlockWords-1
];
-- mask rp by (LastAddress-(zctBlockWords-1))
-- Clear all ref counts and count objects
ZCT.RCOvReset[];
AllObjects[clearRC];
-- trace from all local and global frames in the world
IF NOT ignoreStack THEN RTFrameHeapSnapshot.MapUncountedBodies[TraceLocalFrame];
[] ← LoadState.local.EnumerateAllModules[order: newestFirst, proc: TraceGlobalFrame];
-- continue marking from the ref stack and from objects
UNTIL global.refStackChain = NIL DO
TraceRefsInObject[LOOPHOLE[PopRef[], REF]]
ENDLOOP;
AllObjects[fudgeFinalized];
-- RRA: trace from all objects with finalization and RC = 0; otherwise we get situations where subsidiary refs are visible from the object when it is taken from the finalization queue, but the subsidiary objects have been reclaimed!
UNTIL global.refStackChain = NIL DO
TraceRefsInObject[LOOPHOLE[PopRef[], REF]]
ENDLOOP;
-- We have to trace from objects found in that last sweep.
FreeRefStacks[];
-- Free the objects:
-- Reclaim unmarked and free stuff, rebuild the zct and remember retained stuff due to frame heap only
AllObjects[visitOneObject];
-- Every object seen in the first pass must have been either kept or reclaimed.
IF global.objectsReclaimed + global.objectsKept # global.objectsSeen THEN ERROR;
};
DoTraceAndSweepCollection: PROC = {
-- Entry point registered with the Collector.  Acquires, in strict order:
-- loader lock, allocator lock, RCOverflowBank lock, ZCT lock; only with
-- all four held does the actual collection (ActuallyDoIt) run.  The nested
-- procedures read bottom-up: each is the callback for the next lock level.
haveAllocatorLocked: PROC = {
-- here with the loader and allocator locked
haveRCOvLocked: PROC = {
-- here with the loader, allocator and RCOvBank locked
haveZCTLocked: PROC = {
-- here with the loader, allocator, RCOvBank and ZCT locked
RTFrameHeapSnapshot.AcquireFrameHeapSnapshot[];
{ ENABLE UNWIND => RTFrameHeapSnapshot.ReleaseFrameHeapSnapshot[];
ActuallyDoIt[ignoreStack: FALSE];
IF global.destructiveTestEnabled THEN {
-- Second pass ignoring the stack: whatever it reclaims was retained
-- only by the conservative frame scan.  Then punt to the debugger.
ActuallyDoIt[ignoreStack: TRUE];
DebuggerSwap.CallDebugger["destructive collection finished."L];
};
RTFrameHeapSnapshot.ReleaseFrameHeapSnapshot[];
};
};
-- START haveRCOvLocked HERE
-- Acquire the lock on the ref counting machinery, do the work, then release the lock.
ZCT.EnterAndCallBack[haveZCTLocked];
};
-- START haveAllocatorLocked HERE
-- Acquire the lock on the RCOverflowBank, do the work, then release the lock.
ZCT.EnterRCOvAndCallBack[haveRCOvLocked];
};
-- START DoTraceAndSweepCollection HERE
-- First, acquire the lock on the loader (REFs in GF's are counted)
LoadState.local.Acquire[exclusive];
-- Next, acquire the lock on the allocator, do the work, then release the lock.
AllocatorOps.EnterAndCallBack[haveAllocatorLocked ! UNWIND => LoadState.local.Release[]];
-- Release the loader's lock
LoadState.local.Release[];
};
fudgeFinalized: PROC [nhp: NHeaderP] = {
-- Trace from an object with finalization enabled and with RC = 0. We have already traced out all objects that started from local or global frames.
header: Allocator.NormalHeader ← nhp^;
-- This sample is useful for debugging
SELECT TRUE FROM
header.f AND NOT header.inZCT => {
-- This object has finalization enabled, and has never been seen! So we trace out references from it now. However, it is not proper to increment the RC for this visit.
ref: REF ← AllocatorOps.NHPToREF[nhp];
nhp.inZCT ← TRUE; -- mark it seen so it is not traced again
-- Push only if its type's RC map says it can contain REFs.
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
ENDCASE;
};
visitOneObject: PROC [nhp: NHeaderP] = {
-- Reclaim unmarked and free stuff, rebuild the zct and remember retained stuff due to only frame heap
header: Allocator.NormalHeader ← nhp^;
-- This sample is useful for debugging
SELECT TRUE FROM
NOT header.f AND NOT header.inZCT => {
-- This one is NOT being retained (and has no finalization)
IF global.findingCircularGarbage
THEN global.objectsKept ← global.objectsKept + 1
ELSE {global.objectsReclaimed ← global.objectsReclaimed + 1; AllocatorOps.TAndSFreeObject[nhp]};
RETURN;
};
header.f => {
-- This object has finalization enabled, so adjust refCount to reflect the package count.
packageCount: NAT ← RTTypesBasicPrivate.NumberPackageRefs[nhp.type];
dummy: REF ← AllocatorOps.NHPToREF[nhp];
FOR i: CARDINAL IN [1..packageCount] DO
IF nhp.refCount = 0 AND NOT nhp.rcOverflowed THEN {
-- RRA: We are trying to decrement a reference count for a finalizable object that already has a reference count of 0. The most likely cause of this is that a permanent reference is not being maintained to the object. If only a circularity is holding the count high, then this bug may ONLY surface during trace and sweep. We keep a log of this stuff to allow the world to continue. If we are not global.forgiving this fault we crash and burn immediately.
IF global.forgivingLog = NIL THEN
global.forgivingLog ← VM.AddressForPageNumber[VM.SimpleAllocate[ForgivingLogPages].page];
global.forgivingLog[global.forgivingLogIndex] ← ForgivingLogEntry[nhp, header];
global.forgivingLogCount ← global.forgivingLogCount + 1;
IF NOT global.forgiving THEN DebuggerSwap.CallDebugger["Unforgivable!"L];
-- Advance the circular log index.
IF global.forgivingLogIndex = ForgivingLogEntries-1
THEN global.forgivingLogIndex ← 0
ELSE global.forgivingLogIndex ← global.forgivingLogIndex + 1;
EXIT;
};
-- Decrement the reference count
RCMicrocodeOps.ASSIGNREF[rhs: NIL, lhs: @dummy
! RCMicrocodeOps.RCUnderflowOccurred => {
ZCT.InnerHandleRCUnderflow[dummy]; RETRY}];
ENDLOOP;
header ← nhp^;
-- Resample to make it all consistent
};
ENDCASE;
-- At this point we keep the object in play
global.objectsKept ← global.objectsKept + 1;
IF NOT global.findingCircularGarbage THEN
-- Clear the mark.
nhp.inZCT ← FALSE;
IF header.refCount = 0 AND NOT header.rcOverflowed THEN {
-- The reference count is 0.
IF NOT header.f THEN {
-- Retained only by the conservative frame-heap scan; remember a sample
-- of such objects in rpa for inspection (index sticks at the last slot).
global.rpa[global.nextRpaIndex] ← LOOPHOLE[AllocatorOps.NHPToREF[nhp], LONG POINTER];
IF global.nextRpaIndex < maxNRPIndex THEN global.nextRpaIndex ← global.nextRpaIndex + 1;
global.nRetainedObjects ← global.nRetainedObjects + 1;
};
-- put it on zct
RCMicrocodeOps.OnZ[nhp
! RCMicrocodeOps.ZCTFull => DebuggerSwap.CallDebugger["TAndS disaster."L]];
};
};
TraceLocalFrame: PROC [d: LONG DESCRIPTOR FOR ARRAY OF WORD] = {
-- mark refs in a local frame
-- Conservative scan: every pointer-sized slot in the frame is tested with
-- IsValidRef; valid REFs are marked (inZCT) and pushed for tracing.  Note
-- that reference counts are NOT incremented for frame-heap references.
pa: LONG POINTER TO LONG POINTER
= LOOPHOLE[BASE[d], LONG POINTER TO LONG POINTER];
nWords: CARDINAL = LENGTH[d];
global.nLocalFrames ← global.nLocalFrames + 1;
IF nWords >= SIZE[REF] THEN
FOR i: CARDINAL IN [0..nWords-SIZE[REF]] DO
addr: LONG POINTER = (pa+i)^;
IF addr # NIL AND AllocatorOps.IsValidRef[addr] THEN {
-- if this REF is valid, non-NIL
ref: REF = LOOPHOLE[addr, REF];
nhp: Allocator.NHeaderP ← AllocatorOps.REFToNHP[ref];
-- Support for WhoPointsTo: note a stack hit on the suspect.
IF global.suspectRef # NIL AND addr = global.suspectRef THEN
global.suspectRefFoundInLocalFrame ← TRUE;
IF NOT nhp.inZCT THEN {
-- this REF is valid and not seen before
nhp.inZCT ← TRUE;
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
};
ENDLOOP;
};
TraceGlobalFrame: PROC [configID: LoadState.ConfigID, moduleIndex: LoadState.ModuleIndex] RETURNS [stop: BOOL ← FALSE] = {
-- this procedure is used to mark refs from a global frame
-- the algorithm is essentially the same as for regular objects
-- Unlike the conservative local-frame scan, REFs in global frames are
-- counted, so each one found also gets its reference count incremented.
-- NOTE(review): the listing had "RETURNS [stop: BOOLFALSE]"; the dropped
-- "←" in the default result is restored here.
gfh: PrincOps.GlobalFrameHandle
= LoadState.local.ModuleToGlobalFrame[configID, moduleIndex];
type: Type ← LoadState.local.GlobalFrameToType[gfh];
procRef: PROC [ref: REF] = {
-- Called by MapRefs for each REF-containing slot of the frame.
SELECT LOOPHOLE[ref, LONG POINTER] FROM
NIL => RETURN;
global.suspectRef => global.gfhToSuspectRef ← gfh; -- WhoPointsTo support
ENDCASE;
{
-- Mark the object as being seen.
nhp: Allocator.NHeaderP ← AllocatorOps.REFToNHP[ref];
dummy: REF ← NIL;
IF NOT nhp.inZCT THEN {
-- this REF is valid and not seen before
nhp.inZCT ← TRUE;
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
-- Increment the reference count (assigning into dummy does the increment)
RCMicrocodeOps.ASSIGNREF[rhs: ref, lhs: @dummy
! RCMicrocodeOps.RCOverflowOccurred => {
ZCT.InnerHandleRCOverflow[ref]; RETRY};
];
};
};
global.nGlobalFrames ← global.nGlobalFrames + 1;
IF type # nullType THEN {
global.nRCGlobalFrames ← global.nRCGlobalFrames + 1;
RTTypesBasicPrivate.MapRefs[LONG[gfh], RTTypesBasicPrivate.MapTiRcmx[type], procRef];
};
RETURN[FALSE];
};
TraceRefsInObject: PROC [container: REF] = {
-- traces all references from the given reference
-- REFs found are marked, pushed, and have their counts incremented; the
-- bottom loop avoids stack traffic when an object pushed exactly one
-- child by tracing that child directly.
type: Type ← SafeStorage.GetReferentType[container];
refererPushed: BOOLFALSE;
procRef: PROC [ref: REF] = {
-- Called by MapRefs for each REF-containing slot of the container.
SELECT LOOPHOLE[ref, LONG POINTER] FROM
NIL => RETURN;
-- WhoPointsTo support: container refers to the suspect; record it and
-- detect whether we have looped back to a previously seen referer.
global.suspectRef => IF NOT refererPushed THEN {
nhp: NHeaderP = AllocatorOps.REFToNHP[container];
refererPushed ← TRUE;
IF nSuspectReferers < maxNReferers THEN {
suspectReferers[nSuspectReferers] ← nhp;
nSuspectReferers ← nSuspectReferers + 1;
};
FOR i: NAT IN [0..nInnerReferers) DO
IF nhp = innerReferers[i] THEN GO TO circular;
ENDLOOP;
IF NOT global.circularStructureFound AND nInnerReferers < maxNInnerReferers THEN {
innerReferers[nInnerReferers] ← nhp;
nInnerReferers ← nInnerReferers + 1;
};
EXITS circular => global.circularStructureFound ← TRUE;
};
ENDCASE;
{
-- Mark the object as being seen.
nhp: Allocator.NHeaderP ← AllocatorOps.REFToNHP[ref];
dummy: REFNIL;
IF NOT nhp.inZCT THEN {
-- this REF is valid and not seen before
nhp.inZCT ← TRUE;
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
-- Increment the reference count
RCMicrocodeOps.ASSIGNREF[rhs: ref, lhs: @dummy
! RCMicrocodeOps.RCOverflowOccurred => {
ZCT.InnerHandleRCOverflow[ref]; RETRY};
];
};
};
DO
oldNRefsPushed: CARD = global.nRefsPushed;
RTTypesBasicPrivate.MapRefs[
LOOPHOLE[container, LONG POINTER],
RTTypesBasicPrivate.MapTiRcmx[type],
procRef];
-- If this object pushed exactly one new ref, pop it and trace it here
-- rather than returning to the caller's pop loop; otherwise let the
-- caller drain the stack.
IF global.nRefsPushed # oldNRefsPushed + 1 THEN RETURN;
container ← LOOPHOLE[PopRef[], REF];
type ← SafeStorage.GetReferentType[container];
refererPushed ← FALSE;
ENDLOOP;
};
AllObjects: PROC [visit: PROC [NHeaderP]] = {
-- this proc visits all collectible objects (free AND allocated)
-- Walks the quantum map: for each contiguous run of mapped quanta, parses
-- the run header-by-header (blockSize steps) and visits each object.
qi: QuantumIndex ← FIRST[QuantumIndex];
DO
-- Find the beginning of the next run of quanta from the quantum map
hp: HeaderP;
blockSize: INT ← 0;
UNTIL AllocatorOps.quantumMap[qi] DO
IF qi = LAST[QuantumIndex] THEN RETURN;
qi ← qi+1;
ENDLOOP;
-- Start parsing at the beginning of this run
FOR hp ← QuantumIndexToLP[qi], hp + blockSize
WHILE
LOOPHOLE[hp, CARD] < LastAddress
AND
AllocatorOps.quantumMap[LPToQuantumIndex[hp]] DO
nhp: NHeaderP ← LOOPHOLE[hp];
-- For an extended header, the normal header lies at the end of the
-- extension; adjust nhp to point at it.
IF nhp.blockSizeIndex = bsiEscape THEN
nhp ← nhp + (SIZE[ExtendedHeader] - SIZE[NormalHeader]);
blockSize ← AllocatorOps.BlockSize[hp];
visit[nhp];
ENDLOOP;
IF LOOPHOLE[hp, CARD] >= LastAddress THEN RETURN;
-- Resume the quantum-map search just past this run.
qi ← LPToQuantumIndex[hp];
ENDLOOP;
};
logWordsPerQuantum: NAT = PrincOps.logWordsPerPage+Allocator.logPagesPerQuantum;
-- The shift distance = Log2[wordsPerPage*pagesPerQuantum]
-- Map an address to the index of the quantum containing it.
LPToQuantumIndex: PROC [lp: LONG POINTER] RETURNS [QuantumIndex] = INLINE {
RETURN[AllocatorOps.AddressToQuantumIndex[LOOPHOLE[lp]]];
};
-- Map a quantum index to the address of its first word (index shifted
-- left by logWordsPerQuantum).
QuantumIndexToLP: PROC [qi: QuantumIndex] RETURNS [LONG POINTER] = INLINE {
RETURN [Basics.DoubleShiftLeft[ [pair[lo: qi, hi: 0]], logWordsPerQuantum].lp ];
};
PushRef: PROC [pref: LONG POINTER] = {
-- pushes ref to object onto the reference stack
-- Stack nodes are raw VM pages; a new node comes from the free list when
-- possible, otherwise from VM.SimpleAllocate (never from counted storage,
-- which must not be allocated during collection).
stack: RefStack ← global.refStackChain;
IF stack = NIL OR stack.size = stack.max THEN {
-- time to get a new stack node
IF global.refStackFree = NIL
-- oh well, nothing comes for free
THEN {
stack ← VM.AddressForPageNumber[VM.SimpleAllocate[RefStackPages].page];
stack.next ← NIL;
stack.size ← 0;
stack.max ← RefStackMax;
}
ELSE {
-- recycle a node from the free list
stack ← global.refStackFree;
global.refStackFree ← stack.next;
};
stack.next ← global.refStackChain;
global.refStackChain ← stack;
};
stack[stack.size] ← pref;
stack.size ← stack.size + 1;
global.nRefsPushed ← global.nRefsPushed + 1;
};
PopRef: PROC RETURNS [pref: LONG POINTER] = {
-- pops ref to object from the reference stack
-- Returns NIL when the stack is empty.  An exhausted head node is moved
-- to the free list (for reuse by PushRef) before popping from the next.
stack: RefStack ← global.refStackChain;
IF stack # NIL THEN {
size: CARDINAL ← stack.size;
IF size = 0 THEN {
-- head node is empty: recycle it and advance to the next node
global.refStackChain ← stack.next;
stack.next ← global.refStackFree;
global.refStackFree ← stack;
IF (stack ← global.refStackChain) = NIL THEN RETURN [NIL];
IF (size ← stack.size) = 0 THEN ERROR; -- non-head nodes are never empty
};
size ← size - 1;
stack.size ← size;
RETURN [stack[size]];
};
RETURN [NIL];
};
FreeRefStacks: PROC = {
-- Return all (now empty) ref-stack nodes to VM.  Called only after the
-- trace has drained the stack, so a non-NIL chain is a bug.
IF global.refStackChain # NIL THEN ERROR; -- should never happen, but check anyway
WHILE global.refStackFree # NIL DO
stack: RefStack ← global.refStackFree.next;
VM.Free[[page: VM.PageNumberForAddress[global.refStackFree], count: RefStackPages]];
global.refStackFree ← stack
ENDLOOP;
};
-- START TraceAndSweepImpl HERE
-- Module initialization: register DoTraceAndSweepCollection as the
-- collector's trace-and-sweep procedure.
Collector.EstablishTAndSProc[DoTraceAndSweepCollection]
END.