DIRECTORY
Allocator USING [bsiEscape, ExtendedHeader, HeaderP, LastAddress, logPagesPerQuantum, NHeaderP, NormalHeader, QuantumIndex, wordsPerQuantum],
AllocatorOps USING [AddressToQuantumIndex, BlockSize, EnterAndCallBack, IsValidRef, NHPToREF, quantumMap, REFToNHP, Reset, TAndSFreeObject],
AMBridge USING [RefFromTV, TVForReferent, TVToLC],
AMModelContextImpl USING [GFContextRec],
AMTypes USING [Class, Error, Index, IndexToTV, IsComputed, IsNil, IsOverlaid, NComponents, Referent, TypeClass, TV, TVType, UnderClass, UnderType, Variant],
Basics USING [BITAND, CARD, DoubleShiftLeft, LowHalf],
CircularGarbageTraceAndSweep USING [APNodes, maxNReferers],
Collector USING [EstablishTAndSProc],
DebuggerSwap USING [CallDebugger],
List USING [Reverse],
LoadState USING [Acquire, ConfigID, EnumerateAllModules, GlobalFrameToType, local, ModuleIndex, ModuleToGlobalFrame, Release],
PrincOps USING [GlobalFrameHandle, logWordsPerPage, wordsPerPage],
Process USING [MsecToTicks, Pause],
RCMap USING [nullIndex],
RCMicrocodeOps USING [ASSIGNREF, OnZ, RCOverflowOccurred, RCUnderflowOccurred, ZCTFull],
Rope USING [RopeRep],
RTFrameHeapSnapshot USING [AcquireFrameHeapSnapshot, MapUncountedBodies, ReleaseFrameHeapSnapshot],
RTTypesBasicPrivate USING [MapRefs, MapTiRcmx, NumberPackageRefs],
RTTypesPrivate USING [TypedVariableRec],
SafeStorage USING [GetReferentType, nullType, ReclaimCollectibleObjects, Type, WaitForCollectorDone],
UnsafeStorage USING [GetSystemUZone],
VM USING [AddressForPageNumber, Free, PageNumberForAddress, SimpleAllocate],
ZCT USING [EnterAndCallBack, EnterRCOvAndCallBack, InnerHandleRCOverflow, InnerHandleRCUnderflow, RCOvReset, zct, zctBlockWords];
CircularGarbageTraceAndSweepImpl: PROGRAM
IMPORTS AllocatorOps, AMBridge, AMTypes, Basics, Collector, DebuggerSwap, List, LoadState, Process, RCMicrocodeOps, RTFrameHeapSnapshot, RTTypesBasicPrivate, SafeStorage, UnsafeStorage, VM, ZCT
EXPORTS CircularGarbageTraceAndSweep
SHARES AMModelContextImpl, Rope
= BEGIN OPEN Allocator;
CARD: TYPE = Basics.CARD;
Type: TYPE = SafeStorage.Type;
nullType: Type = SafeStorage.nullType;
-- Variables
nRetainedObjects: CARD ← 0; -- number of objects retained only because REFs appear on the stack
maxNRPIndex: CARDINAL = 20;
rpa: ARRAY [0..maxNRPIndex] OF LONG POINTER ← ALL[NIL];
nextRpaIndex: CARDINAL ← 0;
destructiveTestEnabled: BOOL ← FALSE;
-- destructiveTestEnabled TRUE => do a second TandS without the stack scan, then punt
findingCircularGarbage: BOOL ← FALSE;
-- findingCircularGarbage TRUE => remember stuff found; don't reclaim it
circularGarbageObject: TYPE = RECORD [
put: INT ← 0,
max: INT ← 0,
item: SEQUENCE size: CARDINAL OF NHeaderP
];
circularGarbageSeq: LONG POINTER TO circularGarbageObject ← NIL;
typedVariableType: Type;
listOfGFContextRecType: Type;
ropeRepType: Type;
refStackChain: RefStack ← NIL;
refStackFree: RefStack ← NIL;
nRefsPushed: INT ← 0;
rememberRopes: BOOL;
-- interesting statistics
objectsSeen: CARD ← 0;
objectsReclaimed: CARD ← 0;
objectsKept: CARD ← 0;
objectsGarbage: CARD ← 0;
nGlobalFrames: CARD ← 0;
nRCGlobalFrames: CARD ← 0;
nLocalFrames: CARD ← 0;
-- debugging aids
SuspectSeen: SIGNAL = CODE;
suspect: LONG POINTER TO UNSPECIFIED ← LOOPHOLE[LONG[-1]];
forgiving: BOOL ← TRUE;
-- IF TRUE, then logs finalizable objects with counts that are too small
-- IF FALSE, then crashes for finalizable objects with counts that are too small
forgivingLog: ForgivingLog ← NIL;
forgivingLogCount: INT ← 0;
forgivingLogIndex: [0..ForgivingLogEntries) ← 0;
-- ForgivingLog declarations
ForgivingLogPages: NAT = 4;
ForgivingLog: TYPE = LONG POINTER TO ForgivingLogRep;
ForgivingLogRep: TYPE = ARRAY [0..ForgivingLogEntries) OF ForgivingLogEntry;
ForgivingLogEntry: TYPE = RECORD [
nhp: Allocator.NHeaderP,
header: Allocator.NormalHeader
];
ForgivingLogEntries: NAT = (ForgivingLogPages*PrincOps.wordsPerPage)/SIZE[ForgivingLogEntry];
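-- Sizing sketch, assuming typical D-machine sizes (wordsPerPage = 256, 2-word pointers; these are assumptions, not fixed by this file): a ForgivingLogEntry is 2+2 = 4 words, so ForgivingLogEntries = (4*256)/4 = 256 entries.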
-- TYPEs and constants
RefStackRep: TYPE = RECORD [
next: RefStack ← NIL,
size: CARDINAL ← 0,
max: CARDINAL ← RefStackMax,
refs: SEQUENCE COMPUTED CARDINAL OF LONG POINTER
];
RefStack: TYPE = LONG POINTER TO RefStackRep;
RefStackPages: NAT = 4;
RefStackMax: NAT = (PrincOps.wordsPerPage*RefStackPages - SIZE[RefStackRep]) / SIZE[LONG POINTER];
-- Leave room for the header info
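-- Sizing sketch, under the same machine-dependent assumptions as above: the RefStackRep header (next + size + max) is 4 words, so RefStackMax = (256*4 - 4)/2 = 510 pointers per stack node.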
-- support for WhoPointsTo and FindCircularGarbage
suspectRef: LONG POINTER ← NIL; -- a LOOPHOLE'd REF
APNodes: TYPE = CircularGarbageTraceAndSweep.APNodes;
suspectReferers: REF APNodes ← NEW[APNodes ← ALL[NIL]];
maxNReferers: NAT = CircularGarbageTraceAndSweep.maxNReferers;
nSuspectReferers: NAT ← 0;
maxNInnerReferers: NAT = 50;
AIPNodes: TYPE = ARRAY [0..maxNInnerReferers) OF NHeaderP;
innerReferers: REF AIPNodes ← NEW[AIPNodes ← ALL[NIL]];
nInnerReferers: NAT ← 0;
gfhToSuspectRef: PrincOps.GlobalFrameHandle ← NIL;
suspectRefFoundInLocalFrame: BOOL ← FALSE;
circularStructureFound: BOOL ← FALSE;
-- PROCS
REFToNHP: PUBLIC PROC [ref: REF ANY] RETURNS [Allocator.NHeaderP] = {
RETURN[AllocatorOps.REFToNHP[ref]];
};
NHPToREF: PUBLIC PROC [nhp: Allocator.NHeaderP] RETURNS [REF ANY] = {
RETURN[AllocatorOps.NHPToREF[nhp]];
};
WhoPointsToRef: PUBLIC PROC [ref: REF, cycleLimit: NAT ← 20]
RETURNS [referers: LIST OF REF ← NIL, gfh: PrincOps.GlobalFrameHandle, foundInLocalFrame, foundInCircularStructure: BOOL, cycles: NAT ← 0] = {
nhp: NHeaderP;
referersAPNodes: REF APNodes;
nhp ← AllocatorOps.REFToNHP[ref];
[referersAPNodes, gfh, foundInLocalFrame, foundInCircularStructure, cycles] ← WhoPointsTo[nhp, cycleLimit];
FOR index: INT IN [0..maxNReferers) DO
IF referersAPNodes[index] = NIL THEN EXIT;
referers ← CONS[AllocatorOps.NHPToREF[referersAPNodes[index]], referers];
ENDLOOP;
referers ← List.Reverse[referers]; -- reverse once, after the loop, so referers come back in discovery order
RETURN;
};
WhoPointsTo: PUBLIC PROC [nhp: NHeaderP, cycleLimit: NAT ← 20]
RETURNS [referers: REF APNodes, gfh: PrincOps.GlobalFrameHandle, foundInLocalFrame, foundInCircularStructure: BOOL, cycles: NAT ← 0] = {
-- Look up the referer chain. Stop if a gfh is found, if the REF is found in a local frame, if a loop is found, if more than one referer is found, or if no referers are found (changed by the conservative scan).
FOR i: NAT IN [0..maxNReferers) DO suspectReferers[i] ← NIL ENDLOOP;
suspectReferers[0] ← nhp;
nSuspectReferers ← 1;
FOR i: NAT IN [0..maxNInnerReferers) DO innerReferers[i] ← NIL ENDLOOP;
nInnerReferers ← 0;
gfhToSuspectRef ← NIL;
suspectRefFoundInLocalFrame ← FALSE;
circularStructureFound ← FALSE;
UNTIL gfhToSuspectRef # NIL
OR circularStructureFound
OR suspectRefFoundInLocalFrame
OR nSuspectReferers > 1
OR nSuspectReferers = 0
OR cycles = cycleLimit
DO
suspectRef ← LOOPHOLE[AllocatorOps.NHPToREF[suspectReferers[0]], LONG POINTER];
FOR i: NAT IN [0..maxNReferers) DO suspectReferers[i] ← NIL ENDLOOP;
nSuspectReferers ← 0;
SafeStorage.ReclaimCollectibleObjects[suspendMe: TRUE, traceAndSweep: TRUE];
cycles ← cycles + 1;
ENDLOOP;
suspectRef ← NIL;
RETURN[
referers: suspectReferers,
gfh: gfhToSuspectRef,
foundInLocalFrame: suspectRefFoundInLocalFrame,
foundInCircularStructure: circularStructureFound,
cycles: cycles];
};
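-- Usage sketch (hypothetical client code, not part of this module):
--   nhp: NHeaderP ← AllocatorOps.REFToNHP[someRef];
--   [referers, gfh, inLocal, inCycle, cycles] ← WhoPointsTo[nhp, 10];
-- Each cycle runs a full trace-and-sweep with suspectRef planted, so a call may trigger up to cycleLimit collections.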
InitFindCircularGarbage: PROC [nObjects: CARDINAL, uZone: UNCOUNTED ZONE, reportRopes: BOOL] = {
incrCollectCnt: INT ← 0;
incrWordsReclaimed: INT ← 0;
lastWordsReclaimed: INT ← LAST[INT];
UNTIL (incrCollectCnt > 20)
OR (incrWordsReclaimed >= lastWordsReclaimed AND incrCollectCnt > 3)
DO
SafeStorage.ReclaimCollectibleObjects[suspendMe: TRUE];
incrWordsReclaimed ← SafeStorage.WaitForCollectorDone[].wordsReclaimed;
Process.Pause[Process.MsecToTicks[100]];
lastWordsReclaimed ← incrWordsReclaimed;
incrCollectCnt ← incrCollectCnt + 1;
ENDLOOP;
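-- The loop above settles the incremental collector before the real work: it stops once a pass reclaims at least as many words as the previous pass (after at least 4 passes), or after 21 passes, so the trace-and-sweep below sees only objects the incremental collector cannot free.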
findingCircularGarbage ← TRUE;
objectsGarbage ← 0;
rememberRopes ← reportRopes;
typedVariableType ← CODE[RTTypesPrivate.TypedVariableRec];
listOfGFContextRecType ← CODE [RECORD[first: AMModelContextImpl.GFContextRec, rest: LIST OF AMModelContextImpl.GFContextRec]];
ropeRepType ← CODE[Rope.RopeRep];
circularGarbageSeq ← uZone.NEW[circularGarbageObject[nObjects]];
circularGarbageSeq.max ← nObjects;
SafeStorage.ReclaimCollectibleObjects[suspendMe: TRUE, traceAndSweep: TRUE];
};
FindCircularGarbageTypes: PUBLIC PROC [nObjects: CARDINAL, reportRopes: BOOL ← TRUE]
RETURNS [nGarbage, nSeen: INT, garbage: LIST OF Type ← NIL] = {
-- This procedure first does a standard incremental collection. Then it goes through the motions of a TandS, but does not reclaim any garbage; instead, it records the Types in a list. Do not do two of these at once!
uZone: UNCOUNTED ZONE = UnsafeStorage.GetSystemUZone[];
InitFindCircularGarbage [nObjects: nObjects, uZone: uZone, reportRopes: reportRopes];
FOR index: INT ← circularGarbageSeq.put, index-1 UNTIL index <= 0 DO
garbage ← CONS[circularGarbageSeq[index-1].type, garbage];
circularGarbageSeq[index-1] ← NIL;
ENDLOOP;
uZone.FREE[@circularGarbageSeq];
circularGarbageSeq ← NIL;
findingCircularGarbage ← FALSE;
nGarbage ← objectsGarbage;
nSeen ← objectsSeen;
};
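-- Usage sketch (hypothetical):
--   [nGarbage, nSeen, types] ← FindCircularGarbageTypes[nObjects: 500, reportRopes: FALSE];
-- nObjects bounds only the recording sequence; circular objects beyond it are still counted in nGarbage but not listed.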
FindCircularGarbage: PUBLIC PROC [nObjects: CARDINAL, reportRopes: BOOL ← TRUE]
RETURNS [nGarbage, nSeen: INT, garbage: LIST OF REF ← NIL] = {
-- This procedure first does a standard incremental collection. Then it goes through the motions of a TandS, but does not reclaim any garbage; instead, it records the REFs in a list. Do not do two of these at once!
uZone: UNCOUNTED ZONE = UnsafeStorage.GetSystemUZone[];
InitFindCircularGarbage [nObjects: nObjects, uZone: uZone, reportRopes: reportRopes];
FOR index: INT ← circularGarbageSeq.put, index-1 UNTIL index <= 0 DO
garbage ← CONS[AllocatorOps.NHPToREF[circularGarbageSeq[index-1]], garbage];
circularGarbageSeq[index-1] ← NIL;
ENDLOOP;
uZone.FREE[@circularGarbageSeq];
circularGarbageSeq ← NIL;
findingCircularGarbage ← FALSE;
nGarbage ← objectsGarbage;
nSeen ← objectsSeen;
};
FindConnectedComponent: PUBLIC PROC [rootObject: REF ANY, nObjects: CARDINAL, reportRopes: BOOL ← TRUE]
RETURNS [allOnList: BOOL ← TRUE, countReturned: INT ← 1, circularObjects: LIST OF REF ← NIL] = {
-- This procedure takes rootObject and finds all objects that it transitively points to.
putAREF: PROC [itemToPut: REF, itemTV: AMTypes.TV] RETURNS [tooMany: BOOL ← FALSE] = {
isADup: PROC [itemREF: REF] RETURNS [dup: BOOL ← FALSE] = {
FOR l: LIST OF REF ← circularObjects, l.rest UNTIL l = NIL DO
IF l.first = itemREF THEN RETURN[TRUE];
ENDLOOP;
};
IF ~isADup[itemToPut] AND ~AMTypes.IsNil[itemTV] THEN {
IF countReturned >= nObjects THEN RETURN[TRUE];
lastItem.rest ← CONS[itemToPut, NIL];
lastItem ← lastItem.rest;
countReturned ← countReturned + 1;
};
};
putList: PROC [node: AMTypes.TV] RETURNS [tooMany: BOOL ← FALSE] = {
WHILE node # NIL DO
elem: AMTypes.TV;
elemREF: REF;
elemLC: LONG CARDINAL;
elem ← AMTypes.IndexToTV[node, 2];
IF AMTypes.IsNil[elem] THEN EXIT; -- end of list: don't take Referent of NIL below
IF reportRopes OR AMTypes.TVType[elem] # ropeRepType THEN {
elemLC ← AMBridge.TVToLC[elem];
elemREF ← LOOPHOLE[elemLC];
IF putAREF[elemREF, elem] THEN RETURN[TRUE];
};
node ← AMTypes.Referent[AMTypes.IndexToTV[node, 2]];
ENDLOOP;
};
};
doRecord: PROC [recordTV: AMTypes.TV] RETURNS [tooMany: BOOL ← FALSE] = {
currentType: SafeStorage.Type;
underClassCurrentType: AMTypes.Class;
currentType ← AMTypes.TVType[recordTV];
underClassCurrentType ← AMTypes.UnderClass[currentType];
IF underClassCurrentType = record OR underClassCurrentType = structure THEN {
components: AMTypes.Index;
components ← AMTypes.NComponents[currentType];
FOR index: AMTypes.Index IN [1..components] DO
underTypeAndClass: PROC [type: Type] RETURNS [under: Type, class: AMTypes.Class] = {
under ← type;
WHILE (class ← AMTypes.TypeClass[under]) = definition DO
under ← AMTypes.UnderType[under];
ENDLOOP;
};
refIsOK: BOOL ← TRUE;
itemTV: AMTypes.TV;
itemType: SafeStorage.Type;
itemUnderType: SafeStorage.Type;
itemClass: AMTypes.Class;
itemREF: REF;
itemTV ← AMTypes.IndexToTV[recordTV, index];
itemType ← AMTypes.TVType[itemTV];
[itemUnderType, itemClass] ← underTypeAndClass[itemType];
SELECT itemClass FROM
ref => {
itemLC: LONG CARDINAL;
IF ~reportRopes AND itemType = ropeRepType THEN LOOP;
itemLC ← AMBridge.TVToLC[itemTV];
itemREF ← AMBridge.RefFromTV[itemTV ! AMTypes.Error => {refIsOK ← FALSE; CONTINUE;}];
IF refIsOK THEN {
itemREF ← LOOPHOLE[itemLC];
IF putAREF[itemREF, itemTV] THEN RETURN[TRUE];
};
};
list => {
IF AMTypes.IsNil[itemTV] THEN LOOP;
IF putList[AMTypes.Referent[itemTV]] THEN RETURN [TRUE];
};
union => {
IF index = components THEN {
IF ~AMTypes.IsOverlaid[itemUnderType] AND ~AMTypes.IsComputed[itemUnderType] THEN {
variantTV: AMTypes.TV;
variantTV ← AMTypes.Variant[itemTV];
IF doRecord[variantTV] THEN RETURN [TRUE];
};
};
};
ENDCASE;
ENDLOOP;
};
};
lastItem: LIST OF REF ← NIL;
currentItem: LIST OF REF ← NIL;
circularObjects ← CONS[rootObject, NIL];
lastItem ← circularObjects;
ropeRepType ← CODE[Rope.RopeRep];
currentItem ← circularObjects;
WHILE currentItem # NIL DO
currentTV: AMTypes.TV;
currentType: SafeStorage.Type;
skipThisOne: BOOL ← FALSE;
underClassCurrentType: AMTypes.Class;
currentTV ← AMBridge.TVForReferent[ref: currentItem.first ! AMTypes.Error => {skipThisOne ← TRUE; CONTINUE;}]; -- error on ATOMs and ROPEs
IF ~skipThisOne THEN {
currentType ← AMTypes.TVType[currentTV];
underClassCurrentType ← AMTypes.UnderClass[currentType];
IF underClassCurrentType = record OR underClassCurrentType = structure THEN {
IF doRecord[currentTV] THEN RETURN[FALSE, countReturned, circularObjects];
};
};
currentItem ← currentItem.rest;
ENDLOOP;
};
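-- The walk above is a worklist over circularObjects itself: currentItem advances down the list while putAREF appends new discoveries at lastItem, so the loop ends when the frontier is exhausted or nObjects entries have been listed (returning allOnList = FALSE in the latter case).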
ActuallyDoIt: PROC [ignoreStack: BOOL ← FALSE] = {
clearRC: PROC [nhp: NHeaderP] = {
objectsSeen ← objectsSeen + 1;
nhp.inZCT ← FALSE;
nhp.maybeOnStack ← FALSE;
-- Leave the f flag and the type field alone
nhp.refCount ← 0;
nhp.rcOverflowed ← FALSE;
};
-- Initialize
objectsSeen ← 0;
objectsKept ← objectsReclaimed ← 0;
nRCGlobalFrames ← nGlobalFrames ← nLocalFrames ← 0;
nRetainedObjects ← 0;
nextRpaIndex ← 0;
nRefsPushed ← 0;
-- Clear all free lists
ZCT.zct.bsiToFreeList ← ALL[NIL];
AllocatorOps.Reset[];
-- Clear the zero-count table
ZCT.zct.markingDecrements ← FALSE;
ZCT.zct.rp ← ZCT.zct.wp ←
ZCT.zct.rp - Basics.BITAND[Basics.LowHalf[LOOPHOLE[ZCT.zct.rp, CARD]], ZCT.zctBlockWords-1];
-- mask rp by (LastAddress-(zctBlockWords-1))
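-- A worked example of the masking (a sketch, assuming zctBlockWords is a power of two, say 1024): if rp = 140123B, then BITAND[LowHalf[rp], 1777B] = 123B and rp-123B = 140000B, the base of the current zct block; rp and wp both restart there.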
-- Clear all ref counts and count objects
ZCT.RCOvReset[];
AllObjects[clearRC];
-- trace from all local and global frames in the world
IF NOT ignoreStack THEN RTFrameHeapSnapshot.MapUncountedBodies[TraceLocalFrame];
[] ← LoadState.local.EnumerateAllModules[order: newestFirst, proc: TraceGlobalFrame];
-- continue marking from the ref stack and from objects
UNTIL refStackChain = NIL DO
TraceRefsInObject[LOOPHOLE[PopRef[], REF]];
ENDLOOP;
FreeRefStacks[];
-- Free the objects:
-- reclaim unmarked and free stuff, rebuild the zct, and remember stuff retained only because of the frame heap
AllObjects[visitOneObject];
IF objectsReclaimed + objectsKept # objectsSeen THEN ERROR;
};
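-- A reading of the phases above: marking reuses the inZCT header bit. clearRC clears it on every object, the frame and global-frame traces set it on each object first reached (pushing the object when its type has RC-containing fields), and visitOneObject then treats a clear bit (with f clear) as unreachable.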
DoTraceAndSweepCollection: PROC = {
haveAllocatorLocked: PROC = {
-- here with the loader and allocator locked
haveRCOvLocked: PROC = {
-- here with the loader, allocator and RCOvBank locked
haveZCTLocked: PROC = {
-- here with the loader, allocator, RCOvBank and ZCT locked
RTFrameHeapSnapshot.AcquireFrameHeapSnapshot[];
{
ENABLE UNWIND => RTFrameHeapSnapshot.ReleaseFrameHeapSnapshot[];
ActuallyDoIt[ignoreStack: FALSE];
IF destructiveTestEnabled THEN {
ActuallyDoIt[ignoreStack: TRUE];
DebuggerSwap.CallDebugger["destructive collection finished."];
};
RTFrameHeapSnapshot.ReleaseFrameHeapSnapshot[];
};
};
-- START haveRCOvLocked HERE
-- Acquire the lock on the ref counting machinery, do the work, then release the lock.
ZCT.EnterAndCallBack[haveZCTLocked];
};
-- START haveAllocatorLocked HERE
-- Acquire the lock on the RCOverflowBank, do the work, then release the lock.
ZCT.EnterRCOvAndCallBack[haveRCOvLocked];
};
-- START DoTraceAndSweepCollection HERE
-- First, acquire the lock on the loader (REFs in GF's are counted)
LoadState.local.Acquire[exclusive];
-- Next, acquire the lock on the allocator, do the work, then release the lock.
AllocatorOps.EnterAndCallBack[haveAllocatorLocked ! UNWIND => LoadState.local.Release[]];
-- Release the loader's lock
LoadState.local.Release[];
};
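-- Lock order, outermost first: loader, allocator, RCOvBank, ZCT. The loader lock comes first because REFs in global frames are counted, so no module may be loaded or unloaded while the counts are being rebuilt.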
visitOneObject: PROC [nhp: NHeaderP] = {
-- Reclaim unmarked and free stuff, rebuild the zct, and remember stuff retained only because of the frame heap
dummy: REF ← NIL;
ref: REF = AllocatorOps.NHPToREF[nhp];
bumpRC: PROC [] = {
RCMicrocodeOps.ASSIGNREF[rhs: ref, lhs: @dummy
! RCMicrocodeOps.RCOverflowOccurred => {ZCT.InnerHandleRCOverflow[ref]; RETRY}];
dummy ← NIL;
};
header: Allocator.NormalHeader ← nhp^;
-- This sample is useful for debugging
IF findingCircularGarbage AND nhp.type = listOfGFContextRecType THEN {
-- LIST OF AMModelContextImpl.GFContextRec has some problem, probably due to some unsafety in the interpreter. Bump the reference count to avoid it.
bumpRC[];
header ← nhp^;
};
SELECT TRUE FROM
NOT header.f AND NOT header.inZCT => {
-- This one is NOT being retained (and has no finalization) unless we are finding circular garbage
IF findingCircularGarbage AND nhp.type # nullType THEN {
objectsGarbage ← objectsGarbage + 1;
bumpRC[];
bumpRC[];
bumpRC[];
bumpRC[];
IF nhp.type # typedVariableType
AND nhp.type # listOfGFContextRecType
AND (nhp.type # ropeRepType OR rememberRopes) THEN {
IF circularGarbageSeq.put < circularGarbageSeq.max THEN {
circularGarbageSeq[circularGarbageSeq.put] ← nhp;
circularGarbageSeq.put ← circularGarbageSeq.put + 1;
};
};
header ← nhp^; -- Resample to make it all consistent
}
ELSE {
objectsReclaimed ← objectsReclaimed + 1;
AllocatorOps.TAndSFreeObject[nhp];
RETURN;
};
};
header.f => {
-- This object has finalization enabled, so adjust refCount to reflect the package count.
packageCount: NAT ← RTTypesBasicPrivate.NumberPackageRefs[nhp.type];
dummy: REF ← AllocatorOps.NHPToREF[nhp];
FOR i: CARDINAL IN [1..packageCount] DO
IF nhp.refCount = 0 AND NOT nhp.rcOverflowed THEN {
-- RRA: We are trying to decrement a reference count for a finalizable object that already has a reference count of 0. The most likely cause of this is that a permanent reference is not being maintained to the object. If only a circularity is holding the count high, then this bug may ONLY surface during trace and sweep. We keep a log of this stuff to allow the world to continue. If we are not forgiving this fault, we crash and burn immediately.
IF forgivingLog = NIL THEN
forgivingLog ← VM.AddressForPageNumber[VM.SimpleAllocate[ForgivingLogPages].page];
forgivingLog[forgivingLogIndex] ← ForgivingLogEntry[nhp, header];
forgivingLogCount ← forgivingLogCount + 1;
IF NOT forgiving THEN DebuggerSwap.CallDebugger["Unforgivable!"L];
IF forgivingLogIndex = ForgivingLogEntries-1
THEN forgivingLogIndex ← 0
ELSE forgivingLogIndex ← forgivingLogIndex + 1;
EXIT;
};
-- Decrement the reference count
RCMicrocodeOps.ASSIGNREF[rhs: NIL, lhs: @dummy
! RCMicrocodeOps.RCUnderflowOccurred => {ZCT.InnerHandleRCUnderflow[dummy]; RETRY}];
ENDLOOP;
header ← nhp^; -- Resample to make it all consistent
};
ENDCASE;
-- At this point we keep the object in play
objectsKept ← objectsKept + 1;
nhp.inZCT ← FALSE;
IF header.refCount = 0 AND NOT header.rcOverflowed THEN {
-- The reference count is 0.
IF NOT header.f THEN {
rpa[nextRpaIndex] ← LOOPHOLE[AllocatorOps.NHPToREF[nhp], LONG POINTER];
IF nextRpaIndex < maxNRPIndex THEN nextRpaIndex ← nextRpaIndex + 1;
nRetainedObjects ← nRetainedObjects + 1;
};
-- put it on the zct
RCMicrocodeOps.OnZ[nhp ! RCMicrocodeOps.ZCTFull => DebuggerSwap.CallDebugger["TAndS disaster."]];
};
};
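-- Summary of the SELECT above: unmarked, non-finalizable objects are reclaimed (or merely recorded when findingCircularGarbage); finalizable objects have their package references debited before their counts are judged; every surviving object is counted in objectsKept and, if its rebuilt count is zero, put back on the zct.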
TraceLocalFrame: PROC [d: LONG DESCRIPTOR FOR ARRAY OF WORD] = {
-- this procedure is used to mark refs in a local frame; RCs must be decremented on first encounter
pa: LONG POINTER TO LONG POINTER = LOOPHOLE[BASE[d], LONG POINTER TO LONG POINTER];
nWords: CARDINAL = LENGTH[d];
nLocalFrames ← nLocalFrames + 1;
IF nWords >= SIZE[REF] THEN
FOR i: CARDINAL IN [0..nWords-SIZE[REF]] DO
addr: LONG POINTER = (pa+i)^;
IF addr # NIL AND AllocatorOps.IsValidRef[addr] THEN {
-- this REF is valid and non-NIL
ref: REF = LOOPHOLE[addr, REF];
nhp: Allocator.NHeaderP ← AllocatorOps.REFToNHP[ref];
IF suspectRef # NIL AND addr = suspectRef THEN suspectRefFoundInLocalFrame ← TRUE;
IF NOT nhp.inZCT THEN {
-- this REF is valid and not seen before
nhp.inZCT ← TRUE;
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
};
ENDLOOP;
};
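-- This is the conservative half of the root scan: any local-frame word that passes AllocatorOps.IsValidRef marks its object without incrementing its count, which is how an object can survive with refCount = 0 (it is then tallied in nRetainedObjects by visitOneObject).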
TraceGlobalFrame: PROC [configID: LoadState.ConfigID, moduleIndex: LoadState.ModuleIndex] RETURNS [stop: BOOL ← FALSE] = {
gfh: PrincOps.GlobalFrameHandle = LoadState.local.ModuleToGlobalFrame[configID, moduleIndex];
-- this procedure is used to mark refs from a global frame;
-- the algorithm is essentially the same as for regular objects
type: Type ← LoadState.local.GlobalFrameToType[gfh];
procRef: PROC [ref: REF] = {
SELECT LOOPHOLE[ref, LONG POINTER] FROM
NIL => RETURN;
suspectRef => gfhToSuspectRef ← gfh;
ENDCASE;
{
-- Mark the object as being seen.
nhp: Allocator.NHeaderP ← AllocatorOps.REFToNHP[ref];
dummy: REF ← NIL;
IF NOT nhp.inZCT THEN {
-- this REF is valid and not seen before
nhp.inZCT ← TRUE;
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
-- Increment the reference count
RCMicrocodeOps.ASSIGNREF[rhs: ref, lhs: @dummy
! RCMicrocodeOps.RCOverflowOccurred => {ZCT.InnerHandleRCOverflow[ref]; RETRY}];
};
};
nGlobalFrames ← nGlobalFrames + 1;
IF type # nullType THEN {
nRCGlobalFrames ← nRCGlobalFrames + 1;
RTTypesBasicPrivate.MapRefs[LONG[gfh], RTTypesBasicPrivate.MapTiRcmx[type], procRef];
};
RETURN[FALSE];
};
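-- Note the asymmetry with TraceLocalFrame: global-frame REFs go through ASSIGNREF and so contribute to the rebuilt counts, because module globals hold their REFs for as long as the module stays loaded; local-frame REFs only mark.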
TraceRefsInObject: PROC [container: REF] = {
-- applies procRef to each reference in the indicated object (container)
type: Type ← SafeStorage.GetReferentType[container];
refererPushed: BOOL ← FALSE;
procRef: PROC [ref: REF] = {
SELECT LOOPHOLE[ref, LONG POINTER] FROM
NIL => RETURN;
suspectRef =>
IF NOT refererPushed THEN {
nhp: NHeaderP = AllocatorOps.REFToNHP[container];
refererPushed ← TRUE;
IF nSuspectReferers < maxNReferers THEN {
suspectReferers[nSuspectReferers] ← nhp;
nSuspectReferers ← nSuspectReferers + 1;
};
FOR i: NAT IN [0..nInnerReferers) DO
IF nhp = innerReferers[i] THEN GO TO circular;
ENDLOOP;
IF NOT circularStructureFound AND nInnerReferers < maxNInnerReferers THEN {
innerReferers[nInnerReferers] ← nhp;
nInnerReferers ← nInnerReferers + 1;
};
EXITS circular => circularStructureFound ← TRUE;
};
ENDCASE;
{
-- Mark the object as being seen.
nhp: Allocator.NHeaderP ← AllocatorOps.REFToNHP[ref];
dummy: REF ← NIL;
IF NOT nhp.inZCT THEN {
-- this REF is valid and not seen before
nhp.inZCT ← TRUE;
IF RTTypesBasicPrivate.MapTiRcmx[SafeStorage.GetReferentType[ref]] # RCMap.nullIndex THEN PushRef[LOOPHOLE[ref, LONG POINTER]];
};
-- Increment the reference count
RCMicrocodeOps.ASSIGNREF[rhs: ref, lhs: @dummy
! RCMicrocodeOps.RCOverflowOccurred => {ZCT.InnerHandleRCOverflow[ref]; RETRY}];
};
};
DO
oldNRefsPushed: INT = nRefsPushed;
RTTypesBasicPrivate.MapRefs[
LOOPHOLE[container, LONG POINTER],
RTTypesBasicPrivate.MapTiRcmx[type],
procRef];
IF nRefsPushed-oldNRefsPushed # 1 THEN RETURN;
container ← LOOPHOLE[PopRef[], REF];
type ← SafeStorage.GetReferentType[container];
refererPushed ← FALSE;
ENDLOOP;
};
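-- The DO loop above flattens chains (a reading of the code, not a quoted design note): when tracing a container pushes exactly one new REF, that REF is popped and traced immediately, so a long list is walked iteratively instead of filling the ref stack one cell per element.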
AllObjects: PROC [visit: PROC [NHeaderP]] = {
-- this proc visits all collectible objects (free AND allocated)
qi: QuantumIndex ← FIRST[QuantumIndex];
DO
-- Find the beginning of the next run of quanta from the quantum map
hp: HeaderP;
blockSize: INT ← 0;
UNTIL AllocatorOps.quantumMap[qi] DO
IF qi = LAST[QuantumIndex] THEN RETURN;
qi ← qi+1;
ENDLOOP;
-- Start parsing at the beginning of this run
FOR hp ← QuantumIndexToLP[qi], hp + blockSize
WHILE LOOPHOLE[hp, CARD] < LastAddress AND AllocatorOps.quantumMap[LPToQuantumIndex[hp]] DO
nhp: NHeaderP ← LOOPHOLE[hp];
IF nhp.blockSizeIndex = bsiEscape THEN
nhp ← nhp + (SIZE[ExtendedHeader] - SIZE[NormalHeader]);
blockSize ← AllocatorOps.BlockSize[hp];
visit[nhp];
ENDLOOP;
IF LOOPHOLE[hp, CARD] >= LastAddress THEN RETURN;
qi ← LPToQuantumIndex[hp];
ENDLOOP;
};
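-- On the bsiEscape adjustment above (an inference from Allocator's header layout, worth checking against Allocator.mesa): an extended object starts with an ExtendedHeader whose last words are the NormalHeader, so nhp is advanced to address that embedded NormalHeader before visiting.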
logWordsPerQuantum: NAT = PrincOps.logWordsPerPage + Allocator.logPagesPerQuantum;
-- The shift distance = Log2[wordsPerPage*pagesPerQuantum]
LPToQuantumIndex: PROC [lp: LONG POINTER] RETURNS [QuantumIndex] = INLINE {
RETURN[AllocatorOps.AddressToQuantumIndex[LOOPHOLE[lp]]];
};
QuantumIndexToLP: PROC [qi: QuantumIndex] RETURNS [LONG POINTER] = INLINE {
RETURN[Basics.DoubleShiftLeft[[pair[lo: qi, hi: 0]], logWordsPerQuantum].lp];
};
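-- Worked example (a sketch; the log constants are typical values, not read from PrincOps or Allocator): with logWordsPerPage = 8 and logPagesPerQuantum = 2, logWordsPerQuantum = 10, so QuantumIndexToLP[5] = 5*1024 = 12000B, and LPToQuantumIndex maps any address in [12000B..14000B) back to 5.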
PushRef: PROC [pref: LONG POINTER] = {
-- pushes ref to object onto the reference stack
stack: RefStack ← refStackChain;
IF stack = NIL OR stack.size = stack.max THEN {
-- time to get a new stack node
IF refStackFree = NIL -- oh well, nothing comes for free
THEN {
stack ← VM.AddressForPageNumber[VM.SimpleAllocate[RefStackPages].page];
stack.next ← NIL;
stack.size ← 0;
stack.max ← RefStackMax;
}
ELSE {
stack ← refStackFree;
refStackFree ← stack.next;
};
stack.next ← refStackChain;
refStackChain ← stack;
};
stack[stack.size] ← pref;
stack.size ← stack.size + 1;
nRefsPushed ← nRefsPushed + 1;
};
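-- The ref stack lives in uncounted VM pages (VM.SimpleAllocate above), so the marker never allocates counted storage while the allocator's own structures are being rebuilt; emptied nodes are chained on refStackFree by PopRef and reused here.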
PopRef: PROC RETURNS [pref: LONG POINTER] = {
-- pops ref to object from the reference stack
stack: RefStack ← refStackChain;
IF stack # NIL THEN {
size: CARDINAL ← stack.size;
IF size = 0 THEN {
refStackChain ← stack.next;
stack.next ← refStackFree;
refStackFree ← stack;
IF (stack ← refStackChain) = NIL THEN RETURN [NIL];
IF (size ← stack.size) = 0 THEN ERROR;
};
size ← size - 1;
stack.size ← size;
RETURN [stack[size]];
};
RETURN [NIL];
};
FreeRefStacks: PROC = {
IF refStackChain # NIL THEN ERROR; -- should never happen, but check anyway
WHILE refStackFree # NIL DO
stack: RefStack ← refStackFree.next;
VM.Free[[page: VM.PageNumberForAddress[refStackFree], count: RefStackPages]];
refStackFree ← stack;
ENDLOOP;
};
-- START CircularGarbageTraceAndSweepImpl HERE
Collector.EstablishTAndSProc[DoTraceAndSweepCollection];
END.