-- ExamineCycles.mesa
-- Copyright © 1985, 1986 by Xerox Corporation. All rights reserved.
-- Russ Atkinson (RRA) April 1, 1986 5:17:44 am PST
DIRECTORY
Allocator USING [BlockSizeIndex, bsiEscape, EHeaderP, ExtendedHeader, HeaderP, LastAddress, logPagesPerQuantum, NHeaderP, NormalHeader, QuantumIndex, RefCount],
AllocatorOps USING [AddressToQuantumIndex, bsiToSize, EnterAndCallBack, quantumMap],
Basics,
BackStop USING [Call],
Commander USING [CommandProc, Register],
CommandTool USING [ArgumentVector, Failed, Parse],
DebuggerSwap USING [CallDebugger],
IO,
PrintTV USING [PrintType],
Rope,
RCMap,
RTTypesBasicPrivate,
RuntimeError USING [UNCAUGHT],
SafeStorage USING [nullType, ReclaimCollectibleObjects, Type, TypeIndex],
VM,
ZCT USING [EnterAndCallBack, EnterRCOvAndCallBack];
ExamineCycles: CEDAR PROGRAM
IMPORTS AllocatorOps, Basics, BackStop, Commander, CommandTool, DebuggerSwap, IO, PrintTV, Rope, RTTypesBasicPrivate, RuntimeError, SafeStorage, VM, ZCT
= BEGIN
CARD: TYPE = LONG CARDINAL;
ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;
Type: TYPE = SafeStorage.Type;
nullType: Type = SafeStorage.nullType;
-- Command procedures
FindCyclicTypesProc: Commander.CommandProc = TRUSTED {
-- [cmd: Handle] RETURNS [result: REF ← NIL, msg: ROPE ← NIL]
-- CommandObject = [in, out, err: STREAM, commandLine, command: ROPE, ...]
out: STREAM = cmd.out;
switches: Switches ← ALL[FALSE];
argsProcessed: NAT ← 0; -- # of arguments processed
argv: CommandTool.ArgumentVector ← CommandTool.Parse[cmd: cmd, starExpand: FALSE
! CommandTool.Failed => {msg ← errorMsg; GO TO failed}];
-- When parsing the command line, be prepared for failure. The error is reported to the user.
objects: INT ← 0;
possibles: INT ← 0;
refs: INT ← 0;
interval: VM.Interval;
intervalPtr: LONG POINTER TO Allocator.NHeaderP;
intervalLimit: LONG POINTER TO Allocator.NHeaderP;
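-- interval supplies scratch space for the visit stack used by StartVisit: one NHeaderP slot per object counted, plus slack.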
countObject: InfoProc = {
-- [type: SafeStorage.Type, size: INT, object: Allocator.HeaderP, objectHP: Allocator.NHeaderP] RETURNS [continue: BOOLEAN]
pr: LONG POINTER ← objectHP + SIZE[Allocator.NormalHeader];
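-- pr addresses the object's contents just past the normal header; MapRefs enumerates the REFs embedded there.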
SELECT TRUE FROM
type = nullType => {};
type = CODE[ROPE] => {};
type > RTTypesBasicPrivate.MapTiTd.length => {};
objectHP.refCount = 0 => {};
ENDCASE => {
-- Immediately cull things that only refer forwards.
rcmx: RCMap.Index = RTTypesBasicPrivate.MapTiRcmx[type];
SELECT rcmx FROM
FIRST[RCMap.Index], LAST[RCMap.Index] => {};
ENDCASE => {
-- Worth enumerating
none: BOOL ← TRUE;
eachRef: UNSAFE PROC [p: REF] = {
IF p # NIL THEN {
refs ← refs + 1;
IF none THEN {
none ← FALSE;
possibles ← possibles + 1;
WriteFlags[base, objectHP, [legal: TRUE]];
};
};
};
RTTypesBasicPrivate.MapRefs[pr, rcmx, eachRef];
};
objects ← objects + 1;
};
RETURN [TRUE];
};
base: BasePtr ← SetupBase[];
{
ENABLE UNWIND => CleanupBase[base];
SafeStorage.ReclaimCollectibleObjects[];
EnumerateCollectableStorage[countObject];
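-- The counting pass has fixed 'objects'; allocate a scratch interval big enough to stack one header pointer per object, plus slack, for the tracing pass.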
interval ← VM.SimpleAllocate[VM.PagesForWords[ SIZE[Allocator.NHeaderP]*(objects+100) ]];
intervalPtr ← VM.AddressForPageNumber[interval.page];
intervalLimit ← intervalPtr; -- bottom of the visit stack; StartVisit grows the stack upward through the interval
{
ENABLE UNWIND => VM.Free[interval];
trace: InfoProc = {
-- [type: SafeStorage.Type, size: INT, object: Allocator.HeaderP, objectHP: Allocator.NHeaderP] RETURNS [continue: BOOLEAN]
IF ReadFlags[base, objectHP].legal THEN StartVisit[objectHP];
RETURN [TRUE];
};
StartVisit: UNSAFE PROC [start: Allocator.NHeaderP] = {
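-- Iterative depth-first search over an explicit stack of header pointers. The per-object
-- flags drive the walk: a reference back to an object whose 'stacked' flag is still set
-- closes a cycle, and the stack is then rewound to mark the members of that cycle cyclic.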
startFlags: Flags = [marked: TRUE, cyclic: FALSE, stacked: TRUE, legal: TRUE];
stackP ← intervalLimit;
stackP^ ← start;
DO
p: Allocator.NHeaderP ← stackP^;
type: Type = p.type;
rcmx: RCMap.Index = RTTypesBasicPrivate.MapTiRcmx[type];
eachRef: UNSAFE PROC [p: REF] = {
IF p # NIL THEN {
nhp: Allocator.NHeaderP ← LOOPHOLE[p, Allocator.NHeaderP] - SIZE[Allocator.NormalHeader];
ff: Flags ← ReadFlags[base, nhp];
IF ff.legal THEN {
IF ff.marked THEN {
-- We have seen this one before. Is it on the stack?
IF ff.stacked THEN {
-- Yes! So this is a cyclic object! We also have to mark all objects that could be in this cycle.
stackQ: LONG POINTER TO Allocator.NHeaderP ← stackP;
ff.cyclic ← TRUE;
WriteFlags[base, nhp, ff];
-- We also have to mark all objects that are in this cycle.
DO
pp: Allocator.NHeaderP = stackQ^;
pf: Flags = ReadFlags[base, pp];
IF pp = nhp THEN EXIT;
IF stackQ = intervalLimit THEN EXIT;
IF pf.stacked AND NOT pf.cyclic THEN WriteFlags[base, pp, ff];
stackQ ← stackQ - SIZE[Allocator.NHeaderP];
ENDLOOP;
};
}
ELSE {
-- Never seen before. Just mark it as being on the stack.
ff.marked ← ff.stacked ← TRUE;
stackP ← stackP + SIZE[Allocator.NHeaderP];
stackP^ ← nhp;
WriteFlags[base, nhp, ff];
};
};
};
};
WriteFlags[base, p, startFlags];
RTTypesBasicPrivate.MapRefs[p+SIZE[Allocator.NormalHeader], rcmx, eachRef];
-- Now we have to scan through the stack for something that is on the stack but not yet enumerated (marked as 'stacked'). If we don't find such a thing, then we are done.
DO
IF stackP = intervalLimit THEN RETURN
ELSE {
pp: Allocator.NHeaderP ← stackP^;
ff: Flags ← ReadFlags[base, pp];
IF NOT ff.stacked THEN EXIT;
ff.stacked ← FALSE;
WriteFlags[base, pp, ff];
stackP ← stackP - SIZE[Allocator.NHeaderP];
};
ENDLOOP;
-- At this point we have stackP # intervalLimit and the flags indicate that we have not yet enumerated from this header.
ENDLOOP;
};
stackP: LONG POINTER TO Allocator.NHeaderP;
EnumerateCollectableStorage[trace];
};
VM.Free[interval];
};
CleanupBase[base];
EXITS
failed => {result ← $Failure};
};
Flags: TYPE = RECORD [
marked: BOOL ← FALSE,
stacked: BOOL ← FALSE,
cyclic: BOOL ← FALSE,
legal: BOOL ← FALSE
];
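-- Flag roles: marked = reached by the trace; stacked = currently on the visit stack;
-- cyclic = known member of a reference cycle; legal = candidate recorded by the counting pass.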
nullFlags: Flags = [];
FlagsArrayPtr: TYPE = LONG POINTER TO FlagsArray;
FlagsArray: TYPE = PACKED ARRAY FlagIndex OF Flags;
BasePtr: TYPE = LONG POINTER TO BaseArray;
BaseArray: TYPE = ARRAY BaseIndex OF FlagsArrayPtr;
FlagMod: NAT = 4096;
FlagIndex: TYPE = [0..FlagMod);
logFlagMod: NAT = 12;
BaseMod: NAT = 4096;
BaseIndex: TYPE = [0..BaseMod);
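-- The flag store is a two-level, lazily allocated map from header addresses to Flags:
-- the high bits of the (even) word address select a FlagsArray through base, and the
-- low bits, halved, index within it. Pages are allocated only when flags are written.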
ReadFlags: UNSAFE PROC [base: BasePtr, nhp: Allocator.NHeaderP] RETURNS [Flags] = UNCHECKED {
ln: Basics.LongNumber = LOOPHOLE[nhp];
ptr: FlagsArrayPtr = base[Basics.DoubleShiftRight[ln, logFlagMod+1].lo];
IF ln.lc >= Allocator.LastAddress THEN Crash[nhp];
IF ln.lo MOD 2 = 1 THEN Crash[nhp];
IF ptr = NIL THEN RETURN [nullFlags];
RETURN [ptr[(ln.lo / 2) MOD FlagMod]];
};
WriteFlags: UNSAFE PROC [base: BasePtr, nhp: Allocator.NHeaderP, flags: Flags] = UNCHECKED {
ln: Basics.LongNumber = LOOPHOLE[nhp];
xHi: NAT = Basics.DoubleShiftRight[ln, logFlagMod+1].lo;
ptr: FlagsArrayPtr ← base[xHi];
IF ln.lc >= Allocator.LastAddress THEN Crash[nhp];
IF ln.lo MOD 2 = 1 THEN Crash[nhp];
IF ptr = NIL THEN {
-- Sigh, we need to allocate a new goodie.
interval: VM.Interval ← VM.SimpleAllocate[VM.PagesForWords[ SIZE[FlagsArray] ]];
base[xHi] ← ptr ← VM.AddressForPageNumber[interval.page];
};
ptr[(ln.lo / 2) MOD FlagMod] ← flags;
};
SetupBase: UNSAFE PROC RETURNS [base: BasePtr] = UNCHECKED {
interval: VM.Interval ← VM.SimpleAllocate[VM.PagesForWords[ SIZE[BaseArray] ]];
base ← VM.AddressForPageNumber[interval.page];
base^ ← ALL[NIL];
};
CleanupBase: UNSAFE PROC [base: BasePtr] = UNCHECKED {
pages: NAT = VM.PagesForWords[ SIZE[FlagsArray] ];
FOR i: NAT IN BaseIndex DO
IF base[i] # NIL THEN {
page: VM.PageNumber = VM.PageNumberForAddress[base[i]];
base[i] ← NIL;
VM.Free[ [page, pages] ];
};
ENDLOOP;
VM.Free[ [VM.PageNumberForAddress[base], VM.PagesForWords[SIZE[BaseArray]] ]];
};
-- Utilities
Switches: TYPE = PACKED ARRAY CHAR['a..'z] OF BOOL;
TypeIndex: TYPE = [0..LAST[SafeStorage.TypeIndex]-256];
typeWidth: NAT ← 8;
typeDepth: NAT ← 4; -- depth for PrintTV.PrintType; the initial value here is assumed
PrintType: PROC [out: STREAM, type: TypeIndex] = {
-- This routine prints a type defensively.
innerPrint: PROC = {
PrintTV.PrintType[type: [type], put: out, depth: typeDepth, width: typeWidth, verbose: FALSE];
};
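-- BackStop.Call runs innerPrint, catching any error and returning a message ROPE (NIL on success), so a damaged type descriptor cannot crash the caller.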
msg: ROPE ← NIL;
IF type = 0 THEN msg ← "(free)" ELSE msg ← BackStop.Call[innerPrint];
IF msg # NIL THEN IO.PutRope[out, msg];
};
ProcessSwitches: PROC [swIn: Switches, arg: ROPE] RETURNS [switches: Switches] = {
sense: BOOL ← TRUE;
switches ← swIn;
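-- '-' is ignored, '~' inverts the sense of the next switch character, and uppercase switches fold to lowercase.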
FOR index: INT IN [0..Rope.Length[arg]) DO
char: CHAR ← Rope.Fetch[arg, index];
SELECT char FROM
'- => LOOP;
'~ => {sense ← NOT sense; LOOP};
IN ['a..'z] => switches[char] ← sense;
IN ['A..'Z] => switches[char + ('a-'A)] ← sense;
ENDCASE;
sense ← TRUE;
ENDLOOP;
};
PrintHeader: PROC [out: STREAM, nhp: Allocator.NHeaderP] = TRUSTED {
IO.PutF1[out, "[inZCT: %g", [rope[IF nhp.inZCT THEN "TRUE" ELSE "FALSE"]] ];
IO.PutF1[out, ", maybeOnStack: %g", [rope[IF nhp.maybeOnStack THEN "TRUE" ELSE "FALSE"]] ];
IO.PutF1[out, "\n bsi: %g", IF nhp.blockSizeIndex = Allocator.bsiEscape THEN [rope["bsiEscape"]] ELSE [cardinal[nhp.blockSizeIndex]] ];
IO.PutF1[out, ", f: %g", [rope[IF nhp.f THEN "TRUE" ELSE "FALSE"]] ];
IF nhp.f THEN {
td: RTTypesBasicPrivate.PTypeDesc ← RTTypesBasicPrivate.MapTiTd[nhp.type];
IF td.numberPackageRefs > nhp.refCount THEN {
-- Remember this guy!
IO.PutF1[out, " (prc: %g)", [integer[td.numberPackageRefs]] ];
};
};
IO.PutF1[out, ", refCount: %g", [cardinal[nhp.refCount]] ];
IF nhp.rcOverflowed THEN IO.PutRope[out, " (overflowed)" ];
IO.PutF1[out, "\n typePad: %g", [cardinal[nhp.typePad]] ];
IO.PutF1[out, ", type: %g (", [cardinal[nhp.type]] ];
PrintType[out, nhp.type];
IO.PutRope[out, ")]" ];
};
InfoProc: TYPE = UNSAFE PROC [type: SafeStorage.Type, size: INT, object: Allocator.HeaderP, objectHP: Allocator.NHeaderP] RETURNS [continue: BOOLEAN ← TRUE];
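-- An InfoProc is called once per heap object; returning continue = FALSE stops the enumeration.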
EnumerateCollectableStorage: PROC [callBack: InfoProc ← NIL] = TRUSTED {
-- This procedure enumerates all of the objects in the heap. All interesting monitor locks are held while doing this. Each object is verified as it is examined. An optional callback procedure can examine each object as well.
haveAllocatorLocked: PROC = TRUSTED {
haveRCOvLocked: PROC = TRUSTED {
haveZCTLocked: PROC = TRUSTED {
qx: Allocator.QuantumIndex ← FIRST[Allocator.QuantumIndex];
DO
hp: Allocator.HeaderP ← NIL;
blockSize: LONG CARDINAL ← 0;
-- Scan for the next start of a quantum run.
WHILE NOT AllocatorOps.quantumMap[qx] DO
IF qx = LAST[Allocator.QuantumIndex] THEN RETURN;
qx ← qx + 1;
ENDLOOP;
hp ← Basics.DoubleShiftLeft[[lc[qx]], Allocator.logPagesPerQuantum+VM.logWordsPerPage].lp;
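-- Convert the quantum index to the word address of the first block in that quantum run.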
DO
start: Allocator.QuantumIndex ← qx;
nhp: Allocator.NHeaderP ← LOOPHOLE[hp];
IF LOOPHOLE[hp, LONG CARDINAL] >= Allocator.LastAddress THEN RETURN;
IF nhp.blockSizeIndex = Allocator.bsiEscape THEN {
ehp: Allocator.EHeaderP ← LOOPHOLE[hp];
nhp ← nhp + (SIZE[Allocator.ExtendedHeader] - SIZE[Allocator.NormalHeader]);
SELECT ehp.sizeTag FROM
words => blockSize ← ehp.extendedSize;
pages => blockSize ← VM.WordsForPages[ehp.extendedSize];
ENDCASE => {Crash[nhp]; RETURN};
}
ELSE {
blockSize ← AllocatorOps.bsiToSize[nhp.blockSizeIndex];
};
IF blockSize <= 0 THEN {Crash[nhp]; RETURN};
IF blockSize >= Allocator.LastAddress THEN {Crash[nhp]; RETURN};
IF Basics.LowHalf[blockSize] MOD 2 = 1 THEN {Crash[nhp]; RETURN};
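-- A believable block is nonempty, below the address-space limit, and an even number of words; anything else means a smashed heap.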
IF callBack # NIL THEN {
-- There is a user routine to call. The user can force a crash (soft or hard) by setting foundNhp.
IF NOT callBack[nhp.type, blockSize, hp, nhp
! RuntimeError.UNCAUGHT => Crash[nhp]] THEN RETURN;
IF foundNhp # NIL THEN {Crash[nhp]; RETURN};
};
hp ← hp + blockSize;
qx ← AllocatorOps.AddressToQuantumIndex[LOOPHOLE[hp]];
FOR qq: Allocator.QuantumIndex IN (start..qx) DO
IF NOT AllocatorOps.quantumMap[qq] THEN {Crash[nhp]; RETURN};
ENDLOOP;
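-- Every quantum spanned by the block just skipped must be on the quantum map, or the heap is inconsistent.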
IF qx = LAST[Allocator.QuantumIndex] THEN RETURN;
IF NOT AllocatorOps.quantumMap[qx] THEN EXIT;
ENDLOOP;
ENDLOOP;
};
ZCT.EnterAndCallBack[haveZCTLocked];
};
ZCT.EnterRCOvAndCallBack[haveRCOvLocked];
};
AllocatorOps.EnterAndCallBack[haveAllocatorLocked];
IF foundNhp # NIL THEN ERROR CrashError[foundNhp];
};
foundNhp: Allocator.NHeaderP ← NIL;
worldSwap: BOOL ← TRUE;
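-- worldSwap chooses how Crash reports trouble: world-swap into the debugger, or raise CrashError in this world.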
Crash: PROC [nhp: Allocator.NHeaderP] = TRUSTED {
foundNhp ← nhp;
IF worldSwap
THEN DebuggerSwap.CallDebugger["Kosher it's not!"L]
ELSE ERROR CrashError[nhp];
};
CrashError: ERROR [nhp: Allocator.NHeaderP] = CODE;
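-- The module tail is not part of this excerpt. A minimal sketch follows, assuming the
-- conventional Commander registration; the command name and doc string are assumptions.
Commander.Register[key: "FindCyclicTypes", proc: FindCyclicTypesProc, doc: "find collectible objects that appear in reference cycles"];
END.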