YggBuffManImpl.mesa
Copyright © 1988, 1989 by Xerox Corporation. All rights reserved.
Bob Hagmann July 6, 1989 9:28:11 am PDT
The buffer manager.
DIRECTORY
Basics USING [Comparison],
BasicTime USING [Now],
Camelot USING [DSLogOldValueNewValue, DSPinObject, DSQPreflush, DSQZeroFill, ListOfSegmentDesc, optrT, segmentDescT, segmentIdT, tidT],
ConstArith USING [Add, Const, DivMod, ToCard],
Mach USING [kernReturnT, ListOfPorts, pagingObjectT, portT, taskSelf, vmAddressT, vmAllocateWithPager, vmDeallocate],
PBasics USING [ByteBlt, bytesPerWord, Move, Word],
Process USING [Detach, MsecToTicks, Pause, Ticks],
RedBlackTree USING [Compare, Create, Delete, GetKey, Insert, Lookup, LookupNextSmaller, LookupProc, Size, Table, UserData],
SafeStorage USING [CantEstablishFinalization, EnableFinalization, EstablishFinalization, FinalizationQueue, FQNext, NewFQ],
YggBuffMan USING [ Chunk, ReleaseState, VMPageSet],
YggCamelotSegment USING [SegPort],
YggDIDMap USING [Run, RunList],
YggFile USING [Locate],
YggFileInternal USING [InitializeFile],
YggEnvironment USING [dsPort, PageRun],
YggInternal USING[FileHandle],
YggTransaction USING[IsTopLevel];
YggBuffManImpl: CEDAR MONITOR
IMPORTS BasicTime, Camelot, ConstArith, PBasics, Process, Mach, RedBlackTree, SafeStorage, YggFile, YggFileInternal, YggEnvironment, YggTransaction
EXPORTS YggBuffMan, YggCamelotSegment
SHARES YggBuffMan, YggBuffManPrivate
~ BEGIN
-- Data types and variables
TopLevelTransactionOnly: PUBLIC ERROR = CODE; -- raised by WritePages when the tid is not a top-level transaction
maxChunkSize: INT ← 8; -- maximum number of pages mapped into a single Chunk (see mapInRun)
wordsPerPage: INT ← 1024; -- page geometry; bytes and address units derived below
bytesPerPage: INT ← wordsPerPage * PBasics.bytesPerWord;
unitsPerPage: INT ← wordsPerPage * UNITS[PBasics.Word];
Chunk: TYPE = YggBuffMan.Chunk;
intervalToChunkMap: RedBlackTree.Table; -- maps [segmentId, page interval] to the Chunk covering it; protected by the monitor
desiredNumberOfChunks: INT ← 500; -- CacheTrimProcess trims the table back toward this size
lruListTop: REF Chunk ← NIL; -- head of the doubly-linked LRU list of finalized (client-unreferenced) chunks
lruListTail: REF Chunk ← NIL; -- tail of the LRU list; chunks are appended here by FinalizationProcess
myCondition: CONDITION; -- broadcast when a chunk's valid flag is set (see mapInRun/insureChunkValid)
MyQueue: SafeStorage.FinalizationQueue = SafeStorage.NewFQ[length: 200]; -- finalization queue for Chunk objects
SegmentList: PUBLIC LIST OF YggCamelotSegment.SegPort ← NIL; -- Camelot segments paired with their Mach paging ports; built by InitializeFilePageMgr
Exported procedures for buffer management
ReadPages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, tid: Camelot.tidT, pageRun: YggEnvironment.PageRun, zeroFill: BOOL ← FALSE] RETURNS [vMPageSet: YggBuffMan.VMPageSet ← NIL] ~ {
  -- errors defined in this interface: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
  -- The first data page of the file is logical page number 0. Returns a VMPageSet containing the pages [pageRun.firstPage..pageRun.firstPage + n) of the file, where n <= pageRun.count. The choice of n is entirely up to FilePageMgr. vMPageSet.pages points to the data for pageRun.firstPage, and vMPageSet.pageRun indicates what was returned. The caller has read/write access to these pages. Increments the use count of the Chunk returned in the VMPageSet.
  -- Fix (review): restored the "← FALSE" defaults for zeroFill and tryAgain, which the archived text had garbled to "BOOLFALSE".
  addVMPageItem: PROC [refChunk: REF Chunk] ~ {
    -- Append to vMPageSet the portion of refChunk that overlaps the pages still wanted (described by scratchChunk.run), then advance scratchChunk.run past the pages consumed.
    firstPage: CARD;
    lastPage: CARD;
    runPages: INT;
    offsetPages: INT;
    buffer: LONG POINTER;
    pageRun: YggDIDMap.Run;
    firstPage ← MIN[scratchChunk.run.segmentPage, refChunk.run.segmentPage]; -- NOTE(review): MIN of the two starts; callers arrange that refChunk holds the first wanted page, making these coincide — confirm
    lastPage ← MIN[scratchChunk.run.segmentPage + scratchChunk.run.pages, refChunk.run.segmentPage + refChunk.run.pages] - 1;
    runPages ← lastPage-firstPage+1;
    offsetPages ← firstPage - refChunk.run.segmentPage;
    IF offsetPages = 0 THEN buffer ← refChunk.startAddress
    ELSE buffer ← refChunk.startAddress + offsetPages * bytesPerPage;
    IF firstPage = refChunk.run.segmentPage AND lastPage = refChunk.run.segmentPage + CARD[refChunk.run.pages] - 1 THEN {
      pageRun ← refChunk.run -- the whole chunk is being returned
      }
    ELSE {
      -- only part of the chunk overlaps: describe just the overlapping sub-run
      pageRun ← [scratchChunk.run.segmentId, firstPage, scratchChunk.run.firstPage+offsetPages, runPages, FALSE];
      };
    IF vMPageSetTail = NIL THEN {
      vMPageSet ← LIST[[buffer, pageRun, refChunk]];
      vMPageSetTail ← vMPageSet;
      }
    ELSE {
      vMPageSetTail.rest ← CONS[[buffer, pageRun, refChunk], NIL];
      };
    scratchChunk.run.segmentPage ← scratchChunk.run.segmentPage + runPages;
    scratchChunk.run.pages ← scratchChunk.run.pages - runPages;
    };
  scratchChunk: REF Chunk;
  vMPageSetTail: YggBuffMan.VMPageSet ← NIL; -- tail pointer so appends are O(1)
  runListFromLocate: YggDIDMap.RunList;
  runListFromLocate ← YggFile.Locate[file: fileHandle, tid: tid, from: [pageRun.firstPage], nPages: pageRun.count];
  scratchChunk ← getScratchRefChunk[];
  FOR rl: YggDIDMap.RunList ← runListFromLocate, rl.rest UNTIL rl = NIL DO
    data: RedBlackTree.UserData;
    scratchChunk.run ← rl.first;
    WHILE scratchChunk.run.pages > 0 DO
      insureChunkValid: ENTRY PROC [chunk: REF Chunk] ~ {
        -- insure that a concurrent chunk creation finishs before it is used
        WHILE ~chunk.valid DO WAIT myCondition; ENDLOOP;
        };
      data ← LookupFirst[intervalToChunkMap, scratchChunk];
      IF data = NIL THEN {
        -- no chunk intersects the wanted run: map one in ourselves
        newChunk: REF Chunk;
        newChunk ← mapInRun[scratchChunk.run.segmentId, scratchChunk.run.segmentPage, scratchChunk.run.pages, scratchChunk.run.firstPage, TRUE, zeroFill];
        IF newChunk = NIL THEN LOOP -- try again should find a chunk
        ELSE insureChunkValid[newChunk];
        markInterestInChunk[newChunk];
        newChunk.file ← fileHandle;
        addVMPageItem[refChunk: newChunk];
        }
      ELSE {
        grabChunk: ENTRY PROC RETURNS [tryAgain: BOOL ← FALSE] ~ {
          -- Claim an existing chunk: pull it off the LRU list if it is there, bump its use count. Returns TRUE if the chunk was already unmapped (startAddress = NIL) and the lookup must be retried.
          IF firstRefChunk.startAddress = NIL THEN RETURN[TRUE];
          IF firstRefChunk.lruListPrev # NIL OR firstRefChunk.lruListNext # NIL OR lruListTop = firstRefChunk THEN {
            -- On lrulist. Remove it and use it.
            IF firstRefChunk.lruListPrev = NIL THEN {
              lruListTop ← firstRefChunk.lruListNext
              }
            ELSE {
              firstRefChunk.lruListPrev.lruListNext ← firstRefChunk.lruListNext;
              };
            IF lruListTop # NIL THEN lruListTop.lruListPrev ← NIL;
            IF firstRefChunk.lruListNext = NIL THEN {
              lruListTail ← firstRefChunk.lruListPrev
              }
            ELSE firstRefChunk.lruListNext.lruListPrev ← firstRefChunk.lruListPrev;
            IF lruListTail # NIL THEN lruListTail.lruListNext ← NIL;
            };
          firstRefChunk.useCount ← firstRefChunk.useCount + 1;
          firstRefChunk.lastTouchTime ← BasicTime.Now[];
          firstRefChunk.lruListPrev ← NIL;
          firstRefChunk.lruListNext ← NIL;
          };
        firstRefChunk: REF Chunk ← NARROW[data];
        IF firstRefChunk.run.segmentPage >= scratchChunk.run.segmentPage AND firstRefChunk.run.segmentPage < scratchChunk.run.segmentPage + CARD[scratchChunk.run.pages] THEN {
          -- chunk contains first page needed
          IF grabChunk[] THEN LOOP; -- LOOP if chunk no longer active or on lru list => it won't be found next time through.
          insureChunkValid[firstRefChunk];
          addVMPageItem[firstRefChunk];
          }
        ELSE {
          -- some chunks overlap but chunk does not contain first page needed: map in the gap before the found chunk
          newChunk: REF Chunk;
          newChunk ← mapInRun[scratchChunk.run.segmentId, scratchChunk.run.segmentPage, firstRefChunk.run.segmentPage - scratchChunk.run.segmentPage, firstRefChunk.run.firstPage, TRUE, zeroFill];
          IF newChunk = NIL THEN LOOP -- try again should find a chunk
          ELSE insureChunkValid[newChunk];
          markInterestInChunk[newChunk];
          newChunk.file ← fileHandle;
          addVMPageItem[refChunk: newChunk];
          };
        };
      ENDLOOP;
    ENDLOOP;
  returnScratchRefChunk[scratchChunk];
  };
ReadAheadPages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, pageRun:
YggEnvironment.PageRun] ~ {
-- errors defined in this interface: NoSuchFile, NoSuchVolume, PageRunArgIllegal, VolumeWentOffline.
-- Semantically identical to ReadPages, except that it notifies the file page manager that the indicated pages are likely to be read soon rather than waiting for them now and for its handling of PageRunExtendsPastEof.
-- NOTE(review): the body is empty — read-ahead is only a hint and is not implemented here.
};
UsePages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, tid: Camelot.tidT, pageRun: YggEnvironment.PageRun] RETURNS [vMPageSet: YggBuffMan.VMPageSet] ~ {
  -- errors defined in this interface: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
  -- Semantically identical to ReadPages, except that the contents of the pages given by the PageRun are undefined; the implementation may therefore avoid actually reading the pages. Here we simply ask ReadPages to zero-fill instead of reading.
  RETURN[ReadPages[fileHandle: fileHandle, tid: tid, pageRun: pageRun, zeroFill: TRUE]];
  };
ShareVMPageSet: PUBLIC PROC [vMPageSet: YggBuffMan.VMPageSet] ~ {
-- errors defined in this interface: none.
-- Increments the use count of the Chunk in the VMPageSet.
-- NOTE(review): the body is empty, so the use count is NOT actually incremented despite the contract above — confirm whether callers rely on the REF itself keeping the chunk alive (chunks are only recycled via finalization).
};
WritePages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, tid: Camelot.tidT, to: INT, nPages: INT, from: LONG POINTER] ~ {
  -- Top level transactions only. Copies nPages pages from the client buffer "from" into file pages starting at logical page "to", pinning and old/new-value logging each piece through Camelot before the in-memory copy.
  -- Fixes (review): restored "← FALSE"/"← MIN" garbled in the archived text, and advanced nowFrom by pageCount pages per iteration — the original advanced it by only one page although pageCount pages had been copied, so multi-page pieces re-copied the same source page.
  nowFrom: LONG POINTER ← from; -- next source address in the client buffer
  nowTo: INT ← to; -- next file page to write
  nPagesLeft: INT ← nPages;
  vMPageSet: YggBuffMan.VMPageSet ← NIL;
  IF ~YggTransaction.IsTopLevel[tid] THEN ERROR TopLevelTransactionOnly;
  WHILE nPagesLeft > 0 DO
    didSomething: BOOL ← FALSE;
    vMPageSet ← ReadPages[fileHandle: fileHandle, tid: tid, pageRun: [firstPage: nowTo, count: nPagesLeft]];
    FOR vmps: YggBuffMan.VMPageSet ← vMPageSet, vmps.rest UNTIL vmps = NIL DO
      IF nowTo >= vmps.first.pageRun.firstPage AND nowTo <= vmps.first.pageRun.firstPage + vmps.first.pageRun.pages - 1 THEN {
        kernCode: Mach.kernReturnT;
        startOffset: INT ← nowTo - vmps.first.pageRun.firstPage;
        pageCount: INT ← MIN[vmps.first.pageRun.pages - startOffset, nPagesLeft];
        wordCount: INT ← pageCount * wordsPerPage;
        byteCount: INT ← pageCount * bytesPerPage;
        optr: Camelot.optrT;
        IF pageCount <= 0 THEN ERROR;
        didSomething ← TRUE;
        optr ← [segmentId: vmps.first.pageRun.segmentId, highOffset: 0, lowOffset: (vmps.first.pageRun.segmentPage+startOffset)* bytesPerPage];
        -- pin the pages, then log old and new values for recoverability, then do the actual copy
        kernCode ← Camelot.DSPinObject[dsPort: YggEnvironment.dsPort, tid: tid, optr: optr, size: byteCount, raiseSignal: TRUE];
        kernCode ← Camelot.DSLogOldValueNewValue [dsPort: YggEnvironment.dsPort, tid: tid, optr: optr, oldValue: LOOPHOLE[vmps.first.buffer + startOffset*unitsPerPage], oldValueCnt: byteCount, newValue: LOOPHOLE[nowFrom], newValueCnt: byteCount, raiseSignal: TRUE];
        TRUSTED {PBasics.Move[dst: vmps.first.buffer + startOffset*unitsPerPage, src: nowFrom, nWords: wordCount];};
        nPagesLeft ← nPagesLeft - pageCount;
        nowFrom ← nowFrom + pageCount*unitsPerPage; -- advance past ALL pages just copied (was: + unitsPerPage)
        nowTo ← nowTo + pageCount;
        }
      ELSE LOOP;
      ENDLOOP;
    vMPageSet ← NIL; -- drop our references; finalization will recycle the chunks
    IF ~didSomething THEN ERROR; -- guard against an infinite loop if ReadPages returned nothing usable
    ENDLOOP;
  };
WriteBytes: PUBLIC PROC [optr: Camelot.optrT, value: LONG POINTER, valueCnt: CARD] ~ {
  -- Back door for recovery.
  -- This is called whenever a SR←RestoreObject or SR←RestoreBatch message is received (multiple calls for SR←RestoreBatch). These occur at server startup as well as during normal operations when a transaction aborts.
  -- Fix (review): the archived text read "newChunkmapInRun[...]" — the assignment arrow had been lost; restored to "newChunk ← mapInRun[...]".
  tid: Camelot.tidT; -- null tid
  newChunk: REF Chunk;
  firstPage: CARD;
  lastPage: CARD;
  pageOffset: CARD;
  [page: firstPage, pageOffset: pageOffset] ← MapOptrToPage[optr.highOffset, optr.lowOffset, 0];
  [page: lastPage] ← MapOptrToPage[optr.highOffset, optr.lowOffset, valueCnt-1];
  [] ← Camelot.DSPinObject[dsPort: YggEnvironment.dsPort, tid: tid, optr: optr, size: valueCnt, raiseSignal: TRUE];
  -- map the run privately (not entered in the chunk table), copy the bytes in, then unmap
  newChunk ← mapInRun[segmentId: optr.segmentId, segmentPage: firstPage, pages: lastPage - firstPage + 1, firstPage: 0, insertChunkInTable: FALSE];
  TRUSTED {[] ← PBasics.ByteBlt[to: [blockPointer: newChunk.startAddress, startIndex: pageOffset, stopIndexPlusOne: valueCnt+pageOffset], from: [blockPointer: value, startIndex: 0, stopIndexPlusOne: valueCnt]];};
  UnmapRun[newChunk];
  newChunk ← NIL;
  };
ReleaseVMPageSet: PUBLIC PROC [vMPageSet: YggBuffMan.VMPageSet, releaseState: YggBuffMan.ReleaseState, keep: BOOLEAN] ~ {
-- errors defined in this interface: none.
-- Indicates that the client is through with the given VMPageSet (decrements use count.) keep is a hint that the FilePageManager should try to keep these pages in its cache, as the client expects them to be reused shortly. The ReleaseState hint means: clean: client has not dirtied this page; writeBatched: will cause IO for some likely disk-contiguous pages to be bunched together; writeIndividual: the client is probably not dealing with disk-contiguous pages. The implementation is optimized for "normal" clients, not mixed modes on a file, etc. Other types of client usage and incorrect hints, such as specifying clean for a dirty page, will not result in data being lost, but are likely to degrade performance. We expect: sequential access clients to release pages with keep = FALSE and (clean or writeBatchedNoWait), random access clients to release pages with keep = TRUE and (clean or writeIndividualNoWait), the log to release pages with various options.
-- I don't think that this procedure has to do anything. The chunk will be recycled by finalization once the client drops its VMPageSet reference (see FinalizationProcess), so an explicit release is unnecessary in this implementation.
};
ForceOutVMPageSet: PUBLIC PROC [vMPageSet: YggBuffMan.VMPageSet] ~ {
  -- errors defined in this interface: none.
  -- Returns when all the dirty pages in the VMPageSet have been written to the disk. Does not alter use count. Each page run is preflushed through the Camelot data server.
  FOR vmps: YggBuffMan.VMPageSet ← vMPageSet, vmps.rest UNTIL vmps = NIL DO
    runOptr: Camelot.optrT ← [segmentId: vmps.first.pageRun.segmentId, highOffset: 0, lowOffset: vmps.first.pageRun.segmentPage*bytesPerPage];
    Camelot.DSQPreflush[dsPort: YggEnvironment.dsPort, optr: runOptr, sizeInBytes: vmps.first.pageRun.pages*bytesPerPage];
    ENDLOOP;
  };
ForceOutFile: PUBLIC PROC [fileHandle: YggInternal.FileHandle] ~ {
-- errors defined in this interface: NoSuchFile, NoSuchVolume, VolumeWentOffline.
-- Returns when all the dirty pages in this file have been written to the disk.
-- NOTE(review): empty body — per-file force-out is not implemented; confirm callers use ForceOutVMPageSet instead.
};
ForceOutEverything: PUBLIC PROC ~ {
-- errors defined in this interface: NoSuchVolume, VolumeWentOffline.
-- Returns when all the dirty pages under control of the file page manager have been written to the disk.
-- NOTE(review): empty body — not implemented in this version.
};
InitializeFilePageMgr: PUBLIC PROC [seqDescList: Camelot.ListOfSegmentDesc, seqPortList: Mach.ListOfPorts, firstPass: BOOL] RETURNS [firstTime : BOOL ] ~ {
  -- errors defined in this interface: none.
  -- Pairs each Camelot segment descriptor with its Mach port. On the first pass the pairs are recorded in the exported SegmentList; on the second pass a private list is built and handed to YggFileInternal.InitializeFile, whose result is returned in firstTime (firstTime is left defaulted on the first pass).
  -- Fix (review): removed the meaningless PUBLIC attribute on the local variable sList — PUBLIC applies only to module-level declarations.
  sdl: Camelot.ListOfSegmentDesc ← seqDescList;
  spl: Mach.ListOfPorts ← seqPortList;
  lastSL: LIST OF YggCamelotSegment.SegPort ← NIL;
  makeListItem: PROC RETURNS [r: LIST OF YggCamelotSegment.SegPort] ~ {
    -- Consume one descriptor and one port, producing a single-element list cell.
    r ← LIST[[sdl.first, spl.first]];
    sdl ← sdl.rest;
    spl ← spl.rest;
    };
  IF firstPass THEN {
    SegmentList ← makeListItem[];
    lastSL ← SegmentList;
    WHILE sdl # NIL DO
      lastSL.rest ← makeListItem[];
      lastSL ← lastSL.rest;
      ENDLOOP;
    IF sdl # NIL OR spl # NIL THEN ERROR; -- the two input lists must be the same length
    }
  ELSE {
    sList: LIST OF YggCamelotSegment.SegPort ← NIL;
    sList ← makeListItem[];
    lastSL ← sList;
    WHILE sdl # NIL DO
      lastSL.rest ← makeListItem[];
      lastSL ← lastSL.rest;
      ENDLOOP;
    IF sdl # NIL OR spl # NIL THEN ERROR; -- the two input lists must be the same length
    firstTime ← YggFileInternal.InitializeFile[sList];
    };
  };
FindSegmentPager: PROC [segmentId: Camelot.segmentIdT] RETURNS [pagingObject: Mach.pagingObjectT] ~ {
  -- Linear search of SegmentList for the paging port registered for this segment; ERROR if the segment is unknown.
  FOR sl: LIST OF YggCamelotSegment.SegPort ← SegmentList, sl.rest UNTIL sl = NIL DO
    IF sl.first.segment.segmentId = segmentId THEN RETURN[sl.first.port];
    ENDLOOP;
  ERROR;
  };
RestoreCacheToCleanState: PUBLIC PROC ~ {
-- errors defined in this interface: none.
-- for debugging. Call ForceOutEverything first.
-- NOTE(review): empty body — cache reset is not implemented.
};
-- Internal procedures
savedScratchRefChunk: REF Chunk; -- one-deep cache of a scratch Chunk used by ReadPages for table lookups; accessed only from ENTRY procs
getScratchRefChunk: ENTRY PROC RETURNS [scratchRefChunk: REF Chunk] ~ {
  -- Hand out the cached scratch chunk if one is saved; otherwise allocate a fresh one. Pairs with returnScratchRefChunk.
  IF savedScratchRefChunk = NIL THEN RETURN[NEW[Chunk]];
  scratchRefChunk ← savedScratchRefChunk;
  savedScratchRefChunk ← NIL;
  };
returnScratchRefChunk: ENTRY PROC [scratchRefChunk: REF Chunk] ~ {
-- Return the scratch chunk to the one-deep cache; any previously saved chunk is simply overwritten (and garbage collected).
savedScratchRefChunk ← scratchRefChunk;
};
mapInRun: PROC [segmentId: Camelot.segmentIdT, segmentPage: CARD, pages: INT, firstPage: INT, insertChunkInTable: BOOL ← TRUE, zeroFill: BOOL ← FALSE] RETURNS [newChunk: REF Chunk] ~ {
  -- Map in a run from a segment. Normally, insertChunkInTable is TRUE and the Chunk is entered into the table for mapped chunks. During recovery, mapInRun is called with insertChunkInTable as FALSE; the chunk is not entered into the table and UnmapRun must be called by the client when the run is no longer needed.
  -- Returns NIL (so the caller retries the lookup) if another thread concurrently inserted an intersecting chunk. The chunk is inserted invalid, mapped outside the monitor, then marked valid and broadcast (see insureChunkValid in ReadPages).
  -- Fix (review): restored the "← TRUE"/"← FALSE" parameter defaults, garbled to "BOOLTRUE"/"BOOLFALSE" in the archived text.
  insertChunkIfStillPossible: ENTRY PROC RETURNS [notPossible: BOOL ← FALSE] ~ {
    -- Insert the (not-yet-valid) chunk into the table under the monitor lock, unless an intersecting chunk already appeared.
    data: RedBlackTree.UserData;
    data ← RedBlackTree.Lookup[intervalToChunkMap, newChunk];
    IF data = NIL THEN {
      RedBlackTree.Insert[intervalToChunkMap, newChunk, newChunk];
      }
    ELSE {
      -- some other thread has concurrently inserted into the tree. Try again.
      newChunk ← NIL;
      RETURN [TRUE];
      };
    };
  setValid: ENTRY PROC ~ {
    -- Mark the chunk usable and wake any readers blocked in insureChunkValid.
    newChunk.valid ← TRUE;
    BROADCAST myCondition;
    };
  mappedAddress: Mach.vmAddressT;
  kernCode: Mach.kernReturnT;
  pagingObject: Mach.pagingObjectT;
  newChunk ← NEW[Chunk ← [run: [segmentId: segmentId, segmentPage: segmentPage, firstPage: 0, pages: MIN[pages, maxChunkSize], leader: FALSE]]]; -- runs longer than maxChunkSize are mapped piecemeal
  IF insertChunkInTable AND insertChunkIfStillPossible[] THEN RETURN; -- returns newChunk = NIL: caller retries
  pagingObject ← FindSegmentPager[segmentId];
  IF zeroFill THEN Camelot.DSQZeroFill[
    dsPort: YggEnvironment.dsPort,
    optr: [segmentId: segmentId, highOffset: 0, lowOffset: segmentPage*bytesPerPage],
    sizeInBytes: newChunk.run.pages*bytesPerPage];
  [mappedAddress: mappedAddress, kernCode: kernCode] ← Mach.vmAllocateWithPager[targetTask: Mach.taskSelf[], address: 0, size: newChunk.run.pages*bytesPerPage, anywhere: TRUE, pagingObject: pagingObject, offset: newChunk.run.segmentPage*bytesPerPage, raiseSignal: TRUE];
  newChunk.startAddress ← LOOPHOLE[mappedAddress];
  newChunk.run.firstPage ← firstPage;
  setValid[];
  IF insertChunkInTable THEN SafeStorage.EnableFinalization[newChunk]; -- table-resident chunks are recycled via the finalization queue
  };
UnmapRun: PROC [chunk: REF Chunk] ~ {
  -- Remove the chunk's pages from our address space. The kernel return code is captured but deliberately ignored (raiseSignal is TRUE, so failures signal instead).
  status: Mach.kernReturnT;
  status ← Mach.vmDeallocate[targetTask: Mach.taskSelf[], address: LOOPHOLE[chunk.startAddress], size: chunk.run.pages*bytesPerPage, raiseSignal: TRUE];
  };
markInterestInChunk: ENTRY PROC [chunk: REF Chunk] ~ {
-- Bump the chunk's use count and touch time under the monitor lock, so the LRU trimmer sees it as recently used.
chunk.useCount ← chunk.useCount + 1;
chunk.lastTouchTime ← BasicTime.Now[];
};
-- Internal red black procs
-- A Red Black tree is used to store and find chunks. The tree is indexed by the segment and page run of the chunk. No chunks intersect. To find an interval, it is looked up in the tree. Any intersecting chunk is "equal" to the interval. The tree is then traversed backwards to find the first intersecting interval.
LookupFirst: ENTRY RedBlackTree.LookupProc = {
-- PROC [self: Table, lookupKey: Key] RETURNS [data: UserData];
-- Returns the LOWEST chunk intersecting lookupKey (or NIL): Lookup finds an arbitrary intersecting chunk, then we walk to successively smaller neighbors while they still compare "equal" (i.e. still intersect the key).
data ← RedBlackTree.Lookup[self, lookupKey];
IF data # NIL THEN DO
prevData: RedBlackTree.UserData;
prevData ← RedBlackTree.LookupNextSmaller[self, data]; -- passes the chunk itself as the key; valid because a chunk is its own key (see GetKeyProc)
IF prevData = NIL THEN EXIT;
IF CompareProc[lookupKey, prevData] # equal THEN EXIT;
data ← prevData;
ENDLOOP;
};
GetKeyProc: RedBlackTree.GetKey = {
  -- PROC [data: UserData] RETURNS [Key]
  -- A chunk serves as its own lookup key; just narrow and return it.
  RETURN[NARROW[data, REF Chunk]];
  };
CompareProc: RedBlackTree.Compare = {
-- PROC [k: Key, data: UserData] RETURNS [Basics.Comparison]
-- Orders chunks by [segmentId, page interval]. Overlapping intervals in the same segment compare "equal"; this is what makes Lookup find any intersecting chunk (see the tree comment above LookupFirst).
dataChunk: REF Chunk ← NARROW[data];
keyChunk: REF Chunk ← NARROW[k];
SELECT keyChunk.run.segmentId FROM
> dataChunk.run.segmentId => RETURN [greater];
< dataChunk.run.segmentId => RETURN [less];
ENDCASE => {
-- same segment: disjoint-above => greater, disjoint-below => less, otherwise overlap => equal. CARD[...pages] keeps the end-of-interval arithmetic unsigned.
SELECT TRUE FROM
keyChunk.run.segmentPage >= dataChunk.run.segmentPage + CARD[dataChunk.run.pages] => RETURN [greater];
keyChunk.run.segmentPage + CARD[keyChunk.run.pages] <= dataChunk.run.segmentPage => RETURN [less];
ENDCASE => RETURN [equal];
};
};
-- Map optr to pages
bytesPerPage64: ConstArith.Const ← [sign: positive, low: bytesPerPage, high: 0]; -- bytesPerPage as an extended-precision constant, the divisor in MapOptrToPage
MapOptrToPage: PROC [highOffset: CARD16, lowOffset: CARD32, delta: CARD] RETURNS [page: CARD, pageOffset: CARD] = TRUSTED {
  -- Convert an optr byte offset (high/low pair) plus a byte delta into a segment page number and the byte offset within that page. Uses extended-precision ConstArith so the sum cannot overflow.
  total, quotient, remainder: ConstArith.Const;
  total ← ConstArith.Add[[sign: positive, low: lowOffset, high: highOffset], [sign: positive, low: delta, high: 0]];
  [quotient, remainder] ← ConstArith.DivMod[total, bytesPerPage64];
  RETURN[ConstArith.ToCard[quotient], ConstArith.ToCard[remainder]];
  };
-- Finalization
FinalizationProcess: PROC = {
-- Forked at Init. Dequeues chunks whose last client reference has dropped (via MyQueue) and appends them to the tail of the LRU list; the chunk stays in intervalToChunkMap until TrimLruList evicts it, so it can still be revived by ReadPages/grabChunk.
DO
innerFinalizationProcess: ENTRY PROC [] = {
-- Append chunk to the LRU list tail under the monitor lock.
IF lruListTop = NIL THEN {
lruListTop ← chunk;
}
ELSE {
lruListTail.lruListNext ← chunk;
chunk.lruListPrev ← lruListTail;
};
lruListTail ← chunk;
};
chunk: REF Chunk ← NIL;
chunk ← NARROW[SafeStorage.FQNext[MyQueue]]; -- blocks until a chunk is finalized
innerFinalizationProcess[];
chunk ← NIL; -- drop our reference promptly. NOTE(review): finalization is not re-enabled here — confirm that a revived chunk is re-finalizable, or interest must be re-registered when it is reused
ENDLOOP;
};
-- Trim lru list
CacheTrimProcess: PROC = {
  -- Forked at Init. Wakes every 213 ms; whenever the chunk table has grown past desiredNumberOfChunks, asks TrimLruList to evict the excess.
  pauseTicks: Process.Ticks ← Process.MsecToTicks[213];
  DO
    excess: INT;
    Process.Pause[pauseTicks];
    excess ← RedBlackTree.Size[intervalToChunkMap] - desiredNumberOfChunks;
    IF excess > 0 THEN TrimLruList[excess];
    ENDLOOP;
  };
NumberOfIntervalsTrimmed: INT ← 0; -- statistics: total chunks evicted by TrimLruList
LastFewChunksFreed: ARRAY [0..32) OF ChunksFreedItem; -- debugging ring buffer of the most recently evicted chunks
ChunksFreedItem: TYPE = RECORD [
  run: YggDIDMap.Run,
  startAddress: LONG POINTER ← NIL -- fix (review): restored "← NIL", garbled to "POINTERNIL" in the archived text
  ];
NextFreePtr: INT ← 0; -- next slot to overwrite in LastFewChunksFreed
TrimLruList: PROC [entriesToTrim: INT] = {
  -- Evict up to entriesToTrim chunks from the head (least recently used end) of the LRU list: unlink each, delete it from intervalToChunkMap, and unmap its pages. Stops early when the list empties.
  -- Fixes (review): the ring-buffer entry recorded chunk.startAddress AFTER it had been set to NIL, so LastFewChunksFreed always logged NIL — now records the saved address; removed the doubled assignment "NextFreePtr ← NextFreePtr ← ..."; restored the garbled "LONG POINTER ← NIL" declaration.
  FOR entNo: INT IN [1..entriesToTrim] DO
    grabTopOffOfLRU: ENTRY PROC ~ {
      -- Under the monitor lock: detach the head chunk, invalidate its startAddress (so grabChunk retries), remove it from the table, release its VM, and record statistics.
      IF lruListTop # NIL THEN {
        chunk ← lruListTop;
        lruListTop ← lruListTop.lruListNext;
        IF lruListTop = NIL THEN lruListTail ← NIL ELSE lruListTop.lruListPrev ← NIL;
        chunk.lruListNext ← NIL;
        chunk.lruListPrev ← NIL;
        address ← chunk.startAddress;
        chunk.startAddress ← NIL;
        IF RedBlackTree.Delete[intervalToChunkMap, chunk] = NIL THEN ERROR; -- every LRU chunk must still be in the table
        kernCode ← Mach.vmDeallocate[targetTask: Mach.taskSelf[], address: LOOPHOLE[address], size: chunk.run.pages*bytesPerPage, raiseSignal: TRUE];
        NumberOfIntervalsTrimmed ← NumberOfIntervalsTrimmed + 1;
        LastFewChunksFreed[NextFreePtr] ← [chunk.run, address]; -- was chunk.startAddress, already NIL at this point
        NextFreePtr ← NextFreePtr + 1;
        IF NextFreePtr >= 32 THEN NextFreePtr ← 0;
        };
      };
    kernCode: Mach.kernReturnT;
    chunk: REF Chunk ← NIL;
    address: LONG POINTER ← NIL;
    grabTopOffOfLRU[];
    IF chunk = NIL THEN EXIT; -- LRU list is empty; nothing left to trim
    ENDLOOP;
  };
-- Initialization
Init: PROC ~ {
-- Module initialization: create the chunk table, register finalization for Chunk (ignoring failure if finalization was already established, e.g. on re-run), and fork the two daemon processes.
intervalToChunkMap ← RedBlackTree.Create[getKey: GetKeyProc, compare: CompareProc];
SafeStorage.EstablishFinalization[type: CODE[Chunk], npr: 1, fq: MyQueue ! SafeStorage.CantEstablishFinalization => CONTINUE;];
TRUSTED {Process.Detach[FORK FinalizationProcess[] ];};
TRUSTED {Process.Detach[FORK CacheTrimProcess[] ];};
};
-- Start the module when it is loaded.
Init[];
END.