YggFilePageMgrMainImpl.mesa
Copyright © 1985, 1988 by Xerox Corporation. All rights reserved.
Last edited by
Kolling on February 16, 1984 12:02:55 pm PST
MBrown on January 31, 1984 9:01:07 pm PST
Carl Hauser, February 24, 1987 4:46:43 pm PST
Bob Hagmann May 13, 1988 5:04:31 pm PDT
DIRECTORY
Basics USING[CompareInt, CardMod],
BasicTime USING[GetClockPulses, Pulses, PulsesToMicroseconds],
YggHostFS USING[Delete, Error, HostFile, Info, LabeledOpen, PageCount, RC, SetSize],
YggDummyProcess USING[Detach, GetCurrent, PauseMsec],
RedBlackTree,
Rope,
VM USING[AddressForPageNumber, Allocate, Interval, PageCount, PageNumber],
YggDummyVM USING [MakeUnchanged, Pin, State],
YggEnvironment USING [DID, PageCount, PageNumber, PageRun, VolOrVolGroupID],
YggDIDMap USING[GetNext, Document, VerifyFilePageMgrHandle],
YggFile USING[FileHandleRep],
YggFileStream USING[CreateFileInDirectory],
YggFilePageMgr
USING[DirtyNoWaitReleaseState, DirtyWaitReleaseState, ReleaseState,
VMPageSet],
YggFilePageMgrIO USING[DoIO, GetNext, IORequest, IOType, LogError, RegisterRequest],
YggFilePageMgrLru USING[CheckCacheInCleanState, GetOtherChunkFromLruList, GetOurChunkFromLruList, InitializeLruLists, LruListPlace, PutMappedChunkOnLruList, PutUnmappedChunkOnLruList, RelinkChunkAsLruOnLruList, SweepItem, UsingTooMuchOfCache, WaitToSweep],
YggFilePageMgrPrivateChunk USING[Chunk, ChunkFilePageCount, ChunkVMPageCount, ChunkType, ClientChunkType, ListChunkType, RefChunk],
YggFilePageMgrPrivateFile USING[FPMFileObject, VolumeState],
YggInternal
USING[Document, FileHandle];
YggFilePageMgrMainImpl: CEDAR MONITOR
LOCKS fpmFileHandle USING fpmFileHandle: FPMFileHandle
IMPORTS Basics, RedBlackTree, YggHostFS, YggDIDMap, YggFileStream, YggFilePageMgrIO, YggFilePageMgrLru, YggDummyProcess, YggDummyVM, VM
EXPORTS YggInternal, YggFilePageMgr, YggFilePageMgrPrivateChunk
SHARES YggFilePageMgr =
BEGIN
VolOrVolGroupID: TYPE = YggEnvironment.VolOrVolGroupID;
FPMFileHandle: TYPE = REF FPMFileObject;
FPMFileObject: PUBLIC TYPE = YggFilePageMgrPrivateFile.FPMFileObject;
FileHandle: TYPE ~ PUBLIC YggInternal.FileHandle;
FileHandleRep: TYPE ~ PUBLIC YggFile.FileHandleRep;
RefChunk: TYPE = REF Chunk;
Chunk: PUBLIC TYPE = YggFilePageMgrPrivateChunk.Chunk;
PageKey: TYPE = REF YggEnvironment.PageNumber;
Monitors can only be nested in this order:
FPMFileObject monitor
RedBlackTreeImpl
YggFilePageMgrLruImpl
YggDIDMap's FileObject monitor
No process ever holds more than one FPMFileObject monitor.
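Acquiring monitors only in this order (and never more than one FPMFileObject monitor at a time) rules out cyclic waiting among these locks, which is what makes the nesting deadlock-free.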
InsufficientSpaceOnVolume: PUBLIC -- ABSTRACTION -- ERROR = CODE;
NoSuchFile: PUBLIC -- CALLING -- ERROR = CODE;
NoSuchVolume: PUBLIC -- CALLING -- ERROR = CODE;
PageRunArgIllegal: PUBLIC -- CALLING -- ERROR = CODE;
PageRunExtendsPastEof: PUBLIC -- CALLING -- ERROR = CODE;
SizeArgIllegal: PUBLIC -- CALLING -- ERROR = CODE;
VolumeTooFragmented: PUBLIC -- ABSTRACTION -- ERROR = CODE;
VolumeWentOffline: PUBLIC -- ABSTRACTION -- ERROR = CODE;
VolumeError: ARRAY YggFilePageMgrPrivateFile.VolumeState OF ERROR ←
[online: InternalFilePageMgrLogicError, wentOffline: VolumeWentOffline, nonExist: NoSuchVolume];
CurrentEpoch: NAT ← 0;
ExpectedSecondsToNextCheckpoint: NAT ← 300;
SweeperControl: ARRAY ListChunkType OF SweeperControlRec;
SweeperControlRec: TYPE = RECORD[newCheckpoint: BOOL ← TRUE];
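CheckPointOccuring sets newCheckpoint for every list chunk type; each Sweeper tests it and abandons the sweep list it is working on as soon as a new checkpoint begins.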
InternalFilePageMgrLogicError: PUBLIC -- PROGRAMMING -- ERROR = CODE;
Okay: ERROR = CODE; -- for our own use.
ListChunkType: TYPE = YggFilePageMgrPrivateChunk.ListChunkType;
ReadDone: CONDITION;
WriteDone: CONDITION;
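ReadDone and WriteDone are broadcast when a chunk leaves the readInProgress or writeInProgress state. Both conditions are shared by all chunks, so a waiter must recheck its own chunk's state after each wakeup; all the WAIT loops below do so.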
ReadPages: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun]
RETURNS [vMPageSet: YggFilePageMgr.VMPageSet] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
vMPageSet ← BasicGetPages[fileHandle, pageRun, read, normal].vMPageSet;
END;
ReadLogPages: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun]
RETURNS [vMPageSet: YggFilePageMgr.VMPageSet] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
vMPageSet ← BasicGetPages[fileHandle, pageRun, read, log].vMPageSet;
END;
UsePages: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun]
RETURNS [vMPageSet: YggFilePageMgr.VMPageSet] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
vMPageSet ← BasicGetPages[fileHandle, pageRun, use, normal].vMPageSet;
END;
UseLogPages: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun]
RETURNS [vMPageSet: YggFilePageMgr.VMPageSet] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
vMPageSet ← BasicGetPages[fileHandle, pageRun, use, log].vMPageSet;
END;
Notifies the file page manager that the indicated pages are likely to be read soon.
ReadAheadPages: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, VolumeWentOffline.
BasicReadAhead[fileHandle, pageRun, normal];
END;
ReadAheadLogPages: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, VolumeWentOffline.
BasicReadAhead[fileHandle, pageRun, log];
END;
Bumps the share count of the Chunk in the VMPageSet.
ShareVMPageSet: PUBLIC PROCEDURE[vMPageSet: YggFilePageMgr.VMPageSet] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk ← vMPageSet.refChunk; -- keep the compiler happy.
chunk can't change mapping since useCount >= 1.
MonitoredShareVMPageSet[GetFilePageMgrHandle[refChunk.doc, TRUE], refChunk];
END;
ReleaseVMPageSet: PUBLIC PROCEDURE[vMPageSet: YggFilePageMgr.VMPageSet,
releaseState: YggFilePageMgr.ReleaseState, keep: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk ← vMPageSet.refChunk;
fpmFileHandle: FPMFileHandle ← GetFilePageMgrHandle[refChunk.doc, TRUE];
writeNeeded: BOOLEAN ← FALSE;
IF releaseState IN YggFilePageMgr.DirtyWaitReleaseState
THEN
BEGIN
IF (writeNeeded ← MonitoredWaitForWriteToCompleteThenMaybeSetWIP[fpmFileHandle, refChunk])
THEN CleanAndWriteChunk[fpmFileHandle, refChunk];
END;
MonitoredMainReleaseVMPageSet[fpmFileHandle, refChunk, releaseState, keep, writeNeeded];
END;
ForceOutVMPageSet: PUBLIC PROCEDURE[vMPageSet: YggFilePageMgr.VMPageSet] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk ← vMPageSet.refChunk; -- keep compiler happy.
fpmFileHandle: FPMFileHandle ← GetFilePageMgrHandle[refChunk.doc, TRUE];
IF MonitoredWaitForWriteToCompleteThenMaybeSetWIP[fpmFileHandle, refChunk]
THEN
BEGIN
CleanAndWriteChunk[fpmFileHandle, refChunk];
MonitoredSetChunkValidAfterIO[fpmFileHandle, refChunk, writeCompleted];
END;
END;
ForceOutFile: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle] =
BEGIN -- non system fatal errors: NoSuchFile, NoSuchVolume, VolumeWentOffline.
fpmFileHandle: FPMFileHandle ← GetFilePageMgrHandleFromFileHandle[fileHandle, FALSE];
errors: ERROR ← Okay;
listOfValidAndDirty: LIST OF RefChunk;
listOfWIPAndClean: LIST OF ChunkAndPage;
[errors, listOfValidAndDirty, listOfWIPAndClean] ←
MonitoredForceOutFileSortChunks[fpmFileHandle];
IF errors # Okay THEN ERROR errors;
DoSequentialIO[fpmFileHandle, write, listOfValidAndDirty, 0];
MonitoredForceOutFileSetValidAndWait[fpmFileHandle, listOfValidAndDirty,
listOfWIPAndClean];
END;
CheckPointOccuring: PUBLIC PROC [checkPointEpoch: NAT ← 1, expectedSecondsToNextCheckpoint: NAT]
RETURNS [oldestEpochWithDirtyUnwrittenPages: NAT ← 1] = {
CurrentEpoch ← checkPointEpoch;
ExpectedSecondsToNextCheckpoint ← MAX [5, expectedSecondsToNextCheckpoint];
FOR chunkType: ListChunkType
IN ListChunkType
DO
SweeperControl[chunkType].newCheckpoint ← TRUE;
ENDLOOP;
};
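Note that CheckPointOccuring currently returns oldestEpochWithDirtyUnwrittenPages as its default value of 1; the body does not compute it.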
ProcessControlList: TYPE = RECORD[s: SEQUENCE size: NAT OF ProcessControlItem];
ProcessControlItem: TYPE = RECORD[
processID: PROCESS,
processRunning: BOOL ← FALSE];
Sweeper: PUBLIC PROCEDURE[chunkType: YggFilePageMgrPrivateChunk.ChunkType] = {
needToHurry: BOOLEAN ← FALSE;
hurryInARowCount: INT ← 0;
sweepList: LIST OF YggFilePageMgrLru.SweepItem;
DO
listSize: INT ← 0;
millisecondsBetweenWrites: INT ← 0;
processControlList: REF ProcessControlList;
Wait for a new list.
[needToHurry, sweepList] ← YggFilePageMgrLru.WaitToSweep[needToHurry, CurrentEpoch, chunkType];
hurryInARowCount ← IF needToHurry THEN hurryInARowCount + 1 ELSE 1;
IF needToHurry THEN processControlList ← NEW[ProcessControlList[hurryInARowCount]];
SweeperControl[chunkType].newCheckpoint ← FALSE;
FOR sl: LIST OF YggFilePageMgrLru.SweepItem ← sweepList, sl.rest UNTIL sl = NIL
DO
listSize ← listSize + 1;
ENDLOOP;
Compute how long we expect it to take between writes
millisecondsBetweenWrites ← (ExpectedSecondsToNextCheckpoint*1000)/MAX[1, listSize]; -- MAX guards against an empty sweep list.
UNTIL sweepList = NIL OR SweeperControl[chunkType].newCheckpoint
DO
fpmFileHandle: FPMFileHandle ← NIL;
listOfValidAndDirty: LIST OF RefChunk;
fpmFileHandle ← GetFilePageMgrHandle[sweepList.first.doc, FALSE];
Pick off all the entries for one file that are valid and dirty into listOfValidAndDirty.
[listOfValidAndDirty, sweepList] ← MonitoredSweeperSortChunks[fpmFileHandle, sweepList.first.doc, sweepList];
IF needToHurry
THEN {
Use multiple processes without any delays to get lots of writes done
DO
processControlIndex: INT ← -1;
processForked: PROCESS;
FOR index: NAT IN [0..hurryInARowCount)
DO
-- find a free slot in the (0-based) process control sequence.
IF NOT processControlList[index].processRunning THEN {processControlIndex ← index; EXIT};
ENDLOOP;
IF processControlIndex < 0 THEN LOOP; -- every slot is busy; try again.
processControlList[processControlIndex].processRunning ← TRUE;
processForked ← FORK ForkedSweepAFile[processControlList, processControlIndex, fpmFileHandle, listOfValidAndDirty, 0, chunkType, 30];
TRUSTED {YggDummyProcess.Detach[processForked];};
processControlList[processControlIndex].processID ← processForked;
EXIT; -- one helper process forked for this file.
ENDLOOP;
}
ELSE SweepAFile[fpmFileHandle, listOfValidAndDirty, millisecondsBetweenWrites, chunkType, 5];
ENDLOOP;
ENDLOOP;
};
ForkedSweepAFile: PROC [processControlList: REF ProcessControlList, processControlIndex: INT,
fpmFileHandle: FPMFileHandle, listOfValidAndDirty: LIST OF RefChunk, millisecondsBetweenWrites: INT,
chunkType: YggFilePageMgrPrivateChunk.ChunkType, maxWriteGroupSize: INT] = {
SweepAFile[fpmFileHandle, listOfValidAndDirty, millisecondsBetweenWrites, chunkType, maxWriteGroupSize];
processControlList[processControlIndex].processRunning ← FALSE;
};
SweepAFile: PROC [fpmFileHandle: FPMFileHandle, listOfValidAndDirty: LIST OF RefChunk,
millisecondsBetweenWrites: INT, chunkType: YggFilePageMgrPrivateChunk.ChunkType, maxWriteGroupSize: INT] = {
DO
sizeInHead: INT ← 0;
restOfListOfValidAndDirty: LIST OF RefChunk ← NIL;
IF listOfValidAndDirty = NIL OR SweeperControl[chunkType].newCheckpoint THEN EXIT; -- nothing left to write, or a new checkpoint has started.
FOR sl: LIST OF RefChunk ← listOfValidAndDirty, sl.rest
UNTIL sl = NIL OR SweeperControl[chunkType].newCheckpoint
DO
Pick off maxWriteGroupSize (normally 5) or so entries at the front of the list
sizeInHead ← sizeInHead + 1;
IF sizeInHead >= maxWriteGroupSize OR sl.rest = NIL
THEN {
startPulse, endPulse: BasicTime.Pulses;
millisecondsForWrites: INT ← 100;
Save the rest of the list in restOfListOfValidAndDirty.
restOfListOfValidAndDirty ← sl.rest;
sl.rest ← NIL;
Do all the writes in the (possibly) shortened list. This process is the controller for the writes, but it will use additional IOThread processes in YggFilePageMgrIOImpl (if needed).
startPulse ← BasicTime.GetClockPulses[];
DoSequentialIO[fpmFileHandle, write, listOfValidAndDirty, 0];
MonitoredSetListOfChunksValidAfterIO[fpmFileHandle, listOfValidAndDirty, writeCompleted];
endPulse ← BasicTime.GetClockPulses[];
If we are supposed to delay between writes, then do it now.
millisecondsForWrites ← BasicTime.PulsesToMicroseconds[endPulse-startPulse]/1000;
IF millisecondsForWrites >= 0 AND millisecondsForWrites < 1000
THEN {
millisecondsToDelay: INT ← (sizeInHead * millisecondsBetweenWrites) - millisecondsForWrites;
IF millisecondsToDelay > 0 THEN YggDummyProcess.PauseMsec[millisecondsToDelay];
};
listOfValidAndDirty ← restOfListOfValidAndDirty;
EXIT;
};
ENDLOOP;
ENDLOOP;
};
ForceOutEverything: PUBLIC PROCEDURE =
BEGIN -- non system fatal errors: NoSuchVolume, VolumeWentOffline.
NilProc: PROCEDURE RETURNS[FPMFileHandle] ~ { RETURN[NIL] };
FOR fileHandle: YggInternal.FileHandle ← YggDIDMap.GetNext[NIL], YggDIDMap.GetNext[fileHandle]
UNTIL fileHandle = NIL
DO IF YggDIDMap.VerifyFilePageMgrHandle[fileHandle, NilProc] # NIL THEN ForceOutFile[fileHandle ! NoSuchFile => CONTINUE];
ENDLOOP;
END;
This is a routine used for debugging. On return it only guarantees that the cache was clean at some point during its processing. The caller must not be doing things like concurrent maps, etc. Call ForceOutEverything first so that no chunks have writeInProgress set.
RestoreCacheToCleanState: PUBLIC PROCEDURE =
BEGIN
-- non system fatal errors: none.
FOR doc: YggInternal.Document ← YggDIDMap.GetNext[NIL], YggDIDMap.GetNext[doc]
UNTIL doc = NIL DO
errors: ERROR ← MonitoredUnmapFile[GetFilePageMgrHandle[doc, FALSE]];
IF ((errors # Okay) AND (errors # NoSuchFile)) THEN ERROR errors;
ENDLOOP;
IF (NOT YggFilePageMgrLru.CheckCacheInCleanState[])
THEN ERROR InternalFilePageMgrLogicError; -- maybe not, maybe the client did a number on us.
END;
MyPageRun: TYPE = RECORD[firstPage: YggEnvironment.PageNumber, count: NAT, chunkStartFilePage: YggEnvironment.PageNumber];
ReadReadAheadOrUse: TYPE = {read, readAhead, use};
The parameters ReadReadAheadOrUse and ChunkType are used by BasicGetPages as follows:
ChunkType:
(a) each chunk type may have a different size, which affects the conversion of the client's page number, etc. to the chunk page number, etc.
(b) tells which lru list to use when getting a new chunk.
ReadReadAheadOrUse:
(a) read may do a read; use doesn't have to read unless the chunk is partial; readAhead's caller will do the reads in a batch.
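For example, a use of a whole chunk never reads: the pages are simply marked clean and the chunk made valid, since the client is expected to overwrite them. A read whose page run is already covered by a mapped, valid chunk requires no IO at all.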
BasicGetPages: PROCEDURE[fileHandle: FileHandle, pageRun: YggEnvironment.PageRun,
readReadAheadOrUse: ReadReadAheadOrUse, chunkType: YggFilePageMgrPrivateChunk.ClientChunkType]
RETURNS [vMPageSet: YggFilePageMgr.VMPageSet, nFilePagesToRead: YggEnvironment.PageCount] =
-- the peculiar structure is to avoid monitor conflicts: see discussion below in the DO loop.
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
fpmFileHandle: FPMFileHandle ← GetFilePageMgrHandleFromFileHandle[fileHandle, FALSE];
myPageRun: MyPageRun;
otherRefChunk: RefChunk ← NIL;
errors: ERROR;
done: BOOLEAN;
IF ((pageRun.firstPage < 0) OR (pageRun.count = 0))
THEN ERROR PageRunArgIllegal;
myPageRun ← [firstPage: pageRun.firstPage, count: pageRun.count,
chunkStartFilePage: ChunkStartFilePage[pageRun.firstPage, chunkType]];
DO
This is the "peculiar structure" referred to above.
This loop is executed at most twice. It might be better expressed with the structure:
[...] ← MonitoredFindOldOrGiveNewChunk[...]; -- see if it's already mapped
IF ~done THEN { -- if not successful
-- get a new chunk
otherRefChunk ← MonitoredOtherGetNewChunk[...];
IF otherRefChunk=NIL THEN ERROR InternalFilePageMgrLogicError;
-- see if it's already mapped; if not use the otherRefChunk
[...] ← MonitoredFindOldOrGiveNewChunk[...otherRefChunk...];
};
-- code from the IF done below
The idea is to avoid the call to MonitoredOtherGetNewChunk when the desired chunk is already mapped. This is important since MonitoredOtherGetNewChunk may have to do IO to acquire an empty Chunk. Why, you may ask, does not MonitoredFindOldOrGiveNewChunk acquire a chunk itself when the desired chunk is not already mapped? Ah, Monitor Locks. MonitoredFindOldOrGiveNewChunk locks the object monitor for its first argument. MonitoredOtherGetNewChunk must lock the object monitor for the file currently containing the Chunk it returns. We must therefore not call MonitoredOtherGetNewChunk from MonitoredFindOldOrGiveNewChunk.
[errors, done, nFilePagesToRead, vMPageSet] ← MonitoredFindOldOrGiveNewChunk[fpmFileHandle, fileHandle, otherRefChunk,
myPageRun, readReadAheadOrUse, chunkType];
IF errors # Okay THEN ERROR errors;
IF done
THEN
BEGIN
IF ((nFilePagesToRead # 0) AND (readReadAheadOrUse # readAhead))
THEN
BEGIN
The vm under the chunk needs to be filled.
refChunk: RefChunk ← vMPageSet.refChunk;
YggFilePageMgrIO.DoIO[read, fpmFileHandle.lowerHandle,
[[refChunk.startFilePageNumber], nFilePagesToRead,
VMAddressForPageNumber[refChunk.startVMPageNumber]]];
Turn off modified bits so we can tell later whether the data in the vm has changed.
MakeAllPagesInChunkClean[refChunk];
Change chunk state so other client processes can see that it is valid.
MonitoredSetChunkValidAfterIO[fpmFileHandle, vMPageSet.refChunk,
readCompleted];
END;
RETURN;
END;
IF otherRefChunk = NIL
THEN otherRefChunk ← MonitoredOtherGetNewChunk[chunkType]
ELSE ERROR InternalFilePageMgrLogicError;
ENDLOOP;
END;
try to find a Chunk belonging to fpmFileHandle and covering the requested myPageRun. If one is found then it is unique; return information about it. Otherwise, if otherRefChunk is non-NIL set it up to cover the requested myPageRun. Otherwise return with done=FALSE. Increments the useCount of the appropriate Chunk before returning.
MonitoredFindOldOrGiveNewChunk: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
fileHandle: YggInternal.FileHandle, otherRefChunk: RefChunk, myPageRun: MyPageRun,
readReadAheadOrUse: ReadReadAheadOrUse, chunkType: YggFilePageMgrPrivateChunk.ClientChunkType]
RETURNS [errors: ERROR, done: BOOLEAN, nFilePagesToRead: YggEnvironment.PageCount, vMPageSet: YggFilePageMgr.VMPageSet] =
-- values of errors are {NoSuchFile, NoSuchVolume, PageRunExtendsPastEof, VolumeWentOffline}
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk;
new, wholeChunk: BOOLEAN;
chunkTrueSize: YggEnvironment.PageCount;
[errors, refChunk] ← GetMappedChunk[fpmFileHandle, myPageRun, readReadAheadOrUse];
IF errors # Okay
THEN
BEGIN
Make otherRefChunk available for somebody else.
IF otherRefChunk # NIL THEN YggFilePageMgrLru.PutUnmappedChunkOnLruList[otherRefChunk];
RETURN;
END;
new ← (refChunk = NIL);
SELECT TRUE FROM
new =>
IF otherRefChunk = NIL
THEN RETURN[Okay, FALSE, 0, [NIL, [0, 0], NIL]]
ELSE
BEGIN
refChunk ← otherRefChunk;
MapChunk[fpmFileHandle, GetDocumentFromFileHandle[fileHandle], myPageRun.chunkStartFilePage, refChunk];
END;
NOT new =>
This is what happens when the desired chunk was not found the first time through and then was found the second time. Give back the acquired Chunk.
IF otherRefChunk # NIL
THEN YggFilePageMgrLru.PutUnmappedChunkOnLruList[otherRefChunk];
ENDCASE;
done ← TRUE;
refChunk.useCount ← refChunk.useCount + 1;
nFilePagesToRead ← 0;
[vMPageSet, wholeChunk, chunkTrueSize] ← SetUpVmPageSet[refChunk,
myPageRun.firstPage, myPageRun.count, fpmFileHandle.fileDataSize];
SELECT TRUE FROM
new =>
IF ((readReadAheadOrUse = use) AND wholeChunk)
THEN
BEGIN
MakeAllPagesInChunkClean[refChunk];
refChunk.state ← valid;
END
ELSE
BEGIN
refChunk.state ← readInProgress;
nFilePagesToRead ← chunkTrueSize;
END;
NOT new =>
SELECT readReadAheadOrUse FROM
read, use =>
DO
IF refChunk.state = readInProgress
THEN WAIT ReadDone
ELSE EXIT;
ENDLOOP;
ENDCASE;
ENDCASE;
END;
Finds the Chunk covering myPageRun, if it exists. If it exists and is on an lru list it is taken off the list. Returns NIL if the requested chunk doesn't exist.
GetMappedChunk: INTERNAL PROCEDURE[fpmFileHandle: FPMFileHandle, myPageRun: MyPageRun,
readReadAheadOrUse: ReadReadAheadOrUse]
RETURNS [errors: ERROR, refChunk: RefChunk] =
-- values of errors are {NoSuchFile, NoSuchVolume, PageRunExtendsPastEof, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
IF (NOT fpmFileHandle.exists) THEN RETURN[NoSuchFile, NIL];
IF myPageRun.firstPage + myPageRun.count > fpmFileHandle.fileDataSize
THEN RETURN[PageRunExtendsPastEof, NIL];
IF ((refChunk ← RBTLookup[fpmFileHandle, myPageRun.chunkStartFilePage]) # NIL)
AND (refChunk.useCount = 0)
THEN YggFilePageMgrLru.GetOurChunkFromLruList[refChunk, FALSE];
errors ← Okay;
END;
on entry state and pages are already set to undefined; inserts into chunkTable, sets doc and startFilePageNumber.
MapChunk: INTERNAL PROCEDURE[fpmFileHandle: FPMFileHandle, doc: YggInternal.Document,
startChunkFilePage: YggEnvironment.PageNumber, refChunk: RefChunk] =
BEGIN
-- non system fatal errors: none.
RBTInsert[fpmFileHandle, refChunk, startChunkFilePage];
refChunk.doc ← doc;
refChunk.startFilePageNumber ← startChunkFilePage;
fpmFileHandle.nMappedChunks ← fpmFileHandle.nMappedChunks + 1;
END;
io is not in progress on entry; removes from chunkTable, clears doc, handles defWritePending and nDefWriteChunks.
UnmapChunk: INTERNAL PROCEDURE[fpmFileHandle: FPMFileHandle, refChunk: RefChunk] =
BEGIN
-- non system fatal errors: none.
IF RBTDelete[fpmFileHandle, refChunk.startFilePageNumber] = NIL
THEN ERROR InternalFilePageMgrLogicError;
IF refChunk.defWritePending
THEN
BEGIN
refChunk.defWritePending ← FALSE;
fpmFileHandle.nDefWriteChunks ← fpmFileHandle.nDefWriteChunks - 1;
END;
refChunk.doc ← NIL;
fpmFileHandle.nMappedChunks ← fpmFileHandle.nMappedChunks - 1;
refChunk.state ← undefined;
END;
SetUpVmPageSet: INTERNAL PROCEDURE[refChunk: RefChunk, clientStartFilePage: YggEnvironment.PageNumber,
clientFilePageCount: NAT, fileDataSize: YggEnvironment.PageCount]
RETURNS [vMPageSet: YggFilePageMgr.VMPageSet, wholeChunk: BOOLEAN, chunkTrueSize: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: none.
chunkEndFilePagePlus1: YggEnvironment.PageNumber;
clientEndFilePagePlus1: YggEnvironment.PageNumber;
chunkEndFilePagePlus1 ← MIN[refChunk.startFilePageNumber + YggFilePageMgrPrivateChunk.ChunkFilePageCount[refChunk.chunkType], fileDataSize];
clientEndFilePagePlus1 ← MIN[clientStartFilePage + clientFilePageCount, chunkEndFilePagePlus1];
vMPageSet.pages ← VMAddressForPageNumber[GetVMIntervalFromFileInterval[refChunk.startFilePageNumber, clientStartFilePage, 0, refChunk.startVMPageNumber].vmPageNumber];
vMPageSet.pageRun.firstPage ← clientStartFilePage;
vMPageSet.pageRun.count ← clientEndFilePagePlus1 - clientStartFilePage;
vMPageSet.refChunk ← refChunk;
chunkTrueSize ← chunkEndFilePagePlus1 - refChunk.startFilePageNumber;
wholeChunk ← ((clientStartFilePage = refChunk.startFilePageNumber) AND (vMPageSet.pageRun.count = chunkTrueSize));
END;
this procedure isn't monitored, but it calls one that is.
gets a chunk from the lru list.
MonitoredOtherGetNewChunk: PROCEDURE[chunkType: YggFilePageMgrPrivateChunk.ClientChunkType]
RETURNS [otherRefChunk: RefChunk] =
BEGIN
-- non system fatal errors: none.
DO
otherFileHandle: YggInternal.FileHandle;
otherStartFilePageNumber: YggEnvironment.PageNumber;
mapped: BOOLEAN;
[mapped, otherRefChunk, otherFileHandle, otherStartFilePageNumber] ←
YggFilePageMgrLru.GetOtherChunkFromLruList[chunkType];
FIX THIS UP !!!
IF mapped -- we didn't get it yet, try to free it.
THEN BEGIN
ErrorProc: PROCEDURE RETURNS[fpmFileHandle: FPMFileHandle] =
BEGIN ERROR ConsistencyError; END; -- non system fatal errors: none.
IF (NOT MonitoredOtherFreeChunkFromFile[YggDIDMap.VerifyFilePageMgrHandle[otherFileHandle, ErrorProc], otherFileHandle, otherRefChunk, otherStartFilePageNumber]) THEN LOOP;
END;
EXIT;
ENDLOOP;
END;
"Other" because it gets the monitor lock for a different file than the one we are working on -- namely the file that currently contains the Chunk we want to map into this file.
MonitoredOtherFreeChunkFromFile: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
fileHandle: YggInternal.FileHandle, refChunk: RefChunk, startFilePageNumber: YggEnvironment.PageNumber]
RETURNS [okay: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
hurryUp: BOOLEAN ← FALSE;
dirty: BOOLEAN ← FALSE;
DO
okay ← ((RBTLookup[fpmFileHandle, startFilePageNumber] = refChunk) AND (refChunk.useCount = 0));
IF NOT okay THEN RETURN;
IF refChunk.state = readInProgress
THEN BEGIN WAIT ReadDone; LOOP; END;
IF refChunk.state = writeInProgress
THEN BEGIN hurryUp ← TRUE; WAIT WriteDone; LOOP; END;
EXIT;
ENDLOOP;
dirty ← ChunkIsDirty[refChunk];
YggFilePageMgrLru.GetOurChunkFromLruList[refChunk, (hurryUp OR dirty)];
IF dirty
THEN
BEGIN
refChunk.state ← writeInProgress;
CleanAndWriteChunk[fpmFileHandle: fpmFileHandle, refChunk: refChunk, lockHeld: TRUE];
END;
UnmapChunk[fpmFileHandle, refChunk];
END;
BasicReadAhead: PROCEDURE[fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun,
chunkType: YggFilePageMgrPrivateChunk.ClientChunkType[normal..log]] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, PageRunArgIllegal, VolumeWentOffline.
vMPageSet: YggFilePageMgr.VMPageSet;
lastValidNFilePagesToRead: YggEnvironment.PageCount ← 0;
lastIndex: NAT ← 0;
listOfRefChunk: LIST OF RefChunk ← NIL;
listOfVMPageSet: LIST OF YggFilePageMgr.VMPageSet ← NIL;
DO
nFilePagesToRead: YggEnvironment.PageCount;
[vMPageSet, nFilePagesToRead] ← BasicGetPages[fileHandle, pageRun, readAhead, chunkType
! PageRunExtendsPastEof => GOTO done];
IF pageRun.count < vMPageSet.pageRun.count THEN ERROR InternalFilePageMgrLogicError;
pageRun.firstPage ← pageRun.firstPage + vMPageSet.pageRun.count;
pageRun.count ← pageRun.count - vMPageSet.pageRun.count;
IF nFilePagesToRead # 0
THEN
BEGIN lastIndex ← lastIndex + 1;
lastValidNFilePagesToRead ← nFilePagesToRead;
listOfRefChunk ← CONS[vMPageSet.refChunk, listOfRefChunk];
listOfVMPageSet ← CONS[vMPageSet, listOfVMPageSet];
END
ELSE ReleaseVMPageSet[vMPageSet, clean, TRUE];
IF ((pageRun.count = 0) OR (lastIndex = MaxReadAheadSets)) THEN EXIT;
REPEAT done => NULL;
ENDLOOP;
IF listOfRefChunk # NIL
THEN
TRUSTED
BEGIN
YggDummyProcess.Detach[FORK ForkedBasicReader[fileHandle, listOfRefChunk, listOfVMPageSet, lastValidNFilePagesToRead]];
END;
END;
ForkedBasicReader: PROCEDURE[fileHandle: YggInternal.FileHandle, listOfRefChunk: LIST OF RefChunk,
listOfVMPageSet: LIST OF YggFilePageMgr.VMPageSet, nFilePagesToRead: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: none.
fpmFileHandle: FPMFileHandle ← GetFilePageMgrHandleFromFileHandle[fileHandle, TRUE];
DoSequentialIO[fpmFileHandle, read, listOfRefChunk, nFilePagesToRead];
FOR listOfVMPageSet ← listOfVMPageSet, listOfVMPageSet.rest UNTIL listOfVMPageSet = NIL
DO
ReleaseVMPageSet[listOfVMPageSet.first, clean, TRUE];
ENDLOOP;
MonitoredSetListOfChunksValidAfterIO[fpmFileHandle, listOfRefChunk, readCompleted];
END;
MonitoredShareVMPageSet: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle, refChunk: RefChunk] =
BEGIN
-- non system fatal errors: none.
IF (refChunk.useCount ← refChunk.useCount + 1) = 1 THEN ERROR ConsistencyError;
END;
MonitoredMainReleaseVMPageSet: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
refChunk: RefChunk, releaseState: YggFilePageMgr.ReleaseState, keep: BOOLEAN, setValid: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
IF setValid THEN BEGIN refChunk.state ← valid; BROADCAST WriteDone; END;
IF refChunk.chunkType = normal
AND YggFilePageMgrLru.UsingTooMuchOfCache[fpmFileHandle]
AND refChunk.state # readInProgress
THEN
BEGIN
-- we catch after the fact.
keep ← FALSE;
IF releaseState = writeIndividualNoWait THEN releaseState ← writeBatchedNoWait;
END;
IF (refChunk.useCount ← refChunk.useCount - 1) = 0
THEN {
YggFilePageMgrLru.PutMappedChunkOnLruList[refChunk,
IF ((releaseState IN YggFilePageMgr.DirtyNoWaitReleaseState) OR keep) THEN mru ELSE lru];
IF ((releaseState = writeBatchedNoWait) AND (NOT refChunk.defWritePending))
THEN {
refChunk.defWritePending ← TRUE;
IF (fpmFileHandle.nDefWriteChunks ← fpmFileHandle.nDefWriteChunks + 1) >= LimitDefWriteChunks
THEN StartSomeDeferredWrites[fpmFileHandle, refChunk, keep];
};
};
END;
StartSomeDeferredWrites spins off lists of chunks for the deferred-write demons, in ascending or descending page order.
ChunkAndPage: TYPE = RECORD[refChunk: RefChunk, startFilePageNumber: YggEnvironment.PageNumber];
StartSomeDeferredWrites: INTERNAL PROCEDURE[fpmFileHandle: FPMFileHandle,
startChunk: RefChunk, keep: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
nextChunk: RefChunk ← startChunk;
searchProc: RBTLookupProc ← RBTLookupNextSmaller;
listOfChunksAndPages: LIST OF ChunkAndPage ← NIL;
AddChunk: PROCEDURE =
BEGIN
-- non system fatal errors: none.
listOfChunksAndPages ← CONS[[nextChunk, nextChunk.startFilePageNumber], listOfChunksAndPages];
END;
ForkDemon: PROCEDURE =
BEGIN
-- non system fatal errors: none.
TRUSTED BEGIN
YggDummyProcess.Detach[FORK DeferredWriteDemon[fpmFileHandle, listOfChunksAndPages, keep]];
END;
listOfChunksAndPages ← NIL;
END;
DO
IF nextChunk.defWritePending
THEN
BEGIN
nextChunk.defWritePending ← FALSE;
fpmFileHandle.nDefWriteChunks ← fpmFileHandle.nDefWriteChunks - 1;
AddChunk[];
IF fpmFileHandle.nDefWriteChunks = 0 THEN EXIT;
END;
IF (nextChunk ← searchProc[fpmFileHandle, nextChunk.startFilePageNumber]) = NIL
THEN
BEGIN
IF searchProc = RBTLookupNextLarger THEN ERROR InternalFilePageMgrLogicError;
ForkDemon[]; -- keep all the chunks going one way, for performance.
searchProc ← RBTLookupNextLarger;
IF (nextChunk ← searchProc[fpmFileHandle, startChunk.startFilePageNumber]) = NIL THEN ERROR InternalFilePageMgrLogicError;
END;
ENDLOOP;
ForkDemon[];
END;
forces the chunks out and then, if requested, moves them to the lru end of the lru list.
DeferredWriteDemon: PROCEDURE[fpmFileHandle: FPMFileHandle,
listOfChunksAndPages: LIST OF ChunkAndPage, keep: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
listOfValidAndDirty: LIST OF RefChunk;
listOfWIPAndClean: LIST OF ChunkAndPage;
[listOfValidAndDirty, listOfWIPAndClean] ←
MonitoredDefWriteSortChunks[fpmFileHandle, listOfChunksAndPages, keep];
DoSequentialIO[fpmFileHandle, write, listOfValidAndDirty, 0];
MonitoredDefWriteSetValidAndWait[fpmFileHandle, listOfValidAndDirty, listOfWIPAndClean];
END;
This routine ignores chunks in the listOfChunksAndPages that have undergone various state transitions such as being remapped. Essentially, it has three functions:
It returns a list of the valid and dirty chunks so that the caller can write them out outside of the monitor and then possibly reorder them.
If keep is TRUE:
it returns a list of the writeInProgress and clean chunks so that the caller can possibly reorder them after it's waited for them to complete their writes.
it reorders any valid and clean chunks that are still on the lru list.
MonitoredDefWriteSortChunks: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
listOfChunksAndPages: LIST OF ChunkAndPage, keep: BOOLEAN]
RETURNS[listOfValidAndDirty: LIST OF RefChunk, listOfWIPAndClean: LIST OF ChunkAndPage] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk;
listOfValidAndDirty ← NIL;
listOfWIPAndClean ← NIL;
FOR listOfChunksAndPages ← listOfChunksAndPages, listOfChunksAndPages.rest UNTIL listOfChunksAndPages = NIL
DO
dirty: BOOLEAN;
refChunk ← RBTLookup[fpmFileHandle, listOfChunksAndPages.first.startFilePageNumber];
IF ((refChunk # listOfChunksAndPages.first.refChunk) OR (refChunk.defWritePending)) THEN LOOP;
dirty ← ChunkIsDirty[refChunk];
SELECT TRUE FROM
((refChunk.state = valid) AND dirty) =>
BEGIN
listOfValidAndDirty ← CONS[listOfChunksAndPages.first.refChunk, listOfValidAndDirty];
refChunk.state ← writeInProgress;
END;
keep =>
SELECT TRUE FROM
((refChunk.state = valid) AND (refChunk.useCount = 0)) =>
YggFilePageMgrLru.RelinkChunkAsLruOnLruList[refChunk];
((refChunk.state = writeInProgress) AND (NOT dirty)) =>
listOfWIPAndClean ← CONS[listOfChunksAndPages.first, listOfWIPAndClean];
ENDCASE;
ENDCASE => NULL;
ENDLOOP;
END;
This ignores chunks that have undergone various state transitions such as being remapped. Essentially, it has two functions:
It sets the chunks that were written back to valid, possibly reordering them.
It waits for the chunks that were wIP and clean to become valid and clean, at which point it possibly reorders them.
MonitoredDefWriteSetValidAndWait: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
listOfValidAndDirty: LIST OF RefChunk, listOfWIPAndClean: LIST OF ChunkAndPage] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk;
InternalSetListOfChunksValidAfterIOThenMaybeReorder[fpmFileHandle, listOfValidAndDirty,
writeCompleted, TRUE];
DO
IF listOfWIPAndClean = NIL THEN RETURN;
refChunk ← RBTLookup[fpmFileHandle, listOfWIPAndClean.first.startFilePageNumber];
IF ((refChunk = listOfWIPAndClean.first.refChunk)
AND (NOT refChunk.defWritePending)
AND (NOT ChunkIsDirty[refChunk]))
THEN
SELECT TRUE FROM
(refChunk.state = valid) =>
IF refChunk.useCount = 0
THEN YggFilePageMgrLru.RelinkChunkAsLruOnLruList[refChunk];
(refChunk.state = writeInProgress) => BEGIN WAIT WriteDone; LOOP; END;
ENDCASE => NULL;
listOfWIPAndClean ← listOfWIPAndClean.rest;
ENDLOOP;
END;
MonitoredForceOutFileSortChunks: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle]
RETURNS [errors: ERROR, listOfValidAndDirty: LIST OF RefChunk, listOfWIPAndClean: LIST OF ChunkAndPage] =
-- values of errors are {NoSuchFile, NoSuchVolume, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk;
dirty: BOOLEAN;
currentFilePageNumber: YggEnvironment.PageNumber;
listOfValidAndDirty ← NIL;
listOfWIPAndClean ← NIL;
IF (NOT fpmFileHandle.exists)
THEN BEGIN errors ← NoSuchFile; RETURN; END;
errors ← Okay;
refChunk ← RBTLookupSmallest[fpmFileHandle];
DO
IF refChunk = NIL THEN RETURN;
currentFilePageNumber ← refChunk.startFilePageNumber;
BEGIN
DO
dirty ← ChunkIsDirty[refChunk];
IF ((refChunk.state # writeInProgress) OR (NOT dirty)) THEN EXIT;
WAIT WriteDone;
IF (RBTLookup[fpmFileHandle, currentFilePageNumber] # refChunk) THEN GOTO doneWithThisChunk;
ENDLOOP;
SELECT TRUE FROM
((refChunk.state = valid) AND dirty) =>
BEGIN
listOfValidAndDirty ← CONS[refChunk, listOfValidAndDirty];
refChunk.state ← writeInProgress;
END;
(refChunk.state = writeInProgress) =>
listOfWIPAndClean ← CONS[[refChunk, refChunk.startFilePageNumber], listOfWIPAndClean];
ENDCASE;
EXITS doneWithThisChunk => NULL;
END;
refChunk ← RBTLookupNextLarger[fpmFileHandle, currentFilePageNumber];
ENDLOOP;
END;
MonitoredForceOutFileSetValidAndWait: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
listOfValidAndDirty: LIST OF RefChunk, listOfWIPAndClean: LIST OF ChunkAndPage] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk;
InternalSetListOfChunksValidAfterIOThenMaybeReorder[fpmFileHandle, listOfValidAndDirty,
writeCompleted, FALSE];
DO
IF listOfWIPAndClean = NIL THEN RETURN;
refChunk ← RBTLookup[fpmFileHandle, listOfWIPAndClean.first.startFilePageNumber];
IF ((refChunk = listOfWIPAndClean.first.refChunk)
AND (listOfWIPAndClean.first.refChunk.state = writeInProgress))
THEN WAIT WriteDone
ELSE listOfWIPAndClean ← listOfWIPAndClean.rest;
ENDLOOP;
END;
MonitoredSweeperSortChunks: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
doc: YggDIDMap.Document, sweepList: LIST OF YggFilePageMgrLru.SweepItem]
RETURNS[listOfValidAndDirty: LIST OF RefChunk, newSweepList: LIST OF YggFilePageMgrLru.SweepItem] =
BEGIN
-- non system fatal errors: none.
refChunk: RefChunk;
listOfValidAndDirty ← NIL;
DO
refChunk ← RBTLookup[fpmFileHandle, sweepList.first.startFilePageNumber];
IF ((refChunk # NIL)
AND (refChunk.useCount = 0)
AND (refChunk.state = valid)
AND (ChunkIsDirty[refChunk]))
THEN
BEGIN
listOfValidAndDirty ← CONS[refChunk, listOfValidAndDirty];
refChunk.state ← writeInProgress;
END;
sweepList ← sweepList.rest;
IF ((sweepList = NIL) OR (sweepList.first.doc # doc)) THEN EXIT;
ENDLOOP;
newSweepList ← sweepList;
END;
MonitoredUnmapFile: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle]
RETURNS[errors: ERROR] = -- values of errors are {NoSuchFile, NoSuchVolume, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
IF (NOT fpmFileHandle.exists) THEN RETURN[NoSuchFile];
DumpInconvenientlyMappedChunks[fpmFileHandle, TRUE, 0];
RETURN[Okay];
END;
Create: PUBLIC PROCEDURE[did: YggEnvironment.DID, filePart: Rope.ROPE,
initialSize: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: InsufficientSpaceOnVolume, NoSuchVolume, SizeArgIllegal, VolumeTooFragmented, VolumeWentOffline plus any raised by proc.
IF initialSize < 0 THEN ERROR SizeArgIllegal;
[] ← YggFileStream.CreateFileInDirectory[did: did, filePart: filePart, initialSize: initialSize];
END;
Delete: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, VolumeWentOffline.
errors: ERROR;
IF (errors ← MonitoredDelete[GetFilePageMgrHandleFromFileHandle[fileHandle, FALSE]]) # Okay
THEN ERROR errors;
END;
MonitoredDelete: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle]
RETURNS[errors: ERROR] = -- values of errors are {NoSuchFile, NoSuchVolume, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
IF (NOT fpmFileHandle.exists) THEN RETURN[NoSuchFile];
DumpInconvenientlyMappedChunks[fpmFileHandle, TRUE, 0];
YggHostFS.Delete[fpmFileHandle.lowerHandle
! YggHostFS.Error =>
SELECT why FROM
wentOffline => GOTO wentOffline;
unknownFile => GOTO horrible;
ENDCASE;];
fpmFileHandle.fileDataSize ← 0;
fpmFileHandle.exists ← FALSE;
RETURN[Okay];
EXITS
horrible => ERROR InternalFilePageMgrLogicError;
wentOffline => RETURN[VolumeWentOffline];
END;
SetSize: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle, size: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: InsufficientSpaceOnVolume, NoSuchFile, NoSuchVolume, SizeArgIllegal, VolumeTooFragmented, VolumeWentOffline.
errors: ERROR;
IF size < 0 THEN ERROR SizeArgIllegal;
IF (errors ← MonitoredSetSize[GetFilePageMgrHandleFromFileHandle[fileHandle, FALSE], size]) # Okay
THEN ERROR errors;
END;
MonitoredSetSize: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle, newSize: YggEnvironment.PageCount]
RETURNS[errors: ERROR] = -- values of errors are {InsufficientSpaceOnVolume, NoSuchFile, NoSuchVolume, VolumeTooFragmented, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
IF (NOT fpmFileHandle.exists) THEN RETURN[NoSuchFile];
IF fpmFileHandle.fileDataSize = newSize THEN RETURN[Okay];
IF newSize < fpmFileHandle.fileDataSize
THEN
-- truncating.
DumpInconvenientlyMappedChunks[fpmFileHandle, FALSE, newSize];
YggHostFS.SetSize[fpmFileHandle.lowerHandle, newSize
! YggHostFS.Error =>
SELECT why FROM
fragmented => GOTO fragmented;
unknownFile => GOTO horrible;
volumeFull => GOTO volumeFull;
wentOffline => GOTO wentOffline;
ENDCASE;];
fpmFileHandle.fileDataSize ← newSize;
RETURN[Okay];
EXITS
fragmented => RETURN[VolumeTooFragmented];
horrible => ERROR InternalFilePageMgrLogicError;
volumeFull => RETURN[InsufficientSpaceOnVolume];
wentOffline => RETURN[VolumeWentOffline];
END;
Called from unmap file, delete file, and setsize truncate. Wants to unmap chunks mapped to areas of the file that will no longer exist. None of those chunks should have useCounts > 0 except ones left around in ReadAhead, which we wait for. Some consistency checking is done.
DumpInconvenientlyMappedChunks: INTERNAL PROCEDURE[fpmFileHandle: FPMFileHandle,
dumpingWholeFile: BOOLEAN, size: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: none.
IF fpmFileHandle.nMappedChunks # 0
THEN
BEGIN
refChunk: RefChunk;
firstChunkIsPartial: BOOLEAN;
[refChunk, firstChunkIsPartial] ← FirstChunkToCheck[fpmFileHandle,
dumpingWholeFile, size];
IF refChunk # NIL
THEN CheckTheChunks[fpmFileHandle, refChunk, firstChunkIsPartial];
bug catchers:
[refChunk, firstChunkIsPartial] ← FirstChunkToCheck[fpmFileHandle, dumpingWholeFile, size];
IF refChunk # NIL
THEN
BEGIN
IF (dumpingWholeFile
OR (NOT firstChunkIsPartial)
OR (RBTLookupNextLarger[fpmFileHandle, refChunk.startFilePageNumber] # NIL)) THEN ERROR;
END;
END;
END;
If unmap file or delete file, start at lowest chunk in file. If setsize truncate, start at chunk containing new eof.
FirstChunkToCheck: INTERNAL SAFE PROCEDURE[fpmFileHandle: FPMFileHandle,
dumpingWholeFile: BOOLEAN, newSize: YggEnvironment.PageCount]
RETURNS [refChunk: RefChunk, firstChunkIsPartial: BOOLEAN] = CHECKED
BEGIN
-- non system fatal errors: none.
firstPageToClip: YggEnvironment.PageNumber;
largestChunk: RefChunk;
IF dumpingWholeFile
THEN RETURN [RBTLookupSmallest[fpmFileHandle], FALSE];
IF (largestChunk ← RBTLookupLargest[fpmFileHandle]) = NIL THEN RETURN[NIL, FALSE];
firstPageToClip ← ChunkStartFilePage[newSize, largestChunk.chunkType]; -- assumption: take the chunk type from a mapped chunk of this file; the original left its fileChunkType variable unassigned.
IF (refChunk ← RBTLookup[fpmFileHandle, firstPageToClip]) = NIL
THEN refChunk ← RBTLookupNextLarger[fpmFileHandle, firstPageToClip];
firstChunkIsPartial ← ((refChunk # NIL) AND (refChunk.startFilePageNumber # newSize));
END;
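The first chunk is partial when the new eof falls strictly inside it. CheckTheChunks writes such a chunk if dirty but leaves it mapped; chunks lying wholly beyond the new eof are unmapped and returned to the unmapped lru list.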
CheckTheChunks: INTERNAL SAFE PROCEDURE[fpmFileHandle: FPMFileHandle, refChunk: RefChunk,
firstChunkIsPartial: BOOLEAN] = CHECKED
BEGIN
-- non system fatal errors: none.
-- non system fatal errors: none.
startPageOfChunk: YggEnvironment.PageNumber;
DO
startPageOfChunk ← refChunk.startFilePageNumber;
BEGIN
DO
IF (refChunk ← RBTLookup[fpmFileHandle, startPageOfChunk]) = NIL
THEN GOTO doneWithThisChunk;
IF refChunk.state = readInProgress
THEN
BEGIN
WAIT ReadDone;
LOOP;
END;
IF refChunk.useCount # 0 THEN ERROR;
IF refChunk.state = writeInProgress THEN BEGIN WAIT WriteDone; LOOP; END;
EXIT;
ENDLOOP;
IF ((refChunk.state = valid) AND (ChunkIsDirty[refChunk]))
THEN
BEGIN
refChunk.state ← writeInProgress;
CleanAndWriteChunk[fpmFileHandle: fpmFileHandle, refChunk: refChunk, lockHeld: TRUE];
END;
IF NOT firstChunkIsPartial
THEN
BEGIN
YggFilePageMgrLru.GetOurChunkFromLruList[refChunk, FALSE];
UnmapChunk[fpmFileHandle, refChunk];
YggFilePageMgrLru.PutUnmappedChunkOnLruList[refChunk];
END
ELSE refChunk.state ← valid;
EXITS doneWithThisChunk => NULL;
END;
firstChunkIsPartial ← FALSE;
IF (refChunk ← RBTLookupNextLarger[fpmFileHandle, startPageOfChunk]) = NIL THEN EXIT;
ENDLOOP;
END;
FileExists: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle]
RETURNS [fileExists: BOOLEAN] =
BEGIN
-- non system fatal errors: NoSuchVolume, VolumeWentOffline.
errors: ERROR;
[errors, fileExists] ← MonitoredFileExists[GetFilePageMgrHandleFromFileHandle[fileHandle, FALSE]];
IF errors # Okay THEN ERROR errors;
END;
MonitoredFileExists: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle]
RETURNS [errors: ERROR, fileExists: BOOLEAN] = -- values of errors are {NoSuchVolume, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
RETURN[Okay, fpmFileHandle.exists];
END;
GetSize: PUBLIC PROCEDURE[fileHandle: YggInternal.FileHandle]
RETURNS [size: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: NoSuchFile, NoSuchVolume, VolumeWentOffline.
errors: ERROR;
[errors, size] ← MonitoredGetDataSize[GetFilePageMgrHandleFromFileHandle[fileHandle, FALSE]];
IF errors # Okay THEN ERROR errors;
END;
MonitoredGetDataSize: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle]
RETURNS [errors: ERROR, size: YggEnvironment.PageCount] = -- values of errors are {NoSuchFile, NoSuchVolume, VolumeWentOffline}.
BEGIN
-- non system fatal errors: none.
IF (NOT fpmFileHandle.exists) THEN RETURN[NoSuchFile, 0];
RETURN[Okay, fpmFileHandle.fileDataSize];
END;
utility routines:
GetKeyProc: RedBlackTree.GetKey -- PROC [data: UserData] RETURNS [Key] -- = {
RETURN[ data ]; -- assumed body: a RefChunk serves as its own key; CompareProc below accepts both key forms.
};
CompareProc: RedBlackTree.Compare -- PROC [k: Key, data: UserData] RETURNS [Basics.Comparison] -- = {
dataRefChunk: RefChunk = NARROW[ data ];
WITH k SELECT FROM
pnRef: REF YggEnvironment.PageNumber => RETURN[Basics.CompareInt[pnRef^, dataRefChunk.startFilePageNumber]];
keyRefChunk: RefChunk => RETURN[Basics.CompareInt[keyRefChunk.startFilePageNumber, dataRefChunk.startFilePageNumber]];
ENDCASE => ERROR;
};
GetFilePageMgrHandleFromFileHandle: PROCEDURE[fileHandle: YggInternal.FileHandle, handleMustExist: BOOLEAN ← FALSE]
RETURNS [fpmFileHandle: FPMFileHandle ← NIL] = {ERROR}; -- stub: always raises ERROR.
GetDocumentFromFileHandle: PROCEDURE[fileHandle: YggInternal.FileHandle, handleMustExist: BOOLEAN ← FALSE]
RETURNS [doc: YggDIDMap.Document ← NIL] = {ERROR}; -- stub: always raises ERROR.
GetFilePageMgrHandle: PROCEDURE[doc: YggDIDMap.Document, handleMustExist: BOOLEAN]
RETURNS [fpmFileHandle: FPMFileHandle] =
BEGIN
-- non system fatal errors: none.
InitFPMFileHandle: PROCEDURE RETURNS[fpmFileHandle: FPMFileHandle] =
BEGIN
-- non system fatal errors: none.
fileExists: BOOLEAN ← TRUE;
lowerHandle: YggHostFS.HostFile ← NIL;
fileDataSize: YggEnvironment.PageCount;
IF handleMustExist THEN ERROR ConsistencyError;
BEGIN
lowerHandle ← YggHostFS.LabeledOpen[volume: NIL, fp: fileID];
fileDataSize ← YggHostFS.Info[lowerHandle].size;
END;
fpmFileHandle ←
NEW[FPMFileObject ← [
chunkTable: RedBlackTree.Create[getKey: GetKeyProc, compare: CompareProc],
nMappedChunks: 0,
fileDataSize: fileDataSize,
exists: fileExists,
nDefWriteChunks: 0,
nLruListChunks: 0,
lowerHandle: lowerHandle,
rbKeyRef: NEW[YggEnvironment.PageNumber ← 0]]];
END;
fpmFileHandle ← YggDIDMap.VerifyFilePageMgrHandle[doc, InitFPMFileHandle];
END;
ChunkAllocator: PUBLIC PROCEDURE[chunkType: YggFilePageMgrPrivateChunk.ChunkType, permanent: BOOLEAN]
RETURNS [refChunk: RefChunk] =
BEGIN
-- non system fatal errors: running out of vm is system fatal.
refChunk ← NEW[Chunk];
refChunk^ ← [chunkType: chunkType, defWritePending: FALSE, state: undefined,
useCount: 0, doc: NIL, startFilePageNumber: 0, startVMPageNumber: 0,
nVMPages: 0, prev: NIL, next: NIL -- , rbColor: , rbLLink: NIL, rbRLink: NIL -- ];
IF (chunkType IN YggFilePageMgrPrivateChunk.ClientChunkType)
THEN
BEGIN
refChunk.nVMPages ← YggFilePageMgrPrivateChunk.ChunkVMPageCount[chunkType];
refChunk.startVMPageNumber ← VM.Allocate[count: refChunk.nVMPages, partition: normalVM,
subRange: [0, 0], start: 0, alignment: 0, in64K: FALSE].page;
YggDummyVM.Pin[[refChunk.startVMPageNumber, refChunk.nVMPages]];
END;
END;
xx needs work when sizes differ.
GetVMIntervalFromFileInterval: PROCEDURE[startFilePageNumber, filePageNumber: YggEnvironment.PageNumber,
filePageCount: YggEnvironment.PageCount, startVMPageNumber: VM.PageNumber]
RETURNS[vmPageNumber: VM.PageNumber, vmPageCount: VM.PageCount] =
BEGIN
-- non system fatal errors: none.
vmPageNumber ← startVMPageNumber + (filePageNumber - startFilePageNumber);
vmPageCount ← filePageCount;
END;
xx needs work when sizes differ.
GetFileIntervalFromVMInterval: PROCEDURE[startVMPageNumber, vmPageNumber: VM.PageNumber,
vmPageCount: VM.PageCount, startFilePageNumber: YggEnvironment.PageNumber]
RETURNS[filePageNumber: YggEnvironment.PageNumber, filePageCount: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: none.
filePageNumber ← startFilePageNumber + (vmPageNumber - startVMPageNumber);
filePageCount ← vmPageCount;
END;
ChunkStartFilePage: PROCEDURE[clientFilePage: YggEnvironment.PageNumber,
chunkType: YggFilePageMgrPrivateChunk.ClientChunkType]
RETURNS[chunkStartFilePage: YggEnvironment.PageNumber] =
BEGIN
-- non system fatal errors: none.
RETURN[(clientFilePage - Basics.CardMod[clientFilePage, YggFilePageMgrPrivateChunk.ChunkFilePageCount[chunkType]])];
END;
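For example, with a chunk size of (say) eight file pages, client pages 0..7 all yield chunkStartFilePage 0 and pages 8..15 yield 8: subtracting CardMod rounds the client's page number down to its chunk boundary.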
VMAddressForPageNumber: PROCEDURE[page: VM.PageNumber]
RETURNS [address: LONG POINTER] = -- non system fatal errors: none.
TRUSTED BEGIN RETURN[VM.AddressForPageNumber[page]]; END;
MakePagesClean: PROCEDURE[interval: VM.Interval] =
BEGIN
-- non system fatal errors: none.
YggDummyVM.MakeUnchanged[interval];
END;
MakeAllPagesInChunkClean:
PROCEDURE[refChunk: RefChunk] =
BEGIN
-- non system fatal errors: none.
YggDummyVM.MakeUnchanged[[page: refChunk.startVMPageNumber, count: refChunk.nVMPages]];
END;
ChunkIsDirty: PUBLIC PROCEDURE[refChunk: RefChunk]
RETURNS [dirty: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
FOR vMPage: VM.PageNumber
IN [refChunk.startVMPageNumber..refChunk.startVMPageNumber + refChunk.nVMPages)
DO
IF YggDummyVM.State[vMPage].dataState = changed THEN RETURN[TRUE];
ENDLOOP;
RETURN[FALSE];
END;
chunks are presented in ascending or descending order. Sets pages clean before the writes and after the reads.
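The incoming list is first normalized to descending file-page order; each entry is then CONSed onto listOfIOReq, so the request list handed to YggFilePageMgrIO ends up in ascending order.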
DoSequentialIO: PROCEDURE[fpmFileHandle: FPMFileHandle, io: YggFilePageMgrIO.IOType,
listOfRefChunk: LIST OF RefChunk, nFilePagesHighestChunkForRead: YggEnvironment.PageCount] =
BEGIN
-- non system fatal errors: none.
controllingProcess: PROCESS ← LOOPHOLE[YggDummyProcess.GetCurrent[]];
iORequest: YggFilePageMgrIO.IORequest;
workToDo: BOOLEAN;
listOfIOReq: LIST OF YggFilePageMgrIO.IORequest ← NIL;
error: YggHostFS.RC;
errorDiskPage: INT;
errorIORequest: YggFilePageMgrIO.IORequest;
first: BOOLEAN ← TRUE;
IF listOfRefChunk = NIL THEN RETURN;
IF ((listOfRefChunk.rest # NIL)
AND (listOfRefChunk.first.startFilePageNumber < listOfRefChunk.rest.first.startFilePageNumber))
THEN
BEGIN
tempList: LIST OF RefChunk ← NIL;
FOR listOfRefChunk ← listOfRefChunk, listOfRefChunk.rest UNTIL listOfRefChunk = NIL
DO
tempList ← CONS[listOfRefChunk.first, tempList];
ENDLOOP;
listOfRefChunk ← tempList;
END;
FOR tempList: LIST OF RefChunk ← listOfRefChunk, tempList.rest UNTIL tempList = NIL
DO
SELECT io FROM
read =>
BEGIN
listOfIOReq ← CONS[[filePageNumber: [tempList.first.startFilePageNumber],
nPages: (IF first THEN nFilePagesHighestChunkForRead
ELSE YggFilePageMgrPrivateChunk.ChunkFilePageCount[tempList.first.chunkType]),
vM: VMAddressForPageNumber[tempList.first.startVMPageNumber]],
listOfIOReq];
first ← FALSE;
END;
write =>
BEGIN
AddToCmdList: PROCEDURE[filePageNumber: YggEnvironment.PageNumber,
filePageCount: YggEnvironment.PageCount, vmPageNumber: VM.PageNumber]
RETURNS[stop: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
listOfIOReq ← CONS[[filePageNumber: [filePageNumber], nPages: filePageCount,
vM: VMAddressForPageNumber[vmPageNumber]], listOfIOReq];
stop ← FALSE;
END;
GetAndCleanNextSeqForWrite[tempList.first, descending, AddToCmdList];
END;
ENDCASE;
ENDLOOP;
IF listOfIOReq = NIL THEN RETURN;
iORequest ← YggFilePageMgrIO.RegisterRequest[controllingProcess: controllingProcess, io: io, file: fpmFileHandle.lowerHandle, list: listOfIOReq];
DO
YggFilePageMgrIO.DoIO[io, fpmFileHandle.lowerHandle, iORequest !
YggHostFS.Error =>
BEGIN error ← why;
errorDiskPage ← diskPage;
errorIORequest ← iORequest;
YggFilePageMgrIO.LogError[controllingProcess, controller, why, iORequest];
GOTO errorSeen;
END];
[error, errorIORequest, workToDo, iORequest] ← YggFilePageMgrIO.GetNext[controllingProcess:
controllingProcess, who: controller];
IF error # ok THEN GOTO errorSeen;
IF workToDo = FALSE THEN EXIT;
REPEAT errorSeen => ERROR; -- someday we'll do the right thing.
ENDLOOP;
IF io = read
THEN
FOR tempList: LIST OF RefChunk ← listOfRefChunk, tempList.rest UNTIL tempList = NIL
DO
MakeAllPagesInChunkClean[tempList.first];
ENDLOOP;
END;
sets the dirty pages to clean and does the write.
CleanAndWriteChunk: PROCEDURE[fpmFileHandle: FPMFileHandle, refChunk: RefChunk, lockHeld: BOOLEAN ← FALSE] =
BEGIN
-- non system fatal errors: none.
WriterProc: PROCEDURE[filePageNumber: YggEnvironment.PageNumber, filePageCount: YggEnvironment.PageCount,
vmPageNumber: VM.PageNumber]
RETURNS[stop: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
YggFilePageMgrIO.DoIO[write, fpmFileHandle.lowerHandle,
[[filePageNumber], filePageCount, VMAddressForPageNumber[vmPageNumber]]];
stop ← FALSE;
END;
GetAndCleanNextSeqForWrite[refChunk, ascending, WriterProc, lockHeld];
END;
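GetAndCleanNextSeqForWrite scans the chunk's vm pages for maximal runs of dirty pages, marks each run clean, and hands the corresponding file interval to proc; ascending works from the front of the chunk, descending from the back, and it stops early if proc returns stop = TRUE.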
GetAndCleanNextSeqForWrite: PROCEDURE[refChunk: RefChunk, whichWay: {ascending, descending},
proc: PROCEDURE[filePageNumber: YggEnvironment.PageNumber, filePageCount: YggEnvironment.PageCount,
vmPageNumber: VM.PageNumber] RETURNS[stop: BOOLEAN], lockHeld: BOOLEAN ← FALSE] =
BEGIN
-- non system fatal errors: any non system fatal errors returned by "proc".
filePageNumber: YggEnvironment.PageNumber;
filePageCount: YggEnvironment.PageCount;
vmPageNumber: VM.PageNumber;
vmPageCount: VM.PageCount;
startVMPage: VM.PageNumber ← refChunk.startVMPageNumber;
endVMPage: VM.PageNumber ← refChunk.startVMPageNumber + refChunk.nVMPages - 1;
DO
BEGIN
IF whichWay = ascending
THEN
BEGIN
IF YggDummyVM.State[startVMPage].dataState = changed
THEN
BEGIN
vmPageNumber ← startVMPage;
DO
startVMPage ← startVMPage + 1;
IF ((startVMPage > endVMPage)
OR (YggDummyVM.State[startVMPage].dataState # changed)) THEN EXIT;
ENDLOOP;
vmPageCount ← startVMPage - vmPageNumber;
GOTO doInterval;
END;
END
ELSE
BEGIN
IF YggDummyVM.State[endVMPage].dataState = changed
THEN
BEGIN
saveEndVMPage: VM.PageNumber ← endVMPage;
DO
endVMPage ← endVMPage - 1;
IF ((startVMPage > endVMPage)
OR (YggDummyVM.State[endVMPage].dataState # changed)) THEN EXIT;
ENDLOOP;
vmPageNumber ← endVMPage + 1;
vmPageCount ← saveEndVMPage - endVMPage;
GOTO doInterval;
END;
END;
EXITS
doInterval =>
BEGIN
[filePageNumber, filePageCount] ←
GetFileIntervalFromVMInterval[refChunk.startVMPageNumber,
vmPageNumber, vmPageCount, refChunk.startFilePageNumber];
MakePagesClean[[vmPageNumber, vmPageCount]];
IF proc[filePageNumber, filePageCount, vmPageNumber] THEN RETURN;
END;
END;
IF whichWay = ascending
THEN startVMPage ← startVMPage + 1
ELSE endVMPage ← endVMPage - 1;
IF startVMPage > endVMPage THEN EXIT;
ENDLOOP;
END;
MonitoredWaitForWriteToCompleteThenMaybeSetWIP: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
refChunk: RefChunk] RETURNS[writeNeeded: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
DO
IF refChunk.state # writeInProgress THEN EXIT;
WAIT WriteDone;
ENDLOOP;
IF (writeNeeded ← ChunkIsDirty[refChunk]) THEN refChunk.state ← writeInProgress;
END;
MonitoredSetChunkValidAfterIO: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
refChunk: RefChunk, what: {readCompleted, writeCompleted}] =
BEGIN
-- non system fatal errors: none.
refChunk.state ← valid;
IF what = readCompleted -- weird form to keep compiler happy.
THEN BROADCAST ReadDone
ELSE BROADCAST WriteDone;
END;
MonitoredSetListOfChunksValidAfterIO: ENTRY PROCEDURE[fpmFileHandle: FPMFileHandle,
listOfRefChunk: LIST OF RefChunk, what: {readCompleted, writeCompleted}] =
BEGIN
-- non system fatal errors: none.
FOR list: LIST OF RefChunk ← listOfRefChunk, list.rest UNTIL list = NIL
DO
list.first.state ← valid;
ENDLOOP;
IF what = readCompleted -- weird form to keep compiler happy.
THEN BROADCAST ReadDone
ELSE BROADCAST WriteDone;
END;
InternalSetListOfChunksValidAfterIOThenMaybeReorder: INTERNAL PROCEDURE[fpmFileHandle: FPMFileHandle,
listOfRefChunk: LIST OF RefChunk, what: {readCompleted, writeCompleted}, reorder: BOOLEAN] =
BEGIN
-- non system fatal errors: none.
FOR list: LIST OF RefChunk ← listOfRefChunk, list.rest UNTIL list = NIL
DO
list.first.state ← valid;
IF (reorder
AND (NOT ChunkIsDirty[list.first])
AND (list.first.useCount = 0))
THEN YggFilePageMgrLru.RelinkChunkAsLruOnLruList[list.first];
ENDLOOP;
IF what = readCompleted -- weird form to keep compiler happy.
THEN BROADCAST ReadDone
ELSE BROADCAST WriteDone;
END;
A thin veneer over RedBlackTree handling the coercions. Should be called only with the lock in fpmFileHandle already held.
RBTLookupProc: TYPE = PROCEDURE[ fpmFileHandle: FPMFileHandle, key: YggEnvironment.PageNumber ] RETURNS [RefChunk];
RBTLookup: RBTLookupProc = {
fpmFileHandle.rbKeyRef^ ← key;
RETURN[ NARROW[RedBlackTree.Lookup[ fpmFileHandle.chunkTable, fpmFileHandle.rbKeyRef]] ];
};
RBTLookupNextLarger: RBTLookupProc = {
fpmFileHandle.rbKeyRef^ ← key;
RETURN[ NARROW[RedBlackTree.LookupNextLarger[ fpmFileHandle.chunkTable, fpmFileHandle.rbKeyRef]] ];
};
RBTLookupNextSmaller: RBTLookupProc = {
fpmFileHandle.rbKeyRef^ ← key;
RETURN[ NARROW[RedBlackTree.LookupNextSmaller[ fpmFileHandle.chunkTable, fpmFileHandle.rbKeyRef]] ];
};
RBTLookupLargest:
PROCEDURE[ fpmFileHandle: FPMFileHandle ]
RETURNS [RefChunk] = {
RETURN[ NARROW[RedBlackTree.LookupLargest[ fpmFileHandle.chunkTable]] ];
};
RBTLookupSmallest:
PROCEDURE[ fpmFileHandle: FPMFileHandle ]
RETURNS [RefChunk] = {
RETURN[ NARROW[RedBlackTree.LookupSmallest[ fpmFileHandle.chunkTable ]] ];
};
RBTDelete:
PROCEDURE[ fpmFileHandle: FPMFileHandle, key: YggEnvironment.PageNumber ]
RETURNS [RefChunk] = {
n: RedBlackTree.Node;
fpmFileHandle.rbKeyRef^ ← key;
n ← RedBlackTree.Delete[ fpmFileHandle.chunkTable, fpmFileHandle.rbKeyRef];
RETURN[ IF n=NIL THEN NIL ELSE NARROW[n.data] ];
};
RBTInsert:
PROCEDURE[ fpmFileHandle: FPMFileHandle, refChunk: RefChunk, key: YggEnvironment.PageNumber ] = {
fpmFileHandle.rbKeyRef^ ← key;
RedBlackTree.Insert[ fpmFileHandle.chunkTable, refChunk, fpmFileHandle.rbKeyRef ];
};
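Reusing the per-file rbKeyRef avoids allocating a fresh key REF on every lookup; this is safe only because all chunkTable operations occur with the fpmFileHandle monitor held, as noted above.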
InitializeFilePageMgr: PUBLIC PROCEDURE[nNormalChunksInCache: NAT,
nLogChunksInCache: NAT, checkPointEpoch: NAT] =
BEGIN
-- non system fatal errors: none.
IF moduleInitialized THEN ERROR;
YggFilePageMgrLru.InitializeLruLists[[normal: nNormalChunksInCache,
log: nLogChunksInCache]]; -- operates inside the lru list monitor.
CurrentEpoch ← checkPointEpoch;
moduleInitialized ← TRUE;
END;
fatal errors:
ConsistencyError: -- CALLING or FPM -- ERROR = CODE; -- caller has asserted that a FPMFileHandle exists or a vmpageset has its usecount positive, but it doesn't.
main line code:
MaxReadAheadSets: NAT = 10;
-- to avoid swamping the cache, we won't read ahead more sets than this on one request; the remainder of the request is ignored.
LimitDefWriteChunks: NAT = 10;
-- once this many chunks from a given file are waiting for a demon, we incarnate one or more.
moduleInitialized: BOOLEAN ← FALSE;
END.