DIRECTORY
  Basics USING [Comparison],
  BasicTime USING [Now],
  Camelot USING [DSLogOldValueNewValue, DSPinObject, DSQPreflush, DSQZeroFill, ListOfSegmentDesc, optrT, segmentDescT, segmentIdT, tidT],
  ConstArith USING [Add, Const, DivMod, ToCard],
  Mach USING [kernReturnT, ListOfPorts, pagingObjectT, portT, taskSelf, vmAddressT, vmAllocateWithPager, vmDeallocate],
  PBasics USING [ByteBlt, bytesPerWord, Move, Word],
  Process USING [Detach, MsecToTicks, Pause, Ticks],
  RedBlackTree USING [Compare, Create, Delete, GetKey, Insert, Lookup, LookupNextSmaller, LookupProc, Size, Table, UserData],
  SafeStorage USING [CantEstablishFinalization, EnableFinalization, EstablishFinalization, FinalizationQueue, FQNext, NewFQ],
  YggBuffMan USING [Chunk, ReleaseState, VMPageSet],
  YggCamelotSegment USING [SegPort],
  YggDIDMap USING [Run, RunList],
  YggFile USING [Locate],
  YggFileInternal USING [InitializeFile],
  YggEnvironment USING [dsPort, PageRun],
  YggInternal USING [FileHandle],
  YggTransaction USING [IsTopLevel];

YggBuffManImpl: CEDAR MONITOR
  IMPORTS BasicTime, Camelot, ConstArith, PBasics, Process, Mach, RedBlackTree, SafeStorage, YggFile, YggFileInternal, YggEnvironment, YggTransaction
  EXPORTS YggBuffMan, YggCamelotSegment
  ~ BEGIN

TopLevelTransactionOnly: PUBLIC ERROR = CODE;

maxChunkSize: INT _ 8;
wordsPerPage: INT _ 1024;
bytesPerPage: INT _ wordsPerPage * PBasics.bytesPerWord;
unitsPerPage: INT _ wordsPerPage * UNITS[PBasics.Word];

Chunk: TYPE = YggBuffMan.Chunk;

intervalToChunkMap: RedBlackTree.Table;
desiredNumberOfChunks: INT _ 500;
lruListTop: REF Chunk _ NIL;
lruListTail: REF Chunk _ NIL;
myCondition: CONDITION;
MyQueue: SafeStorage.FinalizationQueue = SafeStorage.NewFQ[length: 200];

SegmentList: PUBLIC LIST OF YggCamelotSegment.SegPort _ NIL;

ReadPages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, tid: Camelot.tidT, pageRun: YggEnvironment.PageRun, zeroFill: BOOL _ FALSE] RETURNS [vMPageSet: YggBuffMan.VMPageSet _ NIL] ~ {
  addVMPageItem: PROC [refChunk: REF Chunk] ~ {
    firstPage: CARD;
    lastPage: CARD;
    runPages: INT;
    offsetPages: INT;
    buffer: LONG POINTER;
    pageRun: YggDIDMap.Run;
    firstPage _ MIN[scratchChunk.run.segmentPage, refChunk.run.segmentPage];
    lastPage _ MIN[scratchChunk.run.segmentPage + scratchChunk.run.pages, refChunk.run.segmentPage + refChunk.run.pages] - 1;
    runPages _ lastPage - firstPage + 1;
    offsetPages _ firstPage - refChunk.run.segmentPage;
    IF offsetPages = 0 THEN buffer _ refChunk.startAddress
    ELSE buffer _ refChunk.startAddress + offsetPages * bytesPerPage;
    IF firstPage = refChunk.run.segmentPage AND lastPage = refChunk.run.segmentPage + CARD[refChunk.run.pages] - 1 THEN {
      pageRun _ refChunk.run;
      }
    ELSE {
      pageRun _ [scratchChunk.run.segmentId, firstPage, scratchChunk.run.firstPage + offsetPages, runPages, FALSE];
      };
    IF vMPageSetTail = NIL THEN {
      vMPageSet _ LIST[[buffer, pageRun, refChunk]];
      vMPageSetTail _ vMPageSet;
      }
    ELSE {
      vMPageSetTail.rest _ CONS[[buffer, pageRun, refChunk], NIL];
      vMPageSetTail _ vMPageSetTail.rest; -- advance the tail so later items append rather than overwrite
      };
    scratchChunk.run.segmentPage _ scratchChunk.run.segmentPage + runPages;
    scratchChunk.run.pages _ scratchChunk.run.pages - runPages;
    };
  scratchChunk: REF Chunk;
  vMPageSetTail: YggBuffMan.VMPageSet _ NIL;
  runListFromLocate: YggDIDMap.RunList;
  runListFromLocate _ YggFile.Locate[file: fileHandle, tid: tid, from: [pageRun.firstPage], nPages: pageRun.count];
  scratchChunk _ getScratchRefChunk[];
  FOR rl: YggDIDMap.RunList _ runListFromLocate, rl.rest UNTIL rl = NIL DO
    data: RedBlackTree.UserData;
    scratchChunk.run _ rl.first;
    WHILE scratchChunk.run.pages > 0 DO
      insureChunkValid: ENTRY PROC [chunk: REF Chunk] ~ {
        WHILE ~chunk.valid DO WAIT myCondition; ENDLOOP;
        };
      data _ LookupFirst[intervalToChunkMap, scratchChunk];
      IF data = NIL THEN {
        newChunk: REF Chunk;
        newChunk _ mapInRun[scratchChunk.run.segmentId, scratchChunk.run.segmentPage, scratchChunk.run.pages, scratchChunk.run.firstPage, TRUE, zeroFill];
        IF newChunk = NIL THEN LOOP -- try again; should find a chunk
        ELSE insureChunkValid[newChunk];
        markInterestInChunk[newChunk];
        newChunk.file _ fileHandle;
        addVMPageItem[refChunk: newChunk];
        }
      ELSE {
        grabChunk: ENTRY PROC RETURNS [tryAgain: BOOL _ FALSE] ~ {
          IF firstRefChunk.startAddress = NIL THEN RETURN[TRUE];
          IF firstRefChunk.lruListPrev # NIL OR firstRefChunk.lruListNext # NIL OR lruListTop = firstRefChunk THEN {
            IF firstRefChunk.lruListPrev = NIL THEN { lruListTop _ firstRefChunk.lruListNext }
            ELSE { firstRefChunk.lruListPrev.lruListNext _ firstRefChunk.lruListNext; };
            IF lruListTop # NIL THEN lruListTop.lruListPrev _ NIL;
            IF firstRefChunk.lruListNext = NIL THEN { lruListTail _ firstRefChunk.lruListPrev }
            ELSE firstRefChunk.lruListNext.lruListPrev _ firstRefChunk.lruListPrev;
            IF lruListTail # NIL THEN lruListTail.lruListNext _ NIL;
            };
          firstRefChunk.useCount _ firstRefChunk.useCount + 1;
          firstRefChunk.lastTouchTime _ BasicTime.Now[];
          firstRefChunk.lruListPrev _ NIL;
          firstRefChunk.lruListNext _ NIL;
          };
        firstRefChunk: REF Chunk _ NARROW[data];
        IF firstRefChunk.run.segmentPage >= scratchChunk.run.segmentPage AND firstRefChunk.run.segmentPage < scratchChunk.run.segmentPage + CARD[scratchChunk.run.pages] THEN {
          IF grabChunk[] THEN LOOP; -- LOOP if chunk no longer active or on lru list => it won't be found next time through.
          insureChunkValid[firstRefChunk];
          addVMPageItem[firstRefChunk];
          }
        ELSE {
          newChunk: REF Chunk;
          newChunk _ mapInRun[scratchChunk.run.segmentId, scratchChunk.run.segmentPage, firstRefChunk.run.segmentPage - scratchChunk.run.segmentPage, firstRefChunk.run.firstPage, TRUE, zeroFill];
          IF newChunk = NIL THEN LOOP -- try again; should find a chunk
          ELSE insureChunkValid[newChunk];
          markInterestInChunk[newChunk];
          newChunk.file _ fileHandle;
          addVMPageItem[refChunk: newChunk];
          };
        };
      ENDLOOP;
    ENDLOOP;
  returnScratchRefChunk[scratchChunk];
  };

ReadAheadPages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, pageRun: YggEnvironment.PageRun] ~ {
  };

UsePages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, tid: Camelot.tidT, pageRun: YggEnvironment.PageRun] RETURNS [vMPageSet: YggBuffMan.VMPageSet] ~ {
  vMPageSet _ ReadPages[fileHandle: fileHandle, tid: tid, pageRun: pageRun, zeroFill: TRUE];
  };

ShareVMPageSet: PUBLIC PROC [vMPageSet: YggBuffMan.VMPageSet] ~ {
  };

WritePages: PUBLIC PROC [fileHandle: YggInternal.FileHandle, tid: Camelot.tidT, to: INT, nPages: INT, from: LONG POINTER] ~ {
  nowFrom: LONG POINTER _ from;
  nowTo: INT _ to;
  nPagesLeft: INT _ nPages;
  vMPageSet: YggBuffMan.VMPageSet _ NIL;
  IF ~YggTransaction.IsTopLevel[tid] THEN ERROR TopLevelTransactionOnly;
  WHILE nPagesLeft > 0 DO
    didSomething: BOOL _ FALSE;
    vMPageSet _ ReadPages[fileHandle: fileHandle, tid: tid, pageRun: [firstPage: nowTo, count: nPagesLeft]];
    FOR vmps: YggBuffMan.VMPageSet _ vMPageSet, vmps.rest UNTIL vmps = NIL DO
      IF nowTo >= vmps.first.pageRun.firstPage AND nowTo <= vmps.first.pageRun.firstPage + vmps.first.pageRun.pages - 1 THEN {
        kernCode: Mach.kernReturnT;
        startOffset: INT _ nowTo - vmps.first.pageRun.firstPage;
        pageCount: INT _ MIN[vmps.first.pageRun.pages - startOffset, nPagesLeft];
        wordCount: INT _ pageCount * wordsPerPage;
        byteCount: INT _ pageCount * bytesPerPage;
        optr: Camelot.optrT;
        IF pageCount <= 0 THEN ERROR;
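        -- Camelot update protocol for this run: pin the affected bytes of the segment for this
        -- transaction, log their old and new values so the update can be undone or redone, and
        -- only then copy the caller's data into the mapped buffer.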
        didSomething _ TRUE;
        optr _ [segmentId: vmps.first.pageRun.segmentId, highOffset: 0, lowOffset: (vmps.first.pageRun.segmentPage + startOffset)*bytesPerPage];
        kernCode _ Camelot.DSPinObject[dsPort: YggEnvironment.dsPort, tid: tid, optr: optr, size: byteCount, raiseSignal: TRUE];
        kernCode _ Camelot.DSLogOldValueNewValue[dsPort: YggEnvironment.dsPort, tid: tid, optr: optr, oldValue: LOOPHOLE[vmps.first.buffer + startOffset*unitsPerPage], oldValueCnt: byteCount, newValue: LOOPHOLE[nowFrom], newValueCnt: byteCount, raiseSignal: TRUE];
        TRUSTED {PBasics.Move[dst: vmps.first.buffer + startOffset*unitsPerPage, src: nowFrom, nWords: wordCount];};
        nPagesLeft _ nPagesLeft - pageCount;
        nowFrom _ nowFrom + pageCount*unitsPerPage; -- advance the source pointer past all of the pages just copied
        nowTo _ nowTo + pageCount;
        }
      ELSE LOOP;
      ENDLOOP;
    vMPageSet _ NIL;
    IF ~didSomething THEN ERROR;
    ENDLOOP;
  };

WriteBytes: PUBLIC PROC [optr: Camelot.optrT, value: LONG POINTER, valueCnt: CARD] ~ {
  tid: Camelot.tidT; -- null tid
  newChunk: REF Chunk;
  firstPage: CARD;
  lastPage: CARD;
  pageOffset: CARD;
  [page: firstPage, pageOffset: pageOffset] _ MapOptrToPage[optr.highOffset, optr.lowOffset, 0];
  [page: lastPage] _ MapOptrToPage[optr.highOffset, optr.lowOffset, valueCnt-1];
  [] _ Camelot.DSPinObject[dsPort: YggEnvironment.dsPort, tid: tid, optr: optr, size: valueCnt, raiseSignal: TRUE];
  newChunk _ mapInRun[segmentId: optr.segmentId, segmentPage: firstPage, pages: lastPage - firstPage + 1, firstPage: 0, insertChunkInTable: FALSE];
  TRUSTED {[] _ PBasics.ByteBlt[to: [blockPointer: newChunk.startAddress, startIndex: pageOffset, stopIndexPlusOne: valueCnt+pageOffset], from: [blockPointer: value, startIndex: 0, stopIndexPlusOne: valueCnt]];};
  UnmapRun[newChunk];
  newChunk _ NIL;
  };

ReleaseVMPageSet: PUBLIC PROC [vMPageSet: YggBuffMan.VMPageSet, releaseState: YggBuffMan.ReleaseState, keep: BOOLEAN] ~ {
  };

ForceOutVMPageSet: PUBLIC PROC [vMPageSet: YggBuffMan.VMPageSet] ~ {
  FOR nowPage: YggBuffMan.VMPageSet _ vMPageSet, nowPage.rest UNTIL nowPage = NIL DO
    Camelot.DSQPreflush[dsPort: YggEnvironment.dsPort, optr: [segmentId: nowPage.first.pageRun.segmentId, highOffset: 0, lowOffset: nowPage.first.pageRun.segmentPage*bytesPerPage], sizeInBytes: nowPage.first.pageRun.pages*bytesPerPage];
    ENDLOOP;
  };

ForceOutFile: PUBLIC PROC [fileHandle: YggInternal.FileHandle] ~ {
  };

ForceOutEverything: PUBLIC PROC ~ {
  };

InitializeFilePageMgr: PUBLIC PROC [seqDescList: Camelot.ListOfSegmentDesc, seqPortList: Mach.ListOfPorts, firstPass: BOOL] RETURNS [firstTime: BOOL] ~ {
  sdl: Camelot.ListOfSegmentDesc _ seqDescList;
  spl: Mach.ListOfPorts _ seqPortList;
  lastSL: LIST OF YggCamelotSegment.SegPort _ NIL;
  makeListItem: PROC RETURNS [r: LIST OF YggCamelotSegment.SegPort] ~ {
    r _ LIST[[sdl.first, spl.first]];
    sdl _ sdl.rest;
    spl _ spl.rest;
    };
  IF firstPass THEN {
    SegmentList _ makeListItem[];
    lastSL _ SegmentList;
    WHILE sdl # NIL DO
      lastSL.rest _ makeListItem[];
      lastSL _ lastSL.rest;
      ENDLOOP;
    IF sdl # NIL OR spl # NIL THEN ERROR;
    }
  ELSE {
    sList: LIST OF YggCamelotSegment.SegPort _ NIL;
    sList _ makeListItem[];
    lastSL _ sList;
    WHILE sdl # NIL DO
      lastSL.rest _ makeListItem[];
      lastSL _ lastSL.rest;
      ENDLOOP;
    IF sdl # NIL OR spl # NIL THEN ERROR;
    firstTime _ YggFileInternal.InitializeFile[sList];
    };
  };

FindSegmentPager: PROC [segmentId: Camelot.segmentIdT] RETURNS [pagingObject: Mach.pagingObjectT] ~ {
  FOR lastSL: LIST OF YggCamelotSegment.SegPort _ SegmentList, lastSL.rest UNTIL lastSL = NIL DO
    IF lastSL.first.segment.segmentId = segmentId THEN {
      RETURN[lastSL.first.port];
      };
    ENDLOOP;
  ERROR;
  };
RestoreCacheToCleanState: PUBLIC PROC ~ {
  };

savedScratchRefChunk: REF Chunk;

getScratchRefChunk: ENTRY PROC RETURNS [scratchRefChunk: REF Chunk] ~ {
  IF savedScratchRefChunk # NIL THEN {
    scratchRefChunk _ savedScratchRefChunk;
    savedScratchRefChunk _ NIL;
    }
  ELSE {
    scratchRefChunk _ NEW[Chunk];
    };
  };

returnScratchRefChunk: ENTRY PROC [scratchRefChunk: REF Chunk] ~ {
  savedScratchRefChunk _ scratchRefChunk;
  };

mapInRun: PROC [segmentId: Camelot.segmentIdT, segmentPage: CARD, pages: INT, firstPage: INT, insertChunkInTable: BOOL _ TRUE, zeroFill: BOOL _ FALSE] RETURNS [newChunk: REF Chunk] ~ {
  insertChunkIfStillPossible: ENTRY PROC RETURNS [notPossible: BOOL _ FALSE] ~ {
    data: RedBlackTree.UserData;
    data _ RedBlackTree.Lookup[intervalToChunkMap, newChunk];
    IF data = NIL THEN {
      RedBlackTree.Insert[intervalToChunkMap, newChunk, newChunk];
      }
    ELSE {
      newChunk _ NIL;
      RETURN [TRUE];
      };
    };
  setValid: ENTRY PROC ~ {
    newChunk.valid _ TRUE;
    BROADCAST myCondition;
    };
  mappedAddress: Mach.vmAddressT;
  kernCode: Mach.kernReturnT;
  pagingObject: Mach.pagingObjectT;
  newChunk _ NEW[Chunk _ [run: [segmentId: segmentId, segmentPage: segmentPage, firstPage: 0, pages: MIN[pages, maxChunkSize], leader: FALSE]]];
  IF insertChunkInTable AND insertChunkIfStillPossible[] THEN RETURN;
  pagingObject _ FindSegmentPager[segmentId];
  IF zeroFill THEN Camelot.DSQZeroFill[dsPort: YggEnvironment.dsPort, optr: [segmentId: segmentId, highOffset: 0, lowOffset: segmentPage*bytesPerPage], sizeInBytes: newChunk.run.pages*bytesPerPage];
  [mappedAddress: mappedAddress, kernCode: kernCode] _ Mach.vmAllocateWithPager[targetTask: Mach.taskSelf[], address: 0, size: newChunk.run.pages*bytesPerPage, anywhere: TRUE, pagingObject: pagingObject, offset: newChunk.run.segmentPage*bytesPerPage, raiseSignal: TRUE];
  newChunk.startAddress _ LOOPHOLE[mappedAddress];
  newChunk.run.firstPage _ firstPage;
  setValid[];
  IF insertChunkInTable THEN SafeStorage.EnableFinalization[newChunk];
  };

UnmapRun: PROC [chunk: REF Chunk] ~ {
  kernCode: Mach.kernReturnT;
  kernCode _ Mach.vmDeallocate[targetTask: Mach.taskSelf[], address: LOOPHOLE[chunk.startAddress], size: chunk.run.pages*bytesPerPage, raiseSignal: TRUE];
  };

markInterestInChunk: ENTRY PROC [chunk: REF Chunk] ~ {
  chunk.useCount _ chunk.useCount + 1;
  chunk.lastTouchTime _ BasicTime.Now[];
  };

LookupFirst: ENTRY RedBlackTree.LookupProc = {
  data _ RedBlackTree.Lookup[self, lookupKey];
  IF data # NIL THEN
    DO
      prevData: RedBlackTree.UserData;
      prevData _ RedBlackTree.LookupNextSmaller[self, data];
      IF prevData = NIL THEN EXIT;
      IF CompareProc[lookupKey, prevData] # equal THEN EXIT;
      data _ prevData;
      ENDLOOP;
  };

GetKeyProc: RedBlackTree.GetKey = {
  chunk: REF Chunk _ NARROW[data];
  RETURN[chunk];
  };

CompareProc: RedBlackTree.Compare = {
  dataChunk: REF Chunk _ NARROW[data];
  keyChunk: REF Chunk _ NARROW[k];
  SELECT keyChunk.run.segmentId FROM
    > dataChunk.run.segmentId => RETURN [greater];
    < dataChunk.run.segmentId => RETURN [less];
    ENDCASE => {
      SELECT TRUE FROM
        keyChunk.run.segmentPage >= dataChunk.run.segmentPage + CARD[dataChunk.run.pages] => RETURN [greater];
        keyChunk.run.segmentPage + CARD[keyChunk.run.pages] <= dataChunk.run.segmentPage => RETURN [less];
        ENDCASE => RETURN [equal];
      };
  };

bytesPerPage64: ConstArith.Const _ [sign: positive, low: bytesPerPage, high: 0];
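-- MapOptrToPage turns the byte offset named by an optr (highOffset as the high-order part,
-- lowOffset as the low-order 32 bits, plus delta) into a segment page number and a byte
-- offset within that page by dividing by bytesPerPage with ConstArith. A worked example,
-- assuming 4-byte words so that bytesPerPage = 1024*4 = 4096 (the real value depends on
-- PBasics.bytesPerWord): highOffset = 0, lowOffset = 10000, delta = 0 gives
-- 10000 = 2*4096 + 1808, so page = 2 and pageOffset = 1808. WriteBytes uses this to find the
-- first and last segment pages touched by an optr and a byte count.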
MapOptrToPage: PROC [highOffset: CARD16, lowOffset: CARD32, delta: CARD] RETURNS [page: CARD, pageOffset: CARD] = TRUSTED {
  base, del, total, page64, pageOffset64: ConstArith.Const;
  base _ [sign: positive, low: lowOffset, high: highOffset];
  del _ [sign: positive, low: delta, high: 0];
  total _ ConstArith.Add[base, del];
  [page64, pageOffset64] _ ConstArith.DivMod[total, bytesPerPage64];
  RETURN[ConstArith.ToCard[page64], ConstArith.ToCard[pageOffset64]];
  };

FinalizationProcess: PROC = {
  DO
    innerFinalizationProcess: ENTRY PROC [] = {
      IF lruListTop = NIL THEN {
        lruListTop _ chunk;
        }
      ELSE {
        lruListTail.lruListNext _ chunk;
        chunk.lruListPrev _ lruListTail;
        };
      lruListTail _ chunk;
      };
    chunk: REF Chunk _ NIL;
    chunk _ NARROW[SafeStorage.FQNext[MyQueue]];
    innerFinalizationProcess[];
    chunk _ NIL;
    ENDLOOP;
  };

CacheTrimProcess: PROC = {
  ticksToWait: Process.Ticks;
  ticksToWait _ Process.MsecToTicks[213];
  DO
    overage: INT _ 0;
    Process.Pause[ticksToWait];
    overage _ RedBlackTree.Size[intervalToChunkMap] - desiredNumberOfChunks;
    IF overage > 0 THEN TrimLruList[overage];
    ENDLOOP;
  };

NumberOfIntervalsTrimmed: INT _ 0;
LastFewChunksFreed: ARRAY [0..32) OF ChunksFreedItem;
ChunksFreedItem: TYPE = RECORD [
  run: YggDIDMap.Run,
  startAddress: LONG POINTER _ NIL
  ];
NextFreePtr: INT _ 0;

TrimLruList: PROC [entriesToTrim: INT] = {
  FOR entNo: INT IN [1..entriesToTrim] DO
    grabTopOffOfLRU: ENTRY PROC ~ {
      IF lruListTop # NIL THEN {
        chunk _ lruListTop;
        lruListTop _ lruListTop.lruListNext;
        IF lruListTop = NIL THEN lruListTail _ NIL ELSE lruListTop.lruListPrev _ NIL;
        chunk.lruListNext _ NIL;
        chunk.lruListPrev _ NIL;
        address _ chunk.startAddress;
        chunk.startAddress _ NIL;
        IF RedBlackTree.Delete[intervalToChunkMap, chunk] = NIL THEN ERROR;
        kernCode _ Mach.vmDeallocate[targetTask: Mach.taskSelf[], address: LOOPHOLE[address], size: chunk.run.pages*bytesPerPage, raiseSignal: TRUE];
        NumberOfIntervalsTrimmed _ NumberOfIntervalsTrimmed + 1;
        LastFewChunksFreed[NextFreePtr] _ [chunk.run, address]; -- record the address that was just unmapped (chunk.startAddress is already NIL)
        NextFreePtr _ NextFreePtr + 1;
        IF NextFreePtr >= 32 THEN NextFreePtr _ 0;
        };
      };
    kernCode: Mach.kernReturnT;
    chunk: REF Chunk _ NIL;
    address: LONG POINTER _ NIL;
    grabTopOffOfLRU[];
    IF chunk = NIL THEN EXIT;
    ENDLOOP;
  };

Init: PROC ~ {
  intervalToChunkMap _ RedBlackTree.Create[getKey: GetKeyProc, compare: CompareProc];
  SafeStorage.EstablishFinalization[type: CODE[Chunk], npr: 1, fq: MyQueue ! SafeStorage.CantEstablishFinalization => CONTINUE;];
  TRUSTED {Process.Detach[FORK FinalizationProcess[]];};
  TRUSTED {Process.Detach[FORK CacheTrimProcess[]];};
  };

Init[];

END.

YggBuffManImpl.mesa
Copyright © 1988, 1989 by Xerox Corporation. All rights reserved.
Bob Hagmann July 6, 1989 9:28:11 am PDT

The buffer manager.

SHARES YggBuffMan, YggBuffManPrivate

Data types and variables

Exported procedures for buffer management

errors defined in this interface: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
The first data page of the file is logical page number 0. Returns a VMPageSet containing the pages [pageRun.firstPage..pageRun.firstPage + n) of the file, where n <= pageRun.count. The choice of n is entirely up to FilePageMgr. vMPageSet.pages points to the data for pageRun.firstPage, and vMPageSet.pageRun indicates what was returned. The caller has read/write access to these pages. Increments the use count of the Chunk returned in the VMPageSet.
insure that a concurrent chunk creation finishes before it is used
On lru list. Remove it and use it.
chunk contains first page needed
some chunks overlap, but chunk does not contain first page needed
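As an illustration of the ReadPages contract above, the following is a minimal sketch of a hypothetical client; ReadWholeRun, f, and tid are illustrative names and not part of this module. It assumes that repeated calls eventually cover the requested run and that clean is a legal YggBuffMan.ReleaseState value, as the release hints described below suggest.

ReadWholeRun: PROC [f: YggInternal.FileHandle, tid: Camelot.tidT, first: INT, count: INT] ~ {
  pagesLeft: INT _ count;
  nowPage: INT _ first;
  WHILE pagesLeft > 0 DO
    vmps: YggBuffMan.VMPageSet _ ReadPages[fileHandle: f, tid: tid, pageRun: [firstPage: nowPage, count: pagesLeft]];
    FOR l: YggBuffMan.VMPageSet _ vmps, l.rest UNTIL l = NIL DO
      -- l.first.buffer addresses the data for the pages described by l.first.pageRun
      nowPage _ nowPage + l.first.pageRun.pages;
      pagesLeft _ pagesLeft - l.first.pageRun.pages;
      ENDLOOP;
    ReleaseVMPageSet[vMPageSet: vmps, releaseState: clean, keep: FALSE]; -- give back the use counts taken by ReadPages
    ENDLOOP;
  };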
errors defined in this interface: NoSuchFile, NoSuchVolume, PageRunArgIllegal, VolumeWentOffline.
Semantically identical to ReadPages, except that it notifies the file page manager that the indicated pages are likely to be read soon rather than waiting for them now, and except for its handling of PageRunExtendsPastEof.

errors defined in this interface: NoSuchFile, NoSuchVolume, PageRunArgIllegal, PageRunExtendsPastEof, VolumeWentOffline.
Semantically identical to ReadPages, except that the contents of the pages given by the PageRun are undefined; the implementation may therefore avoid actually reading the pages.

errors defined in this interface: none.
Increments the use count of the Chunk in the VMPageSet.

Top level transactions only.

Back door for recovery. This is called whenever a SR_RestoreObject or SR_RestoreBatch message is received (multiple calls for SR_RestoreBatch). These occur at server startup as well as during normal operations when a transaction aborts.

errors defined in this interface: none.
Indicates that the client is through with the given VMPageSet (decrements the use count). keep is a hint that the FilePageManager should try to keep these pages in its cache, as the client expects them to be reused shortly. The ReleaseState hint means: clean: the client has not dirtied this page; writeBatched: will cause IO for some likely disk-contiguous pages to be bunched together; writeIndividual: the client is probably not dealing with disk-contiguous pages. The implementation is optimized for "normal" clients, not mixed modes on a file, etc. Other types of client usage and incorrect hints, such as specifying clean for a dirty page, will not result in data being lost, but are likely to degrade performance. We expect: sequential access clients to release pages with keep = FALSE and (clean or writeBatchedNoWait), random access clients to release pages with keep = TRUE and (clean or writeIndividualNoWait), and the log to release pages with various options. I don't think that this procedure has to do anything. The chunk will

errors defined in this interface: none.
Returns when all the dirty pages in the VMPageSet have been written to the disk. Does not alter the use count.

errors defined in this interface: NoSuchFile, NoSuchVolume, VolumeWentOffline.
Returns when all the dirty pages in this file have been written to the disk.

errors defined in this interface: NoSuchVolume, VolumeWentOffline.
Returns when all the dirty pages under control of the file page manager have been written to the disk.

errors defined in this interface: none.

lookup segment in list

errors defined in this interface: none.
For debugging. Call ForceOutEverything first.

Internal procedures

Map in a run from a segment. Normally, insertChunkInTable is TRUE and the Chunk is entered into the table of mapped chunks. During recovery, mapInRun is called with insertChunkInTable as FALSE; the chunk is not entered into the table, and UnmapRun must be called by the client when the run is no longer needed.

some other thread has concurrently inserted into the tree. Try again.

Internal red black procs

A Red Black tree is used to store and find chunks. The tree is indexed by the segment and page run of the chunk. No chunks intersect. To find an interval, it is looked up in the tree. Any intersecting chunk is "equal" to the interval. The tree is then traversed backwards to find the first intersecting interval.

PROC [self: Table, lookupKey: Key] RETURNS [data: UserData];
PROC [data: UserData] RETURNS [Key]
PROC [k: Key, data: UserData] RETURNS [Basics.Comparison]
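To make the interval lookup concrete, here is a minimal sketch; FindFirstOverlap and its parameters are hypothetical and only mirror what ReadPages does with its scratch chunk. A probe chunk describing the wanted page interval compares equal to any chunk it intersects, so LookupFirst finds one intersecting chunk and then backs up with LookupNextSmaller until the previous chunk no longer intersects, yielding the leftmost overlap.

FindFirstOverlap: PROC [segmentId: Camelot.segmentIdT, firstPage: CARD, pages: INT] RETURNS [chunk: REF Chunk _ NIL] ~ {
  probe: REF Chunk _ NEW[Chunk _ [run: [segmentId: segmentId, segmentPage: firstPage, firstPage: 0, pages: pages, leader: FALSE]]];
  data: RedBlackTree.UserData _ LookupFirst[intervalToChunkMap, probe];
  IF data # NIL THEN chunk _ NARROW[data]; -- leftmost chunk whose run intersects [firstPage..firstPage+pages)
  };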
Map optr to pages

Finalization

Trim lru list

Initialization