<>
<>

DIRECTORY
  Ascii USING [Lower],
  BTree USING [Compare, CompareEntries, DeleteKey, Entry, EntryFromRecord, EntrySize, EnumerateEntries, GetState, Key, New, NewPathStk, NewRecordFromEntry, Open, PageNumber, PagePtr, PageStorage, PathStk, ReadEntry, ReferencePage, Relation, ReleasePage, SalvageEntries, SetState, SetUpdateInProgress, State, statePage, Tree, UpdateEntry, UpdateType, Validate],
  BTreeSimple,
  BTreeVM USING [CacheSize, FilePagesPerPage],
  FS USING [BytesForPages, GetInfo, OpenFile, Read, SetByteCountAndCreatedTime, SetPageCount, WordsForPages, Write],
  PrincOpsUtils USING [GetClockPulses, LongCopy],
  Process USING [Detach],
  Rope USING [InlineFlatten, TextRep],
  SafeStorage USING [CantEstablishFinalization, EnableFinalization, EstablishFinalization, FinalizationQueue, FQNext, NewFQ, ReEstablishFinalization],
  VM USING [AddressForPageNumber, Allocate, Free, Interval, PagesForWords, PageNumberForAddress];

BTreeSimpleImpl: CEDAR MONITOR -- the monitor protects the BTreeVM cache only
  LOCKS h USING h: Handle
  IMPORTS Ascii, BTree, FS, PrincOpsUtils, Process, Rope, SafeStorage, VM
  EXPORTS BTreeSimple
  SHARES Rope =
  BEGIN OPEN BTreeSimple;

<>

KeyFromEntry: PUBLIC UNSAFE PROCEDURE [entryKey: EntryKey] RETURNS [key: InternalKey] = UNCHECKED
  BEGIN
  IF entryKey=NIL THEN RETURN[NIL];
  key _ NEW[Rope.TextRep[entryKey.length] _ [text[length: entryKey.length, text: ]]];
  PrincOpsUtils.LongCopy[to: BASE[DESCRIPTOR[key.text]], from: BASE[DESCRIPTOR[entryKey.text]], nwords: (entryKey.length+1)/2];
  END;

ValueFromEntry: PUBLIC UNSAFE PROCEDURE [entryValue: EntryValue] RETURNS [value: Value] = UNCHECKED
  BEGIN
  IF entryValue=NIL THEN RETURN[NIL];
  value _ NEW[ValueObject[entryValue.length]];
  PrincOpsUtils.LongCopy[to: BASE[DESCRIPTOR[value.words]], from: BASE[DESCRIPTOR[entryValue.words]], nwords: entryValue.length];
  END;
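<<KeyFromEntry and ValueFromEntry copy entry contents out of a B-tree page into freshly allocated, collectible objects. Keys are CHAR sequences packed two characters per word, which is why (entryKey.length+1)/2 words are copied, while values are word sequences copied at their full word length.>>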
DefaultCompare: Compare --[key: InternalKey, entryKey: EntryKey] RETURNS [Comparison]-- = UNCHECKED
  BEGIN
  FOR i: CARDINAL IN [0 .. MIN[key.length, entryKey.length]) DO
    kc: CHAR _ Ascii.Lower[key[i]];
    ec: CHAR _ Ascii.Lower[entryKey[i]];
    IF kc#ec THEN RETURN[IF kc<ec THEN less ELSE greater];
    ENDLOOP;
  RETURN[IF key.length<entryKey.length THEN less ELSE IF key.length>entryKey.length THEN greater ELSE equal];
  END;

<>

New: PUBLIC PROCEDURE [compareProcs: CompareProcs _ defaultCompareProcs, initialState: State[closed..suspended] _ closed] RETURNS [tree: Tree] =
  BEGIN
  RETURN[BTree.New[
    repPrim: [compare: LOOPHOLE[compareProcs.compare], compareEntries: LOOPHOLE[compareProcs.compareEntries], entrySize: EntrySize, entryFromRecord: EntryFromRecord, newRecordFromEntry: NewRecordFromEntry],
    storPrim: [referencePage: ReferencePage, releasePage: ReleasePage],
    minEntrySize: EntryKeyObject[1].SIZE+ValueObject[0].SIZE,
    initialState: initialState]];
  END;

Open: PUBLIC PROCEDURE [tree: Tree, file: FS.OpenFile, filePagesPerPage: FilePagesPerPage _ 1, cacheSize: CacheSize _ 25, initialize: BOOLEAN _ FALSE, maintainRecomputableState: BOOLEAN _ TRUE] =
  BEGIN
  storage: Handle = CacheOpen[file: file, filePagesPerPage: filePagesPerPage, cacheSize: cacheSize];
  tree.Open[storage: storage, pageSize: FS.WordsForPages[filePagesPerPage], initialize: initialize, maintainRecomputableState: maintainRecomputableState];
  END;

SetState: PUBLIC PROCEDURE [tree: Tree, state: State[closed..suspended]] =
  BEGIN
  tree.SetState[state];
  END;

GetState: PUBLIC PROCEDURE [tree: Tree] RETURNS [state: State, entryCount: LONG CARDINAL, greatestPage: INT, depth: CARDINAL, file: FS.OpenFile] =
  BEGIN
  storage: BTree.PageStorage;
  [state: state, entryCount: entryCount, greatestPage: greatestPage, depth: depth, storage: storage] _ tree.GetState[];
  file _ NARROW[storage, Handle].file;
  END;

GetStatistics: PUBLIC PROCEDURE [tree: Tree] RETURNS [hits, misses, reads, writes, cumChainLength, cumReadWriteTime: LONG CARDINAL] =
  BEGIN
  RETURN GetCacheStats[NARROW[tree.GetState[].storage]];
  END;

Validate: PUBLIC PROCEDURE [tree: Tree] =
  BEGIN
  tree.Validate[];
  END;

SetUpdateInProgress: PUBLIC PROCEDURE [tree: Tree, updateInProgress: BOOLEAN] =
  BEGIN
  tree.SetUpdateInProgress[updateInProgress];
  END;
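<<A minimal usage sketch of the procedures exported above, not part of the module itself; it assumes an FS.OpenFile (here called file) obtained elsewhere, and the key, value and variable names are illustrative only:
  tree: Tree _ New[];
  Open[tree: tree, file: file, initialize: TRUE];
  UpdateRecord[tree: tree, key: "alpha", value: someValue];  -- someValue: Value, built by the client
  v: Value _ ReadValue[tree: tree, key: "alpha"];
  SetState[tree, closed];
>>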
<>

ReadRecord: PUBLIC PROCEDURE [tree: Tree, key: Key, relation: Relation _ equal, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE] RETURNS [actualKey: InternalKey, value: Value] = TRUSTED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [key: EntryKey, value: EntryValue] = UNCHECKED
    BEGIN
    IF key=NIL THEN {actualKey _ NIL; returnValue _ NIL}
    ELSE {actualKey _ KeyFromEntry[key]; returnValue _ ValueFromEntry[value]};
    END;
  returnValue: Value;
  ReadEntry[tree: tree, key: key, relation: relation, pathStk: pathStk, useExistingPath: useExistingPath, Proc: EntryProc];
  RETURN [actualKey, returnValue];
  END;

ReadValue: PUBLIC PROCEDURE [tree: Tree, key: Key, relation: Relation _ equal, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE] RETURNS [value: Value] = TRUSTED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [key: EntryKey, value: EntryValue] = UNCHECKED
    BEGIN
    returnValue _ (IF value=NIL THEN NIL ELSE ValueFromEntry[value]);
    END;
  returnValue: Value;
  ReadEntry[tree: tree, key: key, relation: relation, pathStk: pathStk, useExistingPath: useExistingPath, Proc: EntryProc];
  RETURN [returnValue];
  END;

EnumerateRecords: PUBLIC PROCEDURE [tree: Tree, key: Key, relation: Relation _ equal, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE, Proc: PROCEDURE [key: InternalKey, value: Value] RETURNS [continue: BOOLEAN]] RETURNS [exhausted: BOOLEAN] = TRUSTED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [key: EntryKey, value: EntryValue] RETURNS [continue: BOOLEAN] = UNCHECKED
    { RETURN [Proc[key: KeyFromEntry[key], value: ValueFromEntry[value]]] };
  exhausted _ EnumerateEntries[tree: tree, key: key, pathStk: pathStk, useExistingPath: useExistingPath, Proc: EntryProc];
  END;

UpdateRecord: PUBLIC PROCEDURE [tree: Tree, key: Key, value: Value, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE, updateType: UpdateType _ insertOrReplace] = TRUSTED
  BEGIN
  ProduceEntry: UNSAFE PROCEDURE [value: EntryValue] = UNCHECKED
    {PrincOpsUtils.LongCopy[to: value, from: suppliedValue, nwords: ValueObject[suppliedValue.length].SIZE]};
  suppliedValue: LONG POINTER TO ValueObject = LOOPHOLE[value];
  UpdateEntry[tree: tree, key: key, pathStk: pathStk, useExistingPath: useExistingPath, valueLength: suppliedValue.length, Proc: ProduceEntry, updateType: updateType];
  END;

DeleteKey: PUBLIC PROCEDURE [tree: Tree, key: Key, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE] RETURNS [found: BOOLEAN] =
  BEGIN
  RETURN[tree.DeleteKey[key: key.InlineFlatten[], pathStk: pathStk, useExistingPath: useExistingPath]];
  END;

<>

ReadEntry: PUBLIC UNSAFE PROCEDURE [tree: Tree, key: Key, relation: Relation _ equal, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE, Proc: UNSAFE PROCEDURE [key: EntryKey, value: EntryValue]] = UNCHECKED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [entry: BTree.Entry] = UNCHECKED
    BEGIN
    IF entry=NIL THEN Proc[NIL, NIL]
    ELSE Proc[entry, entry+EntryKeyObject[LOOPHOLE[entry, EntryKey].length].SIZE];
    END;
  tree.ReadEntry[key: key.InlineFlatten[], relation: relation, pathStk: pathStk, useExistingPath: useExistingPath, Proc: EntryProc];
  END;

EnumerateEntries: PUBLIC UNSAFE PROCEDURE [tree: Tree, key: Key, relation: Relation _ equal, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE, Proc: UNSAFE PROCEDURE [key: EntryKey, value: EntryValue] RETURNS [continue: BOOLEAN]] RETURNS [exhausted: BOOLEAN] = UNCHECKED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [entry: BTree.Entry] RETURNS [continue: BOOLEAN] = UNCHECKED
    {RETURN[Proc[entry, entry+EntryKeyObject[LOOPHOLE[entry, EntryKey].length].SIZE]]};
  exhausted _ tree.EnumerateEntries[key: key.InlineFlatten[], relation: relation, pathStk: pathStk, useExistingPath: useExistingPath, Proc: EntryProc];
  END;

UpdateEntry: PUBLIC UNSAFE PROCEDURE [tree: Tree, key: Key, pathStk: PathStk _ NIL, useExistingPath: BOOLEAN _ FALSE, valueLength: CARDINAL, Proc: UNSAFE PROCEDURE [value: EntryValue], updateType: UpdateType _ insertOrReplace] = UNCHECKED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [entry: BTree.Entry] = UNCHECKED
    BEGIN
    entryKey: EntryKey = LOOPHOLE[entry];
    entryValue: EntryValue = LOOPHOLE[entry+entryKeySize];
    LOOPHOLE[entryKey, LONG POINTER TO CARDINAL]^ _ internalKey.length; -- entryKey.length _ internalKey.length
    PrincOpsUtils.LongCopy[to: BASE[DESCRIPTOR[entryKey.text]], from: BASE[DESCRIPTOR[internalKey.text]], nwords: (internalKey.length+1)/2];
    Proc[entryValue];
    LOOPHOLE[entryValue, LONG POINTER TO CARDINAL]^ _ valueLength; -- entryValue.length _ valueLength
    END;
  internalKey: InternalKey = key.InlineFlatten[];
  entryKeySize: CARDINAL = EntryKeyObject[internalKey.length].SIZE;
  tree.UpdateEntry[key: internalKey, pathStk: pathStk, useExistingPath: useExistingPath, words: entryKeySize+ValueObject[valueLength].SIZE, Proc: EntryProc, updateType: updateType];
  END;
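<<On a page, each entry is stored as an EntryKeyObject immediately followed by a ValueObject: the entry procedures above locate the value part at entry+EntryKeyObject[keyLength].SIZE, UpdateEntry lays the two parts down in that order, and EntrySize below recomputes the total size from the two stored lengths.>>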
SalvageEntries: PUBLIC UNSAFE PROCEDURE [tree: Tree, Proc: UNSAFE PROCEDURE [key: EntryKey, value: EntryValue] RETURNS [continue: BOOLEAN]] RETURNS [exhausted: BOOLEAN] = UNCHECKED
  BEGIN
  EntryProc: UNSAFE PROCEDURE [entry: BTree.Entry] RETURNS [continue: BOOLEAN] = UNCHECKED
    {RETURN[Proc[entry, entry+EntryKeyObject[LOOPHOLE[entry, EntryKey].length].SIZE]]};
  exhausted _ tree.SalvageEntries[Proc: EntryProc];
  END;

<>

NewPathStk: PUBLIC PROCEDURE RETURNS [pathStk: PathStk] =
  BEGIN
  RETURN[BTree.NewPathStk[]];
  END;

<>
<>

EntrySize: BTree.EntrySize --[entry: BTree.Entry] RETURNS [words: BTree.EntSize]-- = UNCHECKED
  BEGIN
  words _ EntryKeyObject[LOOPHOLE[entry, EntryKey].length].SIZE;
  words _ words + ValueObject[LOOPHOLE[entry+words, EntryValue].length].SIZE;
  END;

EntryFromRecord: BTree.EntryFromRecord --[record: BTree.Record] RETURNS [entry: BTree.Entry]-- = UNCHECKED
  BEGIN
  ERROR Bug[cantGetHere]; -- we never call the BTree procedures that traffic in Records
  END;

NewRecordFromEntry: BTree.NewRecordFromEntry --[entry: BTree.Entry] RETURNS [record: BTree.Record]-- = UNCHECKED
  BEGIN
  ERROR Bug[cantGetHere]; -- we never call the BTree procedures that traffic in Records
  END;

<>
<>

CacheIndex: TYPE = [0 .. LAST[BTreeVM.CacheSize]];
noIndex: CacheIndex = LAST[CacheIndex];

Handle: TYPE = REF VMObject;

VMObject: TYPE = MONITORED RECORD [
  file: FS.OpenFile,
  filePages: INT,
  filePagesPerPage: BTreeVM.FilePagesPerPage,
  vmPagesPerPage: CARDINAL,
  rover: CacheIndex,
  cache: REF Cache,
  hits, misses, reads, writes, cumChainLength, cumReadWriteTime: LONG CARDINAL _ 0,
  hashTable: PACKED SEQUENCE count: CARDINAL OF CacheIndex
  ];

Cache: TYPE = RECORD [PACKED SEQUENCE count: CacheIndex OF CacheEntry];

CacheEntry: TYPE = RECORD [
  pNum: BTree.PageNumber,
  ptr: BTree.PagePtr,
  referenceCount: [0..256),
  hash: CacheIndex,
  empty, recentlyUsed, dirty: BOOLEAN
  ];
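<<The cache maps B-tree page numbers to buffers through a chained hash table: hashTable (sized 2*cacheSize+1 by CacheOpen) holds the index of the first cache entry for each bucket, further entries in the same bucket are linked through CacheEntry.hash, and noIndex terminates a chain. referenceCount, recentlyUsed and dirty drive page pinning, replacement and write-back below.>>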
CacheOpen: PROC [file: FS.OpenFile, filePagesPerPage: BTreeVM.FilePagesPerPage, cacheSize: BTreeVM.CacheSize] RETURNS [Handle] =
  BEGIN
  h: Handle _ NEW[VMObject[2*cacheSize+1]];
  h.file _ file;
  h.filePages _ 0; -- indicates that we don't know --
  h.filePagesPerPage _ filePagesPerPage;
  h.vmPagesPerPage _ VM.PagesForWords[FS.WordsForPages[filePagesPerPage]];
  h.rover _ 0;
  h.cache _ NEW[Cache[cacheSize]];
  FOR i: CacheIndex IN [0 .. h.cache.count) DO
    h.cache[i].ptr _ NIL;
    h.cache[i].empty _ TRUE;
    ENDLOOP;
  FOR c: CARDINAL IN [0..h.count) DO h.hashTable[c] _ noIndex; ENDLOOP;
  SafeStorage.EnableFinalization[h];
  RETURN [h];
  END;

ReferencePage: BTree.ReferencePage --[storage: PageStorage, number: PageNumber, type: ReferenceType] RETURNS [ptr: PagePtr]-- =
  BEGIN
  InnerReference: ENTRY PROCEDURE [h: Handle] = INLINE
    BEGIN
    hash: CARDINAL = HashIndex[h, number];
    i: CacheIndex;
    FOR i _ h.hashTable[hash], h.cache[i].hash UNTIL i = noIndex DO
      h.cumChainLength _ h.cumChainLength+1;
      IF h.cache[i].pNum = number THEN { h.hits _ h.hits+1; EXIT };
      REPEAT
        FINISHED =>
          BEGIN -- not found --
          h.misses _ h.misses+1;
          i _ GetEmptyCacheEntry[h];
          h.cache[i].pNum _ number;
          h.cache[i].empty _ FALSE;
          IF type # new THEN LoadCachePage[h, i];
          h.cache[i].hash _ h.hashTable[hash];
          h.hashTable[hash] _ i;
          END;
      ENDLOOP;
    h.cache[i].recentlyUsed _ TRUE;
    h.cache[i].referenceCount _ h.cache[i].referenceCount + 1;
    IF type # read THEN h.cache[i].dirty _ TRUE;
    IF type = new THEN ExtendFile[h, number];
    ptr _ h.cache[i].ptr;
    END;
  InnerReference[NARROW[storage]];
  END;

ReleasePage: BTree.ReleasePage --[storage: PageStorage, number: PageNumber, update: UpdateState]-- =
  BEGIN
  InnerRelease: ENTRY PROCEDURE [h: Handle] = INLINE
    BEGIN
    i: CacheIndex;
    FOR i _ h.hashTable[HashIndex[h, number]], h.cache[i].hash UNTIL i = noIndex DO
      IF h.cache[i].pNum = number THEN EXIT;
      REPEAT FINISHED => ERROR Bug[pageNotInUse];
      ENDLOOP;
    IF h.cache[i].referenceCount = 0 THEN ERROR Bug[pageNotInUse]
    ELSE h.cache[i].referenceCount _ h.cache[i].referenceCount - 1;
    IF update # unchanged THEN
      BEGIN
      IF number = BTree.statePage THEN
        BEGIN -- is the statePage; can be either startOfUpdate or endOfUpdate
        IF update = startOfUpdate THEN StoreCachePage[h, i] -- only this page is dirty, so just write it
        ELSE WriteDirtyPagesWithStatePageLast[h]; -- many pages possibly dirty
        END
      ELSE StoreCachePage[h, i]; -- is not the statePage; must be endOfUpdate; only page i is dirty
      END;
    END;
  InnerRelease[NARROW[storage]];
  END;
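<<ReferencePage pins a page (bumping referenceCount) and marks it dirty for any non-read reference; ReleasePage unpins it and, when the release reports a change, writes dirty pages back so that the statePage reaches the file only after the other dirty pages: a startOfUpdate release of the statePage writes just that page, while an endOfUpdate release of the statePage flushes every other dirty page first via WriteDirtyPagesWithStatePageLast.>>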
GetCacheStats: ENTRY PROC [h: Handle] RETURNS [hits, misses, reads, writes, cumChainLength, cumReadWriteTime: LONG CARDINAL] =
  BEGIN
  RETURN [hits: h.hits, misses: h.misses, reads: h.reads, writes: h.writes, cumChainLength: h.cumChainLength, cumReadWriteTime: h.cumReadWriteTime];
  END;

<>
<>
<>
<>
<>
<>
<>
<>
<>
<>

FreeBuffers: ENTRY PROC [h: Handle] =
  BEGIN
  FOR i: CacheIndex IN [0 .. h.cache.count) DO
    IF h.cache[i].ptr#NIL THEN {ReleaseSpace[h, h.cache[i].ptr]; h.cache[i].ptr _ NIL};
    h.cache[i].empty _ TRUE;
    ENDLOOP;
  FOR c: CARDINAL IN [0..h.count) DO h.hashTable[c] _ noIndex; ENDLOOP;
  END;

HashIndex: INTERNAL PROC [h: Handle, pNum: BTree.PageNumber] RETURNS [CARDINAL] = INLINE
  { RETURN [pNum MOD h.count] };

WriteDirtyPagesWithStatePageLast: INTERNAL PROC [h: Handle] =
  BEGIN
  statePageIndex: CacheIndex _ noIndex;
  FOR i: CacheIndex IN [0 .. h.cache.count) DO
    IF NOT h.cache[i].empty AND h.cache[i].dirty THEN
      BEGIN
      IF h.cache[i].pNum = BTree.statePage THEN statePageIndex _ i
      ELSE StoreCachePage[h, i];
      END;
    ENDLOOP;
  IF statePageIndex # noIndex THEN StoreCachePage[h, statePageIndex];
  END;

GetEmptyCacheEntry: INTERNAL PROC [h: Handle] RETURNS [i: CacheIndex] =
  <>
  BEGIN
  FOR cntr: CARDINAL IN [0..2*h.cache.count) DO
    i _ (h.rover + cntr) MOD h.cache.count;
    SELECT TRUE FROM
      h.cache[i].empty =>
        BEGIN -- empty entry
        IF h.cache[i].ptr = NIL THEN h.cache[i].ptr _ GetSpace[h];
        h.cache[i].recentlyUsed _ FALSE;
        h.cache[i].dirty _ FALSE;
        h.cache[i].referenceCount _ 0;
        EXIT;
        END;
      h.cache[i].referenceCount = 0 =>
        BEGIN -- not currently being used
        IF h.cache[i].recentlyUsed THEN h.cache[i].recentlyUsed _ FALSE
        ELSE
          BEGIN -- not even recently used
          IF h.cache[i].dirty THEN
            IF h.cache[i].pNum = BTree.statePage THEN LOOP -- never force out the statePage
            ELSE StoreCachePage[h, i];
          EXIT;
          END;
        END;
      ENDCASE;
    REPEAT FINISHED => ERROR Bug[noAvailableCacheEntries];
    ENDLOOP;
  IF NOT h.cache[i].empty THEN
    BEGIN
    hx: CARDINAL = HashIndex[h, h.cache[i].pNum];
    IF h.hashTable[hx] = i THEN h.hashTable[hx] _ h.cache[i].hash
    ELSE
      FOR j: CacheIndex _ h.hashTable[hx], h.cache[j].hash UNTIL j = noIndex DO
        IF h.cache[j].hash = i THEN {h.cache[j].hash _ h.cache[i].hash; EXIT};
        REPEAT FINISHED => ERROR Bug[cacheEntryNotInHashTable];
        ENDLOOP;
    h.cache[i].empty _ TRUE;
    END;
  h.rover _ (i+1) MOD h.cache.count;
  END;
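<<GetEmptyCacheEntry implements a second-chance (clock) replacement policy: starting at rover it makes up to two sweeps over the cache, taking the first empty slot, clearing the recentlyUsed bit on unpinned entries on the first pass, and evicting the first unpinned entry that is not recently used (writing it back first if dirty, but never evicting the statePage); the evicted entry is then unlinked from its hash chain and rover advances past the chosen slot.>>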
ExtendFile: INTERNAL PROC [h: Handle, n: BTree.PageNumber] =
  BEGIN
  fileSizeForNPlus1: INT = (n+1)*h.filePagesPerPage;
  IF h.filePages = 0 THEN h.filePages _ h.file.GetInfo[].pages;
  IF h.filePages < fileSizeForNPlus1 THEN
    BEGIN
    h.filePages _ fileSizeForNPlus1 + 8*h.filePagesPerPage;
    h.file.SetPageCount[h.filePages];
    h.file.SetByteCountAndCreatedTime[bytes: FS.BytesForPages[h.filePages]];
    END;
  END;

LoadCachePage: INTERNAL PROC [h: Handle, i: CacheIndex] =
  BEGIN
  then: LONG CARDINAL = PrincOpsUtils.GetClockPulses[];
  TRUSTED {h.file.Read[from: h.cache[i].pNum*h.filePagesPerPage, nPages: h.filePagesPerPage, to: h.cache[i].ptr]};
  h.cumReadWriteTime _ h.cumReadWriteTime + (PrincOpsUtils.GetClockPulses[]-then);
  h.reads _ h.reads+1;
  END;

StoreCachePage: INTERNAL PROC [h: Handle, i: CacheIndex] =
  BEGIN
  then: LONG CARDINAL = PrincOpsUtils.GetClockPulses[];
  TRUSTED {h.file.Write[to: h.cache[i].pNum*h.filePagesPerPage, nPages: h.filePagesPerPage, from: h.cache[i].ptr]};
  h.cumReadWriteTime _ h.cumReadWriteTime + (PrincOpsUtils.GetClockPulses[]-then);
  h.writes _ h.writes+1;
  h.cache[i].dirty _ FALSE;
  END;

GetSpace: INTERNAL PROC [h: Handle] RETURNS [BTree.PagePtr] = TRUSTED
  BEGIN
  interval: VM.Interval = VM.Allocate[count: h.vmPagesPerPage];
  RETURN [VM.AddressForPageNumber[interval.page]];
  END;

ReleaseSpace: INTERNAL PROC [h: Handle, ptr: BTree.PagePtr] = TRUSTED
  BEGIN
  VM.Free[[page: VM.PageNumberForAddress[ptr], count: h.vmPagesPerPage]];
  END;

<>

Bug: ERROR [type: BugType] = CODE;
BugType: TYPE = {cacheEntryNotInHashTable, cantGetHere, noAvailableCacheEntries, pageNotInUse};

<>

Initialize: PROC =
  BEGIN
  finalizationQueue _ SafeStorage.NewFQ[];
  SafeStorage.EstablishFinalization[type: VMObject.CODE, npr: 0, fq: finalizationQueue !
    SafeStorage.CantEstablishFinalization => SafeStorage.ReEstablishFinalization[type: VMObject.CODE, npr: 0, fq: finalizationQueue]];
  TRUSTED {Process.Detach[FORK FinalizationProcess]};
  END;

FinalizationProcess: PROC =
  BEGIN
  DO
    h: Handle _ NARROW [SafeStorage.FQNext[finalizationQueue]];
    FreeBuffers[h];
    h _ NIL;
    ENDLOOP;
  END;

finalizationQueue: SafeStorage.FinalizationQueue;

Initialize[];

END.