<>
<>
<>
<>
DIRECTORY
  Atom USING [GetPName, MakeAtom],
  Basics USING [BITAND, ByteBlt, charsPerWord, Comparison, Move],
  BasicTime USING [Now, nullGMT],
  BTree USING [DeleteKey, Entry, EntSize, EnumerateEntries, Key, New, NewPathStk, Open, PageSize, PathStk, ReferencePage, ReleasePage, Tree, UpdateEntry],
  Camelot USING [btidT, tidT],
  Process USING [Detach, InitializeCondition, MsecToTicks],
  RedBlackTree USING [Compare, Create, Delete, GetKey, Insert, Lookup, Table, UserData],
  RefText USING [New, TrustTextAsRope],
  Rope USING [Equal, Fetch, Length, NewText, ROPE, Substr, Text, TextBound],
  SafeStorage USING [EnableFinalization, EstablishFinalization, FinalizationQueue, FQNext, IsFinalizationEnabled, NewFQ],
  YggBuffMan USING [ReadPages, ReleaseState, ReleaseVMPageSet, UsePages, VMPageSet],
  YggDID USING [CompareDIDs, DID, EqualDIDs, HashDID, StableHashDID],
  YggDIDMap,
  YggDIDMapPrivate USING [DocItem, DocPart, DocPartPtr, Document, DocumentRep, Entry, EntryPtr, OverheadPerEntry, notCompressed, PartType, RunItem, SegmentItem, SegmentRep, TextRep, TextRP],
  YggDIDPrivate USING [DIDRep],
  YggEnvironment USING [nullDID],
  YggFile USING [BytesForPages, FileHandle, Info, Open, PageCount, SetSize],
  YggFixedNames USING [IndexPrefix],
  YggInternal USING [Document, FileHandle];

YggDIDMapImpl: CEDAR MONITOR
  IMPORTS Atom, Basics, BasicTime, BTree, Process, RedBlackTree, RefText, Rope, SafeStorage, YggBuffMan, YggDID, YggFile, YggFixedNames
  EXPORTS YggInternal, YggDIDMap, YggDID
  = BEGIN

<>

Document: TYPE = REF DocumentRep;
DocumentRep: PUBLIC TYPE = YggDIDMapPrivate.DocumentRep;
DID: PUBLIC TYPE ~ REF DIDRep;
DIDRep: PUBLIC TYPE ~ YggDIDPrivate.DIDRep;

TreeEntry: TYPE = REF TreeEntryRep;
TreeEntryRep: TYPE = RECORD [
  interlock: BOOL _ FALSE,
  tid: Camelot.tidT, -- holder of interlock
  tree: BTree.Tree,
  file: YggFile.FileHandle,
  currentVMPageSets: LIST OF YggBuffMan.VMPageSet _ NIL
  ];
Btrees: ARRAY [0..64) OF TreeEntry;

BtreePageSizeInWords: INT = 4096;
FilePagesPerBtreePage: INT = 8;
MaxWordsPerEntry: INT = (BtreePageSizeInWords - 6)/5;

NextDID: YggDIDPrivate.DIDRep;
LastDIDAllocateable: YggDIDPrivate.DIDRep;
LastDidLow: CARD _ 1000000000;
NumberOfDIDsToGrap: CARD _ 200;
ThresholdToGrabDIDs: CARD _ 20;

KeyObject: TYPE = RECORD [did: YggDID.DID, extensionCount: INT32];

EntryDataBuffer: TYPE = RECORD[SEQUENCE COMPUTED CARD OF EntryWord];
EntryWord: TYPE = MACHINE DEPENDENT RECORD[
  SELECT OVERLAID * FROM
    card => [card: CARD32],
    int => [int: INT32],
    pair => [hi, lo: CARD16],
    bytes => [hh, hl, lh, ll: BYTE],
    bits => [bits: PACKED ARRAY [0..32) OF BOOL],
    ENDCASE
  ];

updateAction: TYPE = {create, delete, newValue};
updates: TYPE = RECORD[
  did: YggDID.DID,
  tid: Camelot.tidT,
  action: updateAction,
  doc: Document
  ];

TIDObj: TYPE = REF TIDObjRep;
TIDObjRep: TYPE = RECORD [btid: Camelot.btidT];
scratchTIDObj: TIDObj _ NEW[TIDObjRep];

DocTrans: TYPE = REF DocTransRep;
DocTransRep: TYPE = RECORD [
  btidForDoc: TIDObj,
  docs: LIST OF Document _ NIL
  ];
transactionToModifiedDocumentMap: RedBlackTree.Table;

finalizationQ: SafeStorage.FinalizationQueue;

ZeroIllegal: --CALLING-- ERROR = CODE;

secondLookupHashDocument: HashDocument _ NIL; -- weirdness to protect object from finalization.
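<<Overview: this module maps DIDs to documents. Volatile state is a finalization-protected
cache of Document objects (the hash package at the end of this file); stable state lives in
64 B-trees, a DID choosing its tree by the low six bits of its stable hash (HashToBtree).
Documents touched under a transaction are collected in transactionToModifiedDocumentMap and
written out at PreCommit, tree by tree in ascending tree number; Commit unlocks the trees and
clears the modified flags. A sketch of a typical client sequence (the real call sites live in
other Ygg modules, and the exact order there may differ):
  did _ CreateNewDID[tid];                -- allocate a DID; a new Document lands in the cache
  AddRuns[did, tid, $contents, runList];  -- record where the part's pages live
  PreCommit[tid]; Commit[tid];            -- flush the Document to its B-tree, then unlock>>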
<>
OpenDocumentFromDID: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT] RETURNS [doc: YggInternal.Document _ NIL] = {
  <>
  tryCount: CARD _ 0;
  DO
    tryAgain: BOOL _ FALSE;
    IF (doc _ LookupInCache[did]) = NIL THEN {
      found: BOOL _ FALSE;
      gottaBeInCache: BOOL _ FALSE;
      doc _ NEW[DocumentRep _ [did: did, componentFiles: NIL, interlock: TRUE, next: NIL]];
      [found, gottaBeInCache] _ LookupInBtree[did, tid, doc];
      IF gottaBeInCache THEN {
        doc _ LookupInCache[did];
        IF doc = NIL THEN ERROR;
        }
      ELSE {
        Insert[doc ! HashPkgDuplicateKey => {tryAgain _ TRUE; CONTINUE}];
        <>
        openFilesFromParsedDocList[doc, did, tid];
        };
      }
    ELSE { -- found in cache
      waitForInterlockDone: ENTRY PROC ~ {WHILE doc.interlock DO WAIT aShortTime ENDLOOP};
      IF doc.interlock THEN waitForInterlockDone[];
      };
    IF ~tryAgain THEN EXIT;
    tryCount _ tryCount + 1;
    IF tryCount > 25 THEN ERROR;
    ENDLOOP;
  };

GetDID: PUBLIC PROC [doc: Document] RETURNS [did: YggDID.DID] ~ {
  <>
  RETURN[doc.did];
  };

GetLinkInfo: PUBLIC PROC [doc: Document] RETURNS [fromDID, toDID: YggDID.DID, linkType: Rope.ROPE] ~ {
  <>
  RETURN[doc.fromDID, doc.toDID, doc.linkType];
  };

SwapLinkInfo: PUBLIC PROC [doc: Document, newFromDID, newToDID: YggDID.DID, newLinkType: Rope.ROPE] RETURNS [oldFromDID, oldToDID: YggDID.DID, oldLinkType: Rope.ROPE] ~ {
  <>
  oldFromDID _ doc.fromDID;
  doc.fromDID _ newFromDID;
  oldToDID _ doc.toDID;
  doc.toDID _ newToDID;
  oldLinkType _ doc.linkType;
  doc.linkType _ newLinkType;
  };

GetComponentFiles: PUBLIC PROC [doc: Document] RETURNS [componentFiles: LIST OF YggInternal.FileHandle] ~ {
  <>
  RETURN[doc.componentFiles];
  };

AddComponentFile: PUBLIC ENTRY PROC [doc: Document, componentFile: YggInternal.FileHandle, tid: Camelot.tidT] ~ {
  lastCF: LIST OF YggInternal.FileHandle _ NIL;
  FOR files: LIST OF YggInternal.FileHandle _ doc.componentFiles, files.rest UNTIL files = NIL DO
    lastCF _ files;
    ENDLOOP;
  IF lastCF = NIL THEN doc.componentFiles _ CONS[componentFile, NIL]
  ELSE lastCF.rest _ CONS[componentFile, NIL]; -- append at the tail of the list
  };

RemoveComponentFile: PUBLIC PROC [doc: Document, componentFile: YggInternal.FileHandle, tid: Camelot.tidT] ~ {
  lastCF: LIST OF YggInternal.FileHandle _ NIL;
  FOR files: LIST OF YggInternal.FileHandle _ doc.componentFiles, files.rest UNTIL files = NIL DO
    IF files.first = componentFile THEN {
      IF lastCF = NIL THEN doc.componentFiles _ files.rest ELSE lastCF.rest _ files.rest;
      EXIT;
      };
    lastCF _ files;
    ENDLOOP;
  };

GetName: PUBLIC PROC [doc: Document] RETURNS [Rope.ROPE] ~ {
  <>
  RETURN[doc.name];
  };

CreateNewDID: PUBLIC PROC [tid: Camelot.tidT] RETURNS [did: YggDID.DID] ~ {
  doc: YggInternal.Document _ NIL;
  did _ GetNextDID[];
  doc _ NEW[DocumentRep _ [did: did, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: TRUE, next: NIL]];
  Insert[doc ! HashPkgDuplicateKey => ERROR];
  noteUseInTransaction[doc, tid];
  };

DestroyDID: PUBLIC PROC [tid: Camelot.tidT, did: YggDID.DID] ~ {
  ERROR; -- not yet implemented
  };

GetRootDIDs: PUBLIC PROC RETURNS [dids: LIST OF YggDID.DID _ NIL] ~ {
  }; -- not yet implemented; always returns NIL

AddRootDID: PUBLIC PROC [did: YggDID.DID] RETURNS [ok: BOOL _ FALSE] ~ {
  }; -- not yet implemented; always returns FALSE

RemoveRootDID: PUBLIC PROC [did: YggDID.DID] RETURNS [ok: BOOL _ FALSE] ~ {
  }; -- not yet implemented; always returns FALSE

<>
aShortTime: CONDITION;

grapDoc: ENTRY PROC [doc: YggInternal.Document] ~ {
  WHILE doc.interlock DO WAIT aShortTime ENDLOOP;
  doc.interlock _ TRUE;
  };

releaseDoc: ENTRY PROC [doc: YggInternal.Document] ~ {doc.interlock _ FALSE};

RemoveRuns: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, runList: YggDIDMap.RunList] ~ {
  <>
  doc: YggInternal.Document _ NIL;
  IF (doc _ LookupInCache[did]) = NIL THEN ERROR
  ELSE {
    doc.modifiedDIDMap _ TRUE;
    grapDoc[doc];
    noteUseInTransaction[doc, tid];
    FOR rl: YggDIDMap.RunList _ runList, rl.rest UNTIL rl = NIL DO
      pdl: LIST OF YggDIDMapPrivate.DocItem;
      prevPdl: LIST OF YggDIDMapPrivate.DocItem _ NIL;
      runToDelete: YggDIDMap.Run _ rl.first;
      FOR pdl _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
        segments: LIST OF YggDIDMapPrivate.SegmentItem;
        prevSegments: LIST OF YggDIDMapPrivate.SegmentItem;
        FOR segments _ pdl.first.segments, segments.rest UNTIL segments = NIL DO
          runs: LIST OF YggDIDMapPrivate.RunItem;
          prevRuns: LIST OF YggDIDMapPrivate.RunItem;
          FOR runs _ segments.first.runs, runs.rest UNTIL runs = NIL DO
            firstIntersection: INT _ -1;
            lastIntersection: INT _ -1;
            runLast: CARD _ 0;
            runToDeleteLast: CARD _ 0;
            IF runs.first.segmentId # runToDelete.segmentId THEN { -- wrong segment
              prevRuns _ runs;
              LOOP;
              };
            firstIntersection _ MAX[runs.first.segmentPage, runToDelete.segmentPage];
            lastIntersection _ MIN[
              (runLast _ runs.first.segmentPage + runs.first.pages - 1),
              (runToDeleteLast _ runToDelete.segmentPage + runToDelete.pages - 1)];
            IF lastIntersection < firstIntersection THEN {prevRuns _ runs; LOOP}; -- no intersection
            SELECT TRUE FROM
              firstIntersection = runs.first.segmentPage AND lastIntersection = runLast => { -- run is eliminated
                IF prevRuns = NIL THEN segments.first.runs _ runs.rest ELSE prevRuns.rest _ runs.rest;
                LOOP;
                };
              firstIntersection = runs.first.segmentPage => { -- head of the run deleted; keep the tail
                runs.first.segmentPage _ lastIntersection + 1;
                runs.first.pages _ runLast - lastIntersection;
                };
              lastIntersection = runLast => { -- tail of the run deleted; keep the head
                runs.first.pages _ firstIntersection - runs.first.segmentPage;
                };
              ENDCASE => { -- middle of the run deleted; keep both ends
                runs.rest _ CONS[[segmentId: runs.first.segmentId, segmentPage: lastIntersection + 1, pages: runLast - lastIntersection], runs.rest];
                runs.first.pages _ firstIntersection - runs.first.segmentPage;
                };
            prevRuns _ runs;
            ENDLOOP;
          IF segments.first.runs = NIL THEN { -- no more runs, so dump the segment too
            IF prevSegments = NIL THEN pdl.first.segments _ segments.rest ELSE prevSegments.rest _ segments.rest;
            LOOP;
            };
          prevSegments _ segments;
          ENDLOOP;
        IF pdl.first.segments = NIL THEN { -- no more segments, so dump the DocItem too
          IF prevPdl = NIL THEN doc.parsedDocList _ pdl.rest ELSE prevPdl.rest _ pdl.rest;
          LOOP;
          };
        prevPdl _ pdl;
        ENDLOOP;
      ENDLOOP;
    releaseDoc[doc];
    };
  };
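<<A worked example of the run-trimming arithmetic in RemoveRuns (illustrative values only): a
run covering segment pages [10..17] (segmentPage: 10, pages: 8) against a runToDelete covering
[12..100] gives firstIntersection = MAX[10, 12] = 12 and lastIntersection = MIN[17, 100] = 17;
the overlap reaches the end of the run, so the run keeps only its head: segmentPage: 10, pages: 2.>>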
AddRuns: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, fileUse: ATOM, runList: YggDIDMap.RunList] ~ {
  <>
  doc: YggInternal.Document _ NIL;
  partType: YggDIDMapPrivate.PartType;
  partType _ mapFileUseToPartType[fileUse];
  IF (doc _ LookupInCache[did]) = NIL THEN ERROR
  ELSE {
    lastPDL: LIST OF YggDIDMapPrivate.DocItem _ NIL;
    thisPDL: LIST OF YggDIDMapPrivate.DocItem _ NIL;
    doc.modifiedDIDMap _ TRUE;
    grapDoc[doc];
    noteUseInTransaction[doc, tid];
    FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
      IF pdl.first.indexName = fileUse THEN {thisPDL _ pdl; EXIT}; -- remember the matching DocItem itself
      lastPDL _ pdl;
      REPEAT FINISHED => {
        thisPDL _ LIST[[partType, fileUse, 0, NIL]];
        IF lastPDL = NIL THEN doc.parsedDocList _ thisPDL ELSE lastPDL.rest _ thisPDL;
        };
      ENDLOOP; -- pdl
    IF thisPDL = NIL THEN ERROR;
    FOR runs: YggDIDMap.RunList _ runList, runs.rest UNTIL runs = NIL DO
      lastSegment: LIST OF YggDIDMapPrivate.SegmentItem _ NIL;
      FOR segs: LIST OF YggDIDMapPrivate.SegmentItem _ thisPDL.first.segments, segs.rest UNTIL segs = NIL DO
        lastRun: LIST OF YggDIDMapPrivate.RunItem _ NIL;
        runPages: YggFile.PageCount _ 0;
        FOR lori: LIST OF YggDIDMapPrivate.RunItem _ segs.first.runs, lori.rest UNTIL lori = NIL DO
          runPages _ runPages + lori.first.pages;
          lastRun _ lori;
          ENDLOOP;
        IF segs.first.firstPage + runPages = runs.first.firstPage THEN { -- append at end of segment
          lastRun.rest _ CONS[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, pages: runs.first.pages], NIL];
          EXIT; -- appended at end of segment; go do the next run
          };
        lastSegment _ segs;
        REPEAT FINISHED => { -- make a new segment with a singleton run
          newSeg: LIST OF YggDIDMapPrivate.SegmentItem _ LIST[[firstPage: runs.first.firstPage, numberOfBytes: YggFile.BytesForPages[runs.first.pages], runs: LIST[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, pages: runs.first.pages]]]];
          IF lastSegment = NIL THEN thisPDL.first.segments _ newSeg ELSE lastSegment.rest _ newSeg; -- a fresh DocItem has no segments yet
          };
        ENDLOOP;
      ENDLOOP;
    releaseDoc[doc];
    };
  };

SetByteSize: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, fileUse: ATOM, bytes: INT] ~ {
  <>
  doc: YggInternal.Document _ NIL;
  partType: YggDIDMapPrivate.PartType;
  partType _ mapFileUseToPartType[fileUse];
  IF (doc _ LookupInCache[did]) = NIL THEN ERROR
  ELSE {
    doc.modifiedDIDMap _ TRUE;
    grapDoc[doc];
    noteUseInTransaction[doc, tid];
    FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
      IF pdl.first.indexName = fileUse THEN {
        FOR segs: LIST OF YggDIDMapPrivate.SegmentItem _ pdl.first.segments, segs.rest UNTIL segs = NIL DO
          firstByte: CARD;
          firstByte _ YggFile.BytesForPages[segs.first.firstPage];
          IF firstByte > bytes THEN segs.first.numberOfBytes _ 0
          ELSE {
            runPages: YggFile.PageCount _ 0;
            runBytes: CARD _ 0;
            FOR lori: LIST OF YggDIDMapPrivate.RunItem _ segs.first.runs, lori.rest UNTIL lori = NIL DO
              runPages _ runPages + lori.first.pages;
              ENDLOOP;
            runBytes _ YggFile.BytesForPages[runPages];
            segs.first.numberOfBytes _ IF firstByte + runBytes > bytes THEN bytes - firstByte ELSE runBytes;
            };
          ENDLOOP;
        EXIT;
        };
      REPEAT FINISHED => ERROR;
      ENDLOOP; -- pdl
    releaseDoc[doc]; -- release the interlock taken by grapDoc
    };
  };

<>
mapFileUseToPartType: PROC [fileUse: ATOM] RETURNS [partType: YggDIDMapPrivate.PartType] ~ {
  SELECT fileUse FROM
    $root => partType _ root;
    $contents => partType _ contents;
    $outlinks => partType _ outlinks;
    $attributes => partType _ attributes;
    ENDCASE => {
      pName: Rope.Text;
      pName _ Atom.GetPName[fileUse];
      IF Rope.Equal[YggFixedNames.IndexPrefix, Rope.Substr[pName, 0, 7]] THEN partType _ index
      ELSE ERROR;
      };
  };

<>
PreCommit: PUBLIC PROC [tid: Camelot.tidT] ~ {
  <>
  data: RedBlackTree.UserData;
  scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top];
  data _ RedBlackTree.Lookup[transactionToModifiedDocumentMap, scrTIDObj];
  RecycleTIDObj[scrTIDObj];
  IF data # NIL THEN {
    dt: DocTrans;
    maxBTreeDone: INT _ -1;
    dt _ NARROW[data];
    DO
      minBTree: INT _ 65;
      FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents to find the next hash value to do
        hashedDidToBTree: INT;
        hashedDidToBTree _ HashToBtree[docList.first.did];
        IF hashedDidToBTree > maxBTreeDone AND hashedDidToBTree < minBTree THEN minBTree _ hashedDidToBTree;
        ENDLOOP;
      IF minBTree = 65 THEN EXIT; -- no unprocessed tree remains
      LockBTree[minBTree, tid];
      FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and process those with the correct hash value
        hashedDidToBTree: INT;
        hashedDidToBTree _ HashToBtree[docList.first.did];
        IF hashedDidToBTree = minBTree THEN writeDocumentToBTree[hashedDidToBTree, docList.first];
        ENDLOOP;
      maxBTreeDone _ minBTree;
      ENDLOOP;
    };
  };

Commit: PUBLIC PROC [tid: Camelot.tidT] ~ {
  data: RedBlackTree.UserData;
  scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top];
  data _ RedBlackTree.Lookup[transactionToModifiedDocumentMap, scrTIDObj];
  IF data # NIL THEN {
    dt: DocTrans;
    maxBTreeDone: INT _ -1;
    dt _ NARROW[data];
    FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and turn off the modified flag
      docList.first.modifiedDIDMap _ FALSE;
      ENDLOOP;
    DO
      minBTree: INT _ 65;
      FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents to find the next hash value to do
        hashedDidToBTree: INT;
        hashedDidToBTree _ HashToBtree[docList.first.did];
        IF hashedDidToBTree > maxBTreeDone AND hashedDidToBTree < minBTree THEN minBTree _ hashedDidToBTree;
        ENDLOOP;
      IF minBTree = 65 THEN EXIT; -- no unprocessed tree remains
      maxBTreeDone _ minBTree;
      UnlockBTree[minBTree, tid];
      ENDLOOP;
    };
  [] _ RedBlackTree.Delete[transactionToModifiedDocumentMap, scrTIDObj];
  RecycleTIDObj[scrTIDObj];
  };

Abort: PUBLIC PROC [tid: Camelot.tidT] ~ {
  data: RedBlackTree.UserData;
  scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top];
  data _ RedBlackTree.Lookup[transactionToModifiedDocumentMap, scrTIDObj];
  IF data # NIL THEN {
    dt: DocTrans;
    maxBTreeDone: INT _ -1;
    dt _ NARROW[data];
    FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and remove them from the cache
      Delete[docList.first];
      ENDLOOP;
    DO
      minBTree: INT _ 65;
      FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents to find the next hash value to do
        hashedDidToBTree: INT;
        hashedDidToBTree _ HashToBtree[docList.first.did];
        IF hashedDidToBTree > maxBTreeDone AND hashedDidToBTree < minBTree THEN minBTree _ hashedDidToBTree;
        ENDLOOP;
      IF minBTree = 65 THEN EXIT; -- no unprocessed tree remains
      maxBTreeDone _ minBTree;
      UnlockBTree[minBTree, tid];
      ENDLOOP;
    };
  [] _ RedBlackTree.Delete[transactionToModifiedDocumentMap, scrTIDObj];
  RecycleTIDObj[scrTIDObj];
  };

<>
openFilesFromParsedDocList: PROC [doc: YggInternal.Document, did: YggDID.DID, tid: Camelot.tidT] = {
  FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
    atomCode: ATOM _ NIL;
    theRuns: YggDIDMap.RunList _ NIL;
    theFile: YggFile.FileHandle;
    SELECT pdl.first.partType FROM
      root => atomCode _ $root;
      contents => atomCode _ $contents;
      outlinks => atomCode _ $outlinks;
      index => atomCode _ pdl.first.indexName;
      attributes => atomCode _ $attributes;
      ENDCASE => atomCode _ $huh;
    FOR segments: LIST OF YggDIDMapPrivate.SegmentItem _ pdl.first.segments, segments.rest UNTIL segments = NIL DO
      nowPage: CARD _ segments.first.firstPage;
      FOR runs: LIST OF YggDIDMapPrivate.RunItem _ segments.first.runs, runs.rest UNTIL runs = NIL DO
        theRuns _ CONS[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, firstPage: nowPage, pages: runs.first.pages, leader: FALSE], theRuns];
        nowPage _ nowPage + runs.first.pages;
        ENDLOOP;
      ENDLOOP;
    theFile _ YggFile.Open[runList: theRuns, did: did, fileUse: atomCode, verifyLeaderNow: FALSE];
    doc.componentFiles _ CONS[theFile, doc.componentFiles];
    ENDLOOP;
  };
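<<PreCommit, Commit, and Abort above all walk the affected B-trees in strictly ascending tree
number (maxBTreeDone only rises), so any two transactions lock the trees they share in the same
order; that ordering is what keeps concurrent PreCommits from deadlocking on the per-tree
interlocks. Commit and Abort release the locks in the same ascending order.>>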
<>
AppendText: UNSAFE PROC [dPP: YggDIDMapPrivate.DocPartPtr, text: Rope.Text, offset: CARD] RETURNS [textRP: YggDIDMapPrivate.TextRP, words: CARD] = UNCHECKED {
  textRep: POINTER TO YggDIDMapPrivate.TextRep;
  textRP _ LOOPHOLE[offset];
  textRep _ @dPP[textRP];
  LOOPHOLE[textRep, LONG POINTER TO Rope.TextBound]^ _ Rope.Length[text]; -- sets textRep.length
  [] _ Basics.ByteBlt[
    to: [BASE[DESCRIPTOR[textRep]], 0, textRep.length],
    from: [BASE[DESCRIPTOR[text]], 0, textRep.length]];
  words _ WORDS[YggDIDMapPrivate.TextRep[textRep.length]];
  };

GetText: UNSAFE PROC [textRep: LONG POINTER TO YggDIDMapPrivate.TextRep, text: REF TEXT] = UNCHECKED {
  nBytes: CARD;
  text.length _ textRep.length;
  nBytes _ Basics.ByteBlt[
    to: [BASE[DESCRIPTOR[text]], 0, textRep.length],
    from: [BASE[DESCRIPTOR[textRep]], 0, textRep.length]];
  };

<>
didsCondition: CONDITION;

GetNextDID: ENTRY PROC RETURNS [did: YggDID.DID] ~ {
  WHILE NextDID.didHigh = LastDIDAllocateable.didHigh AND NextDID.didLow = LastDIDAllocateable.didLow DO
    WAIT didsCondition
    ENDLOOP;
  did _ NEW[YggDIDPrivate.DIDRep _ [NextDID.didHigh, NextDID.didLow]];
  NextDID.didLow _ NextDID.didLow + 1;
  IF LastDIDAllocateable.didLow - NextDID.didLow <= ThresholdToGrabDIDs THEN TRUSTED {Process.Detach[FORK GrabSomeDIDs[]]};
  };

GrabberCount: CARD _ 0;

GrabSomeDIDs: PROC [] ~ {
  newLastDIDAllocateable: YggDIDPrivate.DIDRep;
  newHigh: BOOL _ FALSE;
  onlyOne: ENTRY PROC RETURNS [exit: BOOL _ FALSE] ~ {
    IF GrabberCount # 0 THEN RETURN[TRUE];
    GrabberCount _ GrabberCount + 1;
    };
  IF onlyOne[] THEN RETURN;
  newLastDIDAllocateable.didHigh _ LastDIDAllocateable.didHigh;
  newLastDIDAllocateable.didLow _ LastDIDAllocateable.didLow + NumberOfDIDsToGrap;
  IF newLastDIDAllocateable.didLow > LastDidLow THEN {
    newLastDIDAllocateable.didHigh _ newLastDIDAllocateable.didHigh + 1;
    newLastDIDAllocateable.didLow _ NumberOfDIDsToGrap;
    newHigh _ TRUE;
    };
  <>
  IF newHigh THEN {
    m: ENTRY PROC ~ {
      LastDIDAllocateable _ newLastDIDAllocateable;
      NextDID.didHigh _ newLastDIDAllocateable.didHigh;
      NextDID.didLow _ 0;
      GrabberCount _ 0; -- allow the next grab
      BROADCAST didsCondition;
      };
    m[];
    }
  ELSE {
    n: ENTRY PROC ~ {
      LastDIDAllocateable.didLow _ newLastDIDAllocateable.didLow;
      GrabberCount _ 0; -- allow the next grab
      BROADCAST didsCondition;
      };
    n[];
    };
  };

<>
bTreeCondition: CONDITION;

LockBTree: ENTRY PROC [treeNo: INT, tid: Camelot.tidT] ~ {
  WHILE Btrees[treeNo].interlock DO WAIT bTreeCondition ENDLOOP;
  Btrees[treeNo].interlock _ TRUE;
  Btrees[treeNo].tid _ tid;
  };

UnlockBTree: ENTRY PROC [treeNo: INT, tid: Camelot.tidT] ~ {
  IF ~Btrees[treeNo].interlock OR Btrees[treeNo].tid # tid THEN ERROR;
  Btrees[treeNo].interlock _ FALSE; -- release the interlock and wake waiters
  BROADCAST bTreeCondition;
  };

stockKey: REF KeyObject _ NIL;

CheckoutKey: ENTRY PROC [did: YggDID.DID, extendedDataCount: INT32] RETURNS [k: REF KeyObject] = {
  IF stockKey = NIL THEN k _ NEW[KeyObject] ELSE {k _ stockKey; stockKey _ NIL};
  k^ _ KeyObject[did, extendedDataCount];
  };

RecycleKey: ENTRY PROC [k: REF KeyObject] = {
  stockKey _ k;
  };

HashToBtree: PROC [did: YggDID.DID] RETURNS [h: INT] = {
  h _ Basics.BITAND[YggDID.StableHashDID[did], 03FH]; -- low 6 bits
  };

LookupInBtree: PROC [did: YggDID.DID, tid: Camelot.tidT, doc: YggInternal.Document] RETURNS [found: BOOL _ FALSE, gottaBeInCache: BOOL _ FALSE] = {
  hashedDidToBTree: INT;
  btreeEntry: TreeEntry;
  pathStk: BTree.PathStk _ BTree.NewPathStk[];
  keyRef: REF KeyObject = CheckoutKey[did, -3];
  extendedData: REF EntryDataBuffer _ NIL;
  extendedDataCount: CARD _ 0;
  expectedExtendedDataSize: CARD _ 0;
  maxDataCount: CARD _ 0;
  fromLink: DID _ YggEnvironment.nullDID;
  toLink: DID _ YggEnvironment.nullDID;
  linkType: Rope.ROPE _ NIL;
  updatesForTrans: LIST OF updates;
  getAll: UNSAFE PROC [entry: BTree.Entry] RETURNS [continue: BOOL _ FALSE] = {
    entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[entry];
    IF entry # NIL THEN TRUSTED {
      IF YggDID.CompareDIDs[did, @entryPtr.did] # equal THEN RETURN;
      found _ TRUE;
      SELECT entryPtr.extensionCount FROM
        0 => { -- normal, non-extended entry
          index: INT _ 0;
          IF entryPtr.link THEN {
            wordIndex: INT _ 0;
            noChars: INT _ 0;
            nullSeen: BOOL _ FALSE;
            fromLink _ NEW[DIDRep];
            Basics.Move[dst: LOOPHOLE[@(fromLink^)], src: @entryPtr.data[0], nWords: WORDS[DIDRep]]; -- copy into the new DIDRep, not into the REF cell
            index _ WORDS[DIDRep];
            toLink _ NEW[DIDRep];
            Basics.Move[dst: LOOPHOLE[@(toLink^)], src: @entryPtr.data[index], nWords: WORDS[DIDRep]];
            index _ index + WORDS[DIDRep];
            FOR wordIndex _ index, wordIndex + 1 UNTIL nullSeen DO -- first pass: count the link type's characters
              fourChars: PACKED ARRAY [0..Basics.charsPerWord) OF CHAR;
              fourChars _ LOOPHOLE[entryPtr.data[wordIndex]];
              FOR charNo: INT IN [0..Basics.charsPerWord) DO
                IF fourChars[charNo] = 0C THEN {nullSeen _ TRUE; EXIT};
                noChars _ noChars + 1;
                ENDLOOP;
              ENDLOOP;
            IF noChars = 0 THEN {index _ index + 1}
            ELSE {
              linkTypeRT: REF TEXT _ NIL;
              linkTypeRT _ RefText.New[noChars];
              nullSeen _ FALSE;
              noChars _ 0; -- second pass refills from the start
              UNTIL nullSeen DO
                fourChars: PACKED ARRAY [0..Basics.charsPerWord) OF CHAR;
                fourChars _ LOOPHOLE[entryPtr.data[index]];
                index _ index + 1;
                FOR charNo: INT IN [0..Basics.charsPerWord) DO
                  IF fourChars[charNo] = 0C THEN {nullSeen _ TRUE; EXIT};
                  linkTypeRT[noChars] _ fourChars[charNo];
                  noChars _ noChars + 1;
                  ENDLOOP;
                ENDLOOP;
              linkTypeRT.length _ noChars;
              linkType _ RefText.TrustTextAsRope[linkTypeRT];
              };
            };
          [] _ parseDocPart[@entryPtr.data[index], entryPtr.size - YggDIDMapPrivate.OverheadPerEntry - index, doc];
          };
        1 => { -- normal, extended entries to follow
          expectedExtendedDataSize _ entryPtr.data[0];
          maxDataCount _ expectedExtendedDataSize; -- total words expected from the extension entries
          extendedData _ NEW[EntryDataBuffer[expectedExtendedDataSize]];
          extendedDataCount _ entryPtr.size - YggDIDMapPrivate.OverheadPerEntry - 1;
          Basics.Move[dst: @extendedData[0], src: @entryPtr.data[1], nWords: extendedDataCount];
          };
        > 1 => { -- extended entry
          IF extendedDataCount + entryPtr.size - YggDIDMapPrivate.OverheadPerEntry > maxDataCount THEN ERROR;
          Basics.Move[dst: @extendedData[extendedDataCount], src: @entryPtr.data[0], nWords: entryPtr.size - YggDIDMapPrivate.OverheadPerEntry];
          extendedDataCount _ extendedDataCount + entryPtr.size - YggDIDMapPrivate.OverheadPerEntry;
          };
        ENDCASE => ERROR;
      };
    };
  hashedDidToBTree _ HashToBtree[did];
  btreeEntry _ Btrees[hashedDidToBTree];
  updatesForTrans _ getUpdates[tid];
  FOR ut: LIST OF updates _ updatesForTrans, ut.rest UNTIL ut = NIL DO
    IF YggDID.EqualDIDs[ut.first.did, did] THEN RETURN[FALSE, TRUE]; -- if it has updates, then it should be in the cache, so why are we in this routine?
    ENDLOOP;
  DO
    TRUSTED {IF BTree.EnumerateEntries[tree: btreeEntry.tree, relation: greater, key: keyRef, pathStk: pathStk, useExistingPath: TRUE, Proc: getAll] THEN EXIT};
    ENDLOOP;
  IF maxDataCount # 0 THEN TRUSTED {
    IF maxDataCount # extendedDataCount THEN ERROR;
    [] _ parseDocPart[@extendedData[0], maxDataCount, doc];
    };
  IF found THEN {doc.fromDID _ fromLink; doc.toDID _ toLink; doc.linkType _ linkType}; -- retain any link info parsed from the entry
  RecycleKey[keyRef];
  };
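<<Entry-extension arithmetic, by way of example: with BtreePageSizeInWords = 4096,
MaxWordsPerEntry = (4096-6)/5 = 818. A flattened document image larger than that cannot be a
single entry, so InsertIntoBtree (next) writes an extensionCount = 1 entry whose data[0] holds
the total word count followed by the first 818 - OverheadPerEntry - 1 data words, then
extensionCount = 2, 3, ... entries carrying up to 818 - OverheadPerEntry data words each.
getAll (above) accumulates them in extendedData, and the final comparison of extendedDataCount
against maxDataCount guards the reassembly before parseDocPart runs.>>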
InsertIntoBtree: PROC [bTree: BTree.Tree, did: DID, doc: YggInternal.Document, buffer: REF EntryDataBuffer, wordsInBuffer: INT, link: BOOL] = {
  totalEntrySize: INT;
  pathStk: BTree.PathStk _ BTree.NewPathStk[];
  keyRef: REF KeyObject = CheckoutKey[did, -3];
  totalEntrySize _ YggDIDMapPrivate.OverheadPerEntry + wordsInBuffer;
  IF totalEntrySize <= MaxWordsPerEntry THEN {
    writeEntrySingle: UNSAFE PROCEDURE [entry: BTree.Entry] = UNCHECKED {
      entPtr: POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry];
      entPtr.size _ totalEntrySize;
      entPtr.extensionCount _ 0;
      entPtr.did _ did^;
      entPtr.link _ link;
      Basics.Move[dst: @entPtr.data[0], src: LOOPHOLE[buffer], nWords: wordsInBuffer];
      };
    keyRef.extensionCount _ 0;
    TRUSTED {BTree.UpdateEntry[tree: bTree, key: keyRef, pathStk: pathStk, words: totalEntrySize, Proc: writeEntrySingle, updateType: insertOrReplace]};
    <>
    keyRef.extensionCount _ 1;
    }
  ELSE {
    writeEntryFirst: UNSAFE PROCEDURE [entry: BTree.Entry] = UNCHECKED {
      writeWords: INT _ MaxWordsPerEntry - YggDIDMapPrivate.OverheadPerEntry - 1;
      entPtr: POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry];
      entPtr.size _ MaxWordsPerEntry;
      entPtr.extensionCount _ 1;
      entPtr.did _ did^;
      entPtr.data[0] _ wordsInBuffer;
      leftToWrite _ wordsInBuffer - writeWords;
      Basics.Move[dst: @entPtr.data[1], src: LOOPHOLE[buffer], nWords: writeWords];
      writeOffset _ writeWords;
      };
    leftToWrite: INT _ 0;
    writeOffset: INT _ 0;
    keyRef.extensionCount _ 1;
    TRUSTED {BTree.UpdateEntry[tree: bTree, key: keyRef, pathStk: pathStk, words: MaxWordsPerEntry, Proc: writeEntryFirst, updateType: insertOrReplace]};
    keyRef.extensionCount _ 0;
    [] _ BTree.DeleteKey[tree: bTree, key: keyRef, pathStk: pathStk, useExistingPath: TRUE]; -- remove any old non-extended entry
    keyRef.extensionCount _ 1; -- extension 1 was just written; the loop below writes 2, 3, ...
    DO
      writeEntryExtended: UNSAFE PROCEDURE [entry: BTree.Entry] = UNCHECKED {
        entPtr: POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry];
        entPtr.size _ writeThisTime + YggDIDMapPrivate.OverheadPerEntry;
        entPtr.extensionCount _ keyRef.extensionCount;
        entPtr.did _ did^;
        Basics.Move[dst: @entPtr.data[0], src: LOOPHOLE[buffer, POINTER] + writeOffset, nWords: writeThisTime]; -- data words of an extension entry start at data[0]
        };
      writeThisTime: INT _ MIN[MaxWordsPerEntry - YggDIDMapPrivate.OverheadPerEntry, leftToWrite];
      keyRef.extensionCount _ keyRef.extensionCount + 1;
      TRUSTED {BTree.UpdateEntry[tree: bTree, key: keyRef, pathStk: pathStk, useExistingPath: TRUE, words: writeThisTime + YggDIDMapPrivate.OverheadPerEntry, Proc: writeEntryExtended, updateType: insertOrReplace]};
      writeOffset _ writeOffset + writeThisTime; -- advance past the words just written
      leftToWrite _ leftToWrite - writeThisTime;
      IF leftToWrite < 0 THEN ERROR;
      IF leftToWrite = 0 THEN EXIT;
      ENDLOOP;
    keyRef.extensionCount _ keyRef.extensionCount + 1; -- first extension number that should no longer exist
    };
  DO -- ensure that old extra extensions do not exist
    IF ~BTree.DeleteKey[tree: bTree, key: keyRef, pathStk: pathStk, useExistingPath: TRUE] THEN EXIT;
    keyRef.extensionCount _ keyRef.extensionCount + 1;
    ENDLOOP;
  RecycleKey[keyRef];
  };

getUpdates: PROC [tid: Camelot.tidT] RETURNS [updateList: LIST OF updates _ NIL] ~ {
  }; -- unimplemented: always returns NIL

parseDocPart: PROC [dataToParse: LONG POINTER, size: CARD, doc: YggInternal.Document] RETURNS [found: BOOL _ FALSE] = {
  <>
  parsedDocList: LIST OF YggDIDMapPrivate.DocItem _ NIL;
  parsedDocListTail: LIST OF YggDIDMapPrivate.DocItem _ NIL;
  index: CARD _ 0;
  WHILE index < size DO
    TRUSTED {
      segIndex: POINTER TO YggDIDMapPrivate.SegmentRep;
      docPart: POINTER TO YggDIDMapPrivate.DocPart;
      newDocList: LIST OF YggDIDMapPrivate.DocItem _ NIL;
      segmentsListTail: LIST OF YggDIDMapPrivate.SegmentItem _ NIL;
      startOffset: CARD _ 0;
      offsetToLastSegment: CARD _ 0;
      indexName: ATOM _ NIL;
      docPart _ LOOPHOLE[dataToParse + index];
      IF index + docPart.docPartSize > size THEN ERROR;
      IF docPart.indexName # NIL THEN {
        r: Rope.Text;
        docPartPtr: YggDIDMapPrivate.DocPartPtr = LOOPHOLE[docPart];
        indexNamePtr: POINTER TO YggDIDMapPrivate.TextRep;
        indexNamePtr _ @docPartPtr[docPartPtr.indexName];
        IF indexNamePtr.length = 0 THEN ERROR;
        offsetToLastSegment _ LOOPHOLE[indexNamePtr, CARD] - LOOPHOLE[docPart, CARD];
        r _ Rope.NewText[indexNamePtr.length];
        GetText[indexNamePtr, LOOPHOLE[r]];
        indexName _ Atom.MakeAtom[r];
        }
      ELSE offsetToLastSegment _ docPart.docPartSize;
      newDocList _ LIST[[partType: docPart.partType, indexName: indexName, slotNo: docPart.slotNo, segments: NIL]];
      segIndex _ LOOPHOLE[@docPart.segments];
      WHILE (startOffset _ LOOPHOLE[segIndex, CARD] - LOOPHOLE[docPart, CARD]) < offsetToLastSegment DO
        segSize: CARD _ 0;
        runs: LIST OF YggDIDMapPrivate.RunItem _ NIL;
        runTail: LIST OF YggDIDMapPrivate.RunItem _ NIL;
        newSegListItem: LIST OF YggDIDMapPrivate.SegmentItem;
        newSegListItem _ LIST[[firstPage: segIndex.firstPage, numberOfBytes: segIndex.numberOfBytes, runs: NIL]];
        IF (segSize _ WORDS[YggDIDMapPrivate.SegmentRep[segIndex.length]]) > docPart.docPartSize - startOffset THEN ERROR; -- too many runs
        FOR inx: CARDINAL IN [0..segIndex.length) DO
          newRunList: LIST OF YggDIDMapPrivate.RunItem _ NIL;
          newRunList _ LIST[[segmentId: segIndex.runs[inx].segmentId, segmentPage: segIndex.runs[inx].segmentPage, pages: segIndex.runs[inx].pages]];
          IF runTail = NIL THEN runs _ runTail _ newRunList
          ELSE {runTail.rest _ newRunList; runTail _ newRunList};
          ENDLOOP;
        newSegListItem.first.runs _ runs;
        IF newDocList.first.segments = NIL THEN newDocList.first.segments _ segmentsListTail _ newSegListItem
        ELSE {segmentsListTail.rest _ newSegListItem; segmentsListTail _ newSegListItem};
        segIndex _ segIndex + segSize;
        ENDLOOP;
      IF startOffset # offsetToLastSegment THEN ERROR; -- segments must exactly fill the space up to the index name (or the part's end)
      IF parsedDocListTail = NIL THEN parsedDocList _ parsedDocListTail _ newDocList
      ELSE {parsedDocListTail.rest _ newDocList; parsedDocListTail _ newDocList};
      index _ index + docPart.docPartSize;
      };
    ENDLOOP;
  doc.parsedDocList _ parsedDocList;
  };

stockBuffer: REF EntryDataBuffer _ NIL;

CheckoutStockBuffer: ENTRY PROC RETURNS [buf: REF EntryDataBuffer] = {
  IF stockBuffer = NIL THEN buf _ NEW[EntryDataBuffer[256]]
  ELSE {buf _ stockBuffer; stockBuffer _ NIL};
  };

RecycleStockBuffer: ENTRY PROC [buf: REF EntryDataBuffer] = {
  stockBuffer _ buf;
  };

writeDocumentToBTree: PROC [hashedDidToBTree: INT, doc: Document] ~ {
  buffer: REF EntryDataBuffer _ NIL;
  checkedOutBuffer: REF EntryDataBuffer _ NIL; -- the buffer to return to the stock pool
  bufferSize: INT _ 256;
  link: BOOL;
  wordsInBuffer: INT;
  checkedOutBuffer _ buffer _ CheckoutStockBuffer[];
  DO
    didNotFit: BOOL _ TRUE;
    [wordsInBuffer: wordsInBuffer, didNotFit: didNotFit, link: link] _ writeDocToBuffer[doc, buffer, bufferSize];
    IF didNotFit THEN {
      bufferSize _ bufferSize + bufferSize;
      buffer _ NEW[EntryDataBuffer[bufferSize]];
      LOOP;
      };
    EXIT;
    ENDLOOP;
  InsertIntoBtree[bTree: Btrees[hashedDidToBTree].tree, did: doc.did, doc: doc, buffer: buffer, wordsInBuffer: wordsInBuffer, link: link];
  RecycleStockBuffer[checkedOutBuffer];
  };
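<<writeDocumentToBTree retries with a doubled buffer (256, 512, 1024, ... words) until
writeDocToBuffer (next) reports that the flattened document fits; only the original 256-word
stock buffer is recycled to the pool, larger ones are left to the collector.>>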
writeDocToBuffer: PROC [doc: Document, buffer: REF EntryDataBuffer, bufferSize: INT] RETURNS [wordsInBuffer: INT _ 0, didNotFit: BOOL _ TRUE, link: BOOL _ FALSE] ~ {
  <>
  IF doc.fromDID # YggEnvironment.nullDID THEN {
    linkTypeLen: INT _ 0;
    linkTypeWords: INT _ 0;
    nowCharNum: INT _ 0;
    link _ TRUE;
    TRUSTED {Basics.Move[dst: LOOPHOLE[@buffer[0]], src: LOOPHOLE[@(doc.fromDID^)], nWords: WORDS[DIDRep]]};
    wordsInBuffer _ WORDS[DIDRep];
    TRUSTED {Basics.Move[dst: LOOPHOLE[@buffer[wordsInBuffer]], src: LOOPHOLE[@(doc.toDID^)], nWords: WORDS[DIDRep]]};
    wordsInBuffer _ wordsInBuffer + WORDS[DIDRep];
    linkTypeLen _ Rope.Length[doc.linkType];
    linkTypeWords _ (linkTypeLen/Basics.charsPerWord) + 1;
    IF linkTypeWords + wordsInBuffer > bufferSize THEN RETURN[0, TRUE, link];
    FOR i: INT IN [0..linkTypeWords) DO
      fourChars: PACKED ARRAY [0..Basics.charsPerWord) OF CHAR;
      FOR charNo: INT IN [0..Basics.charsPerWord) DO
        IF nowCharNum >= linkTypeLen THEN fourChars[charNo] _ 0C
        ELSE fourChars[charNo] _ Rope.Fetch[doc.linkType, nowCharNum];
        nowCharNum _ nowCharNum + 1;
        ENDLOOP;
      TRUSTED {buffer[wordsInBuffer] _ LOOPHOLE[fourChars]};
      wordsInBuffer _ wordsInBuffer + 1;
      ENDLOOP;
    };
  FOR parsedDocList: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, parsedDocList.rest UNTIL parsedDocList = NIL DO
    indexNameWords: CARD _ 0;
    segmentOffset: CARD _ 0;
    docPart: POINTER TO YggDIDMapPrivate.DocPart;
    IF wordsInBuffer + WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]] > bufferSize THEN RETURN[0, TRUE, link];
    TRUSTED {
      docPart _ LOOPHOLE[@buffer[wordsInBuffer]];
      docPart.compression _ YggDIDMapPrivate.notCompressed;
      docPart.keepingModifications _ FALSE;
      docPart.partType _ parsedDocList.first.partType;
      docPart.extraFlags _ 0;
      docPart.slotNo _ parsedDocList.first.slotNo;
      docPart.indexName _ NIL;
      docPart.versionIdentifierString _ NIL;
      };
    FOR segments: LIST OF YggDIDMapPrivate.SegmentItem _ parsedDocList.first.segments, segments.rest UNTIL segments = NIL DO
      runIndex: CARD _ 0;
      segIndex: POINTER TO YggDIDMapPrivate.SegmentRep;
      IF wordsInBuffer + segmentOffset + WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]] > bufferSize THEN RETURN[0, TRUE, link];
      TRUSTED {
        segIndex _ LOOPHOLE[@docPart.segments + segmentOffset];
        segIndex.firstPage _ segments.first.firstPage;
        segIndex.numberOfBytes _ segments.first.numberOfBytes;
        segIndex.modificationDeltasOffset _ 0;
        segIndex.lastUsedTime _ BasicTime.Now[];
        segIndex.backUpTime _ BasicTime.nullGMT;
        segIndex.backUpID _ 0;
        };
      FOR rl: LIST OF YggDIDMapPrivate.RunItem _ segments.first.runs, rl.rest UNTIL rl = NIL DO
        IF wordsInBuffer + segmentOffset + WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[runIndex]] > bufferSize THEN RETURN[0, TRUE, link];
        TRUSTED {segIndex.runs[runIndex] _ [segmentId: rl.first.segmentId, segmentPage: rl.first.segmentPage, pages: rl.first.pages]};
        runIndex _ runIndex + 1;
        ENDLOOP;
      TRUSTED {segIndex.length _ runIndex};
      segmentOffset _ segmentOffset + WORDS[YggDIDMapPrivate.SegmentRep[runIndex]];
      ENDLOOP;
    TRUSTED {
      docPart.docPartSize _ WORDS[YggDIDMapPrivate.DocPart] - WORDS[CARD] + segmentOffset; -- the index name, if any, is appended below
      wordsInBuffer _ wordsInBuffer + docPart.docPartSize;
      };
    IF parsedDocList.first.indexName # NIL THEN {
      pName: Rope.Text;
      pName _ Atom.GetPName[parsedDocList.first.indexName];
      IF wordsInBuffer + WORDS[YggDIDMapPrivate.TextRep[Rope.Length[pName]]] > bufferSize THEN RETURN[0, TRUE, link];
      TRUSTED {
        [docPart.indexName, indexNameWords] _ AppendText[dPP: docPart, text: pName, offset: docPart.docPartSize];
        docPart.docPartSize _ docPart.docPartSize + indexNameWords;
        };
      wordsInBuffer _ wordsInBuffer + indexNameWords;
      };
    ENDLOOP;
  RETURN[wordsInBuffer: wordsInBuffer, didNotFit: FALSE, link: link];
  };

<>
BtreeCompare: UNSAFE PROC [key: BTree.Key, entry: BTree.Entry] RETURNS [comp: Basics.Comparison] = UNCHECKED {
  keyRef: REF KeyObject = NARROW[key];
  entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[entry];
  entryDID: POINTER TO DIDRep = @entryPtr.did;
  SELECT YggDID.CompareDIDs[keyRef.did, entryDID] FROM
    less => RETURN[less];
    greater => RETURN[greater];
    ENDCASE =>
      SELECT keyRef.extensionCount FROM
        < entryPtr.extensionCount => RETURN[less];
        > entryPtr.extensionCount => RETURN[greater];
        ENDCASE => RETURN[equal];
  };

BtreeEntrySize: UNSAFE PROC [entry: BTree.Entry] RETURNS [words: BTree.EntSize] = UNCHECKED {
  RETURN[MIN[LOOPHOLE[entry, YggDIDMapPrivate.EntryPtr].size, BTree.PageSize.LAST]];
  };

<>
StartUpBTree: PROC [btreeInfo: REF] RETURNS [tree: BTree.Tree] = {
  tree _ BTree.New[
    repPrim: [compare: BtreeCompare, entrySize: BtreeEntrySize],
    storPrim: [referencePage: BTreeReferencePage, releasePage: BTreeReleasePage],
    minEntrySize: WORDS[YggDIDMapPrivate.Entry] + WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]],
    initialState: suspended];
  BTree.Open[
    tree: tree,
    storage: btreeInfo,
    pageSize: BtreePageSizeInWords,
    initialize: FALSE,
    maintainRecomputableState: TRUE];
  };

debugReferencePageNumber: INT _ 0;
debugReferencePageNumberCount: INT _ 0;

BTreeReferencePage: PUBLIC BTree.ReferencePage = {
  <>
  addVMPageSet: ENTRY PROC = {
    treeEntry.currentVMPageSets _ CONS[vMPageSet, treeEntry.currentVMPageSets];
    };
  treeEntry: TreeEntry;
  vMPageSet: YggBuffMan.VMPageSet;
  IF number = debugReferencePageNumber THEN debugReferencePageNumberCount _ debugReferencePageNumberCount + 1;
  treeEntry _ NARROW[storage];
  IF type = new THEN {
    IF YggFile.Info[treeEntry.file, treeEntry.tid].size < (number+2)*FilePagesPerBtreePage THEN YggFile.SetSize[treeEntry.file, (number+25)*FilePagesPerBtreePage, treeEntry.tid];
    vMPageSet _ YggBuffMan.UsePages[fileHandle: treeEntry.file, tid: treeEntry.tid, pageRun: [firstPage: number*FilePagesPerBtreePage + FilePagesPerBtreePage, count: FilePagesPerBtreePage]]; -- same page arithmetic as the read case below
    }
  ELSE {
    vMPageSet _ YggBuffMan.ReadPages[fileHandle: treeEntry.file, tid: treeEntry.tid, pageRun: [firstPage: number*FilePagesPerBtreePage + FilePagesPerBtreePage, count: FilePagesPerBtreePage]];
    };
  IF vMPageSet = NIL OR vMPageSet.rest # NIL THEN ERROR;
  addVMPageSet[];
  RETURN[vMPageSet.first.buffer];
  };

BTreeReleasePage: PUBLIC BTree.ReleasePage = {
  <>
  removeVMPageSet: ENTRY PROC = {
    prevlovmps: LIST OF YggBuffMan.VMPageSet _ NIL;
    FOR lovmps: LIST OF YggBuffMan.VMPageSet _ treeEntry.currentVMPageSets, lovmps.rest UNTIL lovmps = NIL DO
      IF lovmps.first.first.pageRun.firstPage = number*FilePagesPerBtreePage + FilePagesPerBtreePage THEN {
        vMPageSet _ lovmps.first; -- remember the page set being released
        IF prevlovmps = NIL THEN treeEntry.currentVMPageSets _ lovmps.rest ELSE prevlovmps.rest _ lovmps.rest;
        EXIT;
        };
      prevlovmps _ lovmps;
      ENDLOOP;
    };
  treeEntry: TreeEntry;
  releaseState: YggBuffMan.ReleaseState;
  vMPageSet: YggBuffMan.VMPageSet _ NIL;
  IF number = debugReferencePageNumber THEN debugReferencePageNumberCount _ debugReferencePageNumberCount + 1;
  treeEntry _ NARROW[storage];
  removeVMPageSet[]; -- find and unlink the page set for this page
  SELECT update FROM
    startOfUpdate => releaseState _ writeBatchedNoWait;
    endOfUpdate => releaseState _ writeBatchedNoWait;
    unchanged => releaseState _ clean;
    ENDCASE => releaseState _ clean;
  YggBuffMan.ReleaseVMPageSet[vMPageSet: vMPageSet, releaseState: releaseState, keep: TRUE];
  };

<>
noteUseInTransaction: ENTRY PROC [doc: Document, tid: Camelot.tidT] ~ {
  data: RedBlackTree.UserData;
  scratchTIDObj.btid _ tid.top;
  data _ RedBlackTree.Lookup[transactionToModifiedDocumentMap, scratchTIDObj];
  IF data = NIL THEN {
    dt: DocTrans _ NEW[DocTransRep];
    dt.btidForDoc _ NEW[TIDObjRep _ [tid.top]];
    dt.docs _ LIST[doc];
    RedBlackTree.Insert[transactionToModifiedDocumentMap, dt, scratchTIDObj];
    }
  ELSE {
    dt: DocTrans;
    dt _ NARROW[data];
    FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO
      IF YggDID.EqualDIDs[docList.first.did, doc.did] THEN RETURN;
      ENDLOOP;
    dt.docs _ CONS[doc, dt.docs];
    };
  };

GetKeyProc: RedBlackTree.GetKey = {
  <>
  docTrans: DocTrans _ NARROW[data];
  RETURN[docTrans.btidForDoc];
  };

CompareProc: RedBlackTree.Compare = {
  <>
  fileTransData: DocTrans _ NARROW[data];
  keyData: TIDObj _ NARROW[k];
  SELECT keyData.btid.highTicker FROM
    > fileTransData.btidForDoc.btid.highTicker => RETURN[greater];
    < fileTransData.btidForDoc.btid.highTicker => RETURN[less];
    ENDCASE =>
      SELECT keyData.btid.lowTicker FROM
        > fileTransData.btidForDoc.btid.lowTicker => RETURN[greater];
        < fileTransData.btidForDoc.btid.lowTicker => RETURN[less];
        ENDCASE => RETURN[equal];
  };
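<<CheckoutTIDObj/RecycleTIDObj below keep a one-deep free list of TIDObj keys so that
red-black lookups on the commit path need not allocate; stockKey and stockBuffer above follow
the same pattern. The cache is a single slot, so two concurrent checkouts merely cost one
extra NEW.>>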
stockTIDObj: TIDObj _ NIL;

CheckoutTIDObj: ENTRY PROC [btid: Camelot.btidT] RETURNS [k: TIDObj] = {
  IF stockTIDObj = NIL THEN k _ NEW[TIDObjRep]
  ELSE {k _ stockTIDObj; stockTIDObj _ NIL};
  k.btid _ btid;
  };

RecycleTIDObj: ENTRY PROC [k: TIDObj] = {
  stockTIDObj _ k;
  };

<>
GetNext: PUBLIC --ENTRY-- PROCEDURE [doc: Document] RETURNS [nextDocument: Document] =
  BEGIN -- errors defined in FileMap: none.
  ENABLE UNWIND => NULL;
  RETURN[EnumerateNext[doc]];
  END;

<>
<<[hashDocument: HashDocument];>>
<>

HashDocument: TYPE = Document;
Key: TYPE = YggDID.DID;

ClientHashInit: INTERNAL PROCEDURE [numHashSlotsDesired: NAT] RETURNS [numHashSlotsAllowed: NAT] =
  BEGIN
  divisor, quotient, remainder: CARDINAL;
  DO
    divisor _ 2;
    DO
      <<[quotient, remainder] _ Basics.DivMod[numHashSlotsDesired, divisor];>>
      quotient _ numHashSlotsDesired / divisor;
      remainder _ numHashSlotsDesired MOD divisor;
      IF remainder = 0 THEN EXIT; -- this number is not prime.
      IF quotient < divisor THEN RETURN[numHashSlotsDesired]; -- prime.
      divisor _ divisor + 1;
      ENDLOOP;
    numHashSlotsDesired _ numHashSlotsDesired + 1;
    ENDLOOP;
  END;

ClientHash: INTERNAL PROCEDURE [hashDocument: HashDocument] RETURNS [index: NAT--[0..numHashSlots)--] = INLINE {
  index _ YggDID.HashDID[hashDocument.did] MOD numHashSlots;
  };

ClientEqualKeys: INTERNAL PROCEDURE [hashDocument1, hashDocument2: HashDocument] RETURNS [equal: BOOLEAN] = INLINE {
  RETURN[YggDID.EqualDIDs[hashDocument1.did, hashDocument2.did]];
  };

<>
hashSlots: REF HashSlots _ NIL;
HashSlots: TYPE = RECORD[SEQUENCE nSlots: NAT OF HashDocument];
numHashSlots: NAT _ 0; -- boy, will they be sorry if they don't init this package.
lookupHashDocument: HashDocument _ NIL; -- for the "package's" use only.

<>
HashPkgCallerProgrammingError: ERROR = CODE; -- various fatal conditions.
HashPkgDuplicateKey: ERROR = CODE; -- from Insert.

InitializeHashTable: INTERNAL PROCEDURE [numHashSlotsDesired: NAT, hashDocument: HashDocument] =
  BEGIN -- errors: HashPkgCallerProgrammingError (numHashSlotsDesired = 0).
  numHashSlots _ ClientHashInit[numHashSlotsDesired];
  IF numHashSlots = 0 THEN ERROR HashPkgCallerProgrammingError;
  lookupHashDocument _ hashDocument;
  hashSlots _ NEW[HashSlots[numHashSlots]];
  FOR index: NAT IN [0..numHashSlots) DO hashSlots[index] _ NIL; ENDLOOP;
  END;

Insert: ENTRY PROC [hashDocument: HashDocument] = {
  <>
  index: NAT _ ClientHash[hashDocument];
  FOR newHashDocument: HashDocument _ hashSlots[index], newHashDocument.next UNTIL newHashDocument = NIL DO
    IF ClientEqualKeys[newHashDocument, hashDocument] THEN ERROR HashPkgDuplicateKey;
    ENDLOOP;
  hashDocument.next _ hashSlots[index];
  hashSlots[index] _ hashDocument;
  SafeStorage.EnableFinalization[hashDocument];
  };

LookupInCache: ENTRY PROC [key: Key] RETURNS [hashDocument: HashDocument] = {
  <>
  lookupHashDocument.did _ key;
  FOR hashDocument _ hashSlots[ClientHash[lookupHashDocument]], hashDocument.next UNTIL hashDocument = NIL DO
    IF ClientEqualKeys[hashDocument, lookupHashDocument] THEN RETURN;
    ENDLOOP;
  RETURN[NIL];
  };

InternalDelete: INTERNAL PROC [hashDocument: HashDocument] = {
  <>
  index: NAT _ ClientHash[hashDocument];
  prevHashDocument: HashDocument _ NIL;
  FOR newHashDocument: HashDocument _ hashSlots[index], newHashDocument.next UNTIL newHashDocument = NIL DO
    IF ClientEqualKeys[newHashDocument, hashDocument] THEN EXIT;
    prevHashDocument _ newHashDocument;
    REPEAT FINISHED => ERROR HashPkgCallerProgrammingError;
    ENDLOOP;
  IF prevHashDocument = NIL THEN hashSlots[index] _ hashDocument.next
  ELSE prevHashDocument.next _ hashDocument.next;
  };

Delete: ENTRY PROC [hashDocument: HashDocument] = {
  InternalDelete[hashDocument];
  };

<>
EnumerateNext: ENTRY PROCEDURE [prevHashDocument: HashDocument] RETURNS [hashDocument: HashDocument] = {
  <>
  index: NAT;
  IF prevHashDocument = NIL THEN index _ 0
  ELSE {
    index _ ClientHash[prevHashDocument];
    FOR hashDocument _ hashSlots[index], hashDocument.next UNTIL hashDocument = NIL DO
      IF ClientEqualKeys[hashDocument, prevHashDocument] THEN GOTO found;
      REPEAT
        found => {
          IF hashDocument.next # NIL THEN RETURN[hashDocument.next];
          index _ index + 1;
          };
      ENDLOOP;
    };
  UNTIL index >= numHashSlots DO
    IF hashSlots[index] # NIL THEN RETURN[hashSlots[index]];
    index _ index + 1;
    ENDLOOP;
  RETURN[NIL];
  };

EnumerateWithProc: INTERNAL PROCEDURE [proc: PROCEDURE [hashDocument: HashDocument] RETURNS [stop: BOOLEAN]] = {
  <>
  FOR index: NAT IN [0..numHashSlots) DO
    FOR hashDocument: HashDocument _ hashSlots[index], hashDocument.next UNTIL hashDocument = NIL DO
      IF proc[hashDocument] THEN RETURN;
      ENDLOOP;
    ENDLOOP;
  };

<>
FinalizationProcess: PROCEDURE = {
  FinalizeEntry: ENTRY PROCEDURE [docm: Document] = {
    IF SafeStorage.IsFinalizationEnabled[docm] THEN RETURN;
    InternalDelete[docm]; -- already inside the monitor, so use the INTERNAL form
    docm _ NIL;
    };
  DO
    doc: Document;
    FinalizeEntry[doc _ NARROW[SafeStorage.FQNext[finalizationQ]]];
    doc _ NIL;
    ENDLOOP;
  };

<>
Initialize: PUBLIC ENTRY PROCEDURE [numHashSlotsDesired: NAT, fQLength: CARDINAL] = {
  IF fQLength = 0 THEN RETURN WITH ERROR ZeroIllegal;
  finalizationQ _ SafeStorage.NewFQ[fQLength]; -- do this before
  SafeStorage.EstablishFinalization[CODE[DocumentRep], 1, finalizationQ]; -- allocating any FileObjects.
  TRUSTED BEGIN Process.Detach[FORK FinalizationProcess[]]; END;
  secondLookupHashDocument _ NEW[DocumentRep _ [did: NIL, componentFiles: NIL, interlock: FALSE, next: NIL]];
  InitializeHashTable[numHashSlotsDesired, secondLookupHashDocument];
  <>
  TRUSTED {Process.InitializeCondition[LOOPHOLE[@aShortTime], Process.MsecToTicks[50]]};
  transactionToModifiedDocumentMap _ RedBlackTree.Create[getKey: GetKeyProc, compare: CompareProc];
  };

Initialize[129, 250];

END.