<> <> <> <> DIRECTORY Atom USING [GetPName, MakeAtom], Basics USING [BITAND, charsPerWord, Comparison], BasicTime USING [Now, nullGMT], ExtendibleHash USING [DeleteEntry, Entry, ExtendibleHashTable, New, Open, PageNumber, ReadEntry, ReferencePage, ReleasePage, UpdateEntry], Camelot USING [btidT, DSLogNewValue, DSPinObject, -- ErSuccess, -- optrT, -- TABegin, TAEnd, -- tidT], Mach USING [kernReturnT, -- KernSuccess, -- pointerT], Process USING [Detach, InitializeCondition, MsecToTicks, Pause, Ticks], PBasics USING [BITAND, ByteBlt, bytesPerWord, charsPerWord, Move], RedBlackTree USING [Compare, Create, Delete, GetKey, Insert, Lookup, Table, UserData], RefText USING [New, TrustTextAsRope], Rope USING [Equal, Fetch, Length, NewText, ROPE, Substr, Text], SafeStorage USING [CantEstablishFinalization, EnableFinalization, EstablishFinalization, FinalizationQueue, FQNext, IsFinalizationEnabled, NewFQ], YggBuffMan USING [ReadPages, ReleaseState, ReleaseVMPageSet, VMPageSet, WritePages], YggDID USING [CompareDIDs, DID, EqualDIDs, HashDID, ValidateDID], YggDIDMap, YggDIDMapPrivate USING [DocItem, DocPart, DocPartPtr, Document, DocumentRep, Entry, EntryPtr, notCompressed, PartType, RunItem, SegmentItem, SegmentRep, TextRep, TextRP, Word32OverheadPerEntry], YggDIDPrivate USING [DIDForRootDIDs, DIDRep], YggEnvironment USING [dsPort, nullDID, nullTransID, Outcome, -- taPort, -- TransID], YggFile USING [BytesForPages, FileHandle, GetMaxDIDAddress, Info, Open, OpenDIDMapFile, PageCount, SetSize], YggFixedNames USING [IndexPrefix, IndexPrefixSize], YggInternal USING [Document, FileHandle], YggNav USING [GetProperty, SetContents, SetProperty], YggRep USING [Attribute, did, TypedPrimitiveElement], YggTransaction USING [CreateTrans, Finish]; YggDIDMapImpl: CEDAR MONITOR IMPORTS Atom, Basics, BasicTime, Camelot, ExtendibleHash, PBasics, Process, RedBlackTree, RefText, Rope, SafeStorage, YggBuffMan, YggDID, YggDIDPrivate, YggEnvironment, YggFile, YggFixedNames, YggNav, YggTransaction 
EXPORTS YggInternal, YggDIDMap, YggDID
= BEGIN

-- Exported concrete types.
Document: TYPE = REF DocumentRep;
DocumentRep: PUBLIC TYPE = YggDIDMapPrivate.DocumentRep;
DID: PUBLIC TYPE ~ REF DIDRep;
DIDRep: PUBLIC TYPE ~ YggDIDPrivate.DIDRep;

-- Sizing constants for the extendible-hash DID map.
HashPageSizeInWords: INT = 1024;
MaxWordsPerEntry: INT = HashPageSizeInWords/3 - 5;
FilePagesPerBtreePage: INT = 1;

-- 64-way bucket hash and per-bucket latches (used by PreCommit/Commit/Abort).
Hash64Table: ARRAY [0..64) OF CARD;
Lock64Table: ARRAY [0..64) OF Lock64Item;
Lock64Item: TYPE = RECORD[
  interlock: BOOL _ FALSE,  -- TRUE while some transaction holds this bucket
  tid: Camelot.tidT         -- transaction currently holding the bucket
  ];

-- The persistent DID map and its backing file.
TheDIDMap: ExtendibleHash.ExtendibleHashTable;
FileForDIDMap: YggFile.FileHandle;
CurrentVMPageSetsForDIDMap: LIST OF YggBuffMan.VMPageSet _ NIL;

-- DID allocation state (see GetNextDID / GrabSomeDIDs).
NextDID: DIDRep;                       -- next DID to hand out
LastDIDAllocateable: DIDRep;           -- upper bound of the durably reserved range
MaxDIDOptr: Camelot.optrT;             -- recoverable object recording the max allocated DID
AddressForMaxDID: Mach.pointerT;
RecoverableMaxDID: LONG POINTER TO DIDRep;
LastDidLow: CARD _ 1000000000;         -- didLow rolls over into a new didHigh past this
NumberOfDIDsToGrap: CARD _ 200;        -- batch size per durable allocation
ThresholdToGrabDIDs: CARD _ 20;        -- refill in background when this few remain

-- Key for a map entry: a DID plus which extension piece of the record.
KeyObject: TYPE = RECORD [did: YggDID.DID, extensionCount: INT32];
EntryDataBuffer: TYPE = RECORD[SEQUENCE COMPUTED CARD16 OF EntryWord];
-- One 32-bit entry word, viewable as card/int/halfwords/bytes/bits.
EntryWord: TYPE = MACHINE DEPENDENT RECORD[
  SELECT OVERLAID * FROM
    card => [card: CARD32],
    int => [int: INT32],
    pair => [hi, lo: CARD16],
    bytes => [hh, hl, lh, ll: BYTE],
    bits => [bits: PACKED ARRAY [0..32) OF BOOL],
    ENDCASE
  ];

updateAction: TYPE = {create, delete, newValue};
updates: TYPE = RECORD[
  did: YggDID.DID,
  tid: Camelot.tidT,
  action: updateAction,
  doc: Document
  ];

-- Red-black-tree key wrapper for a (top-level) transaction id.
TIDObj: TYPE = REF TIDObjRep;
TIDObjRep: TYPE = RECORD [ btid: Camelot.btidT];
scratchTIDObj: TIDObj _ NEW[TIDObjRep];  -- recycled key object; see CheckoutTIDObj/RecycleTIDObj

-- Per-transaction record of modified documents and held bucket latches.
DocTrans: TYPE = REF DocTransRep;
DocTransRep: TYPE = RECORD [
  btidForDoc: TIDObj,
  docs: LIST OF Document _ NIL,
  bucketsToUnlatch: LIST OF CARD _ NIL
  ];
transactionToModifiedDocumentMap: RedBlackTree.Table;

finalizationQ: SafeStorage.FinalizationQueue;
ZeroIllegal: --CALLING-- ERROR = CODE;
secondLookupHashDocument: HashDocument _ NIL; -- weirdness to protect object from finalization.
WORDSInWords: CARD _ 0;  -- NOTE(review): used as a divisor in AppendText/LookupInMap; confirm it is initialized nonzero before first use

-- Document cache and DID operations.

-- Fault the document for did into the cache (result discarded).
MarkInterestInDID: PUBLIC PROC [did: YggDID.DID] ~ {
  [] _ LookupInCache[did, FALSE];
  };

-- Find (or fault in from the persistent map) the document for did.
-- Retries on duplicate-insert races; gives up with ERROR after 25 tries.
OpenDocumentFromDID: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, destroyedOK: BOOL] RETURNS [doc: YggInternal.Document _ NIL] = {
  tryCount: CARD _ 0;
  DO
    tryAgain: BOOL _ FALSE;
    IF (doc _ LookupInCache[did, destroyedOK]) = NIL THEN {
      found: BOOL _ FALSE;
      gottaBeInCache: BOOL _ FALSE;
      -- new doc is created with interlock already held (TRUE)
      doc _ NEW[DocumentRep _ [did: did, componentFiles: NIL, interlock: TRUE, next: NIL]];
      [found, gottaBeInCache] _ LookupInMap[did, tid, doc];
      IF gottaBeInCache THEN {
        doc _ LookupInCache[did, destroyedOK];
        IF doc = NIL THEN ERROR;
        }
      ELSE {
        IF ~found THEN RETURN [NIL];
        Insert[doc !
          HashPkgDuplicateKey => { tryAgain _ TRUE; CONTINUE; };  -- raced with another opener; retry
          ];
        IF ~tryAgain THEN openFilesFromParsedDocList[doc, did, tid];
        };
      }
    ELSE { -- found in cache
      waitForInterlockDone: ENTRY PROC ~ {
        ENABLE UNWIND => {};
        WHILE doc.interlock DO WAIT aShortTime ENDLOOP;
        };
      IF doc.interlock THEN waitForInterlockDone[];
      };
    IF ~tryAgain THEN { releaseDoc[doc]; EXIT; };
    tryCount _ tryCount + 1;
    IF tryCount > 25 THEN ERROR;
    ENDLOOP;
  };

GetDID: PUBLIC PROC [doc: Document] RETURNS [did: YggDID.DID] ~ {
  RETURN[doc.did];
  };

GetLinkInfo: PUBLIC PROC [doc: Document] RETURNS [fromDID, toDID: YggDID.DID, linkType: Rope.ROPE] ~ {
  RETURN[doc.fromDID, doc.toDID, doc.linkType];
  };

-- Install new link info in doc, returning the previous values.
SwapLinkInfo: PUBLIC PROC [doc: Document, newFromDID, newToDID: YggDID.DID, newLinkType: Rope.ROPE] RETURNS [oldFromDID, oldToDID: YggDID.DID, oldLinkType: Rope.ROPE] ~ {
  oldFromDID _ doc.fromDID; doc.fromDID _ newFromDID;
  oldToDID _ doc.toDID; doc.toDID _ newToDID;
  oldLinkType _ doc.linkType; doc.linkType _ newLinkType;
  };

GetComponentFiles: PUBLIC PROC [doc: Document] RETURNS [componentFiles: LIST OF YggInternal.FileHandle] ~ {
  RETURN[doc.componentFiles];
  };

-- Append componentFile at the tail of doc.componentFiles (monitor-protected).
AddComponentFile: PUBLIC ENTRY PROC [doc: Document, componentFile: YggInternal.FileHandle, tid: Camelot.tidT] ~ {
  ENABLE UNWIND => {};
  lastCF: LIST OF YggInternal.FileHandle _ NIL;
  FOR files: LIST OF YggInternal.FileHandle _ doc.componentFiles, files.rest UNTIL files = NIL DO
    lastCF _ files;
    ENDLOOP;
  IF lastCF = NIL THEN doc.componentFiles _ CONS[componentFile, NIL]
  ELSE lastCF.rest _ CONS[componentFile, NIL];
  };

-- Unlink componentFile from doc.componentFiles, if present.
-- NOTE(review): not ENTRY, unlike AddComponentFile — confirm callers serialize access.
RemoveComponentFile: PUBLIC PROC [doc: Document, componentFile: YggInternal.FileHandle, tid: Camelot.tidT] ~ {
  lastCF: LIST OF YggInternal.FileHandle _ NIL;
  FOR files: LIST OF YggInternal.FileHandle _ doc.componentFiles, files.rest UNTIL files = NIL DO
    IF files.first = componentFile THEN {
      IF lastCF = NIL THEN doc.componentFiles _ files.rest ELSE lastCF.rest _ files.rest;
      EXIT;
      };
    lastCF _ files;
    ENDLOOP;
  };

GetName: PUBLIC PROC [doc: Document] RETURNS [Rope.ROPE] ~ {
  RETURN[doc.name];
  };

-- Allocate a fresh DID and enter a new document for it in the cache.
CreateNewDID: PUBLIC PROC [tid: Camelot.tidT] RETURNS [did: YggDID.DID ] ~ {
  doc: YggInternal.Document _ NIL;
  did _ GetNextDID[];
  doc _ NEW[DocumentRep _ [did: did, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: FALSE, next: NIL]];
  Insert[doc ! HashPkgDuplicateKey => ERROR];  -- freshly allocated DID must be unique
  noteUseInTransaction[doc, tid];
  };

-- Enter a document under a caller-chosen DID; FALSE if the DID already exists.
CreateExplicitDID: PUBLIC PROC [tid: Camelot.tidT, did: YggDID.DID] RETURNS [ok: BOOL _ TRUE ] ~ {
  doc: YggInternal.Document _ NIL;
  IF YggDID.ValidateDID[did] THEN RETURN [FALSE];  -- refuse a DID that already validates
  doc _ NEW[DocumentRep _ [did: did, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: FALSE, next: NIL]];
  Insert[doc !
    HashPkgDuplicateKey => {ok _ FALSE; CONTINUE}];
  IF ok THEN noteUseInTransaction[doc, tid];
  };

-- Mark the document destroyed; actual cache removal happens at Commit.
DestroyDID: PUBLIC PROC [tid: Camelot.tidT, did: YggDID.DID] ~ {
  doc: YggInternal.Document _ NIL;
  doc _ OpenDocumentFromDID[did: did, tid: tid, destroyedOK: FALSE];
  doc.destroyed _ TRUE;
  };

-- Read the "$rootDIDs" property of the well-known root-of-roots document.
GetRootDIDs: PUBLIC PROC RETURNS [dids: LIST OF YggDID.DID _ NIL] ~ {
  trans: YggEnvironment.TransID;
  propertyExists: BOOL _ FALSE;
  property: LIST OF YggRep.Attribute _ NIL;
  trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID];
  -- NOTE(review): trans is created, but nullTransID is passed to GetProperty here;
  -- AddRootDID/RemoveRootDID pass trans — confirm which is intended.
  [propertyExists: propertyExists, property: property] _ YggNav.GetProperty[trans: YggEnvironment.nullTransID, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs"];
  IF propertyExists THEN {
    lastDIDs: LIST OF YggDID.DID _ NIL;
    IF property.rest # NIL THEN ERROR;             -- expect exactly one attribute
    IF property.first.value.rest # NIL THEN ERROR; -- with exactly one field
    FOR lotpe: LIST OF YggRep.TypedPrimitiveElement _ property.first.value.first.valueSet, lotpe.rest UNTIL lotpe = NIL DO
      didInList: DID;
      IF lotpe.first.docType # YggRep.did THEN ERROR;
      didInList _ NARROW[lotpe.first.bits];
      IF dids = NIL THEN {dids _ LIST[didInList]; lastDIDs _ dids}
      ELSE {lastDIDs.rest _ LIST[didInList]; lastDIDs _ lastDIDs.rest;};
      ENDLOOP;
    };
  [] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit];
  };

-- Simple monitor latch serializing root-DID updates.
RootDIDLatch: BOOL _ FALSE;
LatchRootDID: ENTRY PROC ~ {
  ENABLE UNWIND => {};
  WHILE RootDIDLatch DO WAIT aShortTime ENDLOOP;
  RootDIDLatch _ TRUE;
  };
UnlatchRootDID: ENTRY PROC ~ { RootDIDLatch _ FALSE; };

-- Add did to the "$rootDIDs" property, if not already present.
-- NOTE(review): result ok is initialized FALSE and never set TRUE on any path — confirm.
AddRootDID: PUBLIC PROC [did: YggDID.DID] RETURNS [ok: BOOL _ FALSE] ~ {
  trans: YggEnvironment.TransID;
  propertyExists: BOOL _ FALSE;
  property: LIST OF YggRep.Attribute _ NIL;
  alreadyRoot: BOOL _ FALSE;
  newProperty: YggRep.Attribute;
  LatchRootDID[];
  trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID];
  [propertyExists: propertyExists, property: property] _ YggNav.GetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs"];
  newProperty _ [attributeName: "$rootDIDs", ordered: TRUE,
    value: LIST[[fieldName: "", valueSet: LIST [[docType: YggRep.did, bits: did]]]]];
  IF propertyExists THEN {
    lastDIDs: LIST OF YggRep.TypedPrimitiveElement _ NIL;
    IF property.rest # NIL THEN ERROR;
    IF property.first.value.rest # NIL THEN ERROR;
    FOR lotpe: LIST OF YggRep.TypedPrimitiveElement _ property.first.value.first.valueSet, lotpe.rest UNTIL lotpe = NIL DO
      didInList: DID;
      IF lotpe.first.docType # YggRep.did THEN ERROR;
      didInList _ NARROW[lotpe.first.bits];
      IF YggDID.EqualDIDs[did, didInList] THEN {alreadyRoot _ TRUE; EXIT};
      lastDIDs _ lotpe;  -- remember the tail so we can append
      ENDLOOP;
    IF ~alreadyRoot THEN lastDIDs.rest _ LIST[[docType: YggRep.did, bits: did]];
    }
  ELSE property _ LIST[newProperty];
  IF ~alreadyRoot THEN YggNav.SetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs", property: property.first, appendProperty: FALSE];
  [] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit];
  UnlatchRootDID[];
  };

-- Remove did from "$rootDIDs"; returns whether it was a root.
RemoveRootDID: PUBLIC PROC [did: YggDID.DID] RETURNS [ok: BOOL _ FALSE] ~ {
  trans: YggEnvironment.TransID;
  propertyExists: BOOL _ FALSE;
  property: LIST OF YggRep.Attribute _ NIL;
  isARoot: BOOL _ FALSE;
  newProperty: YggRep.Attribute;
  LatchRootDID[];
  trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID];
  [propertyExists: propertyExists, property: property] _ YggNav.GetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs"];
  -- NOTE(review): newProperty is computed but never used in this procedure.
  newProperty _ [attributeName: "$rootDIDs", ordered: TRUE, value: LIST[[fieldName: "", valueSet: LIST [[docType: YggRep.did, bits: did]]]]];
  IF propertyExists THEN {
    prevDIDs: LIST OF YggRep.TypedPrimitiveElement _ NIL;
    IF property.rest # NIL THEN ERROR;
    IF property.first.value.rest # NIL THEN ERROR;
    FOR lotpe: LIST OF YggRep.TypedPrimitiveElement _ property.first.value.first.valueSet, lotpe.rest UNTIL lotpe = NIL DO
      didInList: DID;
      IF lotpe.first.docType # YggRep.did THEN ERROR;
      didInList _ NARROW[lotpe.first.bits];
      IF YggDID.EqualDIDs[did, didInList] THEN {
        isARoot_ TRUE;
        -- unlink this element from the valueSet
        IF prevDIDs = NIL THEN
          property.first.value.first.valueSet _ lotpe.rest
        ELSE prevDIDs.rest _ lotpe.rest;
        EXIT;
        };
      prevDIDs _ lotpe;
      ENDLOOP;
    };
  IF isARoot THEN YggNav.SetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs", property: property.first, appendProperty: FALSE];
  [] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit];
  UnlatchRootDID[];
  RETURN[isARoot];
  };

-- Utilities.
aShortTime: CONDITION;

-- Acquire / release a document's interlock under the monitor.
grapDoc: ENTRY PROC [doc: YggInternal.Document] ~ {
  ENABLE UNWIND => {};
  WHILE doc.interlock DO WAIT aShortTime ENDLOOP;
  doc.interlock _ TRUE;
  };
releaseDoc: ENTRY PROC [doc: YggInternal.Document] ~ {doc.interlock _ FALSE};

-- Delete the page runs in runList from did's parsed document list,
-- discarding runs/segments/DocItems that become empty.
RemoveRuns: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, runList: YggDIDMap.RunList] ~ {
  doc: YggInternal.Document _ NIL;
  IF (doc _ LookupInCache[did, TRUE]) = NIL THEN ERROR
  ELSE {
    doc.modifiedDIDMap _ TRUE;
    grapDoc[doc];
    noteUseInTransaction[doc, tid];
    FOR rl: YggDIDMap.RunList _ runList, rl.rest UNTIL rl = NIL DO
      pdl: LIST OF YggDIDMapPrivate.DocItem;
      prevPdl: LIST OF YggDIDMapPrivate.DocItem _ NIL;
      runToDelete: YggDIDMap.Run _ rl.first;
      FOR pdl _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
        segments: LIST OF YggDIDMapPrivate.SegmentItem;
        prevSegments: LIST OF YggDIDMapPrivate.SegmentItem;
        FOR segments _ pdl.first.segments, segments.rest UNTIL segments = NIL DO
          runs: LIST OF YggDIDMapPrivate.RunItem;
          prevRuns: LIST OF YggDIDMapPrivate.RunItem;
          FOR runs _ segments.first.runs, runs.rest UNTIL runs = NIL DO
            firstIntersection: INT _ -1;
            lastIntersection: INT _ -1;
            runLast: CARD _ 0;
            runToDeleteLast: CARD _ 0;
            IF runs.first.segmentId # runToDelete.segmentId THEN {-- wrong segment
              prevRuns _ runs;
              LOOP;
              };
            -- compute intersection of this run with the run being deleted
            firstIntersection _ MAX[runs.first.segmentPage, runToDelete.segmentPage];
            lastIntersection _ MIN[
              (runLast _ runs.first.segmentPage + runs.first.pages - 1),
              (runToDeleteLast _ runToDelete.segmentPage + runToDelete.pages - 1)
              ];
            IF lastIntersection < firstIntersection THEN LOOP; -- no intersection
            IF firstIntersection
= INT[runs.first.segmentPage] AND lastIntersection = INT[runLast] THEN { -- run is eliminated
              IF prevRuns = NIL THEN segments.first.runs _ runs.rest ELSE prevRuns.rest _ runs.rest;
              LOOP;
              }
            ELSE {
              -- NOTE(review): keeps a single remainder run; a deletion strictly inside
              -- a run would need a split into two runs — confirm callers never do that.
              runs.first.segmentPage _ firstIntersection;
              runs.first.pages _ lastIntersection - firstIntersection + 1;
              };
            prevRuns _ runs;
            ENDLOOP;
          IF segments.first.runs = NIL THEN { -- no more runs, so dump the segment too
            IF prevSegments = NIL THEN pdl.first.segments _ segments.rest ELSE prevSegments.rest _ segments.rest;
            LOOP;
            };
          prevSegments _ segments;
          ENDLOOP;
        IF pdl.first.segments = NIL THEN { -- no more segments, so dump the DocItem too
          IF prevPdl = NIL THEN doc.parsedDocList _ pdl.rest ELSE prevPdl.rest _ pdl.rest;
          LOOP;
          };
        prevPdl _ pdl;
        ENDLOOP;
      ENDLOOP;
    releaseDoc[doc];
    };
  };

-- Add the runs in runList to the fileUse part of did's parsed doc list,
-- appending to an existing segment when contiguous, else making a new segment.
AddRuns: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, fileUse: ATOM, runList: YggDIDMap.RunList] ~ {
  doc: YggInternal.Document _ NIL;
  partType: YggDIDMapPrivate.PartType;
  partType _ mapFileUseToPartType[fileUse];
  IF (doc _ LookupInCache[did]) = NIL THEN ERROR
  ELSE {
    lastPDL: LIST OF YggDIDMapPrivate.DocItem _ NIL;
    thisPDL: LIST OF YggDIDMapPrivate.DocItem _ NIL;
    doc.modifiedDIDMap _ TRUE;
    grapDoc[doc];
    noteUseInTransaction[doc, tid];
    -- find (or create) the DocItem for this fileUse
    FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
      IF pdl.first.indexName = fileUse THEN { thisPDL _ pdl; EXIT; };
      lastPDL _ pdl;
      REPEAT FINISHED => {
        thisPDL _ LIST[[partType, fileUse, 0, NIL]];
        IF lastPDL = NIL THEN doc.parsedDocList _ thisPDL ELSE lastPDL.rest _ thisPDL;
        };
      ENDLOOP; -- pdl
    IF thisPDL = NIL THEN ERROR;
    FOR runs: YggDIDMap.RunList _ runList, runs.rest UNTIL runs = NIL DO
      lastSegment: LIST OF YggDIDMapPrivate.SegmentItem _ NIL;
      FOR segs: LIST OF YggDIDMapPrivate.SegmentItem _ thisPDL.first.segments, segs.rest UNTIL segs = NIL DO
        lastRun: LIST OF YggDIDMapPrivate.RunItem _ NIL;
        runPages: YggFile.PageCount _ 0;
        -- total pages already in this segment
        FOR lori: LIST OF YggDIDMapPrivate.RunItem _ segs.first.runs, lori.rest UNTIL lori = NIL DO
          runPages _ runPages + lori.first.pages;
          lastRun _ lori;
          ENDLOOP;
        IF INT[segs.first.firstPage] + runPages = runs.first.firstPage THEN { -- append at end of segment
          lastRun.rest _ CONS[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, pages: runs.first.pages], NIL];
          EXIT; -- appended at end of segment; go do the next run
          };
        lastSegment _ segs;
        REPEAT FINISHED => { -- make a new segment with a singleton run
          newItem: LIST OF YggDIDMapPrivate.SegmentItem;
          newItem _ LIST[[firstPage: runs.first.firstPage, numberOfBytes: YggFile.BytesForPages[runs.first.pages], runs: LIST[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, pages: runs.first.pages]]]];
          IF lastSegment = NIL THEN thisPDL.first.segments _ newItem ELSE lastSegment.rest _ newItem;
          };
        ENDLOOP;
      ENDLOOP;
    releaseDoc[doc];
    };
  };

-- Set the byte size of the fileUse part, trimming or zeroing the per-segment
-- numberOfBytes values to match.
SetByteSize: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, fileUse: ATOM, bytes: INT] ~ {
  doc: YggInternal.Document _ NIL;
  partType: YggDIDMapPrivate.PartType;
  partType _ mapFileUseToPartType[fileUse];
  IF (doc _ LookupInCache[did]) = NIL THEN ERROR
  ELSE {
    doc.modifiedDIDMap _ TRUE;
    grapDoc[doc];
    noteUseInTransaction[doc, tid];
    FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
      IF pdl.first.indexName = fileUse THEN {
        FOR segs: LIST OF YggDIDMapPrivate.SegmentItem _ pdl.first.segments, segs.rest UNTIL segs = NIL DO
          firstByte: CARD;
          firstByte _ YggFile.BytesForPages[segs.first.firstPage];
          IF INT[firstByte] > bytes THEN segs.first.numberOfBytes _ 0  -- segment lies wholly past the new size
          ELSE {
            runPages: YggFile.PageCount _ 0;
            runBytes: CARD _ 0;
            FOR lori: LIST OF YggDIDMapPrivate.RunItem _ segs.first.runs, lori.rest UNTIL lori = NIL DO
              runPages _ runPages + lori.first.pages;
              ENDLOOP;
            runBytes _ YggFile.BytesForPages[runPages];
            segs.first.numberOfBytes _ IF INT[firstByte + runBytes] > bytes THEN bytes - firstByte ELSE runBytes;
            };
          ENDLOOP;
        EXIT;
        };
      REPEAT FINISHED => { IF bytes > 0 THEN ERROR; };  -- no such part: only a zero size is acceptable
      ENDLOOP; -- pdl
    releaseDoc[doc];
    };
  };

-- Internal hashing utilities.

-- Hash a DID into one of 64 buckets.
-- NOTE(review): indexes Hash64Table with a value that is itself already a
-- Hash64Table entry (via Hash64) — confirm this double lookup is intended
-- rather than Basics.BITAND[03Fh, did.didLow] alone.
Hash64DID: PROC [did: DID] RETURNS [bucket: CARD] ~ {
  RETURN [Hash64Table[Basics.BITAND[03Fh, Hash64[did.didLow]]]];
  };
Hash64: PROC [c: CARD] RETURNS [bucket: CARD] ~ {
  RETURN [Hash64Table[PBasics.BITAND[03Fh, c]]];
  };

-- Map a file-use atom to the corresponding document part type; index parts
-- are recognized by the YggFixedNames.IndexPrefix prefix of the atom's pname.
mapFileUseToPartType: PROC [fileUse: ATOM] RETURNS [partType: YggDIDMapPrivate.PartType] ~ {
  SELECT fileUse FROM
    $root => partType _ root;
    $contents => partType _ contents;
    $outlinks => partType _ outlinks;
    $attributes => partType _ attributes;
    $directoryContents => partType _ directoryContents;
    ENDCASE => {
      pName: Rope.Text;
      pName _ Atom.GetPName[fileUse];
      IF Rope.Equal[YggFixedNames.IndexPrefix, Rope.Substr[pName, 0, YggFixedNames.IndexPrefixSize]] THEN {
        partType _ index;
        }
      ELSE ERROR;
      };
  };

-- Transaction lifecycle callbacks.

-- Write all documents modified under tid to the DID map, taking the 64-way
-- bucket latches in ascending bucket order (to avoid deadlock) and recording
-- them in dt.bucketsToUnlatch for release at Commit/Abort.
PreCommit: PUBLIC PROC [tid: Camelot.tidT] ~ {
  data: RedBlackTree.UserData;
  scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top];
  refTid: REF Camelot.tidT _ NEW [Camelot.tidT _ tid];
  data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scrTIDObj];
  RecycleTIDObj[scrTIDObj];
  IF data # NIL THEN {
    dt: DocTrans;
    max64Done: INT _ -1;  -- highest bucket value already processed
    dt _ NARROW[data];
    DO -- one iteration per distinct bucket value, ascending
      minBucket: CARD _ 65;  -- 65 = sentinel: no bucket left
      numberInMinBucket: CARD _ 0;
      FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents to find the next 64 value to do
        hashedDidTo64: CARD;
        hashedDidTo64 _ Hash64DID[docList.first.did];
        IF hashedDidTo64 = minBucket THEN numberInMinBucket _ numberInMinBucket + 1;
        IF INT[hashedDidTo64] > max64Done AND hashedDidTo64 < minBucket THEN {
          minBucket _ hashedDidTo64;
          numberInMinBucket _ 1;
          };
        ENDLOOP;
      IF minBucket = 65 THEN EXIT;
      IF INT[minBucket] = max64Done THEN EXIT;
      LockBucket64[minBucket, tid];
      dt.bucketsToUnlatch _ CONS[minBucket, dt.bucketsToUnlatch];
      FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and process those with the correct hash value
        hashedDidTo64: CARD;
        hashedDidTo64 _ Hash64DID[docList.first.did];
        IF hashedDidTo64 = minBucket THEN {
          writeDocumentToMap[docList.first, refTid];
          };
        ENDLOOP;
      max64Done _ minBucket;
      ENDLOOP;
    };
  };

-- Release the given bucket latches.
DropLatches: PROC [buckets: LIST OF CARD, tid: Camelot.tidT] ~ {
  FOR nowBuck: LIST OF CARD _ buckets, nowBuck.rest UNTIL nowBuck = NIL DO
    UnlockBucket64[nowBuck.first, tid];
    ENDLOOP;
  };

-- Commit tid: clear modified flags, delete destroyed docs from the cache,
-- drop bucket latches, and forget the transaction's DocTrans record.
Commit: PUBLIC PROC [tid: Camelot.tidT] ~ {
  data: RedBlackTree.UserData;
  scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top];
  data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scrTIDObj];
  IF data # NIL THEN {
    dt: DocTrans;
    dt _ NARROW[data];
    FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and turn off the modified flag
      docList.first.modifiedDIDMap _ FALSE;
      IF docList.first.destroyed THEN {
        Delete[docList.first];
        };
      ENDLOOP;
    DropLatches[dt.bucketsToUnlatch, tid];
    };
  [] _ RedBlackTree.Delete[ transactionToModifiedDocumentMap, scrTIDObj];
  RecycleTIDObj[scrTIDObj];
  };

-- Abort tid: evict every document it touched from the cache (so stale state
-- is re-read from the map), drop latches, forget the DocTrans record.
Abort: PUBLIC PROC [tid: Camelot.tidT] ~ {
  data: RedBlackTree.UserData;
  scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top];
  data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scrTIDObj];
  IF data # NIL THEN {
    dt: DocTrans;
    max64Done: INT _ -1;  -- NOTE(review): unused in Abort (copied from PreCommit?)
    dt _ NARROW[data];
    FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and remove them from the cache
      Delete[ docList.first];
      ENDLOOP;
    DropLatches[dt.bucketsToUnlatch, tid];
    };
  [] _ RedBlackTree.Delete[ transactionToModifiedDocumentMap, scrTIDObj];
  RecycleTIDObj[scrTIDObj];
  };

-- Open a component file for each DocItem in doc's parsed doc list, rebuilding
-- each part's run list and byte size from its segments.
openFilesFromParsedDocList: PROC [doc: YggInternal.Document, did: YggDID.DID, tid: Camelot.tidT] = {
  FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO
    atomCode: ATOM _ NIL;
    byteSize: INT _ 0;
    theRuns: YggDIDMap.RunList _ NIL;
    theFile: YggFile.FileHandle;
    SELECT pdl.first.partType FROM
      root => atomCode _ $root;
      contents => atomCode _ $contents;
      outlinks => atomCode _ $outlinks;
      index => { atomCode _ pdl.first.indexName; };
      directoryContents => atomCode _ $directoryContents;
      attributes =>
atomCode _ $attributes;
      ENDCASE => atomCode _ $huh;
    FOR segments: LIST OF YggDIDMapPrivate.SegmentItem _ pdl.first.segments, segments.rest UNTIL segments = NIL DO
      nowPage: CARD _ segments.first.firstPage;
      byteSize _ MAX[byteSize, YggFile.BytesForPages[segments.first.firstPage] + segments.first.numberOfBytes];
      FOR runs: LIST OF YggDIDMapPrivate.RunItem _ segments.first.runs, runs.rest UNTIL runs = NIL DO
        theRuns _ CONS[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, firstPage: nowPage, pages: runs.first.pages, leader: FALSE], theRuns];
        nowPage _ nowPage + runs.first.pages;
        ENDLOOP;
      ENDLOOP;
    theFile _ YggFile.Open[runList: theRuns, byteSize: byteSize, did: did, fileUse: atomCode, verifyLeaderNow: FALSE];
    doc.componentFiles _ CONS[theFile, doc.componentFiles];
    ENDLOOP;
  };

-- Raw text marshalling for map entries.

-- Copy text into the entry at word offset `offset`; returns the relative
-- pointer to the stored TextRep and its size in words (rounded up).
AppendText: UNSAFE PROC [dPP: YggDIDMapPrivate.DocPartPtr, text: Rope.Text, offset: CARD] RETURNS [textRP: YggDIDMapPrivate.TextRP, words: CARD] = UNCHECKED {
  textRep: LONG POINTER TO YggDIDMapPrivate.TextRep;
  byteOffset: CARD16 _ offset*UNITS[CARD32];
  textRP _ LOOPHOLE[byteOffset];
  textRep _ @dPP[textRP];
  LOOPHOLE[textRep, LONG POINTER TO CARD16]^ _ Rope.Length[text]; -- sets textRep.length
  [] _ PBasics.ByteBlt [
    to: [ BASE[DESCRIPTOR[textRep]], 0, textRep.length ],
    from: [ BASE[DESCRIPTOR[text]], 0, textRep.length ] ];
  -- round the TextRep size up to a whole number of WORDSInWords units
  words _ (WORDS[YggDIDMapPrivate.TextRep[textRep.length]] + WORDSInWords - 1) / WORDSInWords;
  };

-- Copy a stored TextRep back out into a REF TEXT (caller sizes `text`).
GetText: UNSAFE PROC [textRep: LONG POINTER TO YggDIDMapPrivate.TextRep, text: REF TEXT] = UNCHECKED {
  nBytes: CARD;
  text.length _ textRep.length;
  nBytes _ PBasics.ByteBlt [
    to: [ BASE[DESCRIPTOR[text]], 0, textRep.length ],
    from: [ BASE[DESCRIPTOR[textRep]], 0, textRep.length ] ];
  };

-- DID allocation.
didsCondition: CONDITION;

-- Hand out the next DID. Waits while the preallocated range is exhausted,
-- periodically forking GrabSomeDIDs; also refills in the background when the
-- remaining range drops to ThresholdToGrabDIDs.
GetNextDID: ENTRY PROC RETURNS [did: YggDID.DID] ~ {
  ENABLE UNWIND => {};
  loopCount: INT _ 0;
  WHILE NextDID.didHigh = LastDIDAllocateable.didHigh AND NextDID.didLow = LastDIDAllocateable.didLow DO
    WAIT didsCondition;
    loopCount _ loopCount + 1;
    IF loopCount > 5 THEN TRUSTED {loopCount _ 0; Process.Detach[FORK GrabSomeDIDs[]];};
    ENDLOOP;
  did _ NEW[YggDIDPrivate.DIDRep _ [NextDID.didHigh, NextDID.didLow]];
  NextDID.didLow _ NextDID.didLow + 1;
  IF LastDIDAllocateable.didLow - NextDID.didLow <= ThresholdToGrabDIDs THEN TRUSTED {Process.Detach[FORK GrabSomeDIDs[]];};
  };

GrabberCount: CARD _ 0;  -- nonzero while a GrabSomeDIDs instance is running

-- Durably reserve the next batch of DIDs via Camelot recoverable storage,
-- then publish the new upper bound and wake waiters. Self-serializing: only
-- one instance proceeds at a time (see onlyOne).
GrabSomeDIDs: PROC [] ~ {
  newLastDIDAllocateable: REF DIDRep _ NEW[DIDRep];
  newHigh: BOOL _ FALSE;
  moreDIDsTID: Camelot.tidT;
  kernCode: Mach.kernReturnT;
  onlyOne: ENTRY PROC RETURNS [exit: BOOL _ FALSE] ~ {
    ENABLE UNWIND => {};
    IF GrabberCount # 0 THEN RETURN[TRUE];  -- another grabber is already active
    GrabberCount _ GrabberCount + 1;
    };
  IF onlyOne[] THEN RETURN;
  newLastDIDAllocateable.didHigh _ LastDIDAllocateable.didHigh;
  newLastDIDAllocateable.didLow _ LastDIDAllocateable.didLow + NumberOfDIDsToGrap;
  IF newLastDIDAllocateable.didLow > LastDidLow THEN { -- roll over into a new didHigh
    newLastDIDAllocateable.didHigh _ newLastDIDAllocateable.didHigh + 1;
    newLastDIDAllocateable.didLow _ NumberOfDIDsToGrap;
    newHigh _ TRUE;
    };
  DO -- retry until the allocation transaction commits
    outcome: YggEnvironment.Outcome;
    status: INT _ -1;
    addrNewLastDIDAllocateable: Mach.pointerT;
    <<[newTid: moreDIDsTID, kernCode: kernCode] _ Camelot.TABegin[taPort: YggEnvironment.taPort, parentTid: YggEnvironment.nullTransID, transType: ttNvServerBased, raiseSignal: TRUE];>>
    moreDIDsTID _ YggTransaction.CreateTrans[YggEnvironment.nullTransID];
    kernCode _ Camelot.DSPinObject[dsPort: YggEnvironment.dsPort, tid: moreDIDsTID, optr: MaxDIDOptr, size: BYTES[DIDRep], raiseSignal: TRUE];
    TRUSTED {addrNewLastDIDAllocateable _ LOOPHOLE[newLastDIDAllocateable, CARD32]};
    kernCode _ Camelot.DSLogNewValue[dsPort: YggEnvironment.dsPort, tid: moreDIDsTID, optr: MaxDIDOptr, newValue: addrNewLastDIDAllocateable, newValueCnt: BYTES[DIDRep], raiseSignal: TRUE];
    <<[status: status, kernCode: kernCode] _ Camelot.TAEnd[taPort: YggEnvironment.taPort, tid: moreDIDsTID, protocolType: ptTwoPhased, raiseSignal: TRUE];>>
    -- finish via Yggdrasil transactions rather than raw Camelot TAEnd
    [outcome: outcome] _ YggTransaction.Finish[transID: moreDIDsTID, requestedOutcome: commit];
    IF outcome = commit THEN EXIT;
    ENDLOOP;
  IF newHigh THEN {
    m: ENTRY PROC ~ {
      ENABLE UNWIND => {};
      LastDIDAllocateable _ newLastDIDAllocateable^;
      NextDID.didHigh _ newLastDIDAllocateable.didHigh;
      NextDID.didLow _ 0;
      GrabberCount _ 0;
      BROADCAST didsCondition;
      };
    m[];
    }
  ELSE {
    n: ENTRY PROC ~ {
      LastDIDAllocateable.didLow _ newLastDIDAllocateable.didLow;
      BROADCAST didsCondition;
      GrabberCount _ 0;
      };
    n[];
    };
  };

-- Map manipulation.

-- Look did up in the persistent DID map, parsing entry data into doc.
-- An extensionCount of 0 is a self-contained entry; 1 starts an extended
-- record whose pieces (extensionCount > 1) are accumulated and parsed last.
LookupInMap: PROC [did: YggDID.DID, tid: Camelot.tidT, doc: YggInternal.Document] RETURNS [found: BOOL _ FALSE, gottaBeInCache: BOOL _ FALSE] = {
  notFound: BOOL _ FALSE;
  extendedData: REF EntryDataBuffer _ NIL;
  extendedDataCount: CARD _ 0;
  lowExtendedToAccept: CARD _ 0;
  highExtendedToAccept: CARD _ 1;
  expectMore: BOOL _ TRUE;
  expectedExtendedDataSize: CARD _ 0;
  maxDataCount: CARD _ 0;
  fromLink: DID _ YggEnvironment.nullDID;
  toLink: DID _ YggEnvironment.nullDID;
  linkType: Rope.ROPE _ NIL;
  -- Scans the entries on a hash page for did with an acceptable extensionCount.
  lookupProc: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] ~ TRUSTED {
    nowEntry: ExtendibleHash.Entry _ entry;
    wordsLeft: INT _ wordsForEntriesOnPage;
    WHILE wordsLeft > 0 DO
      entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32];
      IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR;  -- corrupt page
      IF YggDID.CompareDIDs[did, @entryPtr.did] = equal AND entryPtr.extensionCount >= lowExtendedToAccept AND entryPtr.extensionCount <= highExtendedToAccept THEN {
        found _ TRUE;
        SELECT entryPtr.extensionCount FROM
          0 => { -- normal, non-extended entry
            index: INT _ 0;
            expectMore _ FALSE; -- everything is in this entry
            IF entryPtr.link THEN { -- entry carries link info: fromDID, toDID, NUL-terminated link type
              wordIndex: INT _ 0;
              noChars: INT _ 0;
              nullSeen: BOOL _ FALSE;
              fromLink _ NEW[DIDRep];
              PBasics.Move[dst: @fromLink, src: @entryPtr.data[0], nWords: WORDS[DIDRep]/WORDSInWords];
              index _ WORDS[DIDRep]/WORDSInWords;
              toLink _ NEW[DIDRep];
              PBasics.Move[dst: @toLink, src: @entryPtr.data[index], nWords: WORDS[DIDRep]/WORDSInWords];
              index _ index + index; -- NOTE(review): doubles index; presumably meant index + WORDS[DIDRep]/WORDSInWords (same value here) — confirm
              -- first pass: count the link-type characters up to the NUL
              FOR wordIndex _ index, wordIndex + 1 UNTIL nullSeen DO
                fourChars: PACKED ARRAY [0..PBasics.charsPerWord) OF CHAR;
                LOOPHOLE[fourChars, CARD] _ entryPtr.data[index]; -- NOTE(review): reads data[index], not data[wordIndex]; scan never advances past the first word — confirm
                FOR charNo: INT IN [0..Basics.charsPerWord) DO
                  IF fourChars[charNo] = 0C THEN {nullSeen_ TRUE; EXIT;};
                  noChars _ noChars + 1;
                  ENDLOOP;
                ENDLOOP;
              IF noChars = 0 THEN {
                index _ index + 1;  -- skip the empty (all-NUL) link-type word
                }
              ELSE { -- second pass: copy the link-type characters
                linkTypeRT: REF TEXT _ NIL;
                linkTypeRT _ RefText.New[noChars];
                nullSeen _ FALSE;
                UNTIL nullSeen DO
                  fourChars: PACKED ARRAY [0..PBasics.charsPerWord) OF CHAR;
                  LOOPHOLE[fourChars, CARD] _ entryPtr.data[index];
                  index _ index + 1;
                  FOR charNo: INT IN [0..Basics.charsPerWord) DO
                    IF fourChars[charNo] = 0C THEN {nullSeen_ TRUE; EXIT;};
                    linkTypeRT[noChars] _ fourChars[charNo]; -- NOTE(review): noChars continues from the counting pass rather than restarting at 0 — confirm
                    noChars _ noChars + 1;
                    ENDLOOP;
                  ENDLOOP;
                linkTypeRT.length _ noChars;
                linkType _ RefText.TrustTextAsRope[linkTypeRT];
                };
              };
            [] _ parseDocPart[@entryPtr.data[index], entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry - index, doc];
            };
          1 => { -- normal, extended entries to follow
            expectMore _ TRUE; -- everything is in this entry
            expectedExtendedDataSize _ entryPtr.data[0];  -- word 0 holds the total extended size
            extendedData _ NEW[EntryDataBuffer[expectedExtendedDataSize]];
            extendedDataCount _ entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry - 1;
            PBasics.Move[dst: @extendedData[0], src: @entryPtr.data[1], nWords: extendedDataCount];
            };
          >1 => { -- extended entry
            IF extendedDataCount + entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry > maxDataCount THEN ERROR;  -- would overrun the buffer
            PBasics.Move[dst: @extendedData[extendedDataCount], src: @entryPtr.data[0], nWords: entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry];
            extendedDataCount _ extendedDataCount + entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry;
            };
          ENDCASE => ERROR;
        };
      wordsLeft _ wordsLeft - nowEntry.entryWordSize - 1 ;
      nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]);
      REPEAT FINISHED => notFound _ TRUE;
      ENDLOOP;
    };
  WHILE expectMore DO
    TRUSTED {ExtendibleHash.ReadEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], proc: lookupProc]};
    IF notFound THEN EXIT;
    ENDLOOP;
  IF maxDataCount # 0 THEN TRUSTED {
    IF maxDataCount # extendedDataCount THEN ERROR;  -- extended record incomplete
    [] _ parseDocPart[@extendedData[0], maxDataCount, doc];
    };
  };

-- Insert/update did's entry in the map. (Definition continues beyond this view.)
InsertIntoMap: PROC [did: DID, doc: YggInternal.Document, buffer: REF EntryDataBuffer, wordsInBuffer: INT, link: BOOL, refTid: REF Camelot.tidT] = {
  extensionCount: INT _ 0;
  foundEntry: BOOL _ TRUE;
  -- Finds the existing entry for (did, extensionCount) to update, if any.
  lookupProc: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] RETURNS[entryToUpdate: ExtendibleHash.Entry _ NIL] ~ TRUSTED {
    nowEntry: ExtendibleHash.Entry _ entry;
    wordsLeft: INT _ wordsForEntriesOnPage;
    WHILE wordsLeft > 0 DO
      entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32];
      IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR;
      IF YggDID.CompareDIDs[did, @entryPtr.did] = equal AND entryPtr.extensionCount = extensionCount THEN {
        entryToUpdate _ nowEntry;
        RETURN;
        };
      wordsLeft _ wordsLeft - nowEntry.entryWordSize - 1 ;
      nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]);
      ENDLOOP;
    };
  -- Finds the entry for (did, extensionCount) to delete; sets foundEntry.
  lookupForDelete: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] RETURNS[entryToDelete: ExtendibleHash.Entry _ NIL] ~ TRUSTED {
    nowEntry: ExtendibleHash.Entry _ entry;
    wordsLeft: INT _ wordsForEntriesOnPage;
    foundEntry _ FALSE;
    WHILE wordsLeft > 0 DO
      entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32];
      IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR;
      IF YggDID.CompareDIDs[did, @entryPtr.did] = equal AND entryPtr.extensionCount = extensionCount THEN {
        entryToDelete _ nowEntry;
        foundEntry _ TRUE;
        RETURN;
        };
      wordsLeft _ wordsLeft
- nowEntry.entryWordSize - 1 ; nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]); ENDLOOP; }; totalEntrySize: INT; totalEntrySize _ YggDIDMapPrivate.Word32OverheadPerEntry + wordsInBuffer; IF totalEntrySize <= MaxWordsPerEntry THEN { modifyProc: UNSAFE PROC [entry: ExtendibleHash.Entry] ~ TRUSTED { entPtr: LONG POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry, LONG POINTER] + UNITS[CARD32]; entPtr.size _ totalEntrySize; entPtr.extensionCount _ 0; entPtr.did _ did^; entPtr.link _ link; PBasics.Move[dst: @entPtr.data[0], src: LOOPHOLE[buffer], nWords: wordsInBuffer]; }; extensionCount _ 0; TRUSTED {ExtendibleHash.UpdateEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], words: totalEntrySize, findProc: lookupProc, modifyProc: modifyProc, updateType: insertOrReplace, clientPerCallData: refTid];}; extensionCount _ 1; } ELSE { modifyProcFirst: UNSAFE PROC [entry: ExtendibleHash.Entry] ~ TRUSTED { writeWords: INT _ MaxWordsPerEntry - YggDIDMapPrivate.Word32OverheadPerEntry - 1; entPtr: LONG POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry, LONG POINTER] + UNITS[CARD32]; entPtr.size _ MaxWordsPerEntry; entPtr.extensionCount _ 1; entPtr.did _ did^; entPtr.data[0] _ wordsInBuffer; leftToWrite _ wordsInBuffer - writeWords; PBasics.Move[dst: @entPtr.data[1], src: LOOPHOLE[buffer], nWords: writeWords]; writeOffset _ writeWords; }; leftToWrite: INT _ 0; writeOffset: INT _ 0; extensionCount _ 1; TRUSTED {ExtendibleHash.UpdateEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], words: MaxWordsPerEntry, findProc: lookupProc, modifyProc: modifyProcFirst, updateType: insertOrReplace, clientPerCallData: refTid]; }; extensionCount _ 0; TRUSTED {ExtendibleHash.DeleteEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], proc: lookupForDelete, clientPerCallData: refTid];}; DO modifyProcExtended: UNSAFE PROCEDURE [entry: ExtendibleHash.Entry] = UNCHECKED{ entPtr: LONG POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry, LONG 
POINTER] + UNITS[CARD32]; entPtr.size _ writeThisTime+YggDIDMapPrivate.Word32OverheadPerEntry; entPtr.extensionCount _ extensionCount; entPtr.did _ did^; entPtr.data[0] _ writeThisTime; PBasics.Move[dst: @entPtr.data[1], src: LOOPHOLE[buffer, LONG POINTER]+writeOffset, nWords: writeThisTime]; }; writeThisTime: INT _ MIN[MaxWordsPerEntry - YggDIDMapPrivate.Word32OverheadPerEntry, leftToWrite]; leftToWrite _ leftToWrite - writeThisTime; extensionCount _ extensionCount + 1; TRUSTED {ExtendibleHash.UpdateEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], words: writeThisTime + YggDIDMapPrivate.Word32OverheadPerEntry, findProc: lookupProc, modifyProc: modifyProcExtended, updateType: insertOrReplace, clientPerCallData: refTid]; }; IF leftToWrite < 0 THEN ERROR; IF leftToWrite = 0 THEN EXIT; ENDLOOP; }; foundEntry _ TRUE; WHILE foundEntry DO -- insure that old extra extentions do not exist TRUSTED {ExtendibleHash.DeleteEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], proc: lookupForDelete, clientPerCallData: refTid];}; extensionCount _ extensionCount + 1; ENDLOOP; }; lock64Condition: CONDITION; LockBucket64: ENTRY PROC [bucket: INT, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; WHILE Lock64Table[bucket].interlock DO WAIT lock64Condition ENDLOOP; Lock64Table[bucket].interlock _ TRUE; Lock64Table[bucket].tid _ tid; }; UnlockBucket64: ENTRY PROC [bucket: INT, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; IF ~Lock64Table[bucket].interlock OR Lock64Table[bucket].tid # tid THEN ERROR; Lock64Table[bucket].interlock _ FALSE; Lock64Table[bucket].tid _ YggEnvironment.nullTransID; BROADCAST lock64Condition; }; getUpdates: PROC [tid: Camelot.tidT] RETURNS [updateList: LIST OF updates] ~ { ERROR; }; parseDocPart: PROC [dataToParse: LONG POINTER, size: CARD, doc: YggInternal.Document] RETURNS [found: BOOL _ FALSE] = { <> parsedDocList: LIST OF YggDIDMapPrivate.DocItem _ NIL; parsedDocListTail: LIST OF YggDIDMapPrivate.DocItem _ NIL; index: CARD _ 0; WHILE 
index < size*WORDSInWords DO TRUSTED { segIndex: LONG POINTER TO YggDIDMapPrivate.SegmentRep; docPart: LONG POINTER TO YggDIDMapPrivate.DocPart; newDocList: LIST OF YggDIDMapPrivate.DocItem _ NIL; segmentsListTail: LIST OF YggDIDMapPrivate.SegmentItem _ NIL; startOffset: CARD _ 0; offsetToLastSegment: CARD _ 0; indexName: ATOM _ NIL; docPart _ LOOPHOLE[dataToParse + index]; IF index + docPart.docPartSize*WORDSInWords > size*WORDSInWords THEN ERROR; IF LOOPHOLE[docPart.indexName, CARD16] # 0 THEN { r: Rope.Text; docPartPtr: YggDIDMapPrivate.DocPartPtr = LOOPHOLE[docPart]; indexNamePtr: LONG POINTER TO YggDIDMapPrivate.TextRep; indexNamePtr _ @docPartPtr[docPartPtr.indexName]; IF indexNamePtr.length = 0 THEN ERROR; offsetToLastSegment _ LOOPHOLE[indexNamePtr, CARD] - LOOPHOLE[docPart, CARD]; r _ Rope.NewText[indexNamePtr.length]; GetText[indexNamePtr, LOOPHOLE[r]]; indexName _ Atom.MakeAtom[r]; } ELSE { offsetToLastSegment _ docPart.docPartSize*WORDSInWords; SELECT docPart.partType FROM root => indexName _ $root; contents => indexName _ $contents; outlinks => indexName _ $outlinks; attributes => indexName _ $attributes; ENDCASE => ERROR; }; newDocList _ LIST[[partType: docPart.partType, indexName: indexName, slotNo: docPart.slotNo, segments: NIL]]; segIndex _ LOOPHOLE[@docPart.segments]; WHILE (startOffset _ LOOPHOLE[segIndex, CARD] - LOOPHOLE[docPart, CARD]) < offsetToLastSegment DO segSize: CARD _ 0; runs: LIST OF YggDIDMapPrivate.RunItem _ NIL; runTail: LIST OF YggDIDMapPrivate.RunItem _ NIL; newSegListItem: LIST OF YggDIDMapPrivate.SegmentItem; newSegListItem _ LIST[[firstPage: segIndex.firstPage, numberOfBytes: segIndex.numberOfBytes, runs: NIL]]; IF (segSize _ (WORDS[YggDIDMapPrivate.SegmentRep[segIndex.length]]/WORDSInWords)) > (docPart.docPartSize - (startOffset/WORDSInWords)) THEN ERROR; -- too many runs FOR inx: CARDINAL IN [0..segIndex.length) DO newRunList: LIST OF YggDIDMapPrivate.RunItem _ NIL; newRunList _ LIST[[segmentId: segIndex.runs[inx].segmentId, 
segmentPage: segIndex.runs[inx].segmentPage, pages: segIndex.runs[inx].pages]]; IF runTail = NIL THEN runs _ runTail _ newRunList ELSE { runTail.rest _ newRunList; runTail _ newRunList; }; ENDLOOP; newSegListItem.first.runs _ runs; IF newDocList.first.segments = NIL THEN newDocList.first.segments _ segmentsListTail _ newSegListItem ELSE { segmentsListTail.rest _ newSegListItem; segmentsListTail _ newSegListItem; }; segIndex _ segIndex + segSize*WORDSInWords; ENDLOOP; IF parsedDocListTail = NIL THEN parsedDocList _ parsedDocListTail _ newDocList ELSE { parsedDocListTail.rest _ newDocList; parsedDocListTail _ newDocList; }; index _ index + docPart.docPartSize*WORDSInWords; }; ENDLOOP; doc.parsedDocList _ parsedDocList; }; stockBuffer: REF EntryDataBuffer _ NIL; CheckoutStockBuffer: ENTRY PROC RETURNS [buf: REF EntryDataBuffer] = { ENABLE UNWIND => {}; IF stockBuffer = NIL THEN buf _ NEW[EntryDataBuffer[256]] ELSE { buf _ stockBuffer; stockBuffer _ NIL }; }; RecycleStockBuffer: ENTRY PROC [buf: REF EntryDataBuffer] = { stockBuffer _ buf; }; writeDocumentToMap: PROC [ doc: Document, refTid: REF Camelot.tidT] ~ { buffer: REF EntryDataBuffer _ NIL; stockBuffer: REF EntryDataBuffer _ NIL; bufferSize: CARD _ 256; link: BOOL; wordsInBuffer: INT; IF doc.destroyed THEN { lookupForDelete: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] RETURNS[entryToDelete: ExtendibleHash.Entry _ NIL] ~ TRUSTED { <> nowEntry: ExtendibleHash.Entry _ entry; wordsLeft: INT _ wordsForEntriesOnPage; foundEntry _ FALSE; WHILE wordsLeft > 0 DO entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32]; IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR; IF YggDID.CompareDIDs[doc.did, @entryPtr.did] = equal AND entryPtr.extensionCount = extensionCount THEN { entryToDelete _ nowEntry; foundEntry _ TRUE; <> RETURN; }; wordsLeft _ wordsLeft - 
nowEntry.entryWordSize - 1 ; nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]); ENDLOOP; }; extensionCount: INT _ 0; foundEntry: BOOL _ TRUE; WHILE foundEntry DO -- insure that old extra extentions do not exist foundEntry _ FALSE; TRUSTED {ExtendibleHash.DeleteEntry[hashTable: TheDIDMap, hashValue: [doc.did.didHigh, doc.did.didLow], proc: lookupForDelete, clientPerCallData: refTid];}; extensionCount _ extensionCount + 1; ENDLOOP; } ELSE { stockBuffer _ buffer _ CheckoutStockBuffer[]; DO didNotFit: BOOL _ TRUE; [wordsInBuffer: wordsInBuffer, didNotFit: didNotFit, link: link] _ writeDocToBuffer[doc, buffer, bufferSize]; IF didNotFit THEN { bufferSize _ bufferSize + bufferSize; buffer _ NEW[EntryDataBuffer[bufferSize]]; LOOP; }; EXIT; ENDLOOP; InsertIntoMap[did: doc.did, doc: doc, buffer: buffer, wordsInBuffer: wordsInBuffer, link: link, refTid: refTid]; RecycleStockBuffer[stockBuffer]; }; }; writeDocToBuffer: PROC [ doc: Document, buffer: REF EntryDataBuffer, bufferSize: INT] RETURNS [wordsInBuffer: CARD _ 0, didNotFit: BOOL _ TRUE, link: BOOL _ FALSE] ~ { <> IF doc.fromDID # YggEnvironment.nullDID THEN { linkTypeLen: INT _ 0; linkTypeWords: CARD _ 0; nowCharNum: INT _ 0; link _ TRUE; TRUSTED {PBasics.Move[dst: LOOPHOLE[@buffer[0]], src: LOOPHOLE[@(doc.fromDID^)], nWords: WORDS[DIDRep]/WORDSInWords]; }; wordsInBuffer _ WORDS[DIDRep]/WORDSInWords; TRUSTED {PBasics.Move[dst: LOOPHOLE[@buffer[wordsInBuffer]], src: LOOPHOLE[@(doc.toDID^)], nWords: WORDS[DIDRep]/WORDSInWords]; }; wordsInBuffer _ wordsInBuffer + WORDS[DIDRep]/WORDSInWords; linkTypeLen _ Rope.Length[doc.linkType]; linkTypeWords _ (linkTypeLen + PBasics.charsPerWord)/PBasics.charsPerWord; IF linkTypeWords + wordsInBuffer > CARD[bufferSize] THEN RETURN[0, TRUE]; FOR i: INT IN [0..INT[linkTypeWords]) DO fourChars: PACKED ARRAY [0..PBasics.charsPerWord) OF CHAR; FOR charNo: INT IN [0..Basics.charsPerWord) DO IF nowCharNum >= linkTypeLen THEN fourChars[charNo] _ 0C ELSE fourChars[charNo] _ 
Rope.Fetch[doc.linkType, nowCharNum]; nowCharNum _ nowCharNum + 1; ENDLOOP; TRUSTED{buffer[wordsInBuffer].card _ LOOPHOLE[fourChars, CARD];}; wordsInBuffer _ wordsInBuffer + 1; ENDLOOP; }; FOR parsedDocList: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, parsedDocList.rest UNTIL parsedDocList = NIL DO indexNameWords: CARD _ 0; segmentOffset: CARD _ 0; docPart: LONG POINTER TO YggDIDMapPrivate.DocPart; IF INT[wordsInBuffer] + WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]] > INT[bufferSize] THEN RETURN [0, TRUE]; TRUSTED { docPart _ LOOPHOLE[@buffer[wordsInBuffer]]; docPart.compression _ YggDIDMapPrivate.notCompressed; docPart.keepingModifications _ FALSE; docPart.partType _ parsedDocList.first.partType; docPart.extraFlags _ 0; docPart.slotNo _ parsedDocList.first.slotNo; docPart.indexName _ LOOPHOLE[0]; docPart.versionIdentifierString _ LOOPHOLE[0]; }; FOR segments: LIST OF YggDIDMapPrivate.SegmentItem _ parsedDocList.first.segments, segments.rest UNTIL segments = NIL DO runIndex: CARD _ 0; segIndex: LONG POINTER TO YggDIDMapPrivate.SegmentRep; IF wordsInBuffer + segmentOffset + CARD[WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]]] > CARD[bufferSize] THEN RETURN [0, TRUE]; TRUSTED { segIndex _ LOOPHOLE[@docPart.segments + segmentOffset]; segIndex.firstPage _ segments.first.firstPage; segIndex.numberOfBytes _ segments.first.numberOfBytes; segIndex.modificationDeltasOffset _ 0; segIndex.lastUsedTime _ BasicTime.Now[]; segIndex.backUpTime _ BasicTime.nullGMT; segIndex.backUpID _ 0; }; FOR rl: LIST OF YggDIDMapPrivate.RunItem _ segments.first.runs, rl.rest UNTIL rl = NIL DO IF wordsInBuffer + segmentOffset + CARD[WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[runIndex]]] > CARD[bufferSize] THEN RETURN [0, TRUE]; TRUSTED {segIndex.runs[runIndex] _ [segmentId: rl.first.segmentId, segmentPage: rl.first.segmentPage, pages: rl.first.pages]; }; runIndex _ runIndex + 1; ENDLOOP; TRUSTED 
{segIndex.length _ runIndex; }; segmentOffset _ segmentOffset + WORDS[YggDIDMapPrivate.SegmentRep[runIndex]]/WORDSInWords; ENDLOOP; TRUSTED { docPart.docPartSize _ WORDS[YggDIDMapPrivate.DocPart]/WORDSInWords - WORDS[CARD]/WORDSInWords + segmentOffset + indexNameWords; wordsInBuffer _ wordsInBuffer + docPart.docPartSize; }; IF parsedDocList.first.indexName # NIL THEN { SELECT parsedDocList.first.partType FROM root, contents, outlinks, attributes => {}; ENDCASE => { pName: Rope.Text; pName _ Atom.GetPName[parsedDocList.first.indexName]; IF wordsInBuffer + WORDS[YggDIDMapPrivate.TextRep[Rope.Length[pName]]] > CARD[bufferSize] THEN RETURN [0, TRUE]; TRUSTED { [docPart.indexName, indexNameWords] _ AppendText[dPP: docPart, text: pName, offset: docPart.docPartSize]; docPart.docPartSize _ docPart.docPartSize + indexNameWords; }; wordsInBuffer _ wordsInBuffer + indexNameWords; }; }; ENDLOOP; RETURN[wordsInBuffer: wordsInBuffer, didNotFit: FALSE]; }; <> StartUpExtendibleHash: PROC [btreeInfo: REF, initialize: BOOL _ FALSE] = { TheDIDMap _ ExtendibleHash.New [ storPrim: [referencePage: HashReferencePage, releasePage: HashReleasePage], initialState: suspended ]; IF initialize THEN { outcome: YggEnvironment.Outcome _ active; doingInit _ TRUE; initTrans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID]; ExtendibleHash.Open[ hashTable: TheDIDMap, storage: btreeInfo, pageSize: HashPageSizeInWords, initialize: initialize, initialDirectorySize: 64 ]; doingInit _ FALSE; [outcome: outcome] _ YggTransaction.Finish[transID: initTrans, requestedOutcome: commit]; IF outcome # commit THEN ERROR; } ELSE { ExtendibleHash.Open[ hashTable: TheDIDMap, storage: btreeInfo, pageSize: HashPageSizeInWords, initialize: initialize, initialDirectorySize: 64 ]; }; }; debugReferencePageNumber: CARD _ 0; debugReferencePageNumberCount: INT _ 0; doingInit: BOOL _ FALSE; initTrans: YggEnvironment.TransID; <> <> <> <> <<];>> <> <<};>> <> <> <> <> <<};>> <> <> <> <> <<};>> HashReferencePage: PUBLIC 
ExtendibleHash.ReferencePage = { <> addVMPageSet: ENTRY PROC = { CurrentVMPageSetsForDIDMap _ CONS[vMPageSet, CurrentVMPageSetsForDIDMap]; }; debugReferencePageNumberCount: INT _ 0; vMPageSet: YggBuffMan.VMPageSet; IF number = debugReferencePageNumber THEN debugReferencePageNumberCount _ debugReferencePageNumberCount + 1; IF type = new THEN { IF YggFile.Info[FileForDIDMap, YggEnvironment.nullTransID].size < (INT[number]+1)*FilePagesPerBtreePage THEN YggFile.SetSize[FileForDIDMap, (number+25)*FilePagesPerBtreePage, YggEnvironment.nullTransID]; vMPageSet _ YggBuffMan.ReadPages[fileHandle: FileForDIDMap, tid: YggEnvironment.nullTransID, pageRun: [firstPage: number+FilePagesPerBtreePage, count: FilePagesPerBtreePage]]; } ELSE { vMPageSet _ YggBuffMan.ReadPages[fileHandle: FileForDIDMap, tid: YggEnvironment.nullTransID, pageRun: [firstPage: number*FilePagesPerBtreePage+FilePagesPerBtreePage, count: FilePagesPerBtreePage]]; }; IF vMPageSet = NIL OR vMPageSet.rest # NIL THEN ERROR; addVMPageSet[]; RETURN[vMPageSet.first.buffer]; }; HashReleasePage: PUBLIC ExtendibleHash.ReleasePage = { <> findVMPageSet: ENTRY PROC [number: ExtendibleHash.PageNumber] RETURNS [vmPageSet: LIST OF YggBuffMan.VMPageSet] = { ENABLE UNWIND => {}; prevlovmps: LIST OF YggBuffMan.VMPageSet _ NIL; FOR lovmps: LIST OF YggBuffMan.VMPageSet _ CurrentVMPageSetsForDIDMap, lovmps.rest UNTIL lovmps = NIL DO IF lovmps.first.pageRun.segmentPage = INT[number]*FilePagesPerBtreePage+FilePagesPerBtreePage THEN { RETURN[lovmps.first.vmPageSet]; }; prevlovmps _ lovmps; ENDLOOP; ERROR; }; removeVMPageSet: ENTRY PROC = { ENABLE UNWIND => {}; prevlovmps: LIST OF YggBuffMan.VMPageSet _ NIL; FOR lovmps: LIST OF YggBuffMan.VMPageSet _ CurrentVMPageSetsForDIDMap, lovmps.rest UNTIL lovmps = NIL DO IF lovmps.first.first.pageRun.firstPage = INT[number]*FilePagesPerBtreePage+FilePagesPerBtreePage THEN { IF prevlovmps = NIL THEN CurrentVMPageSetsForDIDMap _ lovmps.rest ELSE prevlovmps.rest _ lovmps.rest; EXIT; }; prevlovmps _ 
lovmps; ENDLOOP; }; debugReferencePageNumberCount: INT _ 0; releaseState: YggBuffMan.ReleaseState; vMPageSet: YggBuffMan.VMPageSet _ NIL; refTid: REF Camelot.tidT _ NARROW[clientPerCallData]; IF number = debugReferencePageNumber THEN debugReferencePageNumberCount _ debugReferencePageNumberCount + 1; IF refTid = NIL THEN ERROR; SELECT update FROM modified => { releaseState _ writeBatchedNoWait; }; unchanged => { releaseState _ clean; }; ENDCASE ; vMPageSet _ findVMPageSet[number]; IF doingInit THEN { YggBuffMan.WritePages[fileHandle: FileForDIDMap, tid: initTrans, to: number+FilePagesPerBtreePage, nPages: FilePagesPerBtreePage, from: vMPageSet.first.buffer]; } ELSE IF update = modified THEN { YggBuffMan.WritePages[fileHandle: FileForDIDMap, tid: refTid^, to: number+FilePagesPerBtreePage, nPages: FilePagesPerBtreePage, from: vMPageSet.first.buffer]; }; IF vMPageSet # NIL THEN { YggBuffMan.ReleaseVMPageSet[vMPageSet: vMPageSet, releaseState: releaseState, keep: TRUE]; removeVMPageSet[]; }; }; <> noteUseInTransaction: ENTRY PROC [doc: Document, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; data: RedBlackTree.UserData; scratchTIDObj.btid _ tid.top; data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scratchTIDObj]; IF data = NIL THEN { dt: DocTrans _ NEW[DocTransRep]; dt.btidForDoc _ NEW[TIDObjRep _ [tid.top]]; dt.docs _ LIST[doc]; RedBlackTree.Insert[transactionToModifiedDocumentMap, dt, scratchTIDObj]; } ELSE { dt: DocTrans; dt _ NARROW[data]; FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO IF YggDID.EqualDIDs[docList.first.did, doc.did] THEN RETURN; ENDLOOP; dt.docs _ CONS[doc, dt.docs]; }; }; <<>> GetKeyProc: RedBlackTree.GetKey = { <> docTrans: DocTrans _ NARROW[data]; RETURN[ docTrans.btidForDoc ]; }; CompareProc: RedBlackTree.Compare = { <> fileTransData: DocTrans _ NARROW[data]; keyData: TIDObj _ NARROW[k]; SELECT keyData.highTicker FROM > fileTransData.btidForDoc.highTicker => RETURN [greater]; < 
fileTransData.btidForDoc.highTicker => RETURN [less]; ENDCASE => { SELECT keyData.lowTicker FROM > fileTransData.btidForDoc.lowTicker => RETURN [greater]; < fileTransData.btidForDoc.lowTicker => RETURN [less]; ENDCASE => RETURN [equal]; }; }; stockTIDObj: TIDObj _ NIL; CheckoutTIDObj: ENTRY PROC [btid: Camelot.btidT] RETURNS [k: TIDObj] = { ENABLE UNWIND => {}; IF stockTIDObj = NIL THEN k _ NEW [TIDObjRep] ELSE { k _ stockTIDObj; stockTIDObj _ NIL }; k.btid _ btid; }; RecycleTIDObj: ENTRY PROC [k: TIDObj] = { stockTIDObj _ k; }; <> <> GetNext: PUBLIC --ENTRY-- PROCEDURE[doc: Document] RETURNS [nextDocument: Document] = BEGIN -- errors defined in FileMap: none. ENABLE UNWIND => NULL; RETURN[EnumerateNext[doc]]; END; <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <> <<[hashDocument: HashDocument];>> <> <> <> <> <> <> <> <> <> HashDocument: TYPE = Document; Key: TYPE = YggDID.DID; ClientHashInit: INTERNAL PROCEDURE[numHashSlotsDesired: NAT] RETURNS [numHashSlotsAllowed: NAT] = BEGIN divisor, quotient, remainder: CARDINAL; DO divisor _ 2; DO <<[quotient, remainder] _ Basics.DivMod[numHashSlotsDesired, divisor];>> quotient _ numHashSlotsDesired / divisor; remainder _ numHashSlotsDesired MOD divisor; IF remainder = 0 THEN EXIT; -- this number is not prime. IF quotient < divisor THEN RETURN[numHashSlotsDesired]; -- prime. divisor _ divisor + 1; ENDLOOP; numHashSlotsDesired _ numHashSlotsDesired + 1; ENDLOOP; END; ClientHash: INTERNAL PROCEDURE[hashDocument: HashDocument] RETURNS [index: NAT--[0..numHashSlots)--] = INLINE { index _ YggDID.HashDID[hashDocument.did] MOD numHashSlots; }; ClientEqualKeys: INTERNAL PROCEDURE[hashDocument1, hashDocument2: HashDocument] RETURNS [equal: BOOLEAN] = INLINE { RETURN[YggDID.EqualDIDs[hashDocument1.did, hashDocument2.did]]; }; <> <> hashSlots: REF HashSlots _ NIL; HashSlots: TYPE = RECORD[SEQUENCE nSlots: NAT OF HashDocument]; numHashSlots: NAT _ 0; -- boy, will they be sorry if they don't init this package. 
lookupHashDocument: HashDocument _ NIL; -- for the "package's" use only.
lruListTop: Document _ NIL; -- head of the doubly-linked LRU list of finalized documents
lruListTail: Document _ NIL;
numberOfHashDocuments: INT _ 0; -- current cache population
desiredNumberOfHashDocuments: INT _ 500; -- CacheTrimProcess trims above this
<>
HashPkgCallerProgrammingError: ERROR = CODE; -- various fatal conditions.
HashPkgDuplicateKey: ERROR = CODE; -- from Insert.

InitializeHashTable: INTERNAL PROCEDURE[numHashSlotsDesired: NAT, hashDocument: HashDocument] = BEGIN -- errors: HashPkgCallerProgrammingError (numHashSlotsDesired = 0).
-- Allocate and clear the slot array (size rounded up to a prime by ClientHashInit) and
-- install hashDocument as the package's scratch lookup key holder.
numHashSlots _ ClientHashInit[numHashSlotsDesired];
IF numHashSlots = 0 THEN ERROR HashPkgCallerProgrammingError;
lookupHashDocument _ hashDocument;
hashSlots _ NEW[HashSlots[numHashSlots]];
FOR index: NAT IN [0..numHashSlots) DO hashSlots[index] _ NIL; ENDLOOP;
END;

Insert: ENTRY PROC [hashDocument: HashDocument] = {
<>
-- Add hashDocument to its hash chain and enable finalization on it; raises
-- HashPkgDuplicateKey if an undestroyed entry with the same DID is already present.
ENABLE UNWIND => {};
index: NAT _ ClientHash[hashDocument];
FOR newHashDocument: HashDocument _ hashSlots[index], newHashDocument.next UNTIL newHashDocument = NIL DO
  IF ClientEqualKeys[newHashDocument, hashDocument] AND ~hashDocument.destroyed THEN ERROR HashPkgDuplicateKey;
  ENDLOOP;
hashDocument.next _ hashSlots[index];
hashSlots[index] _ hashDocument;
SafeStorage.EnableFinalization[hashDocument];
numberOfHashDocuments _ numberOfHashDocuments + 1;
};

LookupInCache: ENTRY PROC [key: Key, destroyedOK: BOOL _ FALSE] RETURNS [hashDocument: HashDocument] = {
<>
-- Find the cached document for key (skipping destroyed ones unless destroyedOK).  On a
-- hit the document is re-enabled for finalization and unlinked from the LRU list (it is
-- in active use again); returns NIL on a miss.
ENABLE UNWIND => {};
lookupHashDocument.did _ key;
FOR hashDocument _ hashSlots[ClientHash[lookupHashDocument]], hashDocument.next UNTIL hashDocument = NIL DO
  IF ClientEqualKeys[hashDocument, lookupHashDocument] AND (destroyedOK OR ~hashDocument.destroyed) THEN {
    SafeStorage.EnableFinalization[hashDocument];
    IF hashDocument.lruListPrev # NIL OR hashDocument.lruListNext # NIL OR lruListTop = hashDocument THEN {
      <>
      -- splice the document out of the LRU list
      IF hashDocument.lruListPrev = NIL THEN { lruListTop _ hashDocument.lruListNext }
      ELSE { hashDocument.lruListPrev.lruListNext _ hashDocument.lruListNext; };
      IF lruListTop # NIL THEN lruListTop.lruListPrev _ NIL;
      IF hashDocument.lruListNext = NIL THEN { lruListTail _ hashDocument.lruListPrev }
      ELSE hashDocument.lruListNext.lruListPrev _ hashDocument.lruListPrev;
      IF lruListTail # NIL THEN lruListTail.lruListNext _ NIL;
      };
    hashDocument.lruListPrev _ NIL;
    hashDocument.lruListNext _ NIL;
    RETURN;
    };
  ENDLOOP;
RETURN[NIL];
};

FlushDocument: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT] = {
<>
-- Remove the document for did from the cache, if it is cached.
doc: YggInternal.Document _ NIL;
-- BUG FIX: the test was "= NIL", which deleted only when the lookup FAILED and so always
-- passed NIL into Delete (NIL dereference in ClientHash); the intent is to delete the
-- document when it IS present.
IF (doc _ LookupInCache[did]) # NIL THEN {
  Delete[doc];
  };
};

Delete: ENTRY PROC [hashDocument: HashDocument] = {
<>
-- Monitored wrapper for InnerDelete.
InnerDelete[hashDocument];
};

InnerDelete: INTERNAL PROC [hashDocument: HashDocument] = {
<>
-- Unlink hashDocument from its hash chain.  If finalization is still enabled the entry
-- is only marked okToRemoveFromDIDMapCache (the finalizer will come back for it);
-- otherwise it is removed immediately.  Raises HashPkgCallerProgrammingError if absent.
ENABLE UNWIND => {};
index: NAT _ ClientHash[hashDocument];
newHashDocument: HashDocument _ NIL;
prevHashDocument: HashDocument _ NIL;
FOR newHashDocument _ hashSlots[index], newHashDocument.next UNTIL newHashDocument = NIL DO
  IF ClientEqualKeys[newHashDocument, hashDocument] THEN EXIT;
  prevHashDocument _ newHashDocument;
  REPEAT FINISHED => ERROR HashPkgCallerProgrammingError;
  ENDLOOP;
IF SafeStorage.IsFinalizationEnabled[newHashDocument] THEN {
  newHashDocument.okToRemoveFromDIDMapCache _ TRUE;
  }
ELSE {
  -- NOTE(review): the unlink below follows hashDocument.next although the chain node
  -- actually located is newHashDocument; the two are the same object only when the
  -- argument itself is on the chain (ClientEqualKeys matches by DID) -- confirm.
  IF prevHashDocument = NIL THEN hashSlots[index] _ hashDocument.next
  ELSE prevHashDocument.next _ hashDocument.next;
  numberOfHashDocuments _ numberOfHashDocuments - 1;
  };
};

<>
EnumerateNext: ENTRY PROCEDURE[prevHashDocument: HashDocument] RETURNS [hashDocument: HashDocument] = {
<>
-- Stateless enumeration step: given the previous document (or NIL to start), return the
-- next one in hash-chain order, or NIL when the table is exhausted.
ENABLE UNWIND => {};
index: NAT;
IF prevHashDocument = NIL THEN index _ 0
ELSE {
  index _ ClientHash[prevHashDocument];
  FOR hashDocument _ hashSlots[index], hashDocument.next UNTIL hashDocument = NIL DO
    IF ClientEqualKeys[hashDocument, prevHashDocument] THEN GOTO found;
    REPEAT found => {
      IF hashDocument.next # NIL THEN RETURN[hashDocument.next];
      index _ index + 1; -- chain exhausted: move to the next slot
      };
    ENDLOOP;
  };
UNTIL index >= numHashSlots DO
  IF hashSlots[index] # NIL THEN RETURN[hashSlots[index]];
  index _ index + 1;
  ENDLOOP;
RETURN[NIL];
};

EnumerateWithProc: INTERNAL PROCEDURE[proc: PROCEDURE[hashDocument: HashDocument] RETURNS[stop: BOOLEAN]] = {
<>
-- Apply proc to every cached document, stopping early when proc returns TRUE.
FOR index: NAT IN [0..numHashSlots) DO
  FOR hashDocument: HashDocument _ hashSlots[index], hashDocument.next UNTIL hashDocument = NIL DO
    IF proc[hashDocument] THEN RETURN;
    ENDLOOP;
  ENDLOOP;
};

<>
FinalizationProcess: PROCEDURE = {
-- Background process: dequeue documents from the finalization queue and append each to
-- the tail of the LRU list (unless it has been re-enabled for finalization meanwhile).
FinalizeEntry: ENTRY PROCEDURE [docm: Document] = {
  ENABLE UNWIND => {};
  IF SafeStorage.IsFinalizationEnabled[docm] THEN RETURN; -- back in use; not LRU material
  IF lruListTop = NIL THEN { lruListTop _ docm; }
  ELSE { lruListTail.lruListNext _ docm; docm.lruListPrev _ lruListTail; };
  lruListTail _ docm;
  docm _ NIL;
  };
DO
  doc: Document;
  doc _ NARROW[SafeStorage.FQNext[finalizationQ]];
  FinalizeEntry[doc];
  doc _ NIL; -- drop our ref so the document can be collected once trimmed
  ENDLOOP;
};

<>
CacheTrimProcess: PROC = {
-- Background process: every 197 ms trim the cache back down to
-- desiredNumberOfHashDocuments by discarding LRU entries.
ticksToWait: Process.Ticks;
ticksToWait _ Process.MsecToTicks[197];
DO
  overage: INT _ 0;
  Process.Pause[ticksToWait];
  overage _ numberOfHashDocuments - desiredNumberOfHashDocuments;
  IF overage > 0 THEN TrimLruList[overage];
  ENDLOOP;
};

TrimLruList: PROC [entriesToTrim: INT] = {
-- Pop up to entriesToTrim documents off the top (least recently used end) of the LRU
-- list and delete each from the cache.
FOR entNo: INT IN [1..entriesToTrim] DO
  grabTopOffOfLRU: ENTRY PROC ~ {
    IF lruListTop # NIL THEN {
      document _ lruListTop;
      lruListTop _ lruListTop.lruListNext;
      IF lruListTop = NIL THEN lruListTail _ NIL ELSE lruListTop.lruListPrev _ NIL;
      document.lruListNext _ NIL;
      document.lruListPrev _ NIL;
      InnerDelete[document ! HashPkgCallerProgrammingError => CONTINUE; ]; -- CONTINUE so that finalization will work if this module is re-run during debugging
      };
    };
  document: Document _ NIL;
  grabTopOffOfLRU[];
  ENDLOOP;
};

<>
InitDIDMap: PUBLIC PROC [firstTime: BOOL] ~ {
-- Open the DID-map file, recover NextDID from the recoverable MaxDID object, prefetch a
-- batch of DIDs, and open the extendible hash table.  On first-time initialization also
-- create the root-DIDs document under a transaction, retrying until it commits.
FileForDIDMap _ YggFile.OpenDIDMapFile[];
[optr: MaxDIDOptr, address: AddressForMaxDID] _ YggFile.GetMaxDIDAddress[];
RecoverableMaxDID _ LOOPHOLE[AddressForMaxDID];
TRUSTED {NextDID _ RecoverableMaxDID^;};
LastDIDAllocateable _ NextDID;
GrabSomeDIDs[];
StartUpExtendibleHash[NIL, firstTime];
IF firstTime THEN {
  <>
  outcome: YggEnvironment.Outcome _ active;
  trans: YggEnvironment.TransID;
  WHILE outcome # commit DO
    doc: YggInternal.Document _ NIL;
    trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID];
    doc _ NEW[DocumentRep _ [did: YggDIDPrivate.DIDForRootDIDs, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: FALSE, next: NIL]];
    Insert[doc ! HashPkgDuplicateKey => ERROR];
    noteUseInTransaction[doc, trans];
    YggNav.SetContents[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, contents: [YggRep.did, YggDIDPrivate.DIDForRootDIDs]];
    [outcome: outcome] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit];
    ENDLOOP;
  };
};

Initialize: PUBLIC ENTRY PROCEDURE[numHashSlotsDesired: NAT, fQLength: CARDINAL] = {
-- Module initialization: set up finalization of DocumentReps, the cache hash table, the
-- background processes, the monitor conditions, the per-transaction document map, and
-- the Hash64Table (the 6-bit bit-reversal of each index 0..63).
ENABLE UNWIND => {};
IF fQLength = 0 THEN RETURN WITH ERROR ZeroIllegal;
WORDSInWords _ PBasics.bytesPerWord/BYTES[WORD];
finalizationQ _ SafeStorage.NewFQ[fQLength]; -- do this before
SafeStorage.EstablishFinalization[CODE[DocumentRep], 1, finalizationQ ! SafeStorage.CantEstablishFinalization => CONTINUE]; -- allocating any FileObjects.
TRUSTED BEGIN Process.Detach[FORK FinalizationProcess[]]; END;
<>
TRUSTED BEGIN Process.Detach[FORK CacheTrimProcess[]]; END;
secondLookupHashDocument _ NEW[DocumentRep _ [did: NIL, componentFiles: NIL, interlock: FALSE, next: NIL]];
InitializeHashTable[numHashSlotsDesired, secondLookupHashDocument];
TRUSTED {
  Process.InitializeCondition[@aShortTime, Process.MsecToTicks[50]];
  Process.InitializeCondition[@didsCondition, Process.MsecToTicks[177]];
  Process.InitializeCondition[@lock64Condition, Process.MsecToTicks[43]];
  };
transactionToModifiedDocumentMap _ RedBlackTree.Create[getKey: GetKeyProc, compare: CompareProc];
FOR h: CARD IN [0..64) DO -- Hash64Table[h] = h with its low 6 bits reversed
  index: CARD _ 0;
  hLeft: CARD _ h;
  addPowerOf2: CARD _ 32;
  WHILE hLeft # 0 DO
    IF hLeft MOD 2 = 1 THEN index _ index + addPowerOf2;
    addPowerOf2 _ addPowerOf2 /2;
    hLeft _ hLeft / 2;
    ENDLOOP;
  Hash64Table[h] _ index;
  ENDLOOP;
};

Initialize[129, 250]; -- module start code: 129 hash slots desired, finalization queue length 250

END.