DIRECTORY Atom USING [GetPName, MakeAtom], Basics USING [BITAND, charsPerWord, Comparison], BasicTime USING [Now, nullGMT], ExtendibleHash USING [DeleteEntry, Entry, ExtendibleHashTable, New, Open, PageNumber, ReadEntry, ReferencePage, ReleasePage, UpdateEntry], Camelot USING [btidT, DSLogNewValue, DSPinObject, -- ErSuccess, -- optrT, -- TABegin, TAEnd, -- tidT], Mach USING [kernReturnT, -- KernSuccess, -- pointerT], Process USING [Detach, InitializeCondition, MsecToTicks, Pause, Ticks], PBasics USING [BITAND, ByteBlt, bytesPerWord, charsPerWord, Move], RedBlackTree USING [Compare, Create, Delete, GetKey, Insert, Lookup, Table, UserData], RefText USING [New, TrustTextAsRope], Rope USING [Equal, Fetch, Length, NewText, ROPE, Substr, Text], SafeStorage USING [CantEstablishFinalization, EnableFinalization, EstablishFinalization, FinalizationQueue, FQNext, IsFinalizationEnabled, NewFQ], YggBuffMan USING [ReadPages, ReleaseState, ReleaseVMPageSet, VMPageSet, WritePages], YggDID USING [CompareDIDs, DID, EqualDIDs, HashDID, ValidateDID], YggDIDMap, YggDIDMapPrivate USING [DocItem, DocPart, DocPartPtr, Document, DocumentRep, Entry, EntryPtr, notCompressed, PartType, RunItem, SegmentItem, SegmentRep, TextRep, TextRP, Word32OverheadPerEntry], YggDIDPrivate USING [DIDForRootDIDs, DIDRep], YggEnvironment USING [dsPort, nullDID, nullTransID, Outcome, -- taPort, -- TransID], YggFile USING [BytesForPages, FileHandle, GetMaxDIDAddress, Info, Open, OpenDIDMapFile, PageCount, SetSize], YggFixedNames USING [IndexPrefix, IndexPrefixSize], YggInternal USING [Document, FileHandle], YggNav USING [GetProperty, SetContents, SetProperty], YggRep USING [Attribute, did, TypedPrimitiveElement], YggTransaction USING [CreateTrans, Finish]; YggDIDMapImpl: CEDAR MONITOR IMPORTS Atom, Basics, BasicTime, Camelot, ExtendibleHash, PBasics, Process, RedBlackTree, RefText, Rope, SafeStorage, YggBuffMan, YggDID, YggDIDPrivate, YggEnvironment, YggFile, YggFixedNames, YggNav, YggTransaction EXPORTS YggInternal, YggDIDMap, YggDID = BEGIN Document: TYPE = REF DocumentRep; DocumentRep: PUBLIC TYPE = YggDIDMapPrivate.DocumentRep; DID: PUBLIC TYPE ~ REF DIDRep; DIDRep: PUBLIC TYPE ~ YggDIDPrivate.DIDRep; HashPageSizeInWords: INT = 1024; MaxWordsPerEntry: INT = HashPageSizeInWords/3 - 5; FilePagesPerBtreePage: INT = 1; Hash64Table: ARRAY [0..64) OF CARD; Lock64Table: ARRAY [0..64) OF Lock64Item; Lock64Item: TYPE = RECORD[ interlock: BOOL _ FALSE, tid: Camelot.tidT ]; TheDIDMap: ExtendibleHash.ExtendibleHashTable; FileForDIDMap: YggFile.FileHandle; CurrentVMPageSetsForDIDMap: LIST OF YggBuffMan.VMPageSet _ NIL; NextDID: DIDRep; LastDIDAllocateable: DIDRep; MaxDIDOptr: Camelot.optrT; AddressForMaxDID: Mach.pointerT; RecoverableMaxDID: LONG POINTER TO DIDRep; LastDidLow: CARD _ 1000000000; NumberOfDIDsToGrap: CARD _ 200; ThresholdToGrabDIDs: CARD _ 20; KeyObject: TYPE = RECORD [did: YggDID.DID, extensionCount: INT32]; EntryDataBuffer: TYPE = RECORD[SEQUENCE COMPUTED CARD16 OF EntryWord]; EntryWord: TYPE = MACHINE DEPENDENT RECORD[ SELECT OVERLAID * FROM card => [card: CARD32], int => [int: INT32], pair => [hi, lo: CARD16], bytes => [hh, hl, lh, ll: BYTE], bits => [bits: PACKED ARRAY [0..32) OF BOOL], ENDCASE ]; updateAction: TYPE = {create, delete, newValue}; updates: TYPE = RECORD[ did: YggDID.DID, tid: Camelot.tidT, action: updateAction, doc: Document ]; TIDObj: TYPE = REF TIDObjRep; TIDObjRep: TYPE = RECORD [ btid: Camelot.btidT]; scratchTIDObj: TIDObj _ NEW[TIDObjRep]; DocTrans: TYPE = REF DocTransRep; DocTransRep: TYPE = RECORD [ 
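-- Per-transaction state kept in transactionToModifiedDocumentMap: the documents modified under one top-level transaction (btid) and the 64-way DID-hash buckets latched for it during PreCommit, released again by Commit or Abort via DropLatches.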
btidForDoc: TIDObj, docs: LIST OF Document _ NIL, bucketsToUnlatch: LIST OF CARD _ NIL ]; transactionToModifiedDocumentMap: RedBlackTree.Table; finalizationQ: SafeStorage.FinalizationQueue; ZeroIllegal: --CALLING-- ERROR = CODE; secondLookupHashDocument: HashDocument _ NIL; -- weirdness to protect object from finalization.
WORDSInWords: CARD _ 0; MarkInterestInDID: PUBLIC PROC [did: YggDID.DID] ~ { [] _ LookupInCache[did, FALSE]; }; OpenDocumentFromDID: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, destroyedOK: BOOL] RETURNS [doc: YggInternal.Document _ NIL] = { tryCount: CARD _ 0; DO tryAgain: BOOL _ FALSE; IF (doc _ LookupInCache[did, destroyedOK]) = NIL THEN { found: BOOL _ FALSE; gottaBeInCache: BOOL _ FALSE; doc _ NEW[DocumentRep _ [did: did, componentFiles: NIL, interlock: TRUE, next: NIL]]; [found, gottaBeInCache] _ LookupInMap[did, tid, doc]; IF gottaBeInCache THEN { doc _ LookupInCache[did, destroyedOK]; IF doc = NIL THEN ERROR; } ELSE { IF ~found THEN RETURN [NIL]; Insert[doc ! HashPkgDuplicateKey => { tryAgain _ TRUE; CONTINUE; }; ]; IF ~tryAgain THEN openFilesFromParsedDocList[doc, did, tid]; }; } ELSE { -- found in cache
waitForInterlockDone: ENTRY PROC ~ { ENABLE UNWIND => {}; WHILE doc.interlock DO WAIT aShortTime ENDLOOP; }; IF doc.interlock THEN waitForInterlockDone[]; }; IF ~tryAgain THEN { releaseDoc[doc]; EXIT; }; tryCount _ tryCount + 1; IF tryCount > 25 THEN ERROR; ENDLOOP; }; GetDID: PUBLIC PROC [doc: Document] RETURNS [did: YggDID.DID] ~ { RETURN[doc.did]; }; GetLinkInfo: PUBLIC PROC [doc: Document] RETURNS [fromDID, toDID: YggDID.DID, linkType: Rope.ROPE] ~ { RETURN[doc.fromDID, doc.toDID, doc.linkType]; }; SwapLinkInfo: PUBLIC PROC [doc: Document, newFromDID, newToDID: YggDID.DID, newLinkType: Rope.ROPE] RETURNS [oldFromDID, oldToDID: YggDID.DID, oldLinkType: Rope.ROPE] ~ { oldFromDID _ doc.fromDID; doc.fromDID _ newFromDID; oldToDID _ doc.toDID; doc.toDID _ newToDID; oldLinkType _ doc.linkType; doc.linkType _ newLinkType; }; GetComponentFiles: PUBLIC PROC [doc: Document] RETURNS [componentFiles: LIST OF YggInternal.FileHandle] ~ { RETURN[doc.componentFiles]; }; AddComponentFile: PUBLIC ENTRY PROC [doc: Document, componentFile: YggInternal.FileHandle, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; lastCF: LIST OF YggInternal.FileHandle _ NIL; FOR files: LIST OF YggInternal.FileHandle _ doc.componentFiles, files.rest UNTIL files = NIL DO lastCF _ files; ENDLOOP; IF lastCF = NIL THEN doc.componentFiles _ CONS[componentFile, NIL] ELSE lastCF.rest _ CONS[componentFile, NIL]; }; RemoveComponentFile: PUBLIC PROC [doc: Document, componentFile: YggInternal.FileHandle, tid: Camelot.tidT] ~ { lastCF: LIST OF YggInternal.FileHandle _ NIL; FOR files: LIST OF YggInternal.FileHandle _ doc.componentFiles, files.rest UNTIL files = NIL DO IF files.first = componentFile THEN { IF lastCF = NIL THEN doc.componentFiles _ files.rest ELSE lastCF.rest _ files.rest; EXIT; }; lastCF _ files; ENDLOOP; }; GetName: PUBLIC PROC [doc: Document] RETURNS [Rope.ROPE] ~ { RETURN[doc.name]; }; CreateNewDID: PUBLIC PROC [tid: Camelot.tidT] RETURNS [did: YggDID.DID ] ~ { doc: YggInternal.Document _ NIL; did _ GetNextDID[]; doc _ NEW[DocumentRep _ [did: did, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: FALSE, next: NIL]]; Insert[doc ! 
HashPkgDuplicateKey => ERROR]; noteUseInTransaction[doc, tid]; }; CreateExplicitDID: PUBLIC PROC [tid: Camelot.tidT, did: YggDID.DID] RETURNS [ok: BOOL _ TRUE ] ~ { doc: YggInternal.Document _ NIL; IF YggDID.ValidateDID[did] THEN RETURN [FALSE]; doc _ NEW[DocumentRep _ [did: did, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: FALSE, next: NIL]]; Insert[doc ! HashPkgDuplicateKey => {ok _ FALSE; CONTINUE}]; IF ok THEN noteUseInTransaction[doc, tid]; }; DestroyDID: PUBLIC PROC [tid: Camelot.tidT, did: YggDID.DID] ~ { doc: YggInternal.Document _ NIL; doc _ OpenDocumentFromDID[did: did, tid: tid, destroyedOK: FALSE]; doc.destroyed _ TRUE; }; GetRootDIDs: PUBLIC PROC RETURNS [dids: LIST OF YggDID.DID _ NIL] ~ { trans: YggEnvironment.TransID; propertyExists: BOOL _ FALSE; property: LIST OF YggRep.Attribute _ NIL; trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID]; [propertyExists: propertyExists, property: property] _ YggNav.GetProperty[trans: YggEnvironment.nullTransID, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs"]; IF propertyExists THEN { lastDIDs: LIST OF YggDID.DID _ NIL; IF property.rest # NIL THEN ERROR; IF property.first.value.rest # NIL THEN ERROR; FOR lotpe: LIST OF YggRep.TypedPrimitiveElement _ property.first.value.first.valueSet, lotpe.rest UNTIL lotpe = NIL DO didInList: DID; IF lotpe.first.docType # YggRep.did THEN ERROR; didInList _ NARROW[lotpe.first.bits]; IF dids = NIL THEN {dids _ LIST[didInList]; lastDIDs _ dids} ELSE {lastDIDs.rest _ LIST[didInList]; lastDIDs _ lastDIDs.rest;}; ENDLOOP; }; [] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit]; }; RootDIDLatch: BOOL _ FALSE; LatchRootDID: ENTRY PROC ~ { ENABLE UNWIND => {}; WHILE RootDIDLatch DO WAIT aShortTime ENDLOOP; RootDIDLatch _ TRUE; }; UnlatchRootDID: ENTRY PROC ~ { RootDIDLatch _ FALSE; }; AddRootDID: PUBLIC PROC [did: YggDID.DID] RETURNS [ok: BOOL _ FALSE] ~ { trans: YggEnvironment.TransID; propertyExists: BOOL _ FALSE; property: LIST OF YggRep.Attribute _ NIL; alreadyRoot: BOOL _ FALSE; newProperty: YggRep.Attribute; LatchRootDID[]; trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID]; [propertyExists: propertyExists, property: property] _ YggNav.GetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs"]; newProperty _ [attributeName: "$rootDIDs", ordered: TRUE, value: LIST[[fieldName: "", valueSet: LIST [[docType: YggRep.did, bits: did]]]]]; IF propertyExists THEN { lastDIDs: LIST OF YggRep.TypedPrimitiveElement _ NIL; IF property.rest # NIL THEN ERROR; IF property.first.value.rest # NIL THEN ERROR; FOR lotpe: LIST OF YggRep.TypedPrimitiveElement _ property.first.value.first.valueSet, lotpe.rest UNTIL lotpe = NIL DO didInList: DID; IF lotpe.first.docType # YggRep.did THEN ERROR; didInList _ NARROW[lotpe.first.bits]; IF YggDID.EqualDIDs[did, didInList] THEN {alreadyRoot _ TRUE; EXIT}; lastDIDs _ lotpe; ENDLOOP; IF ~alreadyRoot THEN lastDIDs.rest _ LIST[[docType: YggRep.did, bits: did]]; } ELSE property _ LIST[newProperty]; IF ~alreadyRoot THEN YggNav.SetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs", property: property.first, appendProperty: FALSE]; [] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit]; UnlatchRootDID[]; }; RemoveRootDID: PUBLIC PROC [did: YggDID.DID] RETURNS [ok: BOOL _ FALSE] ~ { trans: YggEnvironment.TransID; propertyExists: BOOL _ FALSE; property: LIST OF YggRep.Attribute _ NIL; isARoot: BOOL _ FALSE; newProperty: YggRep.Attribute; LatchRootDID[]; trans _ 
YggTransaction.CreateTrans[YggEnvironment.nullTransID]; [propertyExists: propertyExists, property: property] _ YggNav.GetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs"]; newProperty _ [attributeName: "$rootDIDs", ordered: TRUE, value: LIST[[fieldName: "", valueSet: LIST [[docType: YggRep.did, bits: did]]]]]; IF propertyExists THEN { prevDIDs: LIST OF YggRep.TypedPrimitiveElement _ NIL; IF property.rest # NIL THEN ERROR; IF property.first.value.rest # NIL THEN ERROR; FOR lotpe: LIST OF YggRep.TypedPrimitiveElement _ property.first.value.first.valueSet, lotpe.rest UNTIL lotpe = NIL DO didInList: DID; IF lotpe.first.docType # YggRep.did THEN ERROR; didInList _ NARROW[lotpe.first.bits]; IF YggDID.EqualDIDs[did, didInList] THEN { isARoot _ TRUE; IF prevDIDs = NIL THEN property.first.value.first.valueSet _ lotpe.rest ELSE prevDIDs.rest _ lotpe.rest; EXIT; }; prevDIDs _ lotpe; ENDLOOP; }; IF isARoot THEN YggNav.SetProperty[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, propertyName: "$rootDIDs", property: property.first, appendProperty: FALSE]; [] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit]; UnlatchRootDID[]; RETURN[isARoot]; }; aShortTime: CONDITION; grapDoc: ENTRY PROC [doc: YggInternal.Document] ~ { ENABLE UNWIND => {}; WHILE doc.interlock DO WAIT aShortTime ENDLOOP; doc.interlock _ TRUE; }; releaseDoc: ENTRY PROC [doc: YggInternal.Document] ~ {doc.interlock _ FALSE}; RemoveRuns: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, runList: YggDIDMap.RunList] ~ { doc: YggInternal.Document _ NIL; IF (doc _ LookupInCache[did, TRUE]) = NIL THEN ERROR ELSE { doc.modifiedDIDMap _ TRUE; grapDoc[doc]; noteUseInTransaction[doc, tid]; FOR rl: YggDIDMap.RunList _ runList, rl.rest UNTIL rl = NIL DO pdl: LIST OF YggDIDMapPrivate.DocItem; prevPdl: LIST OF YggDIDMapPrivate.DocItem _ NIL; runToDelete: YggDIDMap.Run _ rl.first; FOR pdl _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO segments: LIST OF YggDIDMapPrivate.SegmentItem; prevSegments: LIST OF YggDIDMapPrivate.SegmentItem; FOR segments _ pdl.first.segments, segments.rest UNTIL segments = NIL DO runs: LIST OF YggDIDMapPrivate.RunItem; prevRuns: LIST OF YggDIDMapPrivate.RunItem; FOR runs _ segments.first.runs, runs.rest UNTIL runs = NIL DO firstIntersection: INT _ -1; lastIntersection: INT _ -1; runLast: CARD _ 0; runToDeleteLast: CARD _ 0; IF runs.first.segmentId # runToDelete.segmentId THEN { -- wrong segment
prevRuns _ runs; LOOP; }; firstIntersection _ MAX[runs.first.segmentPage, runToDelete.segmentPage]; lastIntersection _ MIN[ (runLast _ runs.first.segmentPage + runs.first.pages - 1), (runToDeleteLast _ runToDelete.segmentPage + runToDelete.pages - 1) ]; IF lastIntersection < firstIntersection THEN LOOP; -- no intersection
IF firstIntersection = INT[runs.first.segmentPage] AND lastIntersection = INT[runLast] THEN { -- run is eliminated
IF prevRuns = NIL THEN segments.first.runs _ runs.rest ELSE prevRuns.rest _ runs.rest; LOOP; } ELSE { runs.first.segmentPage _ firstIntersection; runs.first.pages _ lastIntersection - firstIntersection + 1; }; prevRuns _ runs; ENDLOOP; IF segments.first.runs = NIL THEN { -- no more runs, so dump the segment too
IF prevSegments = NIL THEN pdl.first.segments _ segments.rest ELSE prevSegments.rest _ segments.rest; LOOP; }; prevSegments _ segments; ENDLOOP; IF pdl.first.segments = NIL THEN { -- no more segments, so dump the DocItem too
IF prevPdl = NIL THEN doc.parsedDocList _ pdl.rest ELSE prevPdl.rest _ pdl.rest; LOOP; }; prevPdl _ pdl; ENDLOOP; ENDLOOP; 
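-- Every run in runList has now been processed against the parsed doc list; runs that were eliminated entirely, and segments and DocItems left empty, were spliced out above.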
releaseDoc[doc]; }; }; AddRuns: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, fileUse: ATOM, runList: YggDIDMap.RunList] ~ { doc: YggInternal.Document _ NIL; partType: YggDIDMapPrivate.PartType; partType _ mapFileUseToPartType[fileUse]; IF (doc _ LookupInCache[did]) = NIL THEN ERROR ELSE { lastPDL: LIST OF YggDIDMapPrivate.DocItem _ NIL; thisPDL: LIST OF YggDIDMapPrivate.DocItem _ NIL; doc.modifiedDIDMap _ TRUE; grapDoc[doc]; noteUseInTransaction[doc, tid]; FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO IF pdl.first.indexName = fileUse THEN { thisPDL _ pdl; EXIT; }; lastPDL _ pdl; REPEAT FINISHED => { thisPDL _ LIST[[partType, fileUse, 0, NIL]]; IF lastPDL = NIL THEN doc.parsedDocList _ thisPDL ELSE lastPDL.rest _ thisPDL; }; ENDLOOP; -- pdl
IF thisPDL = NIL THEN ERROR; FOR runs: YggDIDMap.RunList _ runList, runs.rest UNTIL runs = NIL DO lastSegment: LIST OF YggDIDMapPrivate.SegmentItem _ NIL; FOR segs: LIST OF YggDIDMapPrivate.SegmentItem _ thisPDL.first.segments, segs.rest UNTIL segs = NIL DO lastRun: LIST OF YggDIDMapPrivate.RunItem _ NIL; runPages: YggFile.PageCount _ 0; FOR lori: LIST OF YggDIDMapPrivate.RunItem _ segs.first.runs, lori.rest UNTIL lori = NIL DO runPages _ runPages + lori.first.pages; lastRun _ lori; ENDLOOP; IF INT[segs.first.firstPage] + runPages = runs.first.firstPage THEN { -- append at end of segment
lastRun.rest _ CONS[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, pages: runs.first.pages], NIL]; EXIT; -- appended at end of segment; go do the next run
}; lastSegment _ segs; REPEAT FINISHED => { -- make a new segment with a singleton run
newItem: LIST OF YggDIDMapPrivate.SegmentItem; newItem _ LIST[[firstPage: runs.first.firstPage, numberOfBytes: YggFile.BytesForPages[runs.first.pages], runs: LIST[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, pages: runs.first.pages]]]]; IF lastSegment = NIL THEN thisPDL.first.segments _ newItem ELSE lastSegment.rest _ newItem; }; ENDLOOP; ENDLOOP; releaseDoc[doc]; }; }; SetByteSize: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT, fileUse: ATOM, bytes: INT] ~ { doc: YggInternal.Document _ NIL; partType: YggDIDMapPrivate.PartType; partType _ mapFileUseToPartType[fileUse]; IF (doc _ LookupInCache[did]) = NIL THEN ERROR ELSE { doc.modifiedDIDMap _ TRUE; grapDoc[doc]; noteUseInTransaction[doc, tid]; FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO IF pdl.first.indexName = fileUse THEN { FOR segs: LIST OF YggDIDMapPrivate.SegmentItem _ pdl.first.segments, segs.rest UNTIL segs = NIL DO firstByte: CARD; firstByte _ YggFile.BytesForPages[segs.first.firstPage]; IF INT[firstByte] > bytes THEN segs.first.numberOfBytes _ 0 ELSE { runPages: YggFile.PageCount _ 0; runBytes: CARD _ 0; FOR lori: LIST OF YggDIDMapPrivate.RunItem _ segs.first.runs, lori.rest UNTIL lori = NIL DO runPages _ runPages + lori.first.pages; ENDLOOP; runBytes _ YggFile.BytesForPages[runPages]; segs.first.numberOfBytes _ IF INT[firstByte + runBytes] > bytes THEN bytes - firstByte ELSE runBytes; }; ENDLOOP; EXIT; }; REPEAT FINISHED => { IF bytes > 0 THEN ERROR; }; ENDLOOP; -- pdl
releaseDoc[doc]; }; }; Hash64DID: PROC [did: DID] RETURNS [bucket: CARD] ~ { RETURN [Hash64Table[Basics.BITAND[03Fh, Hash64[did.didLow]]]]; }; Hash64: PROC [c: CARD] RETURNS [bucket: CARD] ~ { RETURN [Hash64Table[PBasics.BITAND[03Fh, c]]]; }; mapFileUseToPartType: PROC [fileUse: ATOM] RETURNS [partType: YggDIDMapPrivate.PartType] ~ { SELECT fileUse FROM $root => 
partType _ root; $contents => partType _ contents; $outlinks => partType _ outlinks; $attributes => partType _ attributes; $directoryContents => partType _ directoryContents; ENDCASE => { pName: Rope.Text; pName _ Atom.GetPName[fileUse]; IF Rope.Equal[YggFixedNames.IndexPrefix, Rope.Substr[pName, 0, YggFixedNames.IndexPrefixSize]] THEN { partType _ index; } ELSE ERROR; }; }; PreCommit: PUBLIC PROC [tid: Camelot.tidT] ~ { data: RedBlackTree.UserData; scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top]; refTid: REF Camelot.tidT _ NEW [Camelot.tidT _ tid]; data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scrTIDObj]; RecycleTIDObj[scrTIDObj]; IF data # NIL THEN { dt: DocTrans; max64Done: INT _ -1; dt _ NARROW[data]; DO minBucket: CARD _ 65; numberInMinBucket: CARD _ 0; FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents to find the next 64 value to do
hashedDidTo64: CARD; hashedDidTo64 _ Hash64DID[docList.first.did]; IF hashedDidTo64 = minBucket THEN numberInMinBucket _ numberInMinBucket + 1; IF INT[hashedDidTo64] > max64Done AND hashedDidTo64 < minBucket THEN { minBucket _ hashedDidTo64; numberInMinBucket _ 1; }; ENDLOOP; IF minBucket = 65 THEN EXIT; IF INT[minBucket] = max64Done THEN EXIT; LockBucket64[minBucket, tid]; dt.bucketsToUnlatch _ CONS[minBucket, dt.bucketsToUnlatch]; FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and process those with the correct hash value
hashedDidTo64: CARD; hashedDidTo64 _ Hash64DID[docList.first.did]; IF hashedDidTo64 = minBucket THEN { writeDocumentToMap[docList.first, refTid]; }; ENDLOOP; max64Done _ minBucket; ENDLOOP; }; }; DropLatches: PROC [buckets: LIST OF CARD, tid: Camelot.tidT] ~ { FOR nowBuck: LIST OF CARD _ buckets, nowBuck.rest UNTIL nowBuck = NIL DO UnlockBucket64[nowBuck.first, tid]; ENDLOOP; }; Commit: PUBLIC PROC [tid: Camelot.tidT] ~ { data: RedBlackTree.UserData; scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top]; data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scrTIDObj]; IF data # NIL THEN { dt: DocTrans; dt _ NARROW[data]; FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and turn off the modified flag
docList.first.modifiedDIDMap _ FALSE; IF docList.first.destroyed THEN { Delete[docList.first]; }; ENDLOOP; DropLatches[dt.bucketsToUnlatch, tid]; }; [] _ RedBlackTree.Delete[ transactionToModifiedDocumentMap, scrTIDObj]; RecycleTIDObj[scrTIDObj]; }; Abort: PUBLIC PROC [tid: Camelot.tidT] ~ { data: RedBlackTree.UserData; scrTIDObj: TIDObj _ CheckoutTIDObj[tid.top]; data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scrTIDObj]; IF data # NIL THEN { dt: DocTrans; max64Done: INT _ -1; dt _ NARROW[data]; FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO -- look at all the documents and remove them from the cache
Delete[ docList.first]; ENDLOOP; DropLatches[dt.bucketsToUnlatch, tid]; }; [] _ RedBlackTree.Delete[ transactionToModifiedDocumentMap, scrTIDObj]; RecycleTIDObj[scrTIDObj]; }; openFilesFromParsedDocList: PROC [doc: YggInternal.Document, did: YggDID.DID, tid: Camelot.tidT] = { FOR pdl: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, pdl.rest UNTIL pdl = NIL DO atomCode: ATOM _ NIL; byteSize: INT _ 0; theRuns: YggDIDMap.RunList _ NIL; theFile: YggFile.FileHandle; SELECT pdl.first.partType FROM root => atomCode _ $root; contents => atomCode _ $contents; outlinks => atomCode _ $outlinks; index => { atomCode _ 
pdl.first.indexName; }; directoryContents => atomCode _ $directoryContents; attributes => atomCode _ $attributes; ENDCASE => atomCode _ $huh; FOR segments: LIST OF YggDIDMapPrivate.SegmentItem _ pdl.first.segments, segments.rest UNTIL segments = NIL DO nowPage: CARD _ segments.first.firstPage; byteSize _ MAX[byteSize, YggFile.BytesForPages[segments.first.firstPage] + segments.first.numberOfBytes]; FOR runs: LIST OF YggDIDMapPrivate.RunItem _ segments.first.runs, runs.rest UNTIL runs = NIL DO theRuns _ CONS[[segmentId: runs.first.segmentId, segmentPage: runs.first.segmentPage, firstPage: nowPage, pages: runs.first.pages, leader: FALSE], theRuns]; nowPage _ nowPage + runs.first.pages; ENDLOOP; ENDLOOP; theFile _ YggFile.Open[runList: theRuns, byteSize: byteSize, did: did, fileUse: atomCode, verifyLeaderNow: FALSE]; doc.componentFiles _ CONS[theFile, doc.componentFiles]; ENDLOOP; }; AppendText: UNSAFE PROC [dPP: YggDIDMapPrivate.DocPartPtr, text: Rope.Text, offset: CARD] RETURNS [textRP: YggDIDMapPrivate.TextRP, words: CARD] = UNCHECKED { textRep: LONG POINTER TO YggDIDMapPrivate.TextRep; byteOffset: CARD16 _ offset*UNITS[CARD32]; textRP _ LOOPHOLE[byteOffset]; textRep _ @dPP[textRP]; LOOPHOLE[textRep, LONG POINTER TO CARD16]^ _ Rope.Length[text]; -- sets textRep.length
[] _ PBasics.ByteBlt [ to: [ BASE[DESCRIPTOR[textRep]], 0, textRep.length ], from: [ BASE[DESCRIPTOR[text]], 0, textRep.length ] ]; words _ (WORDS[YggDIDMapPrivate.TextRep[textRep.length]] + WORDSInWords - 1) / WORDSInWords; }; GetText: UNSAFE PROC [textRep: LONG POINTER TO YggDIDMapPrivate.TextRep, text: REF TEXT] = UNCHECKED { nBytes: CARD; text.length _ textRep.length; nBytes _ PBasics.ByteBlt [ to: [ BASE[DESCRIPTOR[text]], 0, textRep.length ], from: [ BASE[DESCRIPTOR[textRep]], 0, textRep.length ] ]; }; didsCondition: CONDITION; GetNextDID: ENTRY PROC RETURNS [did: YggDID.DID] ~ { ENABLE UNWIND => {}; loopCount: INT _ 0; WHILE NextDID.didHigh = LastDIDAllocateable.didHigh AND NextDID.didLow = LastDIDAllocateable.didLow DO WAIT didsCondition; loopCount _ loopCount + 1; IF loopCount > 5 THEN TRUSTED {loopCount _ 0; Process.Detach[FORK GrabSomeDIDs[]];}; ENDLOOP; did _ NEW[YggDIDPrivate.DIDRep _ [NextDID.didHigh, NextDID.didLow]]; NextDID.didLow _ NextDID.didLow + 1; IF LastDIDAllocateable.didLow - NextDID.didLow <= ThresholdToGrabDIDs THEN TRUSTED {Process.Detach[FORK GrabSomeDIDs[]];}; }; GrabberCount: CARD _ 0; GrabSomeDIDs: PROC [] ~ { newLastDIDAllocateable: REF DIDRep _ NEW[DIDRep]; newHigh: BOOL _ FALSE; moreDIDsTID: Camelot.tidT; kernCode: Mach.kernReturnT; onlyOne: ENTRY PROC RETURNS [exit: BOOL _ FALSE] ~ { ENABLE UNWIND => {}; IF GrabberCount # 0 THEN RETURN[TRUE]; GrabberCount _ GrabberCount + 1; }; IF onlyOne[] THEN RETURN; newLastDIDAllocateable.didHigh _ LastDIDAllocateable.didHigh; newLastDIDAllocateable.didLow _ LastDIDAllocateable.didLow + NumberOfDIDsToGrap; IF newLastDIDAllocateable.didLow > LastDidLow THEN { newLastDIDAllocateable.didHigh _ newLastDIDAllocateable.didHigh + 1; newLastDIDAllocateable.didLow _ NumberOfDIDsToGrap; newHigh _ TRUE; }; DO outcome: YggEnvironment.Outcome; status: INT _ -1; addrNewLastDIDAllocateable: Mach.pointerT; moreDIDsTID _ YggTransaction.CreateTrans[YggEnvironment.nullTransID]; kernCode _ Camelot.DSPinObject[dsPort: YggEnvironment.dsPort, tid: moreDIDsTID, optr: MaxDIDOptr, size: BYTES[DIDRep], raiseSignal: TRUE]; TRUSTED {addrNewLastDIDAllocateable _ LOOPHOLE[newLastDIDAllocateable, CARD32]}; kernCode _ Camelot.DSLogNewValue[dsPort: YggEnvironment.dsPort, tid: 
moreDIDsTID, optr: MaxDIDOptr, newValue: addrNewLastDIDAllocateable, newValueCnt: BYTES[DIDRep], raiseSignal: TRUE]; [outcome: outcome] _ YggTransaction.Finish[transID: moreDIDsTID, requestedOutcome: commit]; IF outcome = commit THEN EXIT; ENDLOOP; IF newHigh THEN { m: ENTRY PROC ~ { ENABLE UNWIND => {}; LastDIDAllocateable _ newLastDIDAllocateable^; NextDID.didHigh _ newLastDIDAllocateable.didHigh; NextDID.didLow _ 0; GrabberCount _ 0; BROADCAST didsCondition; }; m[]; } ELSE { n: ENTRY PROC ~ { LastDIDAllocateable.didLow _ newLastDIDAllocateable.didLow; BROADCAST didsCondition; GrabberCount _ 0; }; n[]; }; }; LookupInMap: PROC [did: YggDID.DID, tid: Camelot.tidT, doc: YggInternal.Document] RETURNS [found: BOOL _ FALSE, gottaBeInCache: BOOL _ FALSE] = { notFound: BOOL _ FALSE; extendedData: REF EntryDataBuffer _ NIL; extendedDataCount: CARD _ 0; lowExtendedToAccept: CARD _ 0; highExtendedToAccept: CARD _ 1; expectMore: BOOL _ TRUE; expectedExtendedDataSize: CARD _ 0; maxDataCount: CARD _ 0; fromLink: DID _ YggEnvironment.nullDID; toLink: DID _ YggEnvironment.nullDID; linkType: Rope.ROPE _ NIL; lookupProc: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] ~ TRUSTED { nowEntry: ExtendibleHash.Entry _ entry; wordsLeft: INT _ wordsForEntriesOnPage; WHILE wordsLeft > 0 DO entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32]; IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR; IF YggDID.CompareDIDs[did, @entryPtr.did] = equal AND entryPtr.extensionCount >= lowExtendedToAccept AND entryPtr.extensionCount <= highExtendedToAccept THEN { found _ TRUE; SELECT entryPtr.extensionCount FROM 0 => { -- normal, non-extended entry
index: INT _ 0; expectMore _ FALSE; -- everything is in this entry
IF entryPtr.link THEN { wordIndex: INT _ 0; noChars: INT _ 0; nullSeen: BOOL _ FALSE; fromLink _ NEW[DIDRep]; PBasics.Move[dst: @fromLink, src: @entryPtr.data[0], nWords: WORDS[DIDRep]/WORDSInWords]; index _ WORDS[DIDRep]/WORDSInWords; toLink _ NEW[DIDRep]; PBasics.Move[dst: @toLink, src: @entryPtr.data[index], nWords: WORDS[DIDRep]/WORDSInWords]; index _ index + index; FOR wordIndex _ index, wordIndex + 1 UNTIL nullSeen DO fourChars: PACKED ARRAY [0..PBasics.charsPerWord) OF CHAR; LOOPHOLE[fourChars, CARD] _ entryPtr.data[wordIndex]; FOR charNo: INT IN [0..Basics.charsPerWord) DO IF fourChars[charNo] = 0C THEN {nullSeen _ TRUE; EXIT;}; noChars _ noChars + 1; ENDLOOP; ENDLOOP; IF noChars = 0 THEN { index _ index + 1; } ELSE { linkTypeRT: REF TEXT _ NIL; linkTypeRT _ RefText.New[noChars]; noChars _ 0; nullSeen _ FALSE; UNTIL nullSeen DO fourChars: PACKED ARRAY [0..PBasics.charsPerWord) OF CHAR; LOOPHOLE[fourChars, CARD] _ entryPtr.data[index]; index _ index + 1; FOR charNo: INT IN [0..Basics.charsPerWord) DO IF fourChars[charNo] = 0C THEN {nullSeen _ TRUE; EXIT;}; linkTypeRT[noChars] _ fourChars[charNo]; noChars _ noChars + 1; ENDLOOP; ENDLOOP; linkTypeRT.length _ noChars; linkType _ RefText.TrustTextAsRope[linkTypeRT]; }; }; [] _ parseDocPart[@entryPtr.data[index], entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry - index, doc]; }; 1 => { -- normal, extended entries to follow
expectMore _ TRUE; -- the total size and the first chunk of the data are in this entry
expectedExtendedDataSize _ entryPtr.data[0]; extendedData _ NEW[EntryDataBuffer[expectedExtendedDataSize]]; extendedDataCount _ entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry - 1; PBasics.Move[dst: @extendedData[0], src: 
@entryPtr.data[1], nWords: extendedDataCount]; }; >1 => { -- extended entry
IF extendedDataCount + entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry > maxDataCount THEN ERROR; PBasics.Move[dst: @extendedData[extendedDataCount], src: @entryPtr.data[0], nWords: entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry]; extendedDataCount _ extendedDataCount + entryPtr.size - YggDIDMapPrivate.Word32OverheadPerEntry; }; ENDCASE => ERROR; }; wordsLeft _ wordsLeft - nowEntry.entryWordSize - 1 ; nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]); REPEAT FINISHED => notFound _ TRUE; ENDLOOP; }; WHILE expectMore DO TRUSTED {ExtendibleHash.ReadEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], proc: lookupProc]}; IF notFound THEN EXIT; ENDLOOP; IF maxDataCount # 0 THEN TRUSTED { IF maxDataCount # extendedDataCount THEN ERROR; [] _ parseDocPart[@extendedData[0], maxDataCount, doc]; }; }; InsertIntoMap: PROC [did: DID, doc: YggInternal.Document, buffer: REF EntryDataBuffer, wordsInBuffer: INT, link: BOOL, refTid: REF Camelot.tidT] = { extensionCount: INT _ 0; foundEntry: BOOL _ TRUE; lookupProc: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] RETURNS[entryToUpdate: ExtendibleHash.Entry _ NIL] ~ TRUSTED { nowEntry: ExtendibleHash.Entry _ entry; wordsLeft: INT _ wordsForEntriesOnPage; WHILE wordsLeft > 0 DO entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32]; IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR; IF YggDID.CompareDIDs[did, @entryPtr.did] = equal AND entryPtr.extensionCount = extensionCount THEN { entryToUpdate _ nowEntry; RETURN; }; wordsLeft _ wordsLeft - nowEntry.entryWordSize - 1 ; nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]); ENDLOOP; }; lookupForDelete: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] RETURNS[entryToDelete: ExtendibleHash.Entry _ NIL] ~ TRUSTED { nowEntry: ExtendibleHash.Entry _ entry; wordsLeft: INT _ wordsForEntriesOnPage; foundEntry _ FALSE; WHILE wordsLeft > 0 DO entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32]; IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR; IF YggDID.CompareDIDs[did, @entryPtr.did] = equal AND entryPtr.extensionCount = extensionCount THEN { entryToDelete _ nowEntry; foundEntry _ TRUE; RETURN; }; wordsLeft _ wordsLeft - nowEntry.entryWordSize - 1 ; nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) * UNITS[CARD32]); ENDLOOP; }; totalEntrySize: INT; totalEntrySize _ YggDIDMapPrivate.Word32OverheadPerEntry + wordsInBuffer; IF totalEntrySize <= MaxWordsPerEntry THEN { modifyProc: UNSAFE PROC [entry: ExtendibleHash.Entry] ~ TRUSTED { entPtr: LONG POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry, LONG POINTER] + UNITS[CARD32]; entPtr.size _ totalEntrySize; entPtr.extensionCount _ 0; entPtr.did _ did^; entPtr.link _ link; PBasics.Move[dst: @entPtr.data[0], src: LOOPHOLE[buffer], nWords: wordsInBuffer]; }; extensionCount _ 0; TRUSTED {ExtendibleHash.UpdateEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], words: totalEntrySize, findProc: lookupProc, modifyProc: modifyProc, updateType: insertOrReplace, clientPerCallData: refTid];}; extensionCount _ 1; } ELSE { modifyProcFirst: UNSAFE PROC [entry: ExtendibleHash.Entry] ~ TRUSTED { writeWords: 
INT _ MaxWordsPerEntry - YggDIDMapPrivate.Word32OverheadPerEntry - 1; entPtr: LONG POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry, LONG POINTER] + UNITS[CARD32]; entPtr.size _ MaxWordsPerEntry; entPtr.extensionCount _ 1; entPtr.did _ did^; entPtr.data[0] _ wordsInBuffer; leftToWrite _ wordsInBuffer - writeWords; PBasics.Move[dst: @entPtr.data[1], src: LOOPHOLE[buffer], nWords: writeWords]; writeOffset _ writeWords; }; leftToWrite: INT _ 0; writeOffset: INT _ 0; extensionCount _ 1; TRUSTED {ExtendibleHash.UpdateEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], words: MaxWordsPerEntry, findProc: lookupProc, modifyProc: modifyProcFirst, updateType: insertOrReplace, clientPerCallData: refTid]; }; extensionCount _ 0; TRUSTED {ExtendibleHash.DeleteEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], proc: lookupForDelete, clientPerCallData: refTid];}; DO modifyProcExtended: UNSAFE PROCEDURE [entry: ExtendibleHash.Entry] = UNCHECKED{ entPtr: LONG POINTER TO YggDIDMapPrivate.Entry = LOOPHOLE[entry, LONG POINTER] + UNITS[CARD32]; entPtr.size _ writeThisTime+YggDIDMapPrivate.Word32OverheadPerEntry; entPtr.extensionCount _ extensionCount; entPtr.did _ did^; entPtr.data[0] _ writeThisTime; PBasics.Move[dst: @entPtr.data[1], src: LOOPHOLE[buffer, LONG POINTER]+writeOffset, nWords: writeThisTime]; }; writeThisTime: INT _ MIN[MaxWordsPerEntry - YggDIDMapPrivate.Word32OverheadPerEntry, leftToWrite]; leftToWrite _ leftToWrite - writeThisTime; extensionCount _ extensionCount + 1; TRUSTED {ExtendibleHash.UpdateEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], words: writeThisTime + YggDIDMapPrivate.Word32OverheadPerEntry, findProc: lookupProc, modifyProc: modifyProcExtended, updateType: insertOrReplace, clientPerCallData: refTid]; }; IF leftToWrite < 0 THEN ERROR; IF leftToWrite = 0 THEN EXIT; ENDLOOP; }; foundEntry _ TRUE; WHILE foundEntry DO -- ensure that old extra extensions do not exist
TRUSTED {ExtendibleHash.DeleteEntry[hashTable: TheDIDMap, hashValue: [did.didHigh, did.didLow], proc: lookupForDelete, clientPerCallData: refTid];}; extensionCount _ extensionCount + 1; ENDLOOP; }; lock64Condition: CONDITION; LockBucket64: ENTRY PROC [bucket: INT, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; WHILE Lock64Table[bucket].interlock DO WAIT lock64Condition ENDLOOP; Lock64Table[bucket].interlock _ TRUE; Lock64Table[bucket].tid _ tid; }; UnlockBucket64: ENTRY PROC [bucket: INT, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; IF ~Lock64Table[bucket].interlock OR Lock64Table[bucket].tid # tid THEN ERROR; Lock64Table[bucket].interlock _ FALSE; Lock64Table[bucket].tid _ YggEnvironment.nullTransID; BROADCAST lock64Condition; }; getUpdates: PROC [tid: Camelot.tidT] RETURNS [updateList: LIST OF updates] ~ { ERROR; }; parseDocPart: PROC [dataToParse: LONG POINTER, size: CARD, doc: YggInternal.Document] RETURNS [found: BOOL _ FALSE] = { parsedDocList: LIST OF YggDIDMapPrivate.DocItem _ NIL; parsedDocListTail: LIST OF YggDIDMapPrivate.DocItem _ NIL; index: CARD _ 0; WHILE index < size*WORDSInWords DO TRUSTED { segIndex: LONG POINTER TO YggDIDMapPrivate.SegmentRep; docPart: LONG POINTER TO YggDIDMapPrivate.DocPart; newDocList: LIST OF YggDIDMapPrivate.DocItem _ NIL; segmentsListTail: LIST OF YggDIDMapPrivate.SegmentItem _ NIL; startOffset: CARD _ 0; offsetToLastSegment: CARD _ 0; indexName: ATOM _ NIL; docPart _ LOOPHOLE[dataToParse + index]; IF index + docPart.docPartSize*WORDSInWords > size*WORDSInWords THEN ERROR; IF LOOPHOLE[docPart.indexName, CARD16] # 0 THEN { r: Rope.Text; 
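-- A nonzero indexName is a TextRP (a byte offset into this DocPart): recover the TextRep stored there and intern it as an ATOM.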
docPartPtr: YggDIDMapPrivate.DocPartPtr = LOOPHOLE[docPart]; indexNamePtr: LONG POINTER TO YggDIDMapPrivate.TextRep; indexNamePtr _ @docPartPtr[docPartPtr.indexName]; IF indexNamePtr.length = 0 THEN ERROR; offsetToLastSegment _ LOOPHOLE[indexNamePtr, CARD] - LOOPHOLE[docPart, CARD]; r _ Rope.NewText[indexNamePtr.length]; GetText[indexNamePtr, LOOPHOLE[r]]; indexName _ Atom.MakeAtom[r]; } ELSE { offsetToLastSegment _ docPart.docPartSize*WORDSInWords; SELECT docPart.partType FROM root => indexName _ $root; contents => indexName _ $contents; outlinks => indexName _ $outlinks; attributes => indexName _ $attributes; ENDCASE => ERROR; }; newDocList _ LIST[[partType: docPart.partType, indexName: indexName, slotNo: docPart.slotNo, segments: NIL]]; segIndex _ LOOPHOLE[@docPart.segments]; WHILE (startOffset _ LOOPHOLE[segIndex, CARD] - LOOPHOLE[docPart, CARD]) < offsetToLastSegment DO segSize: CARD _ 0; runs: LIST OF YggDIDMapPrivate.RunItem _ NIL; runTail: LIST OF YggDIDMapPrivate.RunItem _ NIL; newSegListItem: LIST OF YggDIDMapPrivate.SegmentItem; newSegListItem _ LIST[[firstPage: segIndex.firstPage, numberOfBytes: segIndex.numberOfBytes, runs: NIL]]; IF (segSize _ (WORDS[YggDIDMapPrivate.SegmentRep[segIndex.length]]/WORDSInWords)) > (docPart.docPartSize - (startOffset/WORDSInWords)) THEN ERROR; -- too many runs
FOR inx: CARDINAL IN [0..segIndex.length) DO newRunList: LIST OF YggDIDMapPrivate.RunItem _ NIL; newRunList _ LIST[[segmentId: segIndex.runs[inx].segmentId, segmentPage: segIndex.runs[inx].segmentPage, pages: segIndex.runs[inx].pages]]; IF runTail = NIL THEN runs _ runTail _ newRunList ELSE { runTail.rest _ newRunList; runTail _ newRunList; }; ENDLOOP; newSegListItem.first.runs _ runs; IF newDocList.first.segments = NIL THEN newDocList.first.segments _ segmentsListTail _ newSegListItem ELSE { segmentsListTail.rest _ newSegListItem; segmentsListTail _ newSegListItem; }; segIndex _ segIndex + segSize*WORDSInWords; ENDLOOP; IF parsedDocListTail = NIL THEN parsedDocList _ parsedDocListTail _ newDocList ELSE { parsedDocListTail.rest _ newDocList; parsedDocListTail _ newDocList; }; index _ index + docPart.docPartSize*WORDSInWords; }; ENDLOOP; doc.parsedDocList _ parsedDocList; }; stockBuffer: REF EntryDataBuffer _ NIL; CheckoutStockBuffer: ENTRY PROC RETURNS [buf: REF EntryDataBuffer] = { ENABLE UNWIND => {}; IF stockBuffer = NIL THEN buf _ NEW[EntryDataBuffer[256]] ELSE { buf _ stockBuffer; stockBuffer _ NIL }; }; RecycleStockBuffer: ENTRY PROC [buf: REF EntryDataBuffer] = { stockBuffer _ buf; }; writeDocumentToMap: PROC [ doc: Document, refTid: REF Camelot.tidT] ~ { buffer: REF EntryDataBuffer _ NIL; stockBuffer: REF EntryDataBuffer _ NIL; bufferSize: CARD _ 256; link: BOOL; wordsInBuffer: INT; IF doc.destroyed THEN { lookupForDelete: UNSAFE PROC [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT, overFlowPageProc: UNSAFE PROC RETURNS [entry: ExtendibleHash.Entry, wordsForEntriesOnPage: INT]] RETURNS[entryToDelete: ExtendibleHash.Entry _ NIL] ~ TRUSTED { nowEntry: ExtendibleHash.Entry _ entry; wordsLeft: INT _ wordsForEntriesOnPage; foundEntry _ FALSE; WHILE wordsLeft > 0 DO entryPtr: YggDIDMapPrivate.EntryPtr = LOOPHOLE[nowEntry, LONG POINTER] + UNITS[CARD32]; IF nowEntry.entryWordSize > CARD[HashPageSizeInWords] THEN ERROR; IF YggDID.CompareDIDs[doc.did, @entryPtr.did] = equal AND entryPtr.extensionCount = extensionCount THEN { entryToDelete _ nowEntry; foundEntry _ TRUE; RETURN; }; wordsLeft _ wordsLeft - nowEntry.entryWordSize - 1 ; nowEntry _ nowEntry + ((nowEntry.entryWordSize + 1) 
* UNITS[CARD32]); ENDLOOP; }; extensionCount: INT _ 0; foundEntry: BOOL _ TRUE; WHILE foundEntry DO -- ensure that old extra extensions do not exist
foundEntry _ FALSE; TRUSTED {ExtendibleHash.DeleteEntry[hashTable: TheDIDMap, hashValue: [doc.did.didHigh, doc.did.didLow], proc: lookupForDelete, clientPerCallData: refTid];}; extensionCount _ extensionCount + 1; ENDLOOP; } ELSE { stockBuffer _ buffer _ CheckoutStockBuffer[]; DO didNotFit: BOOL _ TRUE; [wordsInBuffer: wordsInBuffer, didNotFit: didNotFit, link: link] _ writeDocToBuffer[doc, buffer, bufferSize]; IF didNotFit THEN { bufferSize _ bufferSize + bufferSize; buffer _ NEW[EntryDataBuffer[bufferSize]]; LOOP; }; EXIT; ENDLOOP; InsertIntoMap[did: doc.did, doc: doc, buffer: buffer, wordsInBuffer: wordsInBuffer, link: link, refTid: refTid]; RecycleStockBuffer[stockBuffer]; }; }; writeDocToBuffer: PROC [ doc: Document, buffer: REF EntryDataBuffer, bufferSize: INT] RETURNS [wordsInBuffer: CARD _ 0, didNotFit: BOOL _ TRUE, link: BOOL _ FALSE] ~ { IF doc.fromDID # YggEnvironment.nullDID THEN { linkTypeLen: INT _ 0; linkTypeWords: CARD _ 0; nowCharNum: INT _ 0; link _ TRUE; TRUSTED {PBasics.Move[dst: LOOPHOLE[@buffer[0]], src: LOOPHOLE[@(doc.fromDID^)], nWords: WORDS[DIDRep]/WORDSInWords]; }; wordsInBuffer _ WORDS[DIDRep]/WORDSInWords; TRUSTED {PBasics.Move[dst: LOOPHOLE[@buffer[wordsInBuffer]], src: LOOPHOLE[@(doc.toDID^)], nWords: WORDS[DIDRep]/WORDSInWords]; }; wordsInBuffer _ wordsInBuffer + WORDS[DIDRep]/WORDSInWords; linkTypeLen _ Rope.Length[doc.linkType]; linkTypeWords _ (linkTypeLen + PBasics.charsPerWord)/PBasics.charsPerWord; IF linkTypeWords + wordsInBuffer > CARD[bufferSize] THEN RETURN[0, TRUE]; FOR i: INT IN [0..INT[linkTypeWords]) DO fourChars: PACKED ARRAY [0..PBasics.charsPerWord) OF CHAR; FOR charNo: INT IN [0..Basics.charsPerWord) DO IF nowCharNum >= linkTypeLen THEN fourChars[charNo] _ 0C ELSE fourChars[charNo] _ Rope.Fetch[doc.linkType, nowCharNum]; nowCharNum _ nowCharNum + 1; ENDLOOP; TRUSTED{buffer[wordsInBuffer].card _ LOOPHOLE[fourChars, CARD];}; wordsInBuffer _ wordsInBuffer + 1; ENDLOOP; }; FOR parsedDocList: LIST OF YggDIDMapPrivate.DocItem _ doc.parsedDocList, parsedDocList.rest UNTIL parsedDocList = NIL DO indexNameWords: CARD _ 0; segmentOffset: CARD _ 0; docPart: LONG POINTER TO YggDIDMapPrivate.DocPart; IF INT[wordsInBuffer] + WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]] > INT[bufferSize] THEN RETURN [0, TRUE]; TRUSTED { docPart _ LOOPHOLE[@buffer[wordsInBuffer]]; docPart.compression _ YggDIDMapPrivate.notCompressed; docPart.keepingModifications _ FALSE; docPart.partType _ parsedDocList.first.partType; docPart.extraFlags _ 0; docPart.slotNo _ parsedDocList.first.slotNo; docPart.indexName _ LOOPHOLE[0]; docPart.versionIdentifierString _ LOOPHOLE[0]; }; FOR segments: LIST OF YggDIDMapPrivate.SegmentItem _ parsedDocList.first.segments, segments.rest UNTIL segments = NIL DO runIndex: CARD _ 0; segIndex: LONG POINTER TO YggDIDMapPrivate.SegmentRep; IF wordsInBuffer + segmentOffset + CARD[WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[1]]] > CARD[bufferSize] THEN RETURN [0, TRUE]; TRUSTED { segIndex _ LOOPHOLE[@docPart.segments + segmentOffset]; segIndex.firstPage _ segments.first.firstPage; segIndex.numberOfBytes _ segments.first.numberOfBytes; segIndex.modificationDeltasOffset _ 0; segIndex.lastUsedTime _ BasicTime.Now[]; segIndex.backUpTime _ BasicTime.nullGMT; segIndex.backUpID _ 0; }; FOR rl: LIST OF YggDIDMapPrivate.RunItem _ segments.first.runs, rl.rest UNTIL rl 
= NIL DO IF wordsInBuffer + segmentOffset + CARD[WORDS[YggDIDMapPrivate.DocPart] + WORDS[YggDIDMapPrivate.SegmentRep[runIndex]]] > CARD[bufferSize] THEN RETURN [0, TRUE]; TRUSTED {segIndex.runs[runIndex] _ [segmentId: rl.first.segmentId, segmentPage: rl.first.segmentPage, pages: rl.first.pages]; }; runIndex _ runIndex + 1; ENDLOOP; TRUSTED {segIndex.length _ runIndex; }; segmentOffset _ segmentOffset + WORDS[YggDIDMapPrivate.SegmentRep[runIndex]]/WORDSInWords; ENDLOOP; TRUSTED { docPart.docPartSize _ WORDS[YggDIDMapPrivate.DocPart]/WORDSInWords - WORDS[CARD]/WORDSInWords + segmentOffset + indexNameWords; wordsInBuffer _ wordsInBuffer + docPart.docPartSize; }; IF parsedDocList.first.indexName # NIL THEN { SELECT parsedDocList.first.partType FROM root, contents, outlinks, attributes => {}; ENDCASE => { pName: Rope.Text; pName _ Atom.GetPName[parsedDocList.first.indexName]; IF wordsInBuffer + WORDS[YggDIDMapPrivate.TextRep[Rope.Length[pName]]] > CARD[bufferSize] THEN RETURN [0, TRUE]; TRUSTED { [docPart.indexName, indexNameWords] _ AppendText[dPP: docPart, text: pName, offset: docPart.docPartSize]; docPart.docPartSize _ docPart.docPartSize + indexNameWords; }; wordsInBuffer _ wordsInBuffer + indexNameWords; }; }; ENDLOOP; RETURN[wordsInBuffer: wordsInBuffer, didNotFit: FALSE]; }; StartUpExtendibleHash: PROC [btreeInfo: REF, initialize: BOOL _ FALSE] = { TheDIDMap _ ExtendibleHash.New [ storPrim: [referencePage: HashReferencePage, releasePage: HashReleasePage], initialState: suspended ]; IF initialize THEN { outcome: YggEnvironment.Outcome _ active; doingInit _ TRUE; initTrans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID]; ExtendibleHash.Open[ hashTable: TheDIDMap, storage: btreeInfo, pageSize: HashPageSizeInWords, initialize: initialize, initialDirectorySize: 64 ]; doingInit _ FALSE; [outcome: outcome] _ YggTransaction.Finish[transID: initTrans, requestedOutcome: commit]; IF outcome # commit THEN ERROR; } ELSE { ExtendibleHash.Open[ hashTable: TheDIDMap, storage: btreeInfo, pageSize: HashPageSizeInWords, initialize: initialize, initialDirectorySize: 64 ]; }; }; debugReferencePageNumber: CARD _ 0; debugReferencePageNumberCount: INT _ 0; doingInit: BOOL _ FALSE; initTrans: YggEnvironment.TransID; HashReferencePage: PUBLIC ExtendibleHash.ReferencePage = { addVMPageSet: ENTRY PROC = { CurrentVMPageSetsForDIDMap _ CONS[vMPageSet, CurrentVMPageSetsForDIDMap]; }; debugReferencePageNumberCount: INT _ 0; vMPageSet: YggBuffMan.VMPageSet; IF number = debugReferencePageNumber THEN debugReferencePageNumberCount _ debugReferencePageNumberCount + 1; IF type = new THEN { IF YggFile.Info[FileForDIDMap, YggEnvironment.nullTransID].size < (INT[number]+1)*FilePagesPerBtreePage THEN YggFile.SetSize[FileForDIDMap, (number+25)*FilePagesPerBtreePage, YggEnvironment.nullTransID]; vMPageSet _ YggBuffMan.ReadPages[fileHandle: FileForDIDMap, tid: YggEnvironment.nullTransID, pageRun: [firstPage: number+FilePagesPerBtreePage, count: FilePagesPerBtreePage]]; } ELSE { vMPageSet _ YggBuffMan.ReadPages[fileHandle: FileForDIDMap, tid: YggEnvironment.nullTransID, pageRun: [firstPage: number*FilePagesPerBtreePage+FilePagesPerBtreePage, count: FilePagesPerBtreePage]]; }; IF vMPageSet = NIL OR vMPageSet.rest # NIL THEN ERROR; addVMPageSet[]; RETURN[vMPageSet.first.buffer]; }; HashReleasePage: PUBLIC ExtendibleHash.ReleasePage = { findVMPageSet: ENTRY PROC [number: ExtendibleHash.PageNumber] RETURNS [vmPageSet: LIST OF YggBuffMan.VMPageSet] = { ENABLE UNWIND => {}; prevlovmps: LIST OF YggBuffMan.VMPageSet _ NIL; FOR 
lovmps: LIST OF YggBuffMan.VMPageSet _ CurrentVMPageSetsForDIDMap, lovmps.rest UNTIL lovmps = NIL DO IF lovmps.first.first.pageRun.segmentPage = number*CARD[FilePagesPerBtreePage]+CARD[FilePagesPerBtreePage] THEN { RETURN[lovmps]; }; prevlovmps _ lovmps; ENDLOOP; ERROR; }; removeVMPageSet: ENTRY PROC = { ENABLE UNWIND => {}; prevlovmps: LIST OF YggBuffMan.VMPageSet _ NIL; FOR lovmps: LIST OF YggBuffMan.VMPageSet _ CurrentVMPageSetsForDIDMap, lovmps.rest UNTIL lovmps = NIL DO IF lovmps.first.first.pageRun.firstPage = INT[number]*FilePagesPerBtreePage+FilePagesPerBtreePage THEN { IF prevlovmps = NIL THEN CurrentVMPageSetsForDIDMap _ lovmps.rest ELSE prevlovmps.rest _ lovmps.rest; EXIT; }; prevlovmps _ lovmps; ENDLOOP; }; debugReferencePageNumberCount: INT _ 0; releaseState: YggBuffMan.ReleaseState; vMPageSet: YggBuffMan.VMPageSet _ NIL; refTid: REF Camelot.tidT _ NARROW[clientPerCallData]; IF number = debugReferencePageNumber THEN debugReferencePageNumberCount _ debugReferencePageNumberCount + 1; IF refTid = NIL THEN ERROR; SELECT update FROM modified => { releaseState _ writeBatchedNoWait; }; unchanged => { releaseState _ clean; }; ENDCASE ; vMPageSet _ findVMPageSet[number].first; IF doingInit THEN { YggBuffMan.WritePages[fileHandle: FileForDIDMap, tid: initTrans, to: number+FilePagesPerBtreePage, nPages: FilePagesPerBtreePage, from: vMPageSet.first.buffer]; } ELSE IF update = modified THEN { YggBuffMan.WritePages[fileHandle: FileForDIDMap, tid: refTid^, to: number+FilePagesPerBtreePage, nPages: FilePagesPerBtreePage, from: vMPageSet.first.buffer]; }; IF vMPageSet # NIL THEN { YggBuffMan.ReleaseVMPageSet[vMPageSet: vMPageSet, releaseState: releaseState, keep: TRUE]; removeVMPageSet[]; }; }; noteUseInTransaction: ENTRY PROC [doc: Document, tid: Camelot.tidT] ~ { ENABLE UNWIND => {}; data: RedBlackTree.UserData; scratchTIDObj.btid _ tid.top; data _ RedBlackTree.Lookup[ transactionToModifiedDocumentMap, scratchTIDObj]; IF data = NIL THEN { dt: DocTrans _ NEW[DocTransRep]; dt.btidForDoc _ NEW[TIDObjRep _ [tid.top]]; dt.docs _ LIST[doc]; RedBlackTree.Insert[transactionToModifiedDocumentMap, dt, scratchTIDObj]; } ELSE { dt: DocTrans; dt _ NARROW[data]; FOR docList: LIST OF Document _ dt.docs, docList.rest UNTIL docList = NIL DO IF YggDID.EqualDIDs[docList.first.did, doc.did] THEN RETURN; ENDLOOP; dt.docs _ CONS[doc, dt.docs]; }; }; GetKeyProc: RedBlackTree.GetKey = { docTrans: DocTrans _ NARROW[data]; RETURN[ docTrans.btidForDoc ]; }; CompareProc: RedBlackTree.Compare = { fileTransData: DocTrans _ NARROW[data]; keyData: TIDObj _ NARROW[k]; SELECT keyData.highTicker FROM > fileTransData.btidForDoc.highTicker => RETURN [greater]; < fileTransData.btidForDoc.highTicker => RETURN [less]; ENDCASE => { SELECT keyData.lowTicker FROM > fileTransData.btidForDoc.lowTicker => RETURN [greater]; < fileTransData.btidForDoc.lowTicker => RETURN [less]; ENDCASE => RETURN [equal]; }; }; stockTIDObj: TIDObj _ NIL; CheckoutTIDObj: ENTRY PROC [btid: Camelot.btidT] RETURNS [k: TIDObj] = { ENABLE UNWIND => {}; IF stockTIDObj = NIL THEN k _ NEW [TIDObjRep] ELSE { k _ stockTIDObj; stockTIDObj _ NIL }; k.btid _ btid; }; RecycleTIDObj: ENTRY PROC [k: TIDObj] = { stockTIDObj _ k; }; GetNext: PUBLIC --ENTRY-- PROCEDURE[doc: Document] RETURNS [nextDocument: Document] = BEGIN -- errors defined in FileMap: none. 
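-- doc = NIL starts a new enumeration; see the enumeration notes at the end of this module.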
ENABLE UNWIND => NULL; RETURN[EnumerateNext[doc]]; END; HashDocument: TYPE = Document; Key: TYPE = YggDID.DID; ClientHashInit: INTERNAL PROCEDURE[numHashSlotsDesired: NAT] RETURNS [numHashSlotsAllowed: NAT] = BEGIN divisor, quotient, remainder: CARDINAL; DO divisor _ 2; DO quotient _ numHashSlotsDesired / divisor; remainder _ numHashSlotsDesired MOD divisor; IF remainder = 0 THEN EXIT; -- this number is not prime.
IF quotient < divisor THEN RETURN[numHashSlotsDesired]; -- prime.
divisor _ divisor + 1; ENDLOOP; numHashSlotsDesired _ numHashSlotsDesired + 1; ENDLOOP; END; ClientHash: INTERNAL PROCEDURE[hashDocument: HashDocument] RETURNS [index: NAT--[0..numHashSlots)--] = INLINE { index _ YggDID.HashDID[hashDocument.did] MOD numHashSlots; }; ClientEqualKeys: INTERNAL PROCEDURE[hashDocument1, hashDocument2: HashDocument] RETURNS [equal: BOOLEAN] = INLINE { RETURN[YggDID.EqualDIDs[hashDocument1.did, hashDocument2.did]]; }; hashSlots: REF HashSlots _ NIL; HashSlots: TYPE = RECORD[SEQUENCE nSlots: NAT OF HashDocument]; numHashSlots: NAT _ 0; -- boy, will they be sorry if they don't init this package.
lookupHashDocument: HashDocument _ NIL; -- for the "package's" use only.
lruListTop: Document _ NIL; lruListTail: Document _ NIL; numberOfHashDocuments: INT _ 0; desiredNumberOfHashDocuments: INT _ 500; HashPkgCallerProgrammingError: ERROR = CODE; -- various fatal conditions.
HashPkgDuplicateKey: ERROR = CODE; -- from Insert.
InitializeHashTable: INTERNAL PROCEDURE[numHashSlotsDesired: NAT, hashDocument: HashDocument] = BEGIN -- errors: HashPkgCallerProgrammingError (numHashSlotsDesired = 0).
numHashSlots _ ClientHashInit[numHashSlotsDesired]; IF numHashSlots = 0 THEN ERROR HashPkgCallerProgrammingError; lookupHashDocument _ hashDocument; hashSlots _ NEW[HashSlots[numHashSlots]]; FOR index: NAT IN [0..numHashSlots) DO hashSlots[index] _ NIL; ENDLOOP; END; Insert: ENTRY PROC [hashDocument: HashDocument] = { ENABLE UNWIND => {}; index: NAT _ ClientHash[hashDocument]; FOR newHashDocument: HashDocument _ hashSlots[index], newHashDocument.next UNTIL newHashDocument = NIL DO IF ClientEqualKeys[newHashDocument, hashDocument] AND ~hashDocument.destroyed THEN ERROR HashPkgDuplicateKey; ENDLOOP; hashDocument.next _ hashSlots[index]; hashSlots[index] _ hashDocument; SafeStorage.EnableFinalization[hashDocument]; numberOfHashDocuments _ numberOfHashDocuments + 1; }; LookupInCache: ENTRY PROC [key: Key, destroyedOK: BOOL _ FALSE] RETURNS [hashDocument: HashDocument] = { ENABLE UNWIND => {}; lookupHashDocument.did _ key; FOR hashDocument _ hashSlots[ClientHash[lookupHashDocument]], hashDocument.next UNTIL hashDocument = NIL DO IF ClientEqualKeys[hashDocument, lookupHashDocument] AND (destroyedOK OR ~hashDocument.destroyed) THEN { SafeStorage.EnableFinalization[hashDocument]; IF hashDocument.lruListPrev # NIL OR hashDocument.lruListNext # NIL OR lruListTop = hashDocument THEN { IF hashDocument.lruListPrev = NIL THEN { lruListTop _ hashDocument.lruListNext } ELSE { hashDocument.lruListPrev.lruListNext _ hashDocument.lruListNext; }; IF lruListTop # NIL THEN lruListTop.lruListPrev _ NIL; IF hashDocument.lruListNext = NIL THEN { lruListTail _ hashDocument.lruListPrev } ELSE hashDocument.lruListNext.lruListPrev _ hashDocument.lruListPrev; IF lruListTail # NIL THEN lruListTail.lruListNext _ NIL; }; hashDocument.lruListPrev _ NIL; hashDocument.lruListNext _ NIL; RETURN; }; ENDLOOP; RETURN[NIL]; }; FlushDocument: PUBLIC PROC [did: YggDID.DID, tid: Camelot.tidT] = { doc: YggInternal.Document _ NIL; IF (doc _ 
LookupInCache[did]) # NIL THEN { Delete[doc]; }; }; Delete: ENTRY PROC [hashDocument: HashDocument] = { InnerDelete[hashDocument]; }; InnerDelete: INTERNAL PROC [hashDocument: HashDocument] = { ENABLE UNWIND => {}; index: NAT _ ClientHash[hashDocument]; newHashDocument: HashDocument _ NIL; prevHashDocument: HashDocument _ NIL; FOR newHashDocument _ hashSlots[index], newHashDocument.next UNTIL newHashDocument = NIL DO IF ClientEqualKeys[newHashDocument, hashDocument] THEN EXIT; prevHashDocument _ newHashDocument; REPEAT FINISHED => ERROR HashPkgCallerProgrammingError; ENDLOOP; IF SafeStorage.IsFinalizationEnabled[newHashDocument] THEN { newHashDocument.okToRemoveFromDIDMapCache _ TRUE; } ELSE { IF prevHashDocument = NIL THEN hashSlots[index] _ hashDocument.next ELSE prevHashDocument.next _ hashDocument.next; numberOfHashDocuments _ numberOfHashDocuments - 1; }; }; EnumerateNext: ENTRY PROCEDURE[prevHashDocument: HashDocument] RETURNS [hashDocument: HashDocument] = { ENABLE UNWIND => {}; index: NAT; IF prevHashDocument = NIL THEN index _ 0 ELSE { index _ ClientHash[prevHashDocument]; FOR hashDocument _ hashSlots[index], hashDocument.next UNTIL hashDocument = NIL DO IF ClientEqualKeys[hashDocument, prevHashDocument] THEN GOTO found; REPEAT found => { IF hashDocument.next # NIL THEN RETURN[hashDocument.next]; index _ index + 1; }; ENDLOOP; }; UNTIL index >= numHashSlots DO IF hashSlots[index] # NIL THEN RETURN[hashSlots[index]]; index _ index + 1; ENDLOOP; RETURN[NIL]; }; EnumerateWithProc: INTERNAL PROCEDURE[proc: PROCEDURE[hashDocument: HashDocument] RETURNS[stop: BOOLEAN]] = { FOR index: NAT IN [0..numHashSlots) DO FOR hashDocument: HashDocument _ hashSlots[index], hashDocument.next UNTIL hashDocument = NIL DO IF proc[hashDocument] THEN RETURN; ENDLOOP; ENDLOOP; }; FinalizationProcess: PROCEDURE = { FinalizeEntry: ENTRY PROCEDURE [docm: Document] = { ENABLE UNWIND => {}; IF SafeStorage.IsFinalizationEnabled[docm] THEN RETURN; IF lruListTop = NIL THEN { lruListTop _ docm; } ELSE { lruListTail.lruListNext _ docm; docm.lruListPrev _ lruListTail; }; lruListTail _ docm; docm _ NIL; }; DO doc: Document; doc _ NARROW[SafeStorage.FQNext[finalizationQ]]; FinalizeEntry[doc]; doc _ NIL; ENDLOOP; }; CacheTrimProcess: PROC = { ticksToWait: Process.Ticks; ticksToWait _ Process.MsecToTicks[197]; DO overage: INT _ 0; Process.Pause[ticksToWait]; overage _ numberOfHashDocuments - desiredNumberOfHashDocuments; IF overage > 0 THEN TrimLruList[overage]; ENDLOOP; }; TrimLruList: PROC [entriesToTrim: INT] = { FOR entNo: INT IN [1..entriesToTrim] DO grabTopOffOfLRU: ENTRY PROC ~ { IF lruListTop # NIL THEN { document _ lruListTop; lruListTop _ lruListTop.lruListNext; IF lruListTop = NIL THEN lruListTail _ NIL ELSE lruListTop.lruListPrev _ NIL; document.lruListNext _ NIL; document.lruListPrev _ NIL; InnerDelete[document ! 
HashPkgCallerProgrammingError => CONTINUE; ]; -- CONTINUE so that finalization will work if this module is re-run during debugging
}; }; document: Document _ NIL; grabTopOffOfLRU[]; ENDLOOP; }; InitDIDMap: PUBLIC PROC [firstTime: BOOL] ~ { FileForDIDMap _ YggFile.OpenDIDMapFile[]; [optr: MaxDIDOptr, address: AddressForMaxDID] _ YggFile.GetMaxDIDAddress[]; RecoverableMaxDID _ LOOPHOLE[AddressForMaxDID]; TRUSTED {NextDID _ RecoverableMaxDID^;}; LastDIDAllocateable _ NextDID; GrabSomeDIDs[]; StartUpExtendibleHash[NIL, firstTime]; IF firstTime THEN { outcome: YggEnvironment.Outcome _ active; trans: YggEnvironment.TransID; WHILE outcome # commit DO doc: YggInternal.Document _ NIL; trans _ YggTransaction.CreateTrans[YggEnvironment.nullTransID]; doc _ NEW[DocumentRep _ [did: YggDIDPrivate.DIDForRootDIDs, modifiedDIDMap: TRUE, componentFiles: NIL, interlock: FALSE, next: NIL]]; Insert[doc ! HashPkgDuplicateKey => ERROR]; noteUseInTransaction[doc, trans]; YggNav.SetContents[trans: trans, did: YggDIDPrivate.DIDForRootDIDs, contents: [YggRep.did, YggDIDPrivate.DIDForRootDIDs]]; [outcome: outcome] _ YggTransaction.Finish[transID: trans, requestedOutcome: commit]; ENDLOOP; }; }; Initialize: PUBLIC ENTRY PROCEDURE[numHashSlotsDesired: NAT, fQLength: CARDINAL] = { ENABLE UNWIND => {}; IF fQLength = 0 THEN RETURN WITH ERROR ZeroIllegal; WORDSInWords _ PBasics.bytesPerWord/BYTES[WORD]; finalizationQ _ SafeStorage.NewFQ[fQLength]; -- do this before
SafeStorage.EstablishFinalization[CODE[DocumentRep], 1, finalizationQ ! SafeStorage.CantEstablishFinalization => CONTINUE]; -- allocating any FileObjects.
TRUSTED BEGIN Process.Detach[FORK FinalizationProcess[]]; END; TRUSTED BEGIN Process.Detach[FORK CacheTrimProcess[]]; END; secondLookupHashDocument _ NEW[DocumentRep _ [did: NIL, componentFiles: NIL, interlock: FALSE, next: NIL]]; InitializeHashTable[numHashSlotsDesired, secondLookupHashDocument]; TRUSTED { Process.InitializeCondition[@aShortTime, Process.MsecToTicks[50]]; Process.InitializeCondition[@didsCondition, Process.MsecToTicks[177]]; Process.InitializeCondition[@lock64Condition, Process.MsecToTicks[43]]; }; transactionToModifiedDocumentMap _ RedBlackTree.Create[getKey: GetKeyProc, compare: CompareProc]; FOR h: CARD IN [0..64) DO index: CARD _ 0; hLeft: CARD _ h; addPowerOf2: CARD _ 32; WHILE hLeft # 0 DO IF hLeft MOD 2 = 1 THEN index _ index + addPowerOf2; addPowerOf2 _ addPowerOf2 / 2; hLeft _ hLeft / 2; ENDLOOP; Hash64Table[h] _ index; ENDLOOP; }; Initialize[129, 250]; END.
YggDIDMapImpl.mesa
Copyright © 1988, 1989 by Xerox Corporation. All rights reserved. Last edited by Bob Hagmann July 7, 1989 9:27:26 am PDT
Data types and variables
Exported procedures for documents
Notes continued interest in this did. Finds or creates a Document for the supplied did and tid. Document is now inserted into the cache. Interlock is set. Open up the componentFiles. errors defined in this interface: none. Get the link info for the Document. Swap the link info for the document, and report what it used to be. no errors possible, but may return NIL. no errors possible, but may return NIL.
Exported for document modifications
Normally, only called by the file package during its pre-commit. tid is a top level transaction. Runs listed are removed from the object. Also used when an object is migrated. Caller guarantees that PreCommit will later be called with this tid (if the system doesn't crash). The document is updated in the cache. PreCommit will make the updates to the disk resident B-tree(s). 
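(A hedged sketch of the expected call sequence, using only procedures exported here: the file package calls AddRuns or RemoveRuns under tid while it holds the document; at top-level pre-commit it calls PreCommit[tid], which latches the affected Hash64 buckets and writes the DID Map; Commit[tid] or Abort[tid] then drops those latches.)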
Normally called only by the file package during its pre-commit. tid is a top-level transaction. Runs listed are added to the object. Also used when an object is removed from the disk cache.

Normally called only by the file package during its pre-commit. tid is a top-level transaction. The size of valid bytes is set to this amount, but the pages for the file are not changed.

Utilities

Exported procedures for transactions

Take all the modified documents from the transaction and write out their changes to the DID Map. Find all the hash pages that have to be updated, and process them in ascending order, doing all of the updates for a page before moving to the next page. We can't deadlock within the DID Map update since we've ordered the locks. First, lock the page for exclusive access. Perform the update(s), including the Camelot pin-and-log dance. Then downgrade the lock to allow readers (they can see uncommitted data). If the transaction commits, completely unlock the tree. If the transaction aborts, lock the tree for exclusive access, perform undo, and unlock the tree after undo processing is done.
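A minimal sketch of that locking discipline (hypothetical names: sortAscending, pagesTouchedBy, lockExclusive, applyUpdatesToPage, and downgradeToRead are not procedures of this module):

  pages: LIST OF ExtendibleHash.PageNumber _ sortAscending[pagesTouchedBy[tid]];
  FOR p: LIST OF ExtendibleHash.PageNumber _ pages, p.rest UNTIL p = NIL DO
    lockExclusive[p.first]; -- ascending page order, so no deadlock within the DID Map
    applyUpdatesToPage[p.first, tid]; -- pin the page and write the Camelot log records
    downgradeToRead[p.first]; -- readers may now see uncommitted data
    ENDLOOP;
  -- on commit: release every lock; on abort: re-acquire the exclusive locks,
  -- undo by physical pages, then release.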
Files

Utilities

DID allocation

[newTid: moreDIDsTID, kernCode: kernCode] _ Camelot.TABegin[taPort: YggEnvironment.taPort, parentTid: YggEnvironment.nullTransID, transType: ttNvServerBased, raiseSignal: TRUE];
[status: status, kernCode: kernCode] _ Camelot.TAEnd[taPort: YggEnvironment.taPort, tid: moreDIDsTID, protocolType: ptTwoPhased, raiseSignal: TRUE];
IF kernCode = Mach.KernSuccess AND status = Camelot.ErSuccess THEN EXIT;

DID access to recoverable DID Map

updatesForTrans: LIST OF updates;
updatesForTrans _ getUpdates[tid];
FOR ut: LIST OF updates _ updatesForTrans, ut.rest UNTIL ut = NIL DO
  IF YggDID.EqualDIDs[ut.first.did, did] THEN RETURN[FALSE, TRUE]; -- if it has updates, then it should be in the cache, so why are we in this routine?
  ENDLOOP;

Set extensionCount before calling

Set extensionCount before calling

This code must be the dual of writeDocToBuffer

Set extensionCount before calling

noteUpdatedBuffer[entry, tid];

This code must be the dual of parseDocPart

ExtendibleHash storage primitives

BufferAndTransList: LIST OF BufferAndTransItem _ NIL;
BufferAndTransItem: TYPE = RECORD [
  highBits: CARD,
  trans: YggEnvironment.TransID
  ];

findTransForPage: ENTRY PROC [number: PageNumber] RETURNS [trans: YggEnvironment.TransID] ~ {
  -- body empty in this version
  };

noteUpdatedBuffer: ENTRY PROC [entry: ExtendibleHash.Entry, trans: YggEnvironment.TransID] ~ {
  highBits: CARD;
  highBits _ LOOPHOLE[entry, CARD]/ExtendibleHash.wordsPerPage;
  BufferAndTransList _ CONS[[highBits, trans], BufferAndTransList];
  };

doneWithTransUpdatedBuffer: ENTRY PROC [trans: YggEnvironment.TransID] ~ {
  -- assumed semantics: drop the buffer records noted for trans; assumes TransID supports = comparison
  prev: LIST OF BufferAndTransItem _ NIL;
  FOR l: LIST OF BufferAndTransItem _ BufferAndTransList, l.rest UNTIL l = NIL DO
    IF l.first.trans = trans
      THEN { IF prev = NIL THEN BufferAndTransList _ l.rest ELSE prev.rest _ l.rest; }
      ELSE prev _ l;
    ENDLOOP;
  };

Evidently the ReferencePage and ReleasePage callback types:

PROC [storage: PageStorage, number: PageNumber, type: ReferenceType, clientPerCallData: REF] RETURNS [ptr: PagePtr]
PROC [storage: PageStorage, number: PageNumber, update: UpdateState, clientPerCallData: REF]

Internal red black procs (the types of GetKeyProc and CompareProc):

PROC [data: UserData] RETURNS [Key]
PROC [k: Key, data: UserData] RETURNS [Basics.Comparison]

Old enumeration proc

doc = NIL starts a new enumeration, and GetNext returns nextDocument = NIL when the enumeration is exhausted. The only guaranteed property of the enumeration is that all documents in existence during the entire enumeration will be visited at least once. Some documents may be seen more than once.

Hash table management:

Explanation of client-supplied parameters:

The procedure ClientHashInit is called during hash table initialization, to allow the hash function to precompute values based on the range and to make any small adjustments to the range that are necessary.

HashDocument must:
  be a REF type;
  contain a field "next" of type HashDocument, under the exclusive control of the hash package.

Key is an arbitrary type. SetKey sets the "key value" associated with a HashDocument. The key value must not change between the time the doc is Inserted into the table and the time it is deleted from the table.

Hash must be a function of the key value of the parameter "hashDocument". EqualKeys must be the equality relation on the key values of the parameters "hashDocument1" and "hashDocument2".

Interface description:

InitializeHashTable: INTERNAL PROCEDURE [numHashSlotsDesired: NAT, hashTableZone: ZONE, hashDocument: HashDocument];
  errors: HashPkgCallerProgrammingError (numHashSlotsDesired = 0).

Insert: ENTRY PROCEDURE [hashDocument: HashDocument];
  errors: HashPkgDuplicateKey.

LookupInCache: ENTRY PROCEDURE [key: Key] RETURNS [hashDocument: HashDocument];
  returns hashDocument = NIL if not found.

Delete: ENTRY PROCEDURE [hashDocument: HashDocument];
  errors: HashPkgCallerProgrammingError (not found).

EnumerateNext: INTERNAL PROCEDURE [prevHashDocument: HashDocument] RETURNS [hashDocument: HashDocument];
  errors: none. prevHashDocument = NIL starts the enumeration; returned hashDocument = NIL is the end of the enumeration. This procedure guarantees that any hashDocument in existence throughout the entire enumeration will be seen. Other docs may or may not be seen; HashDocuments may be seen more than once.

EnumerateWithProc: INTERNAL PROCEDURE [proc: PROCEDURE [hashDocument: HashDocument] RETURNS [stop: BOOLEAN]];
  errors: none. (A usage sketch appears at the end of this section.)

client-supplied parameters to hash package begin here:

[quotient, remainder] _ Basics.DivMod[numHashSlotsDesired, divisor];

end of client-supplied parameters, start of invariant hash package code:

The INTERNAL procedures below expect to be called from a client procedure holding the module monitor lock, which protects the following data structures:

Per-procedure notes:
  errors:
  errors: HashPkgDuplicateKey.
  returns hashDocument = NIL if not found.
  On lrulist. Remove it and use it.
  Flush a document out of the cache. Testing only!!
  errors: HashPkgCallerProgrammingError (not found).
  errors: HashPkgCallerProgrammingError (not found).
  prevHashDocument = NIL starts the enumeration; returned hashDocument = NIL is the end of the enumeration. This procedure guarantees that any hashDocument in existence throughout the entire enumeration will be seen. Other docs may or may not be seen; HashDocuments may be seen more than once.
  errors: none.
  errors: none.

end of invariant hash package code.
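For orientation, a client of EnumerateWithProc might look like the sketch below (CountCachedDocuments and CountOne are hypothetical; the caller must already hold the module monitor lock, since EnumerateWithProc is INTERNAL):

  CountCachedDocuments: INTERNAL PROC RETURNS [n: INT _ 0] = {
    CountOne: PROC [hashDocument: HashDocument] RETURNS [stop: BOOLEAN] = {
      n _ n + 1;
      RETURN[FALSE];
      };
    EnumerateWithProc[CountOne];
    };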
Finalization

Trim lru list

Initialization

Init the Root DID

TURN FINALIZATION BACK ON AFTER DEBUGGING