DIRECTORY Atom USING [GetPName, MakeAtom, GetProp, PutProp], BasicTime USING [GMT, Period], BTree USING [Error, GetState], BTreeSimple USING [DeleteKey, EnumerateRecords, --GetState,-- InternalKey, New, NewPathStk, Open, PathStk, ReadRecord, --ReadValue,-- Relation, SetState, Tree, UpdateRecord, Value, ValueObject], Convert USING [IntFromRope, RopeFromInt], DFUtilities USING [--DirectoryItem,-- FileItem, CommentItem, ParseFromStream, ProcessItemProc, RemoveVersionNumber, SyntaxError], FS USING [Close, Error, ExpandName, FileInfo, OpenFile], GeneralFS USING [Open, Create, StreamOpen], IO USING [BreakProc, Close, EndOf, EndOfStream, Error, Flush, GetChar, --GetID,-- GetIndex, GetInt, GetLength, GetLineRope, GetRopeLiteral, GetToken, GetTokenRope, IDProc, PeekChar, PutChar, PutF, RIS, rope, SetIndex, SetLength, SkipWhitespace, STREAM], RefID USING [Seal, Unseal, Release], RefTab USING [Create, EachPairAction, Fetch, Insert, Pairs, Ref], RefText USING [Equal, line, ObtainScratch, ReleaseScratch], Rope USING [Cat, Compare, Concat, Equal, Find, Index, ROPE, Substr], SymTab USING [Create, Fetch, Insert, Ref, SeqIndex], LB, LoganBerryBackdoor, LoganBerry, LoganBerryStructure; LoganBerryImpl: CEDAR MONITOR LOCKS dbinfo USING dbinfo: OpenDBInfo IMPORTS Atom, BasicTime, BTree, BTreeSimple, Convert, DFUtilities, FS, GeneralFS, IO, RefText, Rope, RefTab, RefID, SymTab EXPORTS LB, LoganBerry, LoganBerryBackdoor ~ BEGIN OPEN LoganBerry, LoganBerryStructure; ROPE: TYPE = Rope.ROPE; STREAM: TYPE = IO.STREAM; Error: PUBLIC ERROR [ec: ErrorCode, explanation: ROPE _ NIL] = CODE; Open: PUBLIC PROC [conv: Conv _ NIL, dbName: ROPE] RETURNS [db: OpenDB] ~ { dbinfo: OpenDBInfo _ OpenI[dbName]; IF NOT dbinfo.remoteAccess THEN ERROR Error[$DBNotAvailable, dbinfo.statusMsg]; db _ RefID.Seal[dbinfo]; }; ReadEntry: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, key: AttributeType, value: AttributeValue] RETURNS [entry: Entry, others: BOOLEAN] ~ { [entry, others] _ ReadEntryI[GetInfo[db], key, 
value]; }; EnumerateEntries: PUBLIC PROC [db: OpenDB, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL, proc: EntryProc] RETURNS [] ~ { EnumerateEntriesI[GetInfo[db], key, start, end, proc]; }; GenerateEntries: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL] RETURNS [cursor: Cursor] ~ { newCursor: CursorInfo _ GenerateEntriesI[GetInfo[db], key, start, end]; cursor _ RefID.Seal[newCursor]; }; NextEntry: PUBLIC PROC [conv: Conv _ NIL, cursor: Cursor, dir: CursorDirection _ increasing] RETURNS [entry: Entry] ~ { c: CursorInfo; WITH RefID.Unseal[cursor] SELECT FROM ci: CursorInfo => c _ ci; ENDCASE => c _ NIL; IF c = NIL THEN ERROR Error[$BadCursor, "Invalid cursor passed to NextEntry."]; entry _ NextEntryI[c.dbinfo, c, dir]; }; EndGenerate: PUBLIC PROC [conv: Conv _ NIL, cursor: Cursor] RETURNS [] ~ { [] _ RefID.Release[cursor]; }; WriteEntry: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, entry: Entry, log: LogID _ activityLog, replace: BOOLEAN _ FALSE] RETURNS [] ~ { WriteEntryI[GetInfo[db], entry, log, replace]; }; DeleteEntry: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, key: AttributeType, value: AttributeValue] RETURNS [] ~ { DeleteEntryI[GetInfo[db], key, value]; }; Close: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [] ~ { CloseI[GetInfo[db ! 
Error => IF ec=$DBClosed THEN CONTINUE]]; }; BuildIndices: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [] ~ { BuildIndicesI[GetInfo[db]]; }; CompactLogs: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [] ~ { CompactLogsI[GetInfo[db]]; }; Describe: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [info: SchemaInfo] ~ { info _ DescribeI[GetInfo[db]]; }; GetInfo: PROC [db: OpenDB] RETURNS [OpenDBInfo] ~ INLINE { ref: REF = RefID.Unseal[db]; IF ref = NIL THEN ERROR Error[$BadDBHandle, "NIL OpenDB handle."]; WITH ref SELECT FROM dbinfo: OpenDBInfo => { IF NOT dbinfo.remoteAccess THEN ERROR Error[$DBNotAvailable, dbinfo.statusMsg]; IF NOT dbinfo.isOpen THEN ERROR Error[$DBClosed, dbinfo.statusMsg]; RETURN[dbinfo]; }; ENDCASE => ERROR Error[$BadDBHandle, "Invalid OpenDB handle."]; }; OpenI: PUBLIC PROC [dbName: ROPE] RETURNS [dbinfo: OpenDBInfo] ~ { dbinfo _ GetSharedDB[dbName]; IF NOT dbinfo.isOpen THEN MonitoredOpen[dbinfo]; RETURN[dbinfo]; }; MonitoredOpen: ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ { ENABLE UNWIND => NULL; needToRebuild: BOOLEAN _ FALSE; IF dbinfo.isOpen THEN RETURN; -- lost race IF dbinfo.primaryIndex = NIL THEN ReadSchema[dbinfo]; OpenLogs[dbinfo.logs]; IF Recover[dbinfo] THEN needToRebuild _ TRUE; IF OpenIndices[dbinfo.indices] THEN needToRebuild _ TRUE; IF needToRebuild THEN BuildIndicesWorker[dbinfo]; dbinfo.isOpen _ TRUE; }; ReadEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, value: AttributeValue] RETURNS [entry: Entry, others: BOOLEAN] ~ { ENABLE UNWIND => NULL; indexEntry: IndexEntry; index: IndexInfo; aValue: AttributeValue; pathSkt: BTreeSimple.PathStk _ BTreeSimple.NewPathStk[]; index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val]; IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]]; [aValue, indexEntry] _ GetIndexEntry[index: index, value: value, pathStk: pathSkt]; IF aValue = NIL THEN RETURN[entry: NIL, others: FALSE]; entry _ 
ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte]; IF index.type = primary THEN others _ FALSE ELSE { aValue _ GetIndexEntry[index: index, value: aValue, valueIsKey: TRUE, relation: greater, pathStk: pathSkt].actual; others _ Rope.Equal[s1: value, s2: UniqueKeyToValue[aValue], case: FALSE]; }; RETURN[entry, others]; }; SimpleEntryProc: EntryProc = { -- for debugging purposes cont: BOOLEAN _ TRUE; RETURN[cont]; }; EnumerateEntriesI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL, proc: EntryProc] RETURNS [] ~ { ENABLE UNWIND => NULL; EntriesInSubrange: PROC [key: BTreeSimple.InternalKey, value: BTreeSimple.Value] RETURNS [continue: BOOLEAN] = { indexEntry: IndexEntry; entry: Entry; IF end # NIL AND Rope.Compare[s1: key, s2: end, case: FALSE] = greater THEN RETURN[continue: FALSE]; TRUSTED { indexEntry _ LOOPHOLE[@value[0], IndexPtr]^; }; entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte]; RETURN[continue: proc[entry]]; }; index: IndexInfo; index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val]; IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]]; IF index.type = secondary THEN { IF start # NIL THEN start _ ValueToSmallKey[start]; IF end # NIL THEN end _ ValueToLargeKey[end]; }; [] _ BTreeSimple.EnumerateRecords[tree: index.btree, key: start, Proc: EntriesInSubrange ! 
BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex, Rope.Cat["Problem enumerating index for ",Atom.GetPName[key],"."]]]; }; GenerateEntriesI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL] RETURNS [cinfo: CursorInfo] ~ { ENABLE UNWIND => NULL; newCursor: CursorInfo; index: IndexInfo; index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val]; IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]]; newCursor _ NEW[CursorRecord]; newCursor.index _ index; newCursor.dbinfo _ dbinfo; newCursor.key _ key; newCursor.start _ start; newCursor.end _ end; IF newCursor.index.type = secondary THEN { IF start # NIL THEN newCursor.start _ ValueToSmallKey[start]; IF end # NIL THEN newCursor.end _ ValueToLargeKey[end]; }; newCursor.pathStk _ BTreeSimple.NewPathStk[]; newCursor.current _ IF start # NIL THEN GetIndexEntry[index: newCursor.index, value: newCursor.start, valueIsKey: TRUE, relation: less, pathStk: newCursor.pathStk].actual ELSE NIL; RETURN[newCursor]; }; NextEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, cinfo: CursorInfo, dir: CursorDirection _ increasing] RETURNS [entry: Entry] ~ { ENABLE UNWIND => NULL; actualValue: AttributeValue; indexEntry: IndexEntry; [actualValue, indexEntry] _ GetIndexEntry[index: cinfo.index, value: cinfo.current, valueIsKey: TRUE, relation: IF dir = increasing THEN greater ELSE less, pathStk: cinfo.pathStk]; IF (actualValue = NIL) OR (dir = increasing AND cinfo.end # NIL AND Rope.Compare[actualValue, cinfo.end, FALSE] = greater) OR (dir = decreasing AND Rope.Compare[actualValue, cinfo.start, FALSE] = less) THEN RETURN[NIL]; entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte]; cinfo.current _ actualValue; RETURN[entry]; }; EndGenerateI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, cinfo: CursorInfo] RETURNS [] ~ { ENABLE UNWIND => NULL; NULL; -- this routine is unnecessary since cursors 
get garbage collected }; WriteEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, entry: Entry, log: LogID _ activityLog, replace: BOOLEAN _ FALSE] RETURNS [] ~ { ENABLE UNWIND => NULL; value: AttributeValue; indexData: IndexEntry _ [log: log, firstByte: 0]; doReplace: BOOLEAN _ FALSE; replacedIndexEntry: IndexEntry; replacedEntry: Entry; value _ GetAttributeValue[entry, dbinfo.primaryIndex.key]; IF value = NIL THEN ERROR Error[$NoPrimaryKey, "Entry does not contain a primary key."]; IF dbinfo.logs[log].access = readOnly THEN ERROR Error[$LogReadOnly, "Can't write to a read only log."]; [value, replacedIndexEntry] _ GetIndexEntry[index: dbinfo.primaryIndex, value: value]; IF value # NIL THEN IF replace THEN doReplace _ TRUE ELSE ERROR Error[$ValueNotUnique, "An existing entry already contains the primary key."]; IF doReplace THEN { IF replacedIndexEntry.log # log THEN ERROR Error[$InvalidReplace, "Cross-log replacements are not allowed."]; [] _ WriteLogEntry[dbinfo.logs[log].stream, LIST[[$REPLACED, Convert.RopeFromInt[replacedIndexEntry.firstByte]]]]; }; indexData.firstByte _ WriteLogEntry[dbinfo.logs[log].stream, entry, doReplace]; IF doReplace THEN { replacedEntry _ ReadLogEntry[dbinfo.logs[log].stream, replacedIndexEntry.firstByte]; RemoveEntryFromIndices[dbinfo.indices, replacedEntry, replacedIndexEntry]; }; AddEntryToIndices[dbinfo.indices, entry, indexData]; MarkUpdateComplete[dbinfo.logs[log].stream]; }; DeleteEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, value: AttributeValue] RETURNS [] ~ { ENABLE UNWIND => NULL; indexEntry: IndexEntry; index: IndexInfo; pathSkt: BTreeSimple.PathStk _ BTreeSimple.NewPathStk[]; avalue: AttributeValue; delete: Entry; entry: Entry; index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val]; IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]]; [avalue, indexEntry] _ GetIndexEntry[index: index, value: value, pathStk: pathSkt]; IF avalue = NIL THEN RETURN; avalue _ 
GetIndexEntry[index: index, value: avalue, valueIsKey: TRUE, relation: greater, pathStk: pathSkt].actual; IF Rope.Equal[s1: value, s2: UniqueKeyToValue[avalue], case: FALSE] THEN ERROR Error[$ValueNotUnique, "Entry to be deleted insufficiently specified."]; IF dbinfo.logs[indexEntry.log].access = readOnly THEN ERROR Error[$LogReadOnly, "Can't delete entries in a read only log."]; delete _ LIST[[$DELETED, Convert.RopeFromInt[indexEntry.firstByte]]]; [] _ WriteLogEntry[dbinfo.logs[indexEntry.log].stream, delete]; entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte]; RemoveEntryFromIndices[dbinfo.indices, entry, indexEntry]; MarkUpdateComplete[dbinfo.logs[indexEntry.log].stream]; }; CloseI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ { ENABLE UNWIND => NULL; CloseLogs[dbinfo.logs]; CloseIndices[dbinfo.indices]; dbinfo.isOpen _ FALSE; }; BuildIndicesI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ { ENABLE UNWIND => NULL; BuildIndicesWorker[dbinfo]; }; BuildIndicesWorker: PROC [dbinfo: OpenDBInfo] RETURNS [] ~ { indexData: IndexEntry; logstream: STREAM; entry: Entry; indexEntry: IndexEntry; saveStreamPosition: INT; EraseIndices[dbinfo.indices]; FOR log: LogID IN [0..dbinfo.logs.size) DO IF dbinfo.logs[log] # NIL THEN { indexData _ [log, 0]; logstream _ dbinfo.logs[log].stream; FOR entry _ ReadLogEntry[logstream, 0], ReadLogEntry[logstream, -1] UNTIL entry = NIL DO IF (entry.first.type # $DELETED) AND (entry.first.type # $REPLACED) THEN { -- WriteEntry AddEntryToIndices[dbinfo.indices, entry, indexData]; } ELSE { -- DeleteEntry saveStreamPosition _ IO.GetIndex[logstream]; -- before the following read changes the index indexEntry _ [log: log, firstByte: Convert.IntFromRope[entry.first.value]]; entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte]; RemoveEntryFromIndices[dbinfo.indices, entry, indexEntry]; IO.SetIndex[logstream, saveStreamPosition]; }; IF IO.EndOf[logstream] OR IO.GetChar[logstream] # 
EndOfEntry THEN ERROR Error[$BadLogEntry, "Missing terminator on log entry."]; indexData.firstByte _ IO.GetIndex[logstream]; ENDLOOP; }; ENDLOOP; }; CompactLogsI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ { ENABLE { UNWIND => NULL; FS.Error => ERROR Error[$InternalError, error.explanation]; }; KeepEntry: PROC [key: BTreeSimple.InternalKey, value: BTreeSimple.Value] RETURNS [continue: BOOLEAN] = { indexEntry: IndexEntry; entry: Entry; TRUSTED { indexEntry _ LOOPHOLE[@value[0], IndexPtr]^; }; IF dbinfo.logs[indexEntry.log].access # readOnly THEN { entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte]; [] _ WriteLogEntry[newlogs[indexEntry.log].stream, entry]; MarkUpdateComplete[newlogs[indexEntry.log].stream]; }; RETURN[continue: TRUE]; }; newlogs: LogSet _ NEW[LogSetRecord[dbinfo.logs.size]]; FOR i: LogID IN [0..dbinfo.logs.size) DO IF dbinfo.logs[i] # NIL AND dbinfo.logs[i].access # readOnly THEN { newlogs[i] _ NEW[LogInfoRecord]; newlogs[i].stream _ GeneralFS.StreamOpen[dbinfo.logs[i].filename, $create ! FS.Error => ERROR Error[$CantOpenLog, error.explanation]]; }; ENDLOOP; [] _ BTreeSimple.EnumerateRecords[tree: dbinfo.primaryIndex.btree, key: NIL, Proc: KeepEntry ! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex, "Problem enumerating primary index."]]; FOR i: LogID IN [0..dbinfo.logs.size) DO IF newlogs[i] # NIL THEN { IO.Close[newlogs[i].stream]; IO.Close[dbinfo.logs[i].stream]; dbinfo.logs[i].stream _ GeneralFS.StreamOpen[dbinfo.logs[i].filename, $append ! 
FS.Error => ERROR Error[$CantOpenLog, error.explanation]]; }; ENDLOOP; BuildIndicesWorker[dbinfo]; }; SetRemoteAccessI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, accessible: BOOLEAN _ TRUE, why: ROPE _ NIL] RETURNS [] ~ { ENABLE UNWIND => NULL; dbinfo.remoteAccess _ accessible; dbinfo.statusMsg _ why; }; SchemaChars: PUBLIC REF READONLY TEXT = "-->"; ParseSchemaLine: PROC [s: STREAM, buffer: REF TEXT, wdir: ROPE] RETURNS [item: REF ANY, needFilename: BOOLEAN _ FALSE] ~ { ENABLE { IO.Error => ERROR Error[$BadSchema, "Error parsing schema statement."]; IO.EndOfStream => ERROR Error[$BadSchema, "Unexpected end of stream."] }; token: REF TEXT; log: LogInfo; index: IndexInfo; needFilename _ FALSE; token _ IO.GetToken[s, IO.IDProc, buffer].token; IF NOT RefText.Equal[token, SchemaChars] THEN RETURN[NIL, FALSE]; token _ IO.GetToken[s, IO.IDProc, buffer].token; SELECT TRUE FROM RefText.Equal[token, "log", FALSE] => { log _ NEW[LogInfoRecord]; log.id _ IO.GetInt[s]; token _ IO.GetToken[s, IO.IDProc, buffer].token; log.access _ IF RefText.Equal[token, "ReadOnly", FALSE] THEN readOnly ELSE readWrite; log.filename _ IO.GetTokenRope[s, IO.IDProc ! IO.EndOfStream => {needFilename _ TRUE; CONTINUE}].token; IF NOT needFilename THEN log.filename _ FS.ExpandName[log.filename, wdir].fullFName; item _ log; }; RefText.Equal[token, "index", FALSE] => { index _ NEW[IndexInfoRecord]; index.key _ Atom.MakeAtom[IO.GetRopeLiteral[s]]; token _ IO.GetToken[s, IO.IDProc, buffer].token; index.type _ IF RefText.Equal[token, "primary", FALSE] THEN primary ELSE secondary; index.filename _ IO.GetTokenRope[s, IO.IDProc ! 
IO.EndOfStream => {needFilename _ TRUE; CONTINUE}].token; IF NOT needFilename THEN index.filename _ FS.ExpandName[index.filename, wdir].fullFName; item _ index; }; ENDCASE => ERROR Error[$BadSchema, "Invalid keyword."]; }; ReadSchema: PROC [dbinfo: OpenDBInfo] RETURNS [] ~ { ENABLE DFUtilities.SyntaxError => ERROR Error[$BadSchema, reason]; NewDBInfo: PROC [item: REF ANY, filename: ROPE _ NIL] RETURNS [] ~ { WITH item SELECT FROM log: LogInfo => { IF filename # NIL THEN log.filename _ filename; dbinfo.logs[log.id] _ log; }; index: IndexInfo => { IF filename # NIL THEN index.filename _ filename; [] _ RefTab.Insert[dbinfo.indices, index.key, index]; IF index.type = primary THEN IF dbinfo.primaryIndex = NIL THEN dbinfo.primaryIndex _ index ELSE ERROR Error[$BadSchema, "Multiple primary indices specified."]; }; ENDCASE => ERROR Error[$InternalError]; }; SchemaItemProc: DFUtilities.ProcessItemProc = { WITH item SELECT FROM comment: REF DFUtilities.CommentItem => { IF needFilename THEN ERROR Error[$BadSchema, "No file name specified for some log or index."]; [lastSchemaItem, needFilename] _ ParseSchemaLine[IO.RIS[comment.text], buffer, wdir]; IF lastSchemaItem # NIL AND NOT needFilename THEN NewDBInfo[lastSchemaItem]; }; file: REF DFUtilities.FileItem => { IF needFilename THEN { NewDBInfo[lastSchemaItem, FS.ExpandName[DFUtilities.RemoveVersionNumber[file.name], wdir].fullFName]; needFilename _ FALSE; }; }; ENDCASE => { IF needFilename THEN ERROR Error[$BadSchema, "No file name specified for some log or index."]; }; }; needFilename: BOOLEAN _ FALSE; lastSchemaItem: REF ANY; buffer: REF TEXT = RefText.ObtainScratch[RefText.line]; wdir: ROPE = Rope.Substr[dbinfo.dbName, 0, FS.ExpandName[dbinfo.dbName].cp.base.start]; schemaStream: STREAM _ GeneralFS.StreamOpen[dbinfo.dbName ! 
FS.Error => ERROR Error[$CantOpenSchema, error.explanation]]; DFUtilities.ParseFromStream[in: schemaStream, proc: SchemaItemProc, filter: [comments: TRUE]]; RefText.ReleaseScratch[buffer]; }; DescribeI: PROC [dbinfo: OpenDBInfo] RETURNS [info: SchemaInfo] ~ { AddIndexInfo: RefTab.EachPairAction = { index: IndexInfo _ NARROW[val]; IF index # dbinfo.primaryIndex THEN { info.keys _ CONS[index.key, info.keys]; info.indexNames _ CONS[index.filename, info.indexNames]; }; RETURN[quit: FALSE]; }; info.dbName _ dbinfo.dbName; info.logs _ NIL; info.logNames _ NIL; FOR i: LogID IN [0..dbinfo.logs.size) DO IF dbinfo.logs[i] # NIL THEN { info.logs _ CONS[i, info.logs]; info.logNames _ CONS[dbinfo.logs[i].filename, info.logNames]; }; ENDLOOP; info.keys _ NIL; info.indexNames _ NIL; [] _ RefTab.Pairs[dbinfo.indices, AddIndexInfo]; info.keys _ CONS[dbinfo.primaryIndex.key, info.keys]; info.indexNames _ CONS[dbinfo.primaryIndex.filename, info.indexNames]; }; GetAttributeValue: PROC [entry: Entry, type: AttributeType] RETURNS [AttributeValue] ~ { FOR e: Entry _ entry, e.rest WHILE e # NIL DO IF e.first.type = type THEN RETURN[e.first.value]; ENDLOOP; RETURN[NIL]; }; ReverseEntry: PROC [entry: Entry] RETURNS[Entry] = { l1, l2, l3: Entry _ NIL; IF entry = NIL THEN RETURN[NIL]; l3 _ entry; UNTIL (l1 _ l3) = NIL DO l3 _ l3.rest; l1.rest _ l2; l2 _ l1; ENDLOOP; RETURN[l2]; }; OpenLogs: PROC [logs: LogSet] RETURNS [] ~ { FOR i: LogID IN [0..logs.size) DO IF (logs[i] # NIL) AND (logs[i].stream = NIL) THEN { logs[i].stream _ GeneralFS.StreamOpen[logs[i].filename, IF logs[i].access = readWrite THEN $append ELSE $read ! 
FS.Error => ERROR Error[$CantOpenLog, error.explanation]]; }; ENDLOOP; }; ReadLogEntry: PUBLIC PROC [logStream: STREAM, byte: LogAddress] RETURNS [Entry] ~ { ENABLE { IO.EndOfStream, IO.Error => ERROR Error[$BadLogEntry] }; AttributeBreakProc: IO.BreakProc = { RETURN[SELECT char FROM ': => sepr, ENDCASE => other] }; ReadAttribute: PROC [s: STREAM] RETURNS [a: Attribute] ~ { a.type _ Atom.MakeAtom[IO.GetTokenRope[s, AttributeBreakProc].token]; [] _ IO.GetChar[s]; -- attribute separation char [] _ IO.SkipWhitespace[stream: s, flushComments: FALSE]; IF IO.PeekChar[s] = '" THEN { a.value _ IO.GetRopeLiteral[s]; [] _ IO.GetLineRope[s]; } ELSE a.value _ IO.GetLineRope[s]; }; entry: Entry; attribute: Attribute; IF logStream = NIL THEN ERROR Error[$DBClosed]; IF byte >= 0 THEN IO.SetIndex[logStream, byte ! IO.Error => ERROR Error[$BadIndex]]; IF IO.PeekChar[logStream] = UpdateComplete THEN RETURN[NIL]; -- at end of log UNTIL IO.PeekChar[logStream] = EndOfEntry DO attribute _ ReadAttribute[logStream]; entry _ CONS[attribute, entry]; ENDLOOP; RETURN[ReverseEntry[entry]]; }; WriteLogEntry: PUBLIC PROC [logStream: STREAM, entry: Entry, continuation: BOOLEAN _ FALSE] RETURNS [LogAddress] ~ { WriteAttribute: PROC [s: STREAM, a: Attribute] RETURNS [] ~ { value: ROPE _ a.value; IF Rope.Find[a.value, "\n"] # -1 THEN -- write out value as rope literal value _ Rope.Cat["\"", a.value, "\""]; IO.PutF[s, "%g: %g\n", IO.rope[Atom.GetPName[a.type]], IO.rope[value]] }; byteOffset: LogAddress; IF logStream = NIL THEN ERROR Error[$DBClosed]; byteOffset _ IO.GetLength[logStream]; IF byteOffset > 0 AND NOT continuation THEN byteOffset _ byteOffset - 1; --remove update marker from end of log IO.SetIndex[logStream, byteOffset]; FOR e: Entry _ entry, e.rest WHILE e # NIL DO WriteAttribute[logStream, e.first]; ENDLOOP; IO.PutChar[logStream, EndOfEntry]; IO.Flush[logStream]; RETURN[byteOffset]; }; MarkUpdateComplete: PUBLIC PROC [logStream: STREAM] RETURNS [] ~ { byteOffset: LogAddress _ 
IO.GetLength[logStream]; IO.SetIndex[logStream, byteOffset]; IO.PutChar[logStream, UpdateComplete]; IO.Flush[logStream]; }; CloseLogs: PROC [logs: LogSet] RETURNS [] ~ { FOR i: LogID IN [0..logs.size) DO IF (logs[i] # NIL) AND (logs[i].stream # NIL) THEN { IO.Close[logs[i].stream]; logs[i].stream _ NIL; }; ENDLOOP; }; OpenIndices: PROC [indices: IndexSet] RETURNS [needToRebuild: BOOLEAN] ~ { OpenIndex: RefTab.EachPairAction = { init: BOOLEAN _ FALSE; index: IndexInfo _ NARROW[val]; IF index.btree = NIL THEN { index.btree _ BTreeSimple.New[]; }; IF BTree.GetState[index.btree].state # open THEN { index.openfile _ GeneralFS.Open[name: index.filename, lock: $write ! FS.Error => { -- problem opening btree so create new one IF error.group = $user AND error.code = $unknownFile THEN { init _ TRUE; needToRebuild _ TRUE; index.openfile _ GeneralFS.Create[name: index.filename]; CONTINUE; } ELSE ERROR Error[$CantOpenIndex, error.explanation]; } ]; BTreeSimple.Open[tree: index.btree, file: index.openfile, initialize: init ! 
BTree.Error => ERROR Error[$BadIndex, Rope.Cat["Bad index for ",Atom.GetPName[NARROW[key]],"."]]]; }; RETURN[quit: FALSE]; }; needToRebuild _ FALSE; [] _ RefTab.Pairs[indices, OpenIndex]; RETURN[needToRebuild]; }; GetIndexEntry: PROC [index: IndexInfo, value: AttributeValue, valueIsKey: BOOLEAN _ FALSE, relation: BTreeSimple.Relation _ equal, pathStk: BTreeSimple.PathStk _ NIL] RETURNS [actual: AttributeValue, data: IndexEntry] ~ { btreeValue: BTreeSimple.Value; useKey: AttributeValue _ value; useRelation: BTreeSimple.Relation _ relation; IF NOT valueIsKey AND index.type = secondary THEN SELECT relation FROM less => useKey _ ValueToSmallKey[value]; lessEqual => useKey _ ValueToLargeKey[value]; equal => { --start small and look upwards useKey _ ValueToSmallKey[value]; useRelation _ greaterEqual; }; greaterEqual => useKey _ ValueToSmallKey[value]; greater => useKey _ ValueToLargeKey[value]; ENDCASE; [actual, btreeValue] _ BTreeSimple.ReadRecord[tree: index.btree, key: useKey, relation: useRelation, pathStk: pathStk, useExistingPath: pathStk # NIL ! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex]]; IF NOT valueIsKey AND index.type = secondary AND relation = equal AND NOT Rope.Equal[value, UniqueKeyToValue[actual], FALSE] THEN -- found entry larger than desired actual _ NIL; IF btreeValue # NIL THEN TRUSTED { data _ LOOPHOLE[@btreeValue[0], IndexPtr]^; }; RETURN[actual, data]; }; CreateIndexEntry: PROC [index: IndexInfo, value: AttributeValue, data: IndexEntry] RETURNS [] ~ { btreeValue: BTreeSimple.Value; btreeValue _ NEW[BTreeSimple.ValueObject[SIZE[IndexEntry]]]; TRUSTED { LOOPHOLE[@btreeValue[0], IndexPtr]^ _ data; }; BTreeSimple.UpdateRecord[tree: index.btree, key: IF index.type = primary THEN value ELSE ValueToUniqueKey[value, data], value: btreeValue ! 
BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex]]; }; DeleteIndexEntry: PROC [index: IndexInfo, value: AttributeValue, data: IndexEntry] RETURNS [] ~ { [] _ BTreeSimple.DeleteKey[tree: index.btree, key: IF index.type = primary THEN value ELSE ValueToUniqueKey[value, data] ! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex]]; }; AddEntryToIndices: PROC [indices: IndexSet, entry: Entry, data: IndexEntry] RETURNS [] ~ { index: IndexInfo; FOR e: Entry _ entry, e.rest WHILE e # NIL DO index _ NARROW[RefTab.Fetch[indices, e.first.type].val]; IF index # NIL THEN { -- index key in entry so add to index CreateIndexEntry[index, e.first.value, data]; }; ENDLOOP; }; RemoveEntryFromIndices: PROC [indices: IndexSet, entry: Entry, data: IndexEntry] RETURNS [] ~ { index: IndexInfo; FOR e: Entry _ entry, e.rest WHILE e # NIL DO index _ NARROW[RefTab.Fetch[indices, e.first.type].val]; IF index # NIL THEN { -- index key in entry so remove from index DeleteIndexEntry[index, e.first.value, data]; }; ENDLOOP; }; EraseIndices: PROC [indices: IndexSet] RETURNS [] ~ { EraseIndex: RefTab.EachPairAction = { index: IndexInfo _ NARROW[val]; IF index.btree = NIL THEN RETURN[quit: FALSE]; IF BTree.GetState[index.btree].state # closed THEN { BTreeSimple.SetState[index.btree, closed]; FS.Close[index.openfile]; }; index.openfile _ GeneralFS.Create[index.filename]; BTreeSimple.Open[tree: index.btree, file: index.openfile, initialize: TRUE]; RETURN[quit: FALSE]; }; [] _ RefTab.Pairs[indices, EraseIndex]; }; CloseIndices: PROC [indices: IndexSet] RETURNS [] ~ { CloseIndex: RefTab.EachPairAction = { index: IndexInfo _ NARROW[val]; IF index.btree # NIL AND BTree.GetState[index.btree].state # closed THEN { BTreeSimple.SetState[index.btree, closed]; FS.Close[index.openfile]; }; RETURN[quit: FALSE]; }; [] _ RefTab.Pairs[indices, CloseIndex]; }; IsUniqueKey: PROC [key: ROPE] RETURNS [BOOLEAN] ~ INLINE { RETURN[Rope.Find[key, "\000"] # -1]; }; 
-- Secondary-index keys: the attribute value plus a \000-delimited [log, firstByte]
-- suffix, so equal values sort adjacently yet remain distinct btree keys.
ValueToUniqueKey: PROC [value: AttributeValue, data: IndexEntry] RETURNS [ROPE] ~ INLINE {
  RETURN[Rope.Cat[value, "\000", Convert.RopeFromInt[data.log], " ", Convert.RopeFromInt[data.firstByte]]];
  };

-- Smallest possible unique key for the value (lower bound of its key range).
ValueToSmallKey: PROC [value: AttributeValue] RETURNS [ROPE] ~ INLINE {
  RETURN[Rope.Concat[value, "\000\000"]];
  };

-- Largest possible unique key for the value (upper bound of its key range).
ValueToLargeKey: PROC [value: AttributeValue] RETURNS [ROPE] ~ INLINE {
  RETURN[Rope.Concat[value, "\000\377"]];
  };

-- Strips the \000-delimited suffix, recovering the attribute value.
UniqueKeyToValue: PROC [key: ROPE] RETURNS [AttributeValue] ~ INLINE {
  RETURN[Rope.Substr[key, 0, Rope.Index[key, 0, "\000"]]];
  };

-- Checks each writable log for a clean trailing update marker, repairing any log that
-- was cut off mid-update; returns whether the indices must be rebuilt as a result.
Recover: PROC [dbinfo: OpenDBInfo] RETURNS [needToRebuild: BOOLEAN] ~ {
  needToRebuild _ FALSE;
  FOR i: LogID IN [0..dbinfo.logs.size) DO
    IF (dbinfo.logs[i] # NIL) AND (dbinfo.logs[i].access # readOnly) THEN {
      IF NOT InGoodState[dbinfo.logs[i].stream] THEN
        IF RestoreLog[dbinfo.logs[i].stream] THEN needToRebuild _ TRUE;
      };
    ENDLOOP;
  RETURN[needToRebuild];
  };

-- A log is good when it ends with newline, EndOfEntry, UpdateComplete.
InGoodState: PROC [logStream: STREAM] RETURNS [BOOLEAN] ~ {
  length: INT;
  thirdToLastByte, secondToLastByte, lastByte: CHAR;
  length _ IO.GetLength[logStream];
  IF length < 3 THEN { -- log just created (or too short to be valid)
    IO.SetLength[logStream, 0];
    MarkUpdateComplete[logStream];
    RETURN[TRUE];
    };
  IO.SetIndex[logStream, length-3];
  thirdToLastByte _ IO.GetChar[logStream];
  secondToLastByte _ IO.GetChar[logStream];
  lastByte _ IO.GetChar[logStream];
  RETURN[(thirdToLastByte = '\n) AND (secondToLastByte = EndOfEntry) AND (lastByte = UpdateComplete)];
  };

-- Repairs a log whose last update did not complete: truncates any partial trailing
-- entry (scanning backwards for the last "\n" + EndOfEntry pair), undoes a dangling
-- $REPLACED record, rewrites the update marker, and reports whether a completed but
-- unindexed entry means the indices must be rebuilt.
RestoreLog: PROC [logStream: STREAM] RETURNS [needToRebuild: BOOLEAN] ~ {
  logAddress: INT;
  prevByte, byte: CHAR;
  lastEntry: Entry;
  logAddress _ IO.GetLength[logStream]-2;
  IO.SetIndex[logStream, logAddress];
  prevByte _ IO.GetChar[logStream];
  byte _ IO.GetChar[logStream];
  IF (byte = EndOfEntry) AND (prevByte = '\n) THEN { -- last entry complete, just unindexed
    needToRebuild _ TRUE;
    }
  ELSE { -- partial entry: scan back to the previous complete entry and truncate after it
    UNTIL ((byte = EndOfEntry) AND (prevByte = '\n)) OR (logAddress = 0) DO
      byte _ prevByte;
      logAddress _ logAddress - 1;
      IO.SetIndex[logStream, logAddress];
      prevByte _ IO.GetChar[logStream];
      ENDLOOP;
    IO.SetLength[logStream, IF logAddress = 0 THEN 0 ELSE logAddress + 2];
    needToRebuild _ FALSE;
    };
  IF logAddress # 0 THEN { -- locate the start of the (now) last entry
    byte _ prevByte;
    logAddress _ logAddress - 1;
    IO.SetIndex[logStream, logAddress];
    prevByte _ IO.GetChar[logStream];
    UNTIL ((byte = EndOfEntry) AND (prevByte = '\n)) OR (logAddress = 0) DO
      byte _ prevByte;
      logAddress _ logAddress - 1;
      IO.SetIndex[logStream, logAddress];
      prevByte _ IO.GetChar[logStream];
      ENDLOOP;
    IF logAddress # 0 THEN logAddress _ logAddress + 2;
    lastEntry _ ReadLogEntry[logStream, logAddress];
    IF lastEntry.first.type = $REPLACED THEN { -- replacement never finished: drop its record
      IO.SetLength[logStream, logAddress];
      needToRebuild _ FALSE;
      };
    };
  MarkUpdateComplete[logStream];
  RETURN[needToRebuild];
  };

-- TRUE when any log file is newer than the primary index file.
IndicesOutOfDate: PROC [dbinfo: OpenDBInfo] RETURNS [BOOLEAN] ~ {
  fileDate: BasicTime.GMT;
  indexDate: BasicTime.GMT _ FS.FileInfo[name: dbinfo.primaryIndex.filename].created;
  FOR i: LogID IN [0..dbinfo.logs.size) DO
    IF dbinfo.logs[i] # NIL THEN {
      fileDate _ FS.FileInfo[name: dbinfo.logs[i].filename].created;
      IF BasicTime.Period[from: indexDate, to: fileDate] > 0 THEN RETURN[TRUE];
      };
    ENDLOOP;
  RETURN[FALSE];
  };

-- Table of OpenDBInfo records shared by all instances of this module (see init code).
OpenDBTable: SymTab.Ref;
OpenDBTableSize: SymTab.SeqIndex = 2039;

-- Returns the shared OpenDBInfo for the named database, creating and registering a
-- fresh (unopened) record if none exists yet.
GetSharedDB: PROC [dbName: ROPE] RETURNS [dbinfo: OpenDBInfo] ~ {
  fname: ROPE _ FS.FileInfo[dbName ! FS.Error => ERROR Error[$CantOpenSchema, error.explanation]].fullFName;
  dbinfo _ NARROW[SymTab.Fetch[OpenDBTable, fname].val];
  IF dbinfo = NIL THEN {
    dbinfo _ NEW[OpenDBRecord];
    dbinfo.dbName _ fname;
    dbinfo.isOpen _ FALSE;
    dbinfo.remoteAccess _ TRUE;
    dbinfo.statusMsg _ "Open for service.";
    dbinfo.indices _ RefTab.Create[];
    dbinfo.primaryIndex _ NIL;
    dbinfo.logs _ NEW[LogSetRecord[LAST[LogID]]];
    IF NOT SymTab.Insert[OpenDBTable, fname, dbinfo] THEN -- lost race
      dbinfo _ NARROW[SymTab.Fetch[OpenDBTable, fname].val];
    };
  };

-- Module initialization: find (or create and register) the shared open-database table
-- on $LoganBerry's property list so re-instantiations share the same table.
OpenDBTable _ NARROW[Atom.GetProp[$LoganBerry, $OpenDBTable]];
IF OpenDBTable = NIL THEN {
  OpenDBTable _ SymTab.Create[mod: OpenDBTableSize, case: FALSE];
  Atom.PutProp[$LoganBerry, $OpenDBTable, OpenDBTable];
  };

END.

6bLoganBerryImpl.mesa
Copyright c 1985 by Xerox Corporation. All rights reserved.
Doug Terry, October 17, 1986 3:45:59 pm PDT
Swinehart, November 8, 1986 3:26:03 pm PST
LoganBerry is a simple facility for managing databases. Data is stored in one or more log files and indexed using stable btrees. This "poor man's" database facility is intended to allow various types of data to be stored persistently. A database survives processor crashes, but the data management facility does not provide atomic transactions. Only very simple queries are supported: retrieval by key or key subrange. Databases may be shared by multiple clients and accessed remotely via RPC.
Exported RPC operations
These routines export to the LoganBerry interface. All of the operations (except EnumerateEntries) can be invoked via RPC. They take a RPC.Conversation as the first argument so that secure remote procedure calls can be made to a LoganBerry database server.
Initiates database activity and checks for consistency. This can be called any number of times to get a new OpenDB handle or reopen a database that has been closed. Indices are automatically rebuilt if any are missing or if a partially-completed update left them out-of-date.
Returns an entry that contains the given attribute value for the given key. If the key refers to the primary index then the unique entry is returned and `others' is FALSE. If the key is secondary and several entries exist with the same value for the key, then an arbitrary entry is returned and `others' is set to TRUE; use EnumerateEntries to get all of the matching entries. Calls `proc' for each entry in the specified range of key values. The enumeration is halted when either the range of entries is exhausted or `proc' returns FALSE. A NIL value for `start' represents the least attribute value, while a NIL value for `end' represents the largest attribute value. Thus, the complete database can be enumerated by specifying start=NIL and end=NIL with `key' equal to the primary key. Similar to EnumerateEntries, but creates a cursor so that entries in the specified range of key values can be retrieved using NextEntry (defined below). Initially, the cursor points to the start of the sequence. A NIL value for `start' represents the least attribute value, while a NIL value for `end' represents the largest attribute value. Thus, the complete database can be enumerated by specifying start=NIL and end=NIL with `key' equal to the primary key. Retrieves the next entry relative to the given cursor. The cursor, and thus the sequence of desired entries, must have been previously created by a call to GenerateEntries. The cursor is automatically updated so that NextEntry may be repeatedly called to enumerate entries. NIL is returned if the cursor is at the end of the sequence and dir=increasing or at the start of the sequence and dir=decreasing. Releases the cursor; no further operations may be performed using the given cursor. This must be called once for every call to GenerateEntries. Adds a new entry to the database. The entry is added to the activity log unless another log is explicitly requested. The entry must have an attribute for the primary key. 
The primary attribute value must be unique throughout the database unless replace=TRUE; in this case, an existing entry with the same primary attribute value is atomically replaced with the new entry. Deletes the entry that contains the given attribute value for the given key. If the key is for a secondary index and the value is not unique then an Error[$ValueNotUnique] is raised. Deletes are actually performed by logging the delete request (so that the log can be replayed at a later time) and then updating all of the indices. Terminates database activity and closes all log and index files. Databases are always maintained consistently on disk so it doesn't hurt to leave a database open. The main reason for calling Close is to release the FS locks on the log files so they can be manually edited. Rebuilds the indices by scanning the logs and performing WriteEntry or DeleteEntry operations. Removes deleted entries from the logs by enumerating the primary index and writing new logs. Returns schema information about the database. Unseals the database handle, ensures that it's valid, and checks if remote access is allowed. Internal monitored operations These routines export to the internal LB interface. The operations are monitored so that several processes can share a database. All of the operations (except OpenI) take a monitored OpenDBInfo handle as their first parameter. Initiates database activity and checks for consistency. This can be called any number of times to get a new OpenDB handle or reopen a database that has been closed. Indices are automatically rebuilt if any are missing or if a partially-completed update left them out-of-date. WARNING: SINCE THIS ROUTINE IS NOT MONITORED, RACE CONDITIONS MUST BE CAREFULLY AVOIDED. Want at most one client to attempt to open a database. Returns an entry that contains the given attribute value for the given key. If the key refers to the primary index then the unique entry is returned and `others' is FALSE. 
If the key is secondary and several entries exist with the same value for the key, then an arbitrary entry is returned and `others' is set to TRUE; use EnumerateEntries to get all of the matching entries. Find appropriate index [entry: LoganBerry.Entry] RETURNS [continue: BOOL] set breakpoint here to look at entry Calls `proc' for each entry in the specified range of key values. The enumeration is halted when either the range of entries is exhausted or `proc' returns FALSE. A NIL value for `start' represents the least attribute value, while a NIL value for `end' represents the largest attribute value. Thus, the complete database can be enumerated by specifying start=NIL and end=NIL with `key' equal to the primary key. Similar to EnumerateEntries, but creates a cursor so that entries in the specified range of key values can be retrieved using NextEntry (defined below). Initially, the cursor points to the start of the sequence. A NIL value for `start' represents the least attribute value, while a NIL value for `end' represents the largest attribute value. Thus, the complete database can be enumerated by specifying start=NIL and end=NIL with `key' equal to the primary key. back up one so we're in a position to start (assuming dir=increasing) Retrieves the next entry relative to the given cursor. The cursor, and thus the sequence of desired entries, must have been previously created by a call to GenerateEntries. The cursor is automatically updated so that NextEntry may be repeatedly called to enumerate entries. NIL is returned if the cursor is at the end of the sequence and dir=increasing or at the start of the sequence and dir=decreasing. Note: The end conditions may not be what is desired, since changing directions at either end causes the first element at that end to be skipped. Releases the cursor; no further operations may be performed using the given cursor. This must be called once for every call to GenerateEntries. Adds a new entry to the database. 
The entry is added to the activity log unless another log is explicitly requested. The entry must have an attribute for the primary key and the primary attribute value must be unique throughout the database. For replacements, the entry being replaced and its replacement must reside in the same log. A replace operation is logged in a similar manner to delete operations. The entry written after the replace log entry is the replacement. Both log entries are written before any of the indices are updated. In the event of a crash, both entries must be recovered atomically. Deletes the entry that contains the given attribute value for the given key. If the key is for a secondary index and the value is not unique then an Error[$ValueNotUnique] is raised. Deletes are actually performed by logging the delete request (so that the log can be replayed at a later time) and then updating all of the indices. Find appropriate index Locate log entry Log delete operation Update indices Terminates database activity and closes all log and index files. Databases are always maintained consistently on disk so it doesn't hurt to leave a database open. The main reason for calling Close is to release the FS locks on the log files so they can be manually edited. Maintenance operations Rebuilds the indices by scanning the logs and performing WriteEntry or DeleteEntry operations. This routine grabs the monitor lock then calls on the internal procedure to do the real work. Removes deleted entries from the logs by enumerating the primary index and writing new logs. Controls access by remote clients to the database. If accessible=FALSE then the database can not be accessed through the normal LoganBerry interface, only via this interface; setting accessible=TRUE makes the database commonly available (the normal case). This operation is intended for use by administrators that wish to temporarily remove a database from service. 
Database schema A schema is created by clients of this package and stored in a DF file that can be used to backup the database. Lines of the file starting with "-->" are deemed to contain schema information for the file named in the subsequent line of the DF file or at the end of the same line. The two types of schema entries are as follows: --> log --> index Takes an input stream and parses one schema entry from it. The item returned is of type LogInfo or IndexInfo; needFilename=TRUE indicates that no file name existed in the input stream (it must be on the next line of the schema file). Read the database schema from dbinfo.dbName. [item: REF ANY] RETURNS [stop: BOOL _ FALSE] Returns schema information about the database. This routine is not monitored since it can safely be called while other operations are in progress. [key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOLEAN] Entries Destructively reverses the order of the entry's attributes. Taken from LispImpl.DReverse. Log management Some day these routines may be placed in a separate module. [char: CHAR] RETURNS [IO.CharClass] read a list of attributes until endofentry the list is constructed backwards, so it is reversed before being returned write a list of attributes Index management Some day these routines may be placed in a separate module. [key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOL] Note: the AttributeValue's passed in and out may be unique keys or not. [key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOL] the following two statements may not really be needed [key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOL] Secondary indices: need not be unique so special btree keys must be generated. Unique keys are obtained by appending a logID/logAddress pair to an AttributeValue. Crash recovery Checks that the log is in a consistent state. The last byte of the log should be the UpdateComplete marker. PROBLEM: the byte count returned by GetLength may be wrong after a crash? 
Restores the log to a consistent state. There are two cases to consider: 1) the system was interrupted while writing an entry on the log, 2) a complete entry exists on the log but the btrees have only been partially updated. entry complete so roll forward, but don't know state of indices so must rebuild them entry only partially written so roll backward: scan log from end looking for EndOfEntry, then reset log length A replaced entry should never be the last entry in a log. If this is the case, then we must have crashed before writing the replacement entry. To recover, simply remove the replaced entry. Back up one more character, then continue backwards search to find last entry Log is now in good shape. Determines if logs have been edited manually, i.e. if the indices are currently out-of-date, by checking the create date of the primary index against those of the logs. This doesn't actually work since simply opening an activity log changes its date. Shared databases A given database can not actually be opened more than once because of the concurrency control provided by FS. Thus, we keep a table of databases that are already open. 2039 is a good candidate because it is prime and is large enough that collisions should be rare. Remember: this routine is not called under a monitor lock. If several clients try to create an OpenDBRecord concurrently, the first one to register with the OpenDBTable wins. Want the table of open databases to be created only once, even if this package is re-run. 
Doug Terry August 12, 1985 1:05:48 pm PDT created Doug Terry, January 23, 1986 8:28:43 pm PST changes to: DIRECTORY, Open, Close, GetInfo, dbinfo (local of GetInfo), OpenI, MonitoredOpen, EnumerateEntriesI, CloseI, CompactLogsI, SetRemoteAccessI, ReadLogEntry, WriteLogEntry, CloseLogs, GetIndexEntry, CreateIndexEntry, DeleteIndexEntry, CloseIndex (local of CloseIndices), OpenDBTable, OpenDBTableSize, GetSharedDB, OpenDBTable, IF, IMPORTS Doug Terry, January 24, 1986 9:39:30 am PST changes to: MonitoredOpen, BuildIndicesI, BuildIndicesWorker Swinehart, January 27, 1986 5:10:45 pm PST Fullname expansion does not produce a canonical name (case may differ); GetSharedDB now uses a case-insensitive symbol table. changes to: GetSharedDB Doug Terry, January 31, 1986 4:25:40 pm PST changes to: DIRECTORY, OpenIndex (local of OpenIndices), CloseIndex (local of CloseIndices), EraseIndex (local of EraseIndices), CompactLogsI Doug Terry, March 5, 1986 5:21:27 pm PST Implemented Describe so that clients can obtain schema information even if they don't have access to the schema file. 
changes to: Describe, DescribeI, AddIndexInfo (local of DescribeI) Κ'V˜codešœ™Kšœ Οmœ1™—Kšœžœ˜-Kšžœ˜—K˜—Kšžœ˜—K˜—K˜š‘ œžœž œžœ˜CKš žœžœžœžœ žœ-˜WKšœ\™\š£ œžœ:žœ žœ˜hKšœ˜K˜ šžœ˜ Nšœ žœ˜,N˜—šžœ/žœ˜7KšœO˜OKšœ:˜:Kšœ3˜3K˜—Kšžœ žœ˜K˜—K˜Kšœžœ!˜6šžœ žœž˜(šžœžœžœ"žœ˜CKšœ žœ˜ šœK˜KKšžœ žœ)˜:—K˜—Kšžœ˜—šœHžœ˜^Kš œžœžœžœ žœ3˜t—šžœ žœž˜(šžœžœžœ˜Kšžœ˜Kšžœ˜ šœžœ7˜OKšžœ žœ)˜:—K˜—Kšžœ˜—Kšœ˜K˜—K˜š‘œžœžœžœ"žœžœžœžœžœ˜tKšžœžœžœ˜KšœB’œ|’œ¨™οKšœ!˜!Kšœ˜K˜——šœ™šœΙ™ΙKšœ@™@Kšœ?™?—K™Kšœ žœžœ ˜.L˜šΠbnœžœžœ žœžœžœžœžœžœžœ˜zKšœκ™κšžœ˜Kšžœ žœ6˜GKšžœžœ2˜I—Kšœžœžœ˜Kšœ ˜ Kšœ˜Kšœžœ˜Kšœžœ žœ˜0Kš žœžœ#žœžœžœžœ˜AKšœžœ žœ˜0šžœžœž˜šœžœ˜'Kšœžœ˜Kšœ žœ ˜Kšœžœ žœ˜0Kš œ žœ"žœžœ žœ ˜UKš œžœžœ žœ žœžœ ˜gšžœžœž˜Kšœ;˜;—K˜ K˜—šœžœ˜)Kšœžœ˜Kšœžœ˜1Kšœžœ žœ˜0Kš œ žœ!žœžœžœ ˜SKš œžœžœ žœ žœžœ ˜išžœžœž˜Kšœ?˜?—K˜ K˜—Kšžœžœ'˜7—K˜—K˜š‘ œžœžœ˜4Kšœ.™.K–0 -- [item: REF ANY] RETURNS [stop: BOOL _ FALSE]šžœžœ˜Cš‘ œžœžœžœ žœžœžœ˜Dšžœžœžœ˜šœ˜šžœ žœž˜K˜—Kšœ˜K˜—šœ˜šžœ žœž˜Kšœ˜—Kšœ5˜5šžœž˜šžœžœž˜!Kšœ˜—šž˜Kšžœ:˜?——K˜—Kšžœžœ˜'—K˜—š£œ!˜/Kš€,™,šžœžœžœ˜šœ žœ˜)šžœž˜KšžœD˜I—KšœU˜Uš žœžœžœžœž˜1Kšœ˜—K˜—šœžœ˜#šžœžœ˜Kšœe˜eKšœžœ˜K˜—K˜—šžœ˜ šžœž˜KšžœD˜I—Kšœ˜——K˜—Kšœžœžœ˜Kšœžœžœ˜Kšœžœžœ'˜7KšœžœM˜Wšœžœ'˜;Kšžœ žœ,˜=—KšœWžœ˜^K˜K˜K˜—š‘ œžœžœ˜CK™“–> -- [key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOLEAN]šœ'˜'Kš€:™:Kšœžœ˜šžœžœ˜%Kšœ žœ˜'Kšœžœ"˜8K˜—Kšžœžœ˜K˜—Kšœ˜Kšœ žœ˜Kšœžœ˜šžœ žœž˜(šžœžœžœ˜Kšœ žœ˜Kšœžœ)˜=K˜—Kšžœ˜—Kšœ žœ˜Kšœžœ˜Kšœ0˜0Kšœ žœ%˜5Kšœžœ0˜FK˜——™š‘œžœ%žœ˜Xšžœžœžœž˜-šžœž˜Kšžœ˜—Kšžœ˜—Kšžœžœ˜ K˜—K˜š‘ œžœžœ ˜5KšœZ™Zšœžœ˜Kš žœ žœžœžœžœ˜ Kšœ ˜ šžœ žœž˜Kšœ ˜ Kšœ ˜ Kšœ˜Kšžœ˜—Kšžœ˜ Kšœ˜———™KšŸ;™;K˜š‘œžœžœ˜,šžœ žœž˜!š žœ žœžœžœžœ˜4šœ8žœžœ žœ˜oKšžœ žœ)˜:—K˜—Kšžœ˜—K˜—K˜š‘ œž œ žœžœ ˜SKšžœžœžœ žœ˜A–' -- [char: CHAR] RETURNS [IO.CharClass]š£œžœ˜$Kš€#™#šžœžœž˜Kšœ ˜ Kšžœ ˜—K˜—š‘ œžœžœžœ˜:Kšœžœ,˜EKšœžœŸ˜1K–0[stream: STREAM, flushComments: BOOL _ TRUE]šœžœ*žœ˜8šžœžœžœ˜Kšœ žœ˜Kšœžœ˜Kšœ˜—šž˜Kšœ žœ˜—K˜—Kšœ ˜ Kšœ˜Kšžœ žœžœžœ˜/šžœ ž˜Kšžœžœ žœ˜B—Kš žœžœ&žœžœžœŸ˜MKšœ*™*šžœžœ"ž˜,Kšœ%˜%Kšœžœ˜Kšžœ˜—K™JKšžœ˜K˜—K˜š ‘ œž œ žœžœžœžœ˜t–; -- [key: SymTab.Key, val: 
SymTab.Val] RETURNS [quit: BOOL]š£œžœžœžœ˜=Kšœžœ ˜šžœžœŸ#˜IK˜&—Kšžœžœžœ ˜FKšœ˜—Kšœ˜Kšžœ žœžœžœ˜/Kšœ žœ˜%šžœžœžœž˜+KšœŸ&˜D—Kšžœ!˜#Kšœ™šžœžœžœž˜-Kšœ#˜#Kšžœ˜—Kšžœ ˜"Lšžœ˜Lšžœ ˜K˜—K˜š ‘œžœžœ žœžœ˜BKšœžœ˜1Kšžœ!˜#Kšžœ$˜&Lšžœ˜K˜—K˜š£ œžœžœ˜-šžœ žœž˜!š žœ žœžœžœžœ˜4Kšžœ˜Kšœžœ˜K˜—Kšžœ˜—K˜——™KšŸ;™;K˜š‘ œžœžœžœ˜Jš£ œ˜$Kš€7™7Kšœžœžœ˜Kšœžœ˜šžœžœžœ˜Kšœ ˜ K˜—šžœ*žœ˜2šœB˜Bšœžœ Ÿ*˜;šžœžœžœ˜;Nšœžœ˜ Nšœžœ˜Nšœ8˜8Nšžœ˜ Nšœ˜—šž˜Nšžœ*˜/—Nšœ˜—Kšœ˜—šœL˜LKšœžœ:žœ˜b—K˜—Kšžœžœ˜K˜—Kšœžœ˜Kšœ&˜&Kšžœ˜K˜—K˜š ‘ œžœ7žœžœIžœžœ/˜έKšœG™GKšœ˜Kšœ˜Kšœ-˜-šžœžœ žœžœ˜2šžœ ž˜šœ˜Lšœ ˜ —šœ ˜ Lšœ ˜ —šœ Ÿ˜)Lšœ ˜ Lšœ˜L˜—šœ˜Lšœ ˜ —šœ ˜ Lšœ ˜ —Lšž˜——šœ’žœ˜—Lš œžœžœžœ žœ ˜N—šžœžœ žœžœžœžœ-žœžœŸ"˜€Lšœžœ˜ —šžœžœžœžœ˜"Nšœžœ˜+N˜—Nšžœ˜K˜—K˜š‘œžœ=žœ˜aKšœ˜Kšœ žœžœ˜<šžœ˜ Nšžœ#˜+N˜—šœ1žœžœžœ3˜‹Lš œžœžœžœ žœ ˜N—K˜—K˜š‘œžœ=žœ˜ašœ3žœžœžœ ˜zKš œžœžœžœ žœ ˜N—K˜—K˜š‘œžœ5žœ˜ZKšœ˜šžœžœžœž˜-Lšœžœ*˜8šžœ žœžœŸ%˜˜>K–[from: GMT, to: GMT]šžœ5žœžœžœ˜IK˜—Kšžœ˜—Kšžœžœ˜K˜——™Kšœ¨™¨K˜K˜K˜šœ(˜(Jšœ`™`—J˜š‘ œžœ žœžœ˜AKšœ―™―Kš œžœžœžœ žœ6˜jKšœ žœ'˜6šžœ žœžœ˜Kšœ žœ˜Kšœ˜Kšœžœ˜Kšœžœ˜Kšœ'˜'Kšœ!˜!Kšœžœ˜Kšœžœžœ ˜-šžœžœ+žœŸ ˜CKšœ žœ'˜6—K˜—K˜K˜—J™JšœY™YJšœžœ*˜>šžœžœžœ˜Jšœ8žœ˜?Jšœ5˜5J˜——K˜Kšžœ˜™)K™—™+Kšœ Οr'œ¦Έœ¦E™Ϋ—™+Kšœ ¦0™<—™*K™}Kšœ ¦ ™—™+Kšœ ¦œ¦ œ¦ œ¦™—™(Kšœu™uKšœ ¦!œ™B——…—vvΤ.