<<LoganBerryImpl.mesa>>
<<NOTE(review): the original file banner (copyright, author, dates) was lost in extraction -- TODO recover from the archived Tioga source.>>
<<LoganBerry: a simple log-structured database. Entries (attribute lists) are appended to one or more append-only log files; B-tree indices map attribute values to log addresses. The monitor lock is per-database (LOCKS dbinfo).>>

DIRECTORY
	Atom USING [GetPName, MakeAtom, GetProp, PutProp],
	BasicTime USING [GMT, Period],
	BTree USING [Error, GetState],
	BTreeSimple USING [DeleteKey, EnumerateRecords, --GetState,-- InternalKey, New, NewPathStk, Open, PathStk, ReadRecord, --ReadValue,-- Relation, SetState, Tree, UpdateRecord, Value, ValueObject],
	Convert USING [IntFromRope, RopeFromInt],
	DFUtilities USING [--DirectoryItem,-- FileItem, CommentItem, ParseFromStream, ProcessItemProc, RemoveVersionNumber, SyntaxError],
	FS USING [Close, Error, ExpandName, FileInfo, OpenFile],
	GeneralFS USING [Open, Create, StreamOpen],
	IO USING [BreakProc, Close, EndOf, EndOfStream, Error, Flush, GetChar, --GetID,-- GetIndex, GetInt, GetLength, GetLineRope, GetRopeLiteral, GetToken, GetTokenRope, IDProc, PeekChar, PutChar, PutF, RIS, rope, SetIndex, SetLength, SkipWhitespace, STREAM],
	RefID USING [Seal, Unseal, Release],
	RefTab USING [Create, EachPairAction, Fetch, Insert, Pairs, Ref],
	RefText USING [Equal, line, ObtainScratch, ReleaseScratch],
	Rope USING [Cat, Compare, Concat, Equal, Find, Index, ROPE, Substr],
	SymTab USING [Create, Fetch, Insert, Ref, SeqIndex],
	LB, LoganBerryBackdoor, LoganBerry, LoganBerryStructure;

LoganBerryImpl: CEDAR MONITOR LOCKS dbinfo USING dbinfo: OpenDBInfo
	IMPORTS Atom, BasicTime, BTree, BTreeSimple, Convert, DFUtilities, FS, GeneralFS, IO, RefText, Rope, RefTab, RefID, SymTab
	EXPORTS LB, LoganBerry, LoganBerryBackdoor
	~ BEGIN OPEN LoganBerry, LoganBerryStructure;

ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;

<<Exported client operations>>
<<These sealed-handle wrappers validate the OpenDB/Cursor handles and forward to the monitored *I procedures below. The conv argument is accepted for interface compatibility and is not examined here.>>

Error: PUBLIC ERROR [ec: ErrorCode, explanation: ROPE _ NIL] = CODE;

Open: PUBLIC PROC [conv: Conv _ NIL, dbName: ROPE] RETURNS [db: OpenDB] ~ {
	<<Opens the named database and returns a sealed handle for it.>>
	dbinfo: OpenDBInfo _ OpenI[dbName];
	IF NOT dbinfo.remoteAccess THEN ERROR Error[$DBNotAvailable, dbinfo.statusMsg];
	db _ RefID.Seal[dbinfo];
	};

ReadEntry: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, key: AttributeType, value: AttributeValue] RETURNS [entry: Entry, others: BOOLEAN] ~ {
	<<Returns an entry whose "key" attribute matches "value"; others=TRUE if more matches exist in a secondary index.>>
	[entry, others] _ ReadEntryI[GetInfo[db], key, value];
	};

EnumerateEntries: PUBLIC PROC [db: OpenDB, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL, proc: EntryProc] RETURNS [] ~ {
	<<Calls proc for each entry in the index for "key" within [start..end]. NOTE(review): unlike its siblings this one takes no conv parameter -- presumably intentional in the interface; confirm.>>
	EnumerateEntriesI[GetInfo[db], key, start, end, proc];
	};

GenerateEntries: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL] RETURNS [cursor: Cursor] ~ {
	<<Creates a cursor for stepping through the index subrange; returns a sealed cursor handle.>>
	newCursor: CursorInfo _ GenerateEntriesI[GetInfo[db], key, start, end];
	cursor _ RefID.Seal[newCursor];
	};

NextEntry: PUBLIC PROC [conv: Conv _ NIL, cursor: Cursor, dir: CursorDirection _ increasing] RETURNS [entry: Entry] ~ {
	<<Unseals the cursor handle and returns the next entry in the given direction, or NIL at the end of the range.>>
	c: CursorInfo;
	WITH RefID.Unseal[cursor] SELECT FROM
		ci: CursorInfo => c _ ci;
		ENDCASE => c _ NIL;
	IF c = NIL THEN ERROR Error[$BadCursor, "Invalid cursor passed to NextEntry."];
	entry _ NextEntryI[c.dbinfo, c, dir];
	};

EndGenerate: PUBLIC PROC [conv: Conv _ NIL, cursor: Cursor] RETURNS [] ~ {
	<<Releases the sealed cursor handle.>>
	[] _ RefID.Release[cursor];
	};

WriteEntry: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, entry: Entry, log: LogID _ activityLog, replace: BOOLEAN _ FALSE] RETURNS [] ~ {
	<<Appends the entry to the given log and updates all indices; replace=TRUE permits overwriting an entry with the same primary key.>>
	WriteEntryI[GetInfo[db], entry, log, replace];
	};

DeleteEntry: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB, key: AttributeType, value: AttributeValue] RETURNS [] ~ {
	<<Deletes the unique entry whose "key" attribute matches "value".>>
	DeleteEntryI[GetInfo[db], key, value];
	};

Close: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [] ~ {
	<<Closes the database; an already-closed database is not an error (the $DBClosed raise from GetInfo is caught and CONTINUEd).>>
	CloseI[GetInfo[db ! Error => IF ec=$DBClosed THEN CONTINUE]];
	};

BuildIndices: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [] ~ {
	<<Rebuilds all indices from the logs.>>
	BuildIndicesI[GetInfo[db]];
	};

CompactLogs: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [] ~ {
	<<Rewrites the logs, dropping deleted/replaced entries, then rebuilds the indices.>>
	CompactLogsI[GetInfo[db]];
	};

Describe: PUBLIC PROC [conv: Conv _ NIL, db: OpenDB] RETURNS [info: SchemaInfo] ~ {
	<<Returns the database's schema: log ids/names and index keys/names.>>
	info _ DescribeI[GetInfo[db]];
	};

GetInfo: PROC [db: OpenDB] RETURNS [OpenDBInfo] ~ INLINE {
	<<Unseals and validates an OpenDB handle; raises Error if the handle is bad, access is disabled, or the database is closed.>>
	ref: REF = RefID.Unseal[db];
	IF ref = NIL THEN ERROR Error[$BadDBHandle, "NIL OpenDB handle."];
	WITH ref SELECT FROM
		dbinfo: OpenDBInfo => {
			IF NOT dbinfo.remoteAccess THEN ERROR Error[$DBNotAvailable, dbinfo.statusMsg];
			IF NOT dbinfo.isOpen THEN ERROR Error[$DBClosed, dbinfo.statusMsg];
			RETURN[dbinfo];
			};
		ENDCASE => ERROR Error[$BadDBHandle, "Invalid OpenDB handle."];
	};

<<Monitored internal operations (LoganBerryBackdoor)>>
<<Each *I procedure below is an ENTRY on the per-database monitor except OpenI, which must not hold the lock while GetSharedDB runs.>>

OpenI: PUBLIC PROC [dbName: ROPE] RETURNS [dbinfo: OpenDBInfo] ~ {
	<<Locates (or creates) the shared per-file OpenDBInfo and opens it if needed.>>
	<<Deliberately not an ENTRY: the shared-table lookup is global, the open itself is monitored in MonitoredOpen.>>
	dbinfo _ GetSharedDB[dbName];
	IF NOT dbinfo.isOpen THEN MonitoredOpen[dbinfo];
	RETURN[dbinfo];
	};

MonitoredOpen: ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Reads the schema (first open only), opens logs and indices, runs crash recovery, and rebuilds indices if recovery or index creation says so.>>
	needToRebuild: BOOLEAN _ FALSE;
	IF dbinfo.isOpen THEN RETURN; -- lost race
	IF dbinfo.primaryIndex = NIL THEN ReadSchema[dbinfo];
	OpenLogs[dbinfo.logs];
	IF Recover[dbinfo] THEN needToRebuild _ TRUE;
	IF OpenIndices[dbinfo.indices] THEN needToRebuild _ TRUE;
	IF needToRebuild THEN BuildIndicesWorker[dbinfo];
	dbinfo.isOpen _ TRUE;
	};

ReadEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, value: AttributeValue] RETURNS [entry: Entry, others: BOOLEAN] ~ {
	ENABLE UNWIND => NULL;
	<<Looks up "value" in the index for "key" and reads the entry from the log. For a secondary index, "others" reports whether the next index key maps to the same attribute value.>>
	indexEntry: IndexEntry;
	index: IndexInfo;
	aValue: AttributeValue;
	pathSkt: BTreeSimple.PathStk _ BTreeSimple.NewPathStk[];
	-- find the index for the requested attribute type
	index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val];
	IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]];
	[aValue, indexEntry] _ GetIndexEntry[index: index, value: value, pathStk: pathSkt];
	IF aValue = NIL THEN RETURN[entry: NIL, others: FALSE];
	entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte];
	IF index.type = primary
		THEN others _ FALSE
		ELSE { -- peek at the next unique key to see if it carries the same value
			aValue _ GetIndexEntry[index: index, value: aValue, valueIsKey: TRUE, relation: greater, pathStk: pathSkt].actual;
			others _ Rope.Equal[s1: value, s2: UniqueKeyToValue[aValue], case: FALSE];
			};
	RETURN[entry, others];
	};

SimpleEntryProc: EntryProc = { -- for debugging purposes
	<<[entry: LoganBerry.Entry] RETURNS [continue: BOOL]>>
	cont: BOOLEAN _ TRUE;
	-- NOTE(review): body intentionally does nothing beyond returning TRUE (a print statement was presumably removed).
	RETURN[cont];
	};

EnumerateEntriesI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL, proc: EntryProc] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Enumerates the B-tree records in [start..end] of the index for "key", reading each entry from its log and passing it to proc; stops when proc returns FALSE.>>
	EntriesInSubrange: PROC [key: BTreeSimple.InternalKey, value: BTreeSimple.Value] RETURNS [continue: BOOLEAN] = {
		indexEntry: IndexEntry;
		entry: Entry;
		IF end # NIL AND Rope.Compare[s1: key, s2: end, case: FALSE] = greater THEN RETURN[continue: FALSE];
		TRUSTED { indexEntry _ LOOPHOLE[@value[0], IndexPtr]^; };
		entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte];
		RETURN[continue: proc[entry]];
		};
	index: IndexInfo;
	index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val];
	IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]];
	IF index.type = secondary THEN { -- widen the bounds to span all unique keys for the given values
		IF start # NIL THEN start _ ValueToSmallKey[start];
		IF end # NIL THEN end _ ValueToLargeKey[end];
		};
	[] _ BTreeSimple.EnumerateRecords[tree: index.btree, key: start, Proc: EntriesInSubrange
		! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex, Rope.Cat["Problem enumerating index for ",Atom.GetPName[key],"."]]];
	};

GenerateEntriesI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, start: AttributeValue _ NIL, end: AttributeValue _ NIL] RETURNS [cinfo: CursorInfo] ~ {
	ENABLE UNWIND => NULL;
	<<Creates a cursor positioned just before "start" (or at the beginning if start=NIL) in the index for "key".>>
	newCursor: CursorInfo;
	index: IndexInfo;
	index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val];
	IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]];
	newCursor _ NEW[CursorRecord];
	newCursor.index _ index;
	newCursor.dbinfo _ dbinfo;
	newCursor.key _ key;
	newCursor.start _ start;
	newCursor.end _ end;
	IF newCursor.index.type = secondary THEN {
		IF start # NIL THEN newCursor.start _ ValueToSmallKey[start];
		IF end # NIL THEN newCursor.end _ ValueToLargeKey[end];
		};
	newCursor.pathStk _ BTreeSimple.NewPathStk[];
	-- position the cursor on the largest key strictly less than the start of the range
	newCursor.current _ IF start # NIL
		THEN GetIndexEntry[index: newCursor.index, value: newCursor.start, valueIsKey: TRUE, relation: less, pathStk: newCursor.pathStk].actual
		ELSE NIL;
	RETURN[newCursor];
	};

NextEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, cinfo: CursorInfo, dir: CursorDirection _ increasing] RETURNS [entry: Entry] ~ {
	ENABLE UNWIND => NULL;
	<<Steps the cursor one key in the given direction and returns that entry, or NIL when the cursor leaves [start..end].>>
	<<NOTE(review): reversing direction mid-enumeration re-reads the current entry once, since the step is relative to cinfo.current.>>
	actualValue: AttributeValue;
	indexEntry: IndexEntry;
	[actualValue, indexEntry] _ GetIndexEntry[index: cinfo.index, value: cinfo.current, valueIsKey: TRUE, relation: IF dir = increasing THEN greater ELSE less, pathStk: cinfo.pathStk];
	IF (actualValue = NIL) OR
		(dir = increasing AND cinfo.end # NIL AND Rope.Compare[actualValue, cinfo.end, FALSE] = greater) OR
		(dir = decreasing AND Rope.Compare[actualValue, cinfo.start, FALSE] = less)
		THEN RETURN[NIL];
	entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte];
	cinfo.current _ actualValue;
	RETURN[entry];
	};

EndGenerateI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, cinfo: CursorInfo] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Nothing to tear down on the database side.>>
	NULL; -- this routine is unnecessary since cursors get garbage collected
	};

WriteEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, entry: Entry, log: LogID _ activityLog, replace: BOOLEAN _ FALSE] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Appends "entry" to "log" and updates all indices. If the primary key already exists and replace=TRUE, a $REPLACED marker naming the old entry's log address is written first, then the old entry's index references are removed.>>
	value: AttributeValue;
	indexData: IndexEntry _ [log: log, firstByte: 0];
	doReplace: BOOLEAN _ FALSE;
	replacedIndexEntry: IndexEntry;
	replacedEntry: Entry;
	value _ GetAttributeValue[entry, dbinfo.primaryIndex.key];
	IF value = NIL THEN ERROR Error[$NoPrimaryKey, "Entry does not contain a primary key."];
	IF dbinfo.logs[log].access = readOnly THEN ERROR Error[$LogReadOnly, "Can't write to a read only log."];
	[value, replacedIndexEntry] _ GetIndexEntry[index: dbinfo.primaryIndex, value: value];
	IF value # NIL THEN
		IF replace
			THEN doReplace _ TRUE
			ELSE ERROR Error[$ValueNotUnique, "An existing entry already contains the primary key."];
	-- log the update: marker first (if replacing), then the new entry itself
	IF doReplace THEN {
		IF replacedIndexEntry.log # log THEN ERROR Error[$InvalidReplace, "Cross-log replacements are not allowed."];
		[] _ WriteLogEntry[dbinfo.logs[log].stream, LIST[[$REPLACED, Convert.RopeFromInt[replacedIndexEntry.firstByte]]]];
		};
	indexData.firstByte _ WriteLogEntry[dbinfo.logs[log].stream, entry, doReplace];
	IF doReplace THEN {
		replacedEntry _ ReadLogEntry[dbinfo.logs[log].stream, replacedIndexEntry.firstByte];
		RemoveEntryFromIndices[dbinfo.indices, replacedEntry, replacedIndexEntry];
		};
	AddEntryToIndices[dbinfo.indices, entry, indexData];
	MarkUpdateComplete[dbinfo.logs[log].stream];
	};

DeleteEntryI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, key: AttributeType, value: AttributeValue] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Deletes the entry whose "key" attribute uniquely matches "value": writes a $DELETED marker naming the old entry's log address, then removes the entry from all indices.>>
	indexEntry: IndexEntry;
	index: IndexInfo;
	pathSkt: BTreeSimple.PathStk _ BTreeSimple.NewPathStk[];
	avalue: AttributeValue;
	delete: Entry;
	entry: Entry;
	-- find the index for the requested attribute type
	index _ NARROW[RefTab.Fetch[dbinfo.indices, key].val];
	IF index = NIL THEN ERROR Error[$NoIndex, Rope.Cat["Index for ",Atom.GetPName[key]," does not exist."]];
	-- locate the entry and insist that it be unique
	[avalue, indexEntry] _ GetIndexEntry[index: index, value: value, pathStk: pathSkt];
	IF avalue = NIL THEN RETURN;
	avalue _ GetIndexEntry[index: index, value: avalue, valueIsKey: TRUE, relation: greater, pathStk: pathSkt].actual;
	IF Rope.Equal[s1: value, s2: UniqueKeyToValue[avalue], case: FALSE] THEN ERROR Error[$ValueNotUnique, "Entry to be deleted insufficiently specified."];
	IF dbinfo.logs[indexEntry.log].access = readOnly THEN ERROR Error[$LogReadOnly, "Can't delete entries in a read only log."];
	-- log the deletion marker
	delete _ LIST[[$DELETED, Convert.RopeFromInt[indexEntry.firstByte]]];
	[] _ WriteLogEntry[dbinfo.logs[indexEntry.log].stream, delete];
	-- remove the deleted entry's references from the indices
	entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte];
	RemoveEntryFromIndices[dbinfo.indices, entry, indexEntry];
	MarkUpdateComplete[dbinfo.logs[indexEntry.log].stream];
	};

CloseI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Closes all log streams and index B-trees and marks the database closed; the shared OpenDBInfo remains in the table for reopening.>>
	CloseLogs[dbinfo.logs];
	CloseIndices[dbinfo.indices];
	dbinfo.isOpen _ FALSE;
	};

<<Index rebuilding and log compaction>>

BuildIndicesI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Monitored entry point; the work is in BuildIndicesWorker so MonitoredOpen (already inside the monitor) can call it too.>>
	BuildIndicesWorker[dbinfo];
	};

BuildIndicesWorker: PROC [dbinfo: OpenDBInfo] RETURNS [] ~ {
	<<Erases all indices and replays every log from the start, adding live entries and removing the targets of $DELETED/$REPLACED markers.>>
	indexData: IndexEntry;
	logstream: STREAM;
	entry: Entry;
	indexEntry: IndexEntry;
	saveStreamPosition: INT;
	EraseIndices[dbinfo.indices];
	FOR log: LogID IN [0..dbinfo.logs.size) DO
		IF dbinfo.logs[log] # NIL THEN {
			indexData _ [log, 0];
			logstream _ dbinfo.logs[log].stream;
			-- ReadLogEntry with byte=-1 continues from the current stream position
			FOR entry _ ReadLogEntry[logstream, 0], ReadLogEntry[logstream, -1] UNTIL entry = NIL DO
				IF (entry.first.type # $DELETED) AND (entry.first.type # $REPLACED)
					THEN { -- WriteEntry
						AddEntryToIndices[dbinfo.indices, entry, indexData];
						}
					ELSE { -- DeleteEntry
						saveStreamPosition _ IO.GetIndex[logstream]; -- before the following read changes the index
						indexEntry _ [log: log, firstByte: Convert.IntFromRope[entry.first.value]];
						entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte];
						RemoveEntryFromIndices[dbinfo.indices, entry, indexEntry];
						IO.SetIndex[logstream, saveStreamPosition];
						};
				IF IO.EndOf[logstream] OR IO.GetChar[logstream] # EndOfEntry THEN ERROR Error[$BadLogEntry, "Missing terminator on log entry."];
				indexData.firstByte _ IO.GetIndex[logstream];
				ENDLOOP;
			};
		ENDLOOP;
	};

CompactLogsI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo] RETURNS [] ~ {
	ENABLE {
		UNWIND => NULL;
		FS.Error => ERROR Error[$InternalError, error.explanation];
		};
	<<Copies every live entry (as found via the primary index) into fresh log files, then reopens the logs for append and rebuilds all indices.>>
	KeepEntry: PROC [key: BTreeSimple.InternalKey, value: BTreeSimple.Value] RETURNS [continue: BOOLEAN] = {
		indexEntry: IndexEntry;
		entry: Entry;
		TRUSTED { indexEntry _ LOOPHOLE[@value[0], IndexPtr]^; };
		IF dbinfo.logs[indexEntry.log].access # readOnly THEN {
			entry _ ReadLogEntry[dbinfo.logs[indexEntry.log].stream, indexEntry.firstByte];
			[] _ WriteLogEntry[newlogs[indexEntry.log].stream, entry];
			MarkUpdateComplete[newlogs[indexEntry.log].stream];
			};
		RETURN[continue: TRUE];
		};
	newlogs: LogSet _ NEW[LogSetRecord[dbinfo.logs.size]];
	FOR i: LogID IN [0..dbinfo.logs.size) DO
		IF dbinfo.logs[i] # NIL AND dbinfo.logs[i].access # readOnly THEN {
			newlogs[i] _ NEW[LogInfoRecord];
			newlogs[i].stream _ GeneralFS.StreamOpen[dbinfo.logs[i].filename, $create
				! FS.Error => ERROR Error[$CantOpenLog, error.explanation]];
			};
		ENDLOOP;
	[] _ BTreeSimple.EnumerateRecords[tree: dbinfo.primaryIndex.btree, key: NIL, Proc: KeepEntry
		! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex, "Problem enumerating primary index."]];
	FOR i: LogID IN [0..dbinfo.logs.size) DO
		IF newlogs[i] # NIL THEN {
			IO.Close[newlogs[i].stream];
			IO.Close[dbinfo.logs[i].stream];
			dbinfo.logs[i].stream _ GeneralFS.StreamOpen[dbinfo.logs[i].filename, $append
				! FS.Error => ERROR Error[$CantOpenLog, error.explanation]];
			};
		ENDLOOP;
	BuildIndicesWorker[dbinfo];
	};

SetRemoteAccessI: PUBLIC ENTRY PROC [dbinfo: OpenDBInfo, accessible: BOOLEAN _ TRUE, why: ROPE _ NIL] RETURNS [] ~ {
	ENABLE UNWIND => NULL;
	<<Enables or disables access to this database; "why" becomes the status message reported to refused clients.>>
	dbinfo.remoteAccess _ accessible;
	dbinfo.statusMsg _ why;
	};

<<Schema management>>
<<The schema is kept as comments in a DF file. Comment lines beginning with "-->" carry schema statements for the file named either at the end of the same line or on the following DF file item. NOTE(review): exact statement syntax reconstructed from ParseSchemaLine; original comment was garbled. The two forms are presumably:>>
<<--> log <id> <ReadOnly/ReadWrite> [<filename>] >>
<<--> index <"key"> <primary/secondary> [<filename>] >>

SchemaChars: PUBLIC REF READONLY TEXT = "-->";

ParseSchemaLine: PROC [s: STREAM, buffer: REF TEXT, wdir: ROPE] RETURNS [item: REF ANY, needFilename: BOOLEAN _ FALSE] ~ {
	<<Parses one schema statement from s; returns a LogInfo or IndexInfo, or NIL if the line is not a schema statement. needFilename=TRUE means the filename was absent and must come from the next DF file item.>>
	ENABLE {
		IO.Error => ERROR Error[$BadSchema, "Error parsing schema statement."];
		IO.EndOfStream => ERROR Error[$BadSchema, "Unexpected end of stream."]
		};
	token: REF TEXT;
	log: LogInfo;
	index: IndexInfo;
	needFilename _ FALSE;
	token _ IO.GetToken[s, IO.IDProc, buffer].token;
	IF NOT RefText.Equal[token, SchemaChars] THEN RETURN[NIL, FALSE];
	token _ IO.GetToken[s, IO.IDProc, buffer].token;
	SELECT TRUE FROM
		RefText.Equal[token, "log", FALSE] => {
			log _ NEW[LogInfoRecord];
			log.id _ IO.GetInt[s];
			token _ IO.GetToken[s, IO.IDProc, buffer].token;
			log.access _ IF RefText.Equal[token, "ReadOnly", FALSE] THEN readOnly ELSE readWrite;
			log.filename _ IO.GetTokenRope[s, IO.IDProc
				! IO.EndOfStream => {needFilename _ TRUE; CONTINUE}].token;
			IF NOT needFilename THEN log.filename _ FS.ExpandName[log.filename, wdir].fullFName;
			item _ log;
			};
		RefText.Equal[token, "index", FALSE] => {
			index _ NEW[IndexInfoRecord];
			index.key _ Atom.MakeAtom[IO.GetRopeLiteral[s]];
			token _ IO.GetToken[s, IO.IDProc, buffer].token;
			index.type _ IF RefText.Equal[token, "primary", FALSE] THEN primary ELSE secondary;
			index.filename _ IO.GetTokenRope[s, IO.IDProc
				! IO.EndOfStream => {needFilename _ TRUE; CONTINUE}].token;
			IF NOT needFilename THEN index.filename _ FS.ExpandName[index.filename, wdir].fullFName;
			item _ index;
			};
		ENDCASE => ERROR Error[$BadSchema, "Invalid keyword."];
	};

ReadSchema: PROC [dbinfo: OpenDBInfo] RETURNS [] ~ {
	<<Parses the database's DF file, registering each log and index declared by schema comments; raises $BadSchema on malformed input or multiple primary indices.>>
	ENABLE DFUtilities.SyntaxError => ERROR Error[$BadSchema, reason];
	NewDBInfo: PROC [item: REF ANY, filename: ROPE _ NIL] RETURNS [] ~ {
		-- installs a parsed LogInfo/IndexInfo into dbinfo, optionally supplying a late-bound filename
		WITH item SELECT FROM
			log: LogInfo => {
				IF filename # NIL THEN log.filename _ filename;
				dbinfo.logs[log.id] _ log;
				};
			index: IndexInfo => {
				IF filename # NIL THEN index.filename _ filename;
				[] _ RefTab.Insert[dbinfo.indices, index.key, index];
				IF index.type = primary THEN
					IF dbinfo.primaryIndex = NIL
						THEN dbinfo.primaryIndex _ index
						ELSE ERROR Error[$BadSchema, "Multiple primary indices specified."];
				};
			ENDCASE => ERROR Error[$InternalError];
		};
	SchemaItemProc: DFUtilities.ProcessItemProc = {
		<<[item: REF ANY] RETURNS [stop: BOOL _ FALSE]>>
		WITH item SELECT FROM
			comment: REF DFUtilities.CommentItem => {
				IF needFilename THEN ERROR Error[$BadSchema, "No file name specified for some log or index."];
				[lastSchemaItem, needFilename] _ ParseSchemaLine[IO.RIS[comment.text], buffer, wdir];
				IF lastSchemaItem # NIL AND NOT needFilename THEN NewDBInfo[lastSchemaItem];
				};
			file: REF DFUtilities.FileItem => {
				IF needFilename THEN { -- this file item supplies the pending statement's filename
					NewDBInfo[lastSchemaItem, FS.ExpandName[DFUtilities.RemoveVersionNumber[file.name], wdir].fullFName];
					needFilename _ FALSE;
					};
				};
			ENDCASE => {
				IF needFilename THEN ERROR Error[$BadSchema, "No file name specified for some log or index."];
				};
		};
	needFilename: BOOLEAN _ FALSE;
	lastSchemaItem: REF ANY;
	buffer: REF TEXT = RefText.ObtainScratch[RefText.line];
	wdir: ROPE = Rope.Substr[dbinfo.dbName, 0, FS.ExpandName[dbinfo.dbName].cp.base.start];
	schemaStream: STREAM _ GeneralFS.StreamOpen[dbinfo.dbName
		! FS.Error => ERROR Error[$CantOpenSchema, error.explanation]];
	DFUtilities.ParseFromStream[in: schemaStream, proc: SchemaItemProc, filter: [comments: TRUE]];
	RefText.ReleaseScratch[buffer];
	};

DescribeI: PROC [dbinfo: OpenDBInfo] RETURNS [info: SchemaInfo] ~ {
	<<Collects schema information; the primary index is consed last so it appears first in info.keys/info.indexNames.>>
	AddIndexInfo: RefTab.EachPairAction = {
		<<[key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOLEAN]>>
		index: IndexInfo _ NARROW[val];
		IF index # dbinfo.primaryIndex THEN {
			info.keys _ CONS[index.key, info.keys];
			info.indexNames _ CONS[index.filename, info.indexNames];
			};
		RETURN[quit: FALSE];
		};
	info.dbName _ dbinfo.dbName;
	info.logs _ NIL;
	info.logNames _ NIL;
	FOR i: LogID IN [0..dbinfo.logs.size) DO
		IF dbinfo.logs[i] # NIL THEN {
			info.logs _ CONS[i, info.logs];
			info.logNames _ CONS[dbinfo.logs[i].filename, info.logNames];
			};
		ENDLOOP;
	info.keys _ NIL;
	info.indexNames _ NIL;
	[] _ RefTab.Pairs[dbinfo.indices, AddIndexInfo];
	info.keys _ CONS[dbinfo.primaryIndex.key, info.keys];
	info.indexNames _ CONS[dbinfo.primaryIndex.filename, info.indexNames];
	};

<<Entry utilities>>

GetAttributeValue: PROC [entry: Entry, type: AttributeType] RETURNS [AttributeValue] ~ {
	-- returns the value of the first attribute of the given type, or NIL if absent
	FOR e: Entry _ entry, e.rest WHILE e # NIL DO
		IF e.first.type = type THEN RETURN[e.first.value];
		ENDLOOP;
	RETURN[NIL];
	};

ReverseEntry: PROC [entry: Entry] RETURNS[Entry] = {
	<<Destructively reverses the attribute list in place (three-pointer walk).>>
	l1, l2, l3: Entry _ NIL;
	IF entry = NIL THEN RETURN[NIL];
	l3 _ entry;
	UNTIL (l1 _ l3) = NIL DO
		l3 _ l3.rest;
		l1.rest _ l2;
		l2 _ l1;
		ENDLOOP;
	RETURN[l2];
	};

<<Log management>>
<<Log format: each entry is a run of "type: value" attribute lines terminated by an EndOfEntry byte; a committed log additionally ends with an UpdateComplete byte.>>

OpenLogs: PROC [logs: LogSet] RETURNS [] ~ {
	-- opens each declared log for $append (readWrite) or $read (readOnly)
	FOR i: LogID IN [0..logs.size) DO
		IF (logs[i] # NIL) AND (logs[i].stream = NIL) THEN {
			logs[i].stream _ GeneralFS.StreamOpen[logs[i].filename, IF logs[i].access = readWrite THEN $append ELSE $read
				! FS.Error => ERROR Error[$CantOpenLog, error.explanation]];
			};
		ENDLOOP;
	};

ReadLogEntry: PUBLIC PROC [logStream: STREAM, byte: LogAddress] RETURNS [Entry] ~ {
	<<Reads one entry starting at the given log address; byte < 0 means "read from the current stream position". Returns NIL at the UpdateComplete marker (end of log).>>
	ENABLE {
		IO.EndOfStream, IO.Error => ERROR Error[$BadLogEntry]
		};
	AttributeBreakProc: IO.BreakProc = {
		<<[char: CHAR] RETURNS [IO.CharClass]>>
		RETURN[SELECT char FROM ': => sepr, ENDCASE => other]
		};
	ReadAttribute: PROC [s: STREAM] RETURNS [a: Attribute] ~ {
		a.type _ Atom.MakeAtom[IO.GetTokenRope[s, AttributeBreakProc].token];
		[] _ IO.GetChar[s]; -- attribute separation char
		[] _ IO.SkipWhitespace[stream: s, flushComments: FALSE];
		IF IO.PeekChar[s] = '"
			THEN { -- multi-line values are stored as rope literals
				a.value _ IO.GetRopeLiteral[s];
				[] _ IO.GetLineRope[s];
				}
			ELSE a.value _ IO.GetLineRope[s];
		};
	entry: Entry;
	attribute: Attribute;
	IF logStream = NIL THEN ERROR Error[$DBClosed];
	IF byte >= 0 THEN IO.SetIndex[logStream, byte ! IO.Error => ERROR Error[$BadIndex]];
	IF IO.PeekChar[logStream] = UpdateComplete THEN RETURN[NIL]; -- at end of log
	-- accumulate attributes (in reverse) until the entry terminator
	UNTIL IO.PeekChar[logStream] = EndOfEntry DO
		attribute _ ReadAttribute[logStream];
		entry _ CONS[attribute, entry];
		ENDLOOP;
	-- restore the attributes to their written order
	RETURN[ReverseEntry[entry]];
	};

WriteLogEntry: PUBLIC PROC [logStream: STREAM, entry: Entry, continuation: BOOLEAN _ FALSE] RETURNS [LogAddress] ~ {
	<<Appends the entry to the log and returns the address of its first byte. continuation=TRUE means an earlier write in this update already consumed the trailing UpdateComplete marker.>>
	WriteAttribute: PROC [s: STREAM, a: Attribute] RETURNS [] ~ {
		value: ROPE _ a.value;
		IF Rope.Find[a.value, "\n"] # -1 THEN -- write out value as rope literal
			value _ Rope.Cat["\"", a.value, "\""];
		IO.PutF[s, "%g: %g\n", IO.rope[Atom.GetPName[a.type]], IO.rope[value]]
		};
	byteOffset: LogAddress;
	IF logStream = NIL THEN ERROR Error[$DBClosed];
	byteOffset _ IO.GetLength[logStream];
	IF byteOffset > 0 AND NOT continuation THEN
		byteOffset _ byteOffset - 1; --remove update marker from end of log
	IO.SetIndex[logStream, byteOffset];
	-- write each attribute as a "type: value" line, then the entry terminator
	FOR e: Entry _ entry, e.rest WHILE e # NIL DO
		WriteAttribute[logStream, e.first];
		ENDLOOP;
	IO.PutChar[logStream, EndOfEntry];
	IO.Flush[logStream];
	RETURN[byteOffset];
	};

MarkUpdateComplete: PUBLIC PROC [logStream: STREAM] RETURNS [] ~ {
	-- commits the update by appending the UpdateComplete marker
	byteOffset: LogAddress _ IO.GetLength[logStream];
	IO.SetIndex[logStream, byteOffset];
	IO.PutChar[logStream, UpdateComplete];
	IO.Flush[logStream];
	};

CloseLogs: PROC [logs: LogSet] RETURNS [] ~ {
	FOR i: LogID IN [0..logs.size) DO
		IF (logs[i] # NIL) AND (logs[i].stream # NIL) THEN {
			IO.Close[logs[i].stream];
			logs[i].stream _ NIL;
			};
		ENDLOOP;
	};

<<Index management>>
<<Secondary-index B-tree keys are made unique by appending "\000<log> <firstByte>" to the attribute value (see ValueToUniqueKey).>>

OpenIndices: PROC [indices: IndexSet] RETURNS [needToRebuild: BOOLEAN] ~ {
	<<Opens every index B-tree; a missing index file is created fresh and forces a rebuild (needToRebuild=TRUE).>>
	OpenIndex: RefTab.EachPairAction = {
		<<[key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOL]>>
		init: BOOLEAN _ FALSE;
		index: IndexInfo _ NARROW[val];
		IF index.btree = NIL THEN {
			index.btree _ BTreeSimple.New[];
			};
		IF BTree.GetState[index.btree].state # open THEN {
			index.openfile _ GeneralFS.Open[name: index.filename, lock: $write
				! FS.Error => { -- problem opening btree so create new one
					IF error.group = $user AND error.code = $unknownFile
						THEN {
							init _ TRUE;
							needToRebuild _ TRUE;
							index.openfile _ GeneralFS.Create[name: index.filename];
							CONTINUE;
							}
						ELSE ERROR Error[$CantOpenIndex, error.explanation];
					}
				];
			BTreeSimple.Open[tree: index.btree, file: index.openfile, initialize: init
				! BTree.Error => ERROR Error[$BadIndex, Rope.Cat["Bad index for ",Atom.GetPName[NARROW[key]],"."]]];
			};
		RETURN[quit: FALSE];
		};
	needToRebuild _ FALSE;
	[] _ RefTab.Pairs[indices, OpenIndex];
	RETURN[needToRebuild];
	};

GetIndexEntry: PROC [index: IndexInfo, value: AttributeValue, valueIsKey: BOOLEAN _ FALSE, relation: BTreeSimple.Relation _ equal, pathStk: BTreeSimple.PathStk _ NIL] RETURNS [actual: AttributeValue, data: IndexEntry] ~ {
	<<Reads the B-tree record for "value" under the given relation. For secondary indices (when value is not already a mangled key) the value is widened to the small/large unique-key form appropriate to the relation.>>
	btreeValue: BTreeSimple.Value;
	useKey: AttributeValue _ value;
	useRelation: BTreeSimple.Relation _ relation;
	IF NOT valueIsKey AND index.type = secondary THEN
		SELECT relation FROM
			less => useKey _ ValueToSmallKey[value];
			lessEqual => useKey _ ValueToLargeKey[value];
			equal => { --start small and look upwards
				useKey _ ValueToSmallKey[value];
				useRelation _ greaterEqual;
				};
			greaterEqual => useKey _ ValueToSmallKey[value];
			greater => useKey _ ValueToLargeKey[value];
			ENDCASE;
	[actual, btreeValue] _ BTreeSimple.ReadRecord[tree: index.btree, key: useKey, relation: useRelation, pathStk: pathStk, useExistingPath: pathStk # NIL
		! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex]];
	IF NOT valueIsKey AND index.type = secondary AND relation = equal AND NOT Rope.Equal[value, UniqueKeyToValue[actual], FALSE] THEN -- found entry larger than desired
		actual _ NIL;
	IF btreeValue # NIL THEN
		TRUSTED { data _ LOOPHOLE[@btreeValue[0], IndexPtr]^; };
	RETURN[actual, data];
	};

CreateIndexEntry: PROC [index: IndexInfo, value: AttributeValue, data: IndexEntry] RETURNS [] ~ {
	-- inserts (or updates) the B-tree record mapping value -> data
	btreeValue: BTreeSimple.Value;
	btreeValue _ NEW[BTreeSimple.ValueObject[SIZE[IndexEntry]]];
	TRUSTED { LOOPHOLE[@btreeValue[0], IndexPtr]^ _ data; };
	BTreeSimple.UpdateRecord[tree: index.btree, key: IF index.type = primary THEN value ELSE ValueToUniqueKey[value, data], value: btreeValue
		! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex]];
	};

DeleteIndexEntry: PROC [index: IndexInfo, value: AttributeValue, data: IndexEntry] RETURNS [] ~ {
	-- removes the B-tree record for value (mangled with data for secondary indices)
	[] _ BTreeSimple.DeleteKey[tree: index.btree, key: IF index.type = primary THEN value ELSE ValueToUniqueKey[value, data]
		! BTree.Error => ERROR Error[IF reason = closed THEN $DBClosed ELSE $BadIndex]];
	};

AddEntryToIndices: PROC [indices: IndexSet, entry: Entry, data: IndexEntry] RETURNS [] ~ {
	-- adds one index record per indexed attribute present in the entry
	index: IndexInfo;
	FOR e: Entry _ entry, e.rest WHILE e # NIL DO
		index _ NARROW[RefTab.Fetch[indices, e.first.type].val];
		IF index # NIL THEN { -- index key in entry so add to index
			CreateIndexEntry[index, e.first.value, data];
			};
		ENDLOOP;
	};

RemoveEntryFromIndices: PROC [indices: IndexSet, entry: Entry, data: IndexEntry] RETURNS [] ~ {
	-- removes one index record per indexed attribute present in the entry
	index: IndexInfo;
	FOR e: Entry _ entry, e.rest WHILE e # NIL DO
		index _ NARROW[RefTab.Fetch[indices, e.first.type].val];
		IF index # NIL THEN { -- index key in entry so remove from index
			DeleteIndexEntry[index, e.first.value, data];
			};
		ENDLOOP;
	};

EraseIndices: PROC [indices: IndexSet] RETURNS [] ~ {
	<<Recreates every index file empty and reopens its B-tree initialized.>>
	EraseIndex: RefTab.EachPairAction = {
		<<[key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOL]>>
		index: IndexInfo _ NARROW[val];
		IF index.btree = NIL THEN RETURN[quit: FALSE];
		-- close the tree (releasing the file) before recreating the file
		IF BTree.GetState[index.btree].state # closed THEN {
			BTreeSimple.SetState[index.btree, closed];
			FS.Close[index.openfile];
			};
		index.openfile _ GeneralFS.Create[index.filename];
		BTreeSimple.Open[tree: index.btree, file: index.openfile, initialize: TRUE];
		RETURN[quit: FALSE];
		};
	[] _ RefTab.Pairs[indices, EraseIndex];
	};

CloseIndices: PROC [indices: IndexSet] RETURNS [] ~ {
	CloseIndex: RefTab.EachPairAction = {
		<<[key: RefTab.Key, val: RefTab.Val] RETURNS [quit: BOOL]>>
		index: IndexInfo _ NARROW[val];
		IF index.btree # NIL AND BTree.GetState[index.btree].state # closed THEN {
			BTreeSimple.SetState[index.btree, closed];
			FS.Close[index.openfile];
			};
		RETURN[quit: FALSE];
		};
	[] _ RefTab.Pairs[indices, CloseIndex];
	};

<<Unique-key mangling for secondary indices>>

IsUniqueKey: PROC [key: ROPE] RETURNS [BOOLEAN] ~ INLINE {
	RETURN[Rope.Find[key, "\000"] # -1];
	};

ValueToUniqueKey: PROC [value: AttributeValue, data: IndexEntry] RETURNS [ROPE] ~ INLINE {
	-- appends "\000<log> <firstByte>" so equal attribute values get distinct B-tree keys
	RETURN[Rope.Cat[value, "\000", Convert.RopeFromInt[data.log], " ", Convert.RopeFromInt[data.firstByte]]];
	};

ValueToSmallKey: PROC [value: AttributeValue] RETURNS [ROPE] ~ INLINE {
	-- smallest possible unique key for this value
	RETURN[Rope.Concat[value, "\000\000"]];
	};

ValueToLargeKey: PROC [value: AttributeValue] RETURNS [ROPE] ~ INLINE {
	-- largest possible unique key for this value
	RETURN[Rope.Concat[value, "\000\377"]];
	};

UniqueKeyToValue: PROC [key: ROPE] RETURNS [AttributeValue] ~ INLINE {
	-- strips the "\000..." suffix added by ValueToUniqueKey
	RETURN[Rope.Substr[key, 0, Rope.Index[key, 0, "\000"]]];
	};

<<Crash recovery>>

Recover: PROC [dbinfo: OpenDBInfo] RETURNS [needToRebuild: BOOLEAN] ~ {
	-- checks each writable log; restores any that were mid-update at crash time
	needToRebuild _ FALSE;
	FOR i: LogID IN [0..dbinfo.logs.size) DO
		IF (dbinfo.logs[i] # NIL) AND (dbinfo.logs[i].access # readOnly) THEN {
			IF NOT InGoodState[dbinfo.logs[i].stream] THEN
				IF RestoreLog[dbinfo.logs[i].stream] THEN needToRebuild _ TRUE;
			};
		ENDLOOP;
	RETURN[needToRebuild];
	};

InGoodState: PROC [logStream: STREAM] RETURNS [BOOLEAN] ~ {
	<<A committed log ends with '\n, EndOfEntry, UpdateComplete. A brand-new (or too-short) log is truncated to empty and marked complete.>>
	length: INT;
	thirdToLastByte, secondToLastByte, lastByte: CHAR;
	length _ IO.GetLength[logStream];
	IF length < 3 THEN { -- log just created (or too short to be valid)
		IO.SetLength[logStream, 0];
		MarkUpdateComplete[logStream];
		RETURN[TRUE];
		};
	IO.SetIndex[logStream, length-3];
	thirdToLastByte _ IO.GetChar[logStream];
	secondToLastByte _ IO.GetChar[logStream];
	lastByte _ IO.GetChar[logStream];
	RETURN[(thirdToLastByte = '\n) AND (secondToLastByte = EndOfEntry) AND (lastByte = UpdateComplete)];
	};

RestoreLog: PROC [logStream: STREAM] RETURNS [needToRebuild: BOOLEAN] ~ {
	<<Repairs a log whose last update did not complete. If the log ends at an entry boundary, the indices may reference the uncommitted entry, so a rebuild is needed; otherwise the partial entry is truncated. A trailing $REPLACED marker with no following entry is also truncated.>>
	logAddress: INT;
	prevByte, byte: CHAR;
	lastEntry: Entry;
	logAddress _ IO.GetLength[logStream]-2;
	IO.SetIndex[logStream, logAddress];
	prevByte _ IO.GetChar[logStream];
	byte _ IO.GetChar[logStream];
	IF (byte = EndOfEntry) AND (prevByte = '\n)
		THEN {
			-- log ends on a complete entry that was never committed: indices may be stale
			needToRebuild _ TRUE;
			}
		ELSE {
			-- scan backwards to the last entry boundary and chop off the partial entry
			UNTIL ((byte = EndOfEntry) AND (prevByte = '\n)) OR (logAddress = 0) DO
				byte _ prevByte;
				logAddress _ logAddress - 1;
				IO.SetIndex[logStream, logAddress];
				prevByte _ IO.GetChar[logStream];
				ENDLOOP;
			IO.SetLength[logStream, IF logAddress = 0 THEN 0 ELSE logAddress + 2];
			needToRebuild _ FALSE;
			};
	-- check whether the (now) last entry is a dangling $REPLACED marker
	IF logAddress # 0 THEN {
		-- scan backwards past the terminator to the start of the last entry
		byte _ prevByte;
		logAddress _ logAddress - 1;
		IO.SetIndex[logStream, logAddress];
		prevByte _ IO.GetChar[logStream];
		UNTIL ((byte = EndOfEntry) AND (prevByte = '\n)) OR (logAddress = 0) DO
			byte _ prevByte;
			logAddress _ logAddress - 1;
			IO.SetIndex[logStream, logAddress];
			prevByte _ IO.GetChar[logStream];
			ENDLOOP;
		IF logAddress # 0 THEN logAddress _ logAddress + 2;
		lastEntry _ ReadLogEntry[logStream, logAddress];
		IF lastEntry.first.type = $REPLACED THEN {
			IO.SetLength[logStream, logAddress];
			needToRebuild _ FALSE;
			};
		};
	-- the log is now consistent: commit it
	MarkUpdateComplete[logStream];
	RETURN[needToRebuild];
	};

IndicesOutOfDate: PROC [dbinfo: OpenDBInfo] RETURNS [BOOLEAN] ~ {
	<<TRUE if any log file is newer than the primary index file (by FS create date).>>
	<<NOTE(review): not referenced within this file as extracted -- presumably kept for backdoor/debug use; confirm.>>
	fileDate: BasicTime.GMT;
	indexDate: BasicTime.GMT _ FS.FileInfo[name: dbinfo.primaryIndex.filename].created;
	FOR i: LogID IN [0..dbinfo.logs.size) DO
		IF dbinfo.logs[i] # NIL THEN {
			fileDate _ FS.FileInfo[name: dbinfo.logs[i].filename].created;
			IF BasicTime.Period[from: indexDate, to: fileDate] > 0 THEN RETURN[TRUE];
			};
		ENDLOOP;
	RETURN[FALSE];
	};

<<Shared database table>>
<<All instances in this environment share one OpenDBInfo per database file, keyed by full file name and published via an Atom property so independently-loaded copies of this module agree.>>

OpenDBTable: SymTab.Ref;

OpenDBTableSize: SymTab.SeqIndex = 2039;
<<2039 is a good candidate because it is prime and is large enough that collisions should be rare.>>

GetSharedDB: PROC [dbName: ROPE] RETURNS [dbinfo: OpenDBInfo] ~ {
	<<Returns the shared OpenDBInfo for the named database, creating and registering a fresh one if needed.>>
	fname: ROPE _ FS.FileInfo[dbName
		! FS.Error => ERROR Error[$CantOpenSchema, error.explanation]].fullFName;
	dbinfo _ NARROW[SymTab.Fetch[OpenDBTable, fname].val];
	IF dbinfo = NIL THEN {
		dbinfo _ NEW[OpenDBRecord];
		dbinfo.dbName _ fname;
		dbinfo.isOpen _ FALSE;
		dbinfo.remoteAccess _ TRUE;
		dbinfo.statusMsg _ "Open for service.";
		dbinfo.indices _ RefTab.Create[];
		dbinfo.primaryIndex _ NIL;
		dbinfo.logs _ NEW[LogSetRecord[LAST[LogID]]];
		IF NOT SymTab.Insert[OpenDBTable, fname, dbinfo] THEN -- lost race
			dbinfo _ NARROW[SymTab.Fetch[OpenDBTable, fname].val];
		};
	};

<<Module initialization: locate (or create and publish) the global shared table.>>
OpenDBTable _ NARROW[Atom.GetProp[$LoganBerry, $OpenDBTable]];
IF OpenDBTable = NIL THEN {
	OpenDBTable _ SymTab.Create[mod: OpenDBTableSize, case: FALSE];
	Atom.PutProp[$LoganBerry, $OpenDBTable, OpenDBTable];
	};

END.

<<NOTE(review): the original Tioga change log (author/date/action entries) was reduced to empty comments by extraction; recover it from the archived source if the history matters.>>