DIRECTORY BTree USING [Compare, Entry, EntryFromRecord, EntrySize, EntSize, Error, Key, PageNumber, Record, ReferencePage, ReleasePage, UpdateState, UpdateType], BTreeInternal USING [AdjustTreeState, AllocatePage, AssignRefESR, BackUpAndRemoveEntry, BackUpOneEntry, BTreeEntry, BTreeEntrySize, BTreePagePtr, Bug, Compare, entry0Offset, entry1Offset, EntryFromRecord, EntryOrdinal, entryOverhead, EntrySize, EntryTable, EntryTableRec, EntSeqRecord, FreePage, freePageMarker, GetDefaultPathStk, GetHeapAndTable, Heap, HeapIndex, InsertRecords, Lock, maxLevelsInTree, nilOffset, nilPage, PathEntryLE, PathStk, PathStkEntry, PathStkIndex, PathStkObject, PathToMaxDescendant, ReferencePage, ReferenceStack, ReleasePage, RemoveEntry, RepairOffsets, ReturnDefaultPathStk, ReturnHeapAndTable, Tree, TreeObject, Unlock, WriteStatePage], PrincOpsUtils USING [LongCopy, LongMove]; BTreeWrite: PROGRAM IMPORTS BTree, BTreeInternal, PrincOpsUtils EXPORTS BTree, BTreeInternal = { OPEN BTree, BTreeInternal; CARD: TYPE = LONG CARDINAL; Tree: TYPE = REF TreeObject; TreeObject: PUBLIC TYPE = BTreeInternal.TreeObject; PathStk: TYPE = REF PathStkObject; PathStkObject: PUBLIC TYPE = BTreeInternal.PathStkObject; PathStkEntryPtr: TYPE = LONG POINTER TO PathStkEntry; BTreeEntryPtr: TYPE = LONG POINTER TO BTreeEntry; DeleteKey: PUBLIC SAFE PROC [tree: Tree, key: Key, pathStk: PathStk _ NIL, useExistingPath: BOOL _ FALSE] RETURNS [found: BOOL] = TRUSTED { FatherMayNeedWork: PROC RETURNS [needsWork: BOOL] = { pagePtr, otherPtr: BTreePagePtr; fatherPSE: PathStkEntryPtr; fatherFreeWords: CARDINAL; pse _ @pathStk.path[pathStk.top]; IF pse.eslFront=NIL THEN needsWork _ FALSE ELSE { tree.InsertRecords[pathStk]; needsWork _ pathStk.top#0 AND pathStk.path[pathStk.top-1].eslFront#NIL; }; pagePtr _ tree.ReferencePage[pse.pageNumber]; IF pathStk.top=1 AND pagePtr.freeWords=tree.maxFreeWords THEN { tree.state.rootPage _ pagePtr.minPage; tree.state.depth _ tree.state.depth-1; tree.ReleasePage[pse.pageNumber]; tree.FreePage[pse.pageNumber]; RETURN [FALSE]; }; IF pathStk.top=1 OR tree.maxFreeWords-pagePtr.freeWords >= tree.prettyFull THEN { tree.ReleasePage[pse.pageNumber]; RETURN; }; AppendEntSeqRecord[pse: pse, esr: MakeEntSeqRecord[entSeq: @pagePtr.entries, length: tree.maxFreeWords-pagePtr.freeWords]]; fatherPSE _ @pathStk.path[pathStk.top-1]; otherPtr _ tree.ReferencePage[fatherPSE.pageNumber]; fatherFreeWords _ otherPtr.freeWords; tree.ReleasePage[fatherPSE.pageNumber]; IF fatherPSE.offset < nilOffset+(tree.state.pageSize-fatherFreeWords) OR fatherPSE.eslFront#NIL THEN { rtBroPg: PageNumber; esr: REF EntSeqRecord; [esr: esr, grPage: rtBroPg] _ tree.RemoveEntry[fatherPSE]; AppendEntSeqRecord[pse: pse, esr: esr]; otherPtr _ tree.ReferencePage[fatherPSE.pageNumber, write]; IF otherPtr[fatherPSE.lastOffset].grPage # pse.pageNumber THEN ERROR Bug[mcCreightWasWrong]; otherPtr[fatherPSE.lastOffset].grPage _ rtBroPg; tree.ReleasePage[fatherPSE.pageNumber]; otherPtr _ tree.ReferencePage[rtBroPg, write]; otherPtr.minPage _ pagePtr.minPage; tree.ReleasePage[rtBroPg]; tree.ReleasePage[pse.pageNumber]; tree.FreePage[pse.pageNumber]; pse.pageNumber _ rtBroPg; pse.offset _ entry1Offset; pse.lastOffset _ entry0Offset; pse.nextToLastOffset _ nilOffset; } ELSE { esr: REF EntSeqRecord; tree.ReleasePage[pse.pageNumber]; [esr: esr] _ tree.BackUpAndRemoveEntry[fatherPSE]; PushEntSeqRecord[pse: pse, esr: esr]; tree.FreePage[pse.pageNumber]; otherPtr _ tree.ReferencePage[fatherPSE.pageNumber]; pse.pageNumber _ otherPtr[fatherPSE.lastOffset].grPage; 
tree.ReleasePage[fatherPSE.pageNumber]; pagePtr _ tree.ReferencePage[pse.pageNumber]; pse.offset _ nilOffset+(tree.state.pageSize-pagePtr.freeWords); pse.lastOffset _ entry1Offset; pse.nextToLastOffset _ entry0Offset; tree.ReleasePage[pse.pageNumber]; tree.RepairOffsets[pse]; }; IF pse.eslFront#NIL THEN tree.InsertRecords[pathStk]; RETURN [TRUE]; }; pathStkWasNil: BOOL _ pathStk=NIL; pse: PathStkEntryPtr; tree.Lock[update]; IF pathStkWasNil THEN { IF useExistingPath THEN ERROR Error[nilPathStk]; pathStk _ tree.GetDefaultPathStk[]; }; { ENABLE UNWIND => { IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk]; tree.Unlock[]; }; origStkTop: PathStkIndex; pagePtr: BTreePagePtr _ NIL; descendantPg: PageNumber; simpleDelete: BOOL; equal: BOOL _ tree.PathEntryLE[key: key, pathStk: pathStk, useExistingPath: useExistingPath].equal; IF ~equal THEN { IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk]; tree.Unlock[]; RETURN [FALSE]; }; origStkTop _ pathStk.top; [ptr: pagePtr, pse: pse] _ tree.ReferenceStack[pathStk]; descendantPg _ pagePtr[pse.nextToLastOffset].grPage; simpleDelete _ descendantPg=nilPage AND tree.maxFreeWords - (pagePtr.freeWords + tree.BTreeEntrySize[@pagePtr[pse.lastOffset]]) >= tree.prettyFull; tree.ReleasePage[pse.pageNumber]; tree.version _ tree.version+1; tree.BackUpOneEntry[pse]; -- pse.offset should index deletion victim IF simpleDelete THEN { tree.AdjustTreeState[update: unchanged, deltaEntryCount: -1]; pagePtr _ tree.ReferencePage[pse.pageNumber, write]; [] _ tree.RemoveEntry[pse]; tree.ReleasePage[pse.pageNumber, IF tree.longUpdate THEN unchanged ELSE endOfUpdate]; } ELSE { tree.AdjustTreeState[update: startOfUpdate, deltaEntryCount: -1]; tree.PathToMaxDescendant[pathStk: pathStk, page: descendantPg]; IF pathStk.top > origStkTop THEN { dpse: PathStkEntryPtr = @pathStk.path[pathStk.top]; leafESR: REF EntSeqRecord _ tree.BackUpAndRemoveEntry[dpse].esr; [grPage: leafESR.entSeqP.grPage] _ tree.RemoveEntry[pse]; AppendEntSeqRecord[pse: pse, esr: leafESR]; } ELSE [] _ tree.RemoveEntry[pse]; tree.GetHeapAndTable[pathStk]; DO needsWork: BOOL _ FatherMayNeedWork[ ! 
UNWIND => tree.ReturnHeapAndTable[pathStk]]; IF pathStk.top=0 OR (~needsWork AND pathStk.top<=origStkTop) THEN EXIT ELSE pathStk.top _ pathStk.top-1; ENDLOOP; pathStk.top _ 0; tree.ReturnHeapAndTable[pathStk]; tree.AdjustTreeState[update: endOfUpdate, deltaEntryCount: 0]; }; }; IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk]; tree.Unlock[]; RETURN [TRUE]; }; UpdateRecord: PUBLIC SAFE PROC [tree: Tree, key: Key, pathStk: PathStk _ NIL, useExistingPath: BOOL _ FALSE, record: Record, updateType: UpdateType _ insertOrReplace] = TRUSTED { ProduceEntry: PROC [entry: Entry] = { PrincOpsUtils.LongCopy[to: entry, from: tree.EntryFromRecord[record], nwords: words]; }; words: EntSize = tree.EntrySize[tree.EntryFromRecord[record]]; UpdateEntry[tree: tree, key: key, pathStk: pathStk, useExistingPath: useExistingPath, words: words, Proc: ProduceEntry, updateType: updateType]; }; UpdateEntry: PUBLIC PROC [tree: Tree, key: Key, pathStk: PathStk _ NIL, useExistingPath: BOOL _ FALSE, words: EntSize, Proc: UNSAFE PROC [entry: Entry], updateType: UpdateType _ insertOrReplace] = { CallEntryProc: PROC [entry: Entry] = { Proc[entry]; IF tree.EntrySize[entry]#words OR tree.Compare[key, entry]#equal THEN ERROR Error[wrongEntryProduced]; }; pathStkWasNil: BOOL _ pathStk=NIL; tree.Lock[update]; IF pathStkWasNil THEN { IF useExistingPath THEN ERROR Error[nilPathStk]; pathStk _ tree.GetDefaultPathStk[]; }; { ENABLE UNWIND => { IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk]; tree.Unlock[]; }; leafStkTop: PathStkIndex; equal: BOOL; pse: PathStkEntryPtr; pagePtr: BTreePagePtr _ NIL; foundEntSize: CARDINAL _ 0; -- zero means there is not an existing entry with this key IF CARDINAL[words+entryOverhead] NOT IN [1+entryOverhead..tree.maxFreeWords] THEN ERROR Error[entrySizesWrong]; [equal: equal, depth: leafStkTop] _ tree.PathEntryLE[key: key, pathStk: pathStk, useExistingPath: useExistingPath]; IF equal THEN { IF updateType=insert THEN ERROR Error[wrongUpdateType]; [pse: pse, ptr: pagePtr] _ tree.ReferenceStack[pathStk]; foundEntSize _ tree.EntrySize[@pagePtr[pse.lastOffset].entry]; tree.ReleasePage[pse.pageNumber]; } ELSE IF updateType=replace THEN ERROR Error[wrongUpdateType]; tree.version _ tree.version+1; -- invalidate existing PathStks that refer to this tree pse _ @pathStk.path[pathStk.top]; IF words=foundEntSize THEN { tree.AdjustTreeState[update: unchanged, deltaEntryCount: 0]; pagePtr _ tree.ReferencePage[pse.pageNumber, write]; CallEntryProc[@pagePtr[pse.lastOffset].entry]; tree.ReleasePage[pse.pageNumber, IF tree.longUpdate THEN unchanged ELSE endOfUpdate]; } ELSE { removedEntGrPage: PageNumber _ nilPage; newEntryFits: BOOL _ FALSE; IF foundEntSize=0 THEN { pathStk.top _ leafStkTop; pse _ @pathStk.path[pathStk.top]; }; IF pathStk.top>0 THEN { pagePtr _ tree.ReferencePage[pse.pageNumber]; newEntryFits _ CARDINAL[words+entryOverhead] <= CARDINAL[pagePtr.freeWords + (IF foundEntSize=0 THEN 0 ELSE foundEntSize+entryOverhead)]; tree.ReleasePage[pse.pageNumber]; }; tree.AdjustTreeState[update: IF newEntryFits THEN unchanged ELSE startOfUpdate, deltaEntryCount: IF foundEntSize=0 THEN 1 ELSE 0]; IF pathStk.top>0 THEN pagePtr _ tree.ReferencePage[pse.pageNumber, write]; IF foundEntSize#0 THEN [grPage: removedEntGrPage] _ tree.BackUpAndRemoveEntry[pse]; IF newEntryFits THEN { entPtr: BTreeEntryPtr _ @pagePtr[pse.offset]; PrincOpsUtils.LongMove[to: entPtr+words+entryOverhead, from: entPtr, nwords: nilOffset+(tree.state.pageSize-pagePtr.freeWords)-pse.offset]; CallEntryProc[@entPtr.entry]; entPtr.grPage _ 
removedEntGrPage; pagePtr.freeWords _ pagePtr.freeWords - (words+entryOverhead); tree.ReleasePage[pse.pageNumber, IF tree.longUpdate THEN unchanged ELSE endOfUpdate]; } ELSE { esr: REF EntSeqRecord _ NEW[EntSeqRecord[words+entryOverhead]]; esr.entSeqP _ LOOPHOLE[BASE[DESCRIPTOR[esr.entSeq]]]; esr.entSeqLen _ words+entryOverhead; CallEntryProc[@esr.entSeqP.entry]; esr.entSeqP.grPage _ removedEntGrPage; AppendEntSeqRecord[pse: pse, esr: esr]; IF pathStk.top>0 THEN tree.ReleasePage[pse.pageNumber]; tree.GetHeapAndTable[pathStk]; WHILE pathStk.path[pathStk.top].eslFront#NIL DO tree.InsertRecords[pathStk ! UNWIND => tree.ReturnHeapAndTable[pathStk]]; IF pathStk.top=0 THEN EXIT ELSE pathStk.top _ pathStk.top-1; ENDLOOP; tree.ReturnHeapAndTable[pathStk]; tree.AdjustTreeState[update: endOfUpdate, deltaEntryCount: 0]; }; }; }; IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk]; tree.Unlock[]; }; SetUpdateInProgress: PUBLIC SAFE PROC [tree: Tree, updateInProgress: BOOL] = TRUSTED { tree.Lock[update]; tree.longUpdate _ updateInProgress; tree.AdjustTreeState[update: IF updateInProgress THEN startOfUpdate ELSE endOfUpdate, deltaEntryCount: 0]; tree.Unlock[]; }; AdjustTreeState: PUBLIC PROC [tree: Tree, update: UpdateState, deltaEntryCount: INTEGER] = { IF tree.maintainRecomputableState THEN { IF tree.state.entryCount#LAST[CARD] THEN tree.state.entryCount _ tree.state.entryCount+deltaEntryCount; IF update#unchanged AND ~(tree.state.updateInProgress AND tree.longUpdate) THEN { tree.state.updateInProgress _ update=startOfUpdate OR tree.longUpdate; tree.WriteStatePage[update: update]; }; } ELSE IF deltaEntryCount#0 AND tree.state.entryCount#LAST[CARD] THEN { tree.state.entryCount _ LAST[CARD]; tree.WriteStatePage[]; }; }; InsertRecords: PUBLIC PROC [tree: Tree, pathStk: PathStk] = { pse: PathStkEntryPtr = @pathStk.path[pathStk.top]; IF pse.eslFront#NIL THEN { pathStk.entryTable.length _ 0; pathStk.entryTable.map[0].cumEntSize _ 0; FOR esr: REF EntSeqRecord _ pse.eslFront, esr.fwdP UNTIL esr=NIL DO AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr]; ENDLOOP; IF pathStk.top=0 THEN { wordsToInsert: CARDINAL = EntryIntervalSize[pathStk: pathStk, leftFather: 0]; newRootPage: PageNumber = tree.AllocatePage[]; pagePtr: BTreePagePtr = tree.ReferencePage[newRootPage, write]; IF tree.state.depth >= maxLevelsInTree THEN ERROR Error[depthExceeded]; pagePtr.minPage _ pathStk.path[0].leastSon; tree.ReleasePage[newRootPage]; IF wordsToInsert > tree.maxFreeWords THEN ERROR Bug[newRootOverflow]; WritePage[tree: tree, pse: @pathStk.path[0], number: newRootPage, words: wordsToInsert]; tree.state.rootPage _ newRootPage; tree.state.depth _ tree.state.depth+1; } ELSE { pagePtr: BTreePagePtr = tree.ReferencePage[pse.pageNumber, write]; tailBlkPtr: BTreeEntryPtr = @pagePtr[pse.offset]; tailBlkLen: CARDINAL = (nilOffset+tree.state.pageSize-pagePtr.freeWords)-pse.offset; wordsToInsert: CARDINAL = EntryIntervalSize[pathStk: pathStk]; IF wordsToInsert<=pagePtr.freeWords THEN { PrincOpsUtils.LongMove[to: tailBlkPtr+wordsToInsert, from: tailBlkPtr, nwords: nilOffset+(tree.state.pageSize-pagePtr.freeWords)-pse.offset]; DepositESL[tree: tree, pse: pse, block: tailBlkPtr, length: wordsToInsert]; pagePtr.freeWords _ pagePtr.freeWords-wordsToInsert; tree.ReleasePage[pse.pageNumber]; } ELSE { rtBroPg1: PageNumber; esr: REF EntSeqRecord _ MakeEntSeqRecord[@pagePtr[entry1Offset], pse.offset-entry1Offset]; PushEntSeqRecord[pse: pse, esr: esr]; PushEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr]; esr _ 
MakeEntSeqRecord[tailBlkPtr, tailBlkLen]; AppendEntSeqRecord[pse: pse, esr: esr]; AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr]; tree.ReleasePage[pse.pageNumber]; rtBroPg1 _ ComplexInsertRecords[tree: tree, pathStk: pathStk]; IF rtBroPg1#nilPage THEN HairyInsertRecords[tree: tree, pathStk: pathStk, rtBroPg1: rtBroPg1]; }; }; IF pse.eslFront#NIL THEN ERROR Bug[entriesLeftOver]; }; }; MakeEntSeqRecord: PUBLIC PROC [entSeq: BTreeEntryPtr, length: CARDINAL] RETURNS [esr: REF EntSeqRecord] = { IF length=0 THEN RETURN [NIL]; esr _ NEW[EntSeqRecord[length]]; esr.entSeqP _ LOOPHOLE[BASE[DESCRIPTOR[esr.entSeq]]]; esr.entSeqLen _ length; PrincOpsUtils.LongCopy[to: esr.entSeqP, from: entSeq, nwords: length]; }; AppendEntSeqRecord: PUBLIC PROC [pse: PathStkEntryPtr, esr: REF EntSeqRecord] = { IF esr#NIL THEN { esr.fwdP _ NIL; IF pse.eslRear=NIL THEN AssignRefESR[@pse.eslFront, esr] ELSE pse.eslRear.fwdP _ esr; AssignRefESR[@pse.eslRear, esr]; }; }; PushEntSeqRecord: PUBLIC PROC [pse: PathStkEntryPtr, esr: REF EntSeqRecord] = { IF esr#NIL THEN { esr.fwdP _ pse.eslFront; AssignRefESR[@pse.eslFront, esr]; IF pse.eslRear=NIL THEN AssignRefESR[@pse.eslRear, esr]; }; }; RemoveEntry: PUBLIC PROC [tree: Tree, pse: PathStkEntryPtr, ignoreESL: BOOL _ FALSE] RETURNS [esr: REF EntSeqRecord _ NIL, grPage: PageNumber] = { IF ignoreESL OR pse.eslFront=NIL THEN { pagePtr: BTreePagePtr = tree.ReferencePage[pse.pageNumber, write]; entSize: CARDINAL = tree.BTreeEntrySize[@pagePtr[pse.offset]]; esr _ MakeEntSeqRecord[entSeq: @pagePtr[pse.offset], length: entSize]; pagePtr.freeWords _ pagePtr.freeWords+entSize; PrincOpsUtils.LongCopy[to: @pagePtr[pse.offset], from: @pagePtr[pse.offset]+entSize, nwords: nilOffset+(tree.state.pageSize-pagePtr.freeWords)-pse.offset]; tree.ReleasePage[pse.pageNumber]; } ELSE { entSize: CARDINAL = tree.BTreeEntrySize[pse.eslFront.entSeqP]; esr _ NEW[EntSeqRecord[entSize]]; esr.entSeqP _ LOOPHOLE[BASE[DESCRIPTOR[esr.entSeq]]]; esr.entSeqLen _ entSize; DepositESL[tree: tree, pse: pse, block: esr.entSeqP, length: entSize]; }; grPage _ esr.entSeqP.grPage; esr.entSeqP.grPage _ nilPage; IF grPage # nilPage THEN { pagePtr: BTreePagePtr = tree.ReferencePage[grPage]; esr.entSeqP.grPage _ pagePtr.minPage; tree.ReleasePage[grPage]; }; }; BackUpAndRemoveEntry: PUBLIC PROC [tree: Tree, pse: PathStkEntryPtr] RETURNS [esr: REF EntSeqRecord, grPage: PageNumber] = { tree.BackUpOneEntry[pse]; [esr: esr, grPage: grPage] _ tree.RemoveEntry[pse: pse, ignoreESL: TRUE]; }; AllocatePage: PUBLIC SAFE PROC [tree: Tree] RETURNS [number: PageNumber] = TRUSTED { pagePtr: BTreePagePtr; IF tree.state.firstFreePage=nilPage THEN { number _ (tree.state.greatestPage _ tree.state.greatestPage+1); pagePtr _ tree.ReferencePage[number, new]; } ELSE { number _ tree.state.firstFreePage; pagePtr _ tree.ReferencePage[number, write]; IF pagePtr.freeWords#freePageMarker THEN ERROR Bug[pageNotFree]; tree.state.firstFreePage _ pagePtr.minPage; }; pagePtr.freeWords _ tree.maxFreeWords; tree.ReleasePage[number]; }; FreePage: PUBLIC SAFE PROC [tree: Tree, number: PageNumber] = TRUSTED { pagePtr: BTreePagePtr = tree.ReferencePage[number, write]; IF pagePtr.freeWords=freePageMarker THEN ERROR Bug[pageAlreadyFree]; pagePtr.freeWords _ freePageMarker; pagePtr.minPage _ tree.state.firstFreePage; tree.state.firstFreePage _ number; tree.ReleasePage[number]; }; LongMove: PUBLIC PROC [to, from: LONG POINTER, nWords: CARDINAL] = { PrincOpsUtils.LongMove[to: to, from: from, nwords: nWords]; }; AppendEntSeqLengths: PROC [tree: Tree, 
pathStk: PathStk, esr: REF EntSeqRecord] = { IF esr#NIL THEN { entryTable: REF EntryTable _ pathStk.entryTable; index: EntryOrdinal _ entryTable.length; wordsLeft: CARDINAL _ esr.entSeqLen; entry: BTreeEntryPtr _ esr.entSeqP; WHILE wordsLeft>0 DO entrySize: CARDINAL = tree.BTreeEntrySize[entry]; index _ index+1; IF index >= entryTable.maxLength THEN ERROR Bug[tooManyEntriesInPage]; entryTable.map[index].cumEntSize _ entryTable.map[index-1].cumEntSize+entrySize; entry _ entry+entrySize; wordsLeft _ wordsLeft-entrySize; ENDLOOP; entryTable.length _ index; }; }; PushEntSeqLengths: PROC [tree: Tree, pathStk: PathStk, esr: REF EntSeqRecord] = { IF esr#NIL THEN { entryTable: REF EntryTable = pathStk.entryTable; oldLen: EntryOrdinal = entryTable.length; tempFirstOldIndex: EntryOrdinal = entryTable.maxLength-oldLen; newLen: EntryOrdinal; delta: CARDINAL; PrincOpsUtils.LongMove[to: @entryTable.map[tempFirstOldIndex], from: @entryTable.map[1], nwords: oldLen*SIZE[EntryTableRec]]; entryTable.length _ 0; AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr]; newLen _ entryTable.length; IF newLen >= tempFirstOldIndex THEN ERROR Bug[tooManyEntriesInPage]; entryTable.length _ newLen+oldLen; delta _ entryTable.map[newLen].cumEntSize; FOR i: EntryOrdinal IN [0..oldLen) DO entryTable.map[newLen+1+i].cumEntSize _ entryTable.map[tempFirstOldIndex+i].cumEntSize+delta; ENDLOOP; }; }; DepositESL: PROC [tree: Tree, pse: PathStkEntryPtr, block: BTreeEntryPtr, length: CARDINAL] = { WHILE length#0 AND pse.eslFront#NIL DO esr: REF EntSeqRecord _ pse.eslFront; entSeqP: BTreeEntryPtr = esr.entSeqP; IF esr.entSeqLen <= length THEN { PrincOpsUtils.LongCopy[to: block, from: entSeqP, nwords: esr.entSeqLen]; block _ block+esr.entSeqLen; length _ length-esr.entSeqLen; AssignRefESR[@pse.eslFront, esr.fwdP]; esr.fwdP _ NIL; } ELSE { firstEntSize: CARDINAL = tree.BTreeEntrySize[entSeqP]; IF firstEntSize <= length THEN { PrincOpsUtils.LongCopy[to: block, from: entSeqP, nwords: firstEntSize]; block _ block+firstEntSize; length _ length-firstEntSize; esr.entSeqP _ entSeqP+firstEntSize; esr.entSeqLen _ esr.entSeqLen-firstEntSize; } ELSE ERROR Bug[depositESL]; -- block would end in middle of entry }; ENDLOOP; IF length#0 THEN ERROR Bug[depositESL]; -- ESL exhausted IF pse.eslFront=NIL THEN AssignRefESR[@pse.eslRear, NIL]; }; EntryIntervalSize: PROC [pathStk: PathStk, leftFather, rightFather: EntryOrdinal _ 0] RETURNS [words: CARDINAL] = { IF rightFather=0 THEN rightFather _ pathStk.entryTable.length+1; RETURN [pathStk.entryTable.map[rightFather-1].cumEntSize - pathStk.entryTable.map[leftFather].cumEntSize]; }; ComplexInsertRecords: PROC [tree: Tree, pathStk: PathStk] RETURNS [rtBroPg1: PageNumber] = { pse: PathStkEntryPtr = @pathStk.path[pathStk.top]; fatherPSE: PathStkEntryPtr = @pathStk.path[pathStk.top-1]; entryTable: REF EntryTable = pathStk.entryTable; oneBrotherEnough: BOOL _ FALSE; fatherIndex, bestFatherIndex: EntryOrdinal; bestFatherSize: CARDINAL _ tree.maxFreeWords+1; fatherPSE.leastSon _ pse.pageNumber; -- in case this is the root page splitting rtBroPg1 _ nilPage; IF pathStk.top>1 THEN { rtBroPg1 _ FindRightBrother[tree: tree, pathStk: pathStk, spaceNeeded: -tree.maxFreeWords]; IF rtBroPg1=nilPage THEN rtBroPg1 _ FindLeftBrother[tree: tree, pathStk: pathStk, spaceNeeded: -tree.maxFreeWords]; }; IF rtBroPg1=nilPage THEN rtBroPg1 _ tree.AllocatePage[]; IF entryTable.length<3 THEN ERROR Bug[tooFewEntries]; fatherIndex _ FillLeftPage[tree: tree, pathStk: pathStk]; DO pl0, pl1, fatherSize: CARDINAL; pl1 _ 
EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex]; IF pl1 > tree.maxFreeWords THEN EXIT; pl0 _ EntryIntervalSize[pathStk: pathStk, rightFather: fatherIndex]; IF pl0=0 OR pl0+pl1 > tree.maxFreeWords+tree.awfullyFull THEN EXIT; fatherSize _ IndexedEntrySize[pathStk: pathStk, index: fatherIndex]; IF fatherSize (IF twoBrothersEnough THEN tree.prettyFull ELSE 0) DO WHILE EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex, rightFather: minFeasIndex-1] > 0 AND EntryIntervalSize[pathStk: pathStk, leftFather: minFeasIndex-1] <= tree.maxFreeWords DO minFeasIndex _ minFeasIndex-1; IF minFeasIndex <= maxFeasIndex THEN { heap.length _ heap.length+1; TrickleDown[emptyIndex: heap.length, entry: minFeasIndex]; }; ENDLOOP; WHILE EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex, rightFather: maxFeasIndex] > tree.maxFreeWords DO IF maxFeasIndex >= minFeasIndex THEN { heapPos: HeapIndex = entryTable.map[maxFeasIndex].heapPos; heap.length _ heap.length-1; IF heapPos <= heap.length THEN { replacementEntry: EntryOrdinal = heap.entries[heap.length+1]; IF IndexedEntrySize[pathStk: pathStk, index: replacementEntry] <= IndexedEntrySize[pathStk: pathStk, index: maxFeasIndex] THEN TrickleDown[emptyIndex: heapPos, entry: replacementEntry] ELSE { emptyIndex: HeapIndex _ heapPos; entrySize: CARDINAL = IndexedEntrySize[pathStk: pathStk, index: replacementEntry]; DO son: HeapIndex _ emptyIndex*2; sonEntry: EntryOrdinal; IF son > heap.length THEN EXIT; sonEntry _ heap.entries[son]; IF son < heap.length AND IndexedEntrySize[pathStk: pathStk, index: heap.entries[son+1]] < IndexedEntrySize[pathStk: pathStk, index: sonEntry] THEN { son _ son+1; sonEntry _ heap.entries[son]; }; IF IndexedEntrySize[pathStk: pathStk, index: sonEntry] >= entrySize THEN EXIT; heap.entries[emptyIndex] _ sonEntry; entryTable.map[sonEntry].heapPos _ emptyIndex; emptyIndex _ son; ENDLOOP; heap.entries[emptyIndex] _ replacementEntry; entryTable.map[replacementEntry].heapPos _ emptyIndex; }; }; }; maxFeasIndex _ maxFeasIndex-1; ENDLOOP; IF heap.length>0 THEN { fatherSizeSum: CARDINAL; fatherIndex2 _ heap.entries[1]; fatherSizeSum _ IndexedEntrySize[pathStk: pathStk, index: fatherIndex] + IndexedEntrySize[pathStk: pathStk, index: fatherIndex2]; IF fatherSizeSumleftFather+2 AND EntryIntervalSize[pathStk: pathStk, leftFather: midFather-1, rightFather: rightFather] <= tree.maxFreeWords DO midFather _ midFather-1; ENDLOOP; }; }. '(BTreeWrite.mesa Copyright c 1985, 1986 by Xerox Corporation. All rights reserved. Operations for inserting new BTree entries and replacing existing ones. Taft, November 25, 1983 3:08 pm Russ Atkinson (RRA) April 28, 1986 2:01:30 pm PDT BTree. This code assumes that the son page is pointed to by the fatherPage[lastOffset].grPage and that this condition is preserved by InsertRecords. Bye-bye, old root page! Page is not sufficiently full. Try to merge with left or right brother page. This is done by extracting the entire contents of this page (plus one father entry) into the ESL, freeing this page, repositioning to the brother page, and calling InsertRecords. Of course, there may not actually be enough space in the brother page(s), in which case InsertRecords will turn around and allocate a new page. But in any event the overall balance of the tree should be improved. the current page has a right brother the current page surely has a left brother Extra nesting required so that pathStkWasNil is visible in the catch phrase (yecch)! 
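The underfull-page merge described above (the FatherMayNeedWork logic) comes down to a simple occupancy test before a page's contents are poured into the ESL and reinserted elsewhere. The Python fragment below is only a hedged sketch of that test, not the Mesa code: Tree and Page are hypothetical stand-ins for TreeObject and BTreePagePtr, and only the maxFreeWords/freeWords/prettyFull arithmetic mirrors the fields used here.

# Hedged sketch (Python, not Mesa): the occupancy test behind the merge.
# Tree and Page are hypothetical stand-ins, not the real structures.
from dataclasses import dataclass

@dataclass
class Tree:
    maxFreeWords: int   # free words in a completely empty page
    prettyFull: int     # occupancy below which a page is worth merging

@dataclass
class Page:
    freeWords: int      # free words currently on this page

def should_merge(tree: Tree, page: Page, directly_under_root: bool) -> bool:
    used = tree.maxFreeWords - page.freeWords   # words occupied by entries
    if directly_under_root:
        return False    # handled separately (the root may shrink instead)
    return used < tree.prettyFull   # underfull: extract into the ESL and reinsert

# Example: with maxFreeWords = 1024 and prettyFull = 256, a page holding only
# 100 words of entries would be merged into a brother; one holding 300 would not.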
entry is "simple" to delete if it is in a leaf page and removing it will still leave the page at least "prettyFull". invalidate existing PathStks that refer to this tree Set up to delete the entry. If it is in a leaf page, we just remove it. If it is in an interior page, we must find a leaf entry to replace it with. Deletion surrogate is one with greatest key less than victim's discard returned ESR Extra nesting required so that pathStkWasNil is visible in the catch phrase (yecch)! To minimize average insertion time, perform the update in one of three ways (in increasing order of difficulty, as measured by amount of temporary storage allocated and amount of data copied): 1. If replacing an existing entry of the same size, just overwrite it. 2. If the new entry fits on the page (after removing the old entry if any), just slide up the entries beyond the insertion point and insert the new entry. 3. Otherwise, leave the new entry as an EntSeqRecord at the appropriate stack level, and let InsertRecords cope with the problem. This code also takes care not to perform the startOfUpdate and endOfUpdate write references to the state page when the update consists of only a single page write. new record same length as old; just copy it over no old entry to remove, and we will insert at the leaf level pathStk.top and pse now designate the page into which to insert the new entry. first remove and discard old entry, but save its descendant pointer new entry fits on the page; slide the greater entries out of the way and drop the new entry in new entry does not fit (or there isn't yet a page to fit it into) BTreeInternal. normal update remember that the entryCount is no longer being maintained Makes a new root page given a pathStk now at level 0 and with a non-empty ESL. all entries fit the current page. Hurrah! not all the entries will fit on the current page. This is getting complex. Removes the entry from the BTree page at pse.offset and sets esr to an EntSeqRecord containing it. Removes the first entry from the ESL at pse.offset (there had better be one) and sets esr to an EntSeqRecord containing it. Private procedures Appends the cumulative lengths of the entries in the EntSeqRecord to the EntryTable held by the pathStk. It is ok for esr to be NIL. Inserts the cumulative lengths of the EntSeqRecord's entries at the front of the EntryTable held by the pathStk, and appropriately adjusts the cumulative lengths of the ones already there. This must never be done while we have an active heap, or disaster will ensue. It is ok for esr to be NIL. Move existing stuff out of the way Now compute the new lengths as if the old entries weren't there Now make the old lengths contiguous with the new ones and adjust the cumulative lengths of the old entries Removes entries from pse's ESL and deposits as many as will fit into the storage described by block and length. Raises Bug[depositESL] if the ESL is exhausted before the block is used up or if the end of the block would fall in the middle of an entry. Computes the number of words occupied by the ESL entries bounded by the leftFather and rightFather ordinals, not inclusive. Note that it is ok for leftFather and rightFather to designate nonexistent entries, i.e., leftFather = 0 and rightFather = pathStk.entryTable.length+1. If rightFather = 0 then it is defaulted to pathStk.entryTable.length+1. Private "cool" procedures The following procedures are logically local to InsertRecords and are not called anywhere else. 
They are separated out because they are infrequently called and might be packaged separately, should we ever decide to package this code at all.
Called when not all the entries will fit on the current page. All of this page's entries have been extracted into the ESL for this level. Tries to spill over onto the right brother page, or onto the left brother page if there isn't a right brother, or onto a new page if neither brother exists. Returns rtBroPg1=nilPage if this is successful. Otherwise, repositions the current level of the pathStk (if necessary) so that a right brother exists, and returns the right brother's page number. This procedure is responsible for redistributing the entries among the two pages so as to minimize size of the entry promoted to the father page. Note that this considers only brothers and not cousins or more distant relatives.
This may look strange, but see the comment below
At this point, we have two pages in hand, pse.pageNumber and rtBroPg1. All of their entries have been extracted into the ESL, so they may be considered blank pages. We will use rtBroPg1 as the right brother of the current page regardless of whether it was formerly the right brother, the left brother, or newly allocated.
there must be at least one entry each from this page, the brother page, and the father page
The idea next is to send the shortest entry into the father page such that the current page is at least "pretty" full (if we have such a choice).
Still enough room in right brother page. See if this is the shortest father entry, and try moving one more entry into right brother page.
Called when not all the entries will fit on the current page and the right brother page. Pours all the entries into the current and right brother pages and either the second right brother page or the left brother page, creating a new second right brother page if neither exists or there is still not enough space. This procedure is responsible for redistributing the entries among the three pages so as to minimize the sum of sizes of the entries promoted to the father page. Note that this considers only brothers and not cousins or more distant relatives.
See how much free space our second brother page would have to contain in order to handle the overflow. This is done by pretending to fill up this page and the first right brother page and seeing what is left over. The current page can't be the root, because one brother would surely have been enough in that case; so we don't have to pussyfoot when calling FindRightBrother.
no luck, try the left brother
left brother had space, but fatherIndexes are now invalid
there must be two entries from the father page and at least one entry each from this page and the two brother pages
Now figure out how to divide the entries among the three pages in a way that minimizes the sum of the sizes of the two entries sent to the father page while attempting to keep the pages at least "fairly full". The way this is done is as follows. The left cut point (fatherIndex) is swept leftward from its initial maximum possible value, and all possible right cut points for the initial left cut point are thrown into a heap ordered by entry size. As the left cut point moves left, some possible right cut points are added and some are removed. At each step, the minimum-size entry for the right cut point is on the top of the heap. The sum of that and the entry for the left cut point is computed and the minimum remembered.
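The objective just described can be stated separately from the heap machinery. The Python sketch below is a hypothetical brute-force restatement, not HairyInsertRecords itself: it picks the two father entries i < j whose combined size is smallest, subject to the three resulting runs each fitting in one page; the Mesa code reaches the same answer incrementally with the leftward sweep and heap described above.

# Hedged sketch of the three-way split objective (brute force; hypothetical helper).
def best_two_fathers(sizes, capacity):
    """sizes[k] is the size in words of entry k, in key order; capacity is the
    usable words per page.  Returns the pair (i, j), i < j, of entries to
    promote to the father so that the entries before i, between i and j, and
    after j each fit in one page and sizes[i] + sizes[j] is minimized."""
    n = len(sizes)
    cum = [0] * (n + 1)
    for k, s in enumerate(sizes):
        cum[k + 1] = cum[k] + s            # cum[k] = total size of entries 0..k-1
    best_sum, best_pair = None, None
    for i in range(n - 1):                 # candidate left father
        if cum[i] > capacity:              # entries left of i no longer fit in one page
            break
        for j in range(i + 1, n):          # candidate right father
            if cum[j] - cum[i + 1] > capacity:
                break                      # middle run overflows; larger j only grows it
            if cum[n] - cum[j + 1] <= capacity and (
                    best_sum is None or sizes[i] + sizes[j] < best_sum):
                best_sum, best_pair = sizes[i] + sizes[j], (i, j)
    return best_pair

# Example: best_two_fathers([4, 3, 5, 2, 6], 8) returns (1, 3), promoting the
# 3-word and 2-word entries and leaving runs of 4, 5, and 6 words on the pages.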
AddToHeap
RemoveFromHeap
SiftUp
Write the three pages and promote the two father entries to the next level.
Finds the right brother of the current page, and determines whether it has room for at least spaceNeeded additional words. If so, removes the father entry and all right brother entries and appends them to the ESL for this level. Returns nilPage if there is no right brother or it is too full. Passing a spaceNeeded argument of -tree.maxFreeWords will find the right brother if it exists, regardless of how full it is.
no right brother
right brother too full
Finds the left brother of the current page, and determines whether it has room for at least spaceNeeded additional words. If so, backs up one entry at the father's level, removes the father entry and all left brother entries, and inserts them at the front of the ESL for this level. Returns nilPage if there is no left brother or it is too full. Passing a spaceNeeded argument of -tree.maxFreeWords will find the left brother if it exists, regardless of how full it is.
Removes words' worth of entries from the front of the ESL for this level, and writes all but the first entry into rtBroPg. Designates the first entry as the (left) father of rtBroPg, and returns a new ESR containing it. Also sets the page's freeWords and minPage fields appropriately.
Removes words' worth of entries from the front of the ESL for this level, and writes them into the page designated by number. Sets the page's freeWords appropriately, but does not touch minPage.
Finds the largest entry ordinal in (leftFather .. rightFather) such that all the entries in (leftFather .. midFather) will fit in one BTree page. If rightFather = 0 then it is defaulted to pathStk.entryTable.length+1.
Finds the smallest entry ordinal in (leftFather .. rightFather) such that all the entries in (midFather .. rightFather) will fit in one BTree page. If rightFather = 0 then it is defaulted to pathStk.entryTable.length+1.
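To make the last two descriptions concrete, here is a hedged Python sketch of the "largest feasible midFather" search over the cumulative-size table (the EntryTable's cumEntSize values). find_max_mid_father is a hypothetical helper, not the Mesa procedure; cum[k] holds the total size of entries 1..k, so the open interval (a .. b) occupies cum[b-1] - cum[a] words.

# Hedged sketch: largest midFather in (leftFather .. rightFather) such that
# the entries strictly between leftFather and midFather fit in one page.
def find_max_mid_father(cum, left_father, right_father, capacity):
    if right_father == 0:                  # same defaulting rule as described above
        right_father = len(cum)            # i.e. entryTable.length + 1
    mid = right_father - 1                 # largest candidate inside the open interval
    while mid > left_father + 1 and cum[mid - 1] - cum[left_father] > capacity:
        mid -= 1
    return mid

The "smallest midFather" variant described last walks the same table from the other end, shrinking the (midFather .. rightFather) interval until it fits.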
Κ – "Cedar" style˜headšœ™Icodešœ Οmœ7™BL™GL™J™1—˜šΟk ˜ LšœžœŒ˜—Lšœžœ†˜™Lšœžœ˜)L˜——šœ ž˜Lšžœ$˜+Lšžœ˜Lšœžœ˜L˜Lšžœžœžœžœ˜—šœ™Icode2šœžœžœ ˜Mšœ žœžœ˜3Mšœ žœžœ˜"Mšœžœžœ˜9Mš œžœžœžœžœ˜5Mš œžœžœžœžœ ˜1šΟn œžœžœžœ+žœžœžœžœ žœžœ˜‹šŸœžœžœ žœ˜5Mšœ™M˜ Mšœ˜Mšœžœ˜M˜!š žœžœžœ žœžœ˜1M˜Mšœžœ&žœ˜GM˜—M˜-šžœžœ%žœ˜?Mšœ™M˜&M˜&M˜!M˜Mšžœžœ˜Mšœ˜—šžœžœ8žœ˜QMšœ!˜!Mšžœ˜Mšœ˜—MšœΨ™ΨMšœ{˜{M˜)M˜4M˜%M˜'šžœDžœž˜_šžœ˜Mšœ$™$M˜Mšœžœ˜Mšœ:˜:M˜'M˜;šžœ8ž˜>Mšžœ˜—Mšœ0˜0M˜'M˜.M˜#M˜M˜!M˜Mšœ˜M˜M˜M˜!Mšœ˜—šžœ˜Mšœ*™*Mšœžœ˜M˜!Mšœ2˜2Mšœ%˜%M˜M˜4M˜7M˜'M˜-M˜?M˜M˜$M˜!M˜Mšœ˜——Mšžœžœžœ˜5Mšžœžœ˜Mšœ˜—Mšœžœ žœ˜"Mšœ˜M˜šžœžœ˜Mšžœžœžœ˜0M˜#Mšœ˜—šœ˜MšœT™Tšžœžœžœ˜Mšžœžœ$˜9Mšœ˜Mšœ˜—M˜Mšœžœ˜M˜šœžœ˜Mšœt™t—MšœžœX˜cšžœžœ˜Mšžœžœ$˜9Mšœ˜Mšžœžœ˜Mšœ˜—M˜Mšœ8˜8M˜4Mšœ$žœl˜“M˜!šœ˜Mšœ4™4—Mšœ•™•MšœΟc*˜Dšžœ ˜šžœ˜Mšœ=˜=Mšœ4˜4Mšœ˜Mšœ!žœžœ žœ˜UMšœ˜—šžœ˜MšœA˜AMšœ>™>M˜?šžœ˜šžœ˜Mšœ3˜3Mšœ žœ4˜@šœ9˜9Mšœ™—Mšœ+˜+Mšœ˜—Mšžœ˜ —M˜šž˜šœ žœ˜&Mšžœ&˜,—Mš žœžœ žœžœž˜FMšžœ˜!Mšžœ˜—M˜M˜!Mšœ>˜>Mšœ˜——Mšœ˜—Mšžœžœ$˜9M˜Mšžœžœ˜Mšœ˜—šŸ œžœžœžœ+žœžœžœ>žœ˜²šŸ œžœ˜%MšœU˜UMšœ˜—Mšœ>˜>Mšœ˜Mšœ˜—šŸ œžœžœ+žœžœžœžœžœ>˜ΖšŸ œžœ˜&M˜ šžœžœ ž˜EMšžœ˜ —Mšœ˜—Mšœžœ žœ˜"M˜šžœžœ˜Mšžœžœžœ˜0M˜#Mšœ˜—MšœT™Tšœ˜šžœžœ˜Mšžœžœ$˜9Mšœ˜Mšœ˜—M˜Mšœžœ˜ Mšœ˜Mšœžœ˜Mšœžœ :˜Vš žœžœžœžœ&ž˜QMšžœ˜—Mšœs˜sšžœ˜šžœ˜Mšžœžœžœ˜7Mšœ8˜8Mšœ>˜>M˜!Mšœ˜—Mšžœžœžœžœ˜=—Mšœΐ™ΐMšœF™FMšœš™šMšœ™Mšœ£™£Mšœ 7˜VM˜!šžœ˜šžœ˜Mšœ0™0Mšœ<˜Mšœ!žœžœ žœ˜UMšœ˜—šžœ˜MšœA™AMšœžœžœ$˜?Mšœžœžœž œ˜5Mšœ$˜$Mšœ"˜"Mšœ&˜&M˜'Mšžœžœ"˜7M˜šžœ$žœž˜/˜Mšžœ&˜,—Mšžœžœžœžœ˜˜>Mšœ˜——Mšœ˜——Mšœ˜—Mšžœžœ$˜9M˜Mšœ˜—š Ÿœžœžœžœ žœžœ˜VM˜Mšœ#˜#Mšœžœžœžœ"˜jM˜Mšœ˜——šœ™šŸœžœžœ4žœ˜\šžœ˜!šžœ˜Mšœ ™ šžœžœžœž˜(Mšœ>˜>—šžœžœžœžœ˜QMšœ3žœ˜FMšœ$˜$Mšœ˜—Mšœ˜—š žœžœžœžœžœžœ˜EMšœ:™:Mšœžœžœ˜#M˜Mšœ˜——Mšœ˜—šŸ œžœžœ#˜=Mšœ2˜2šžœžœžœ˜Mšœ˜Mšœ)˜)š žœžœ'žœžœž˜CMšœ<˜šžœ!˜#šžœ˜Mšœ*™*Mšœ˜MšœK˜KMšœ4˜4Mšœ!˜!Mšœ˜—šžœ˜MšœK™KM˜MšœžœR˜ZMšœ%˜%Mšœ:˜:Mšœ/˜/Mšœ'˜'Mšœ<˜MšžœžœF˜^Mšœ˜——Mšœ˜——Mšžœžœžœžœ˜4Mšœ˜—Mšœ˜—š Ÿœžœžœ!žœžœžœ˜kMšžœ žœžœžœ˜Mšœžœ˜ Mšœžœžœž œ˜5Mšœ˜MšœF˜FMšœ˜—šŸœžœžœžœ˜Qšžœžœžœ˜Mšœ žœ˜Mšžœ žœžœ"žœ˜UMšœ ˜ Mšœ˜—Mšœ˜—šŸœžœžœžœ˜Ošžœžœžœ˜M˜Mšœ!˜!Mšžœ žœžœ!˜8Mšœ˜—Mšœ˜—šŸ œžœžœ/žœžœžœžœžœ˜’šžœ žœž˜ šžœ˜Mšœb™bM˜BMšœ žœ-˜>MšœF˜FM˜.Mšœ›˜›M˜!M˜—šžœ˜Mšœ{™{Mšœ žœ-˜>Mšœžœ˜!Mšœžœžœž œ˜5Mšœ˜MšœF˜FM˜——Mšœ˜Mšœ˜šžœžœ˜Mšœ3˜3M˜%Mšœ˜Mšœ˜—Mšœ˜—š Ÿœžœžœ$žœžœ&˜|M˜MšœCžœ˜IMšœ˜—š Ÿ œžœžœžœžœžœ˜TM˜šžœ!˜#šžœ˜Mšœ?˜?M˜*Mšœ˜—šžœ˜M˜"M˜,Mšžœ"žœžœ˜@M˜+Mšœ˜——Mšœ&˜&M˜Mšœ˜—š Ÿœžœžœžœ$žœ˜GM˜:Mšžœ"žœžœ˜DMšœ#˜#M˜+M˜"M˜Mšœ˜—š Ÿœžœžœ žœžœ žœ˜DMšœ;˜;Mšœ˜——šœ™šŸœžœ%žœ˜SMšœžœ™…šžœžœžœ˜Mšœ žœ!˜0M˜(Mšœ žœ˜$Mšœ#˜#šžœ ž˜Mšœ žœ˜1M˜Mšžœžœžœ˜FMšœP˜PMšœ˜Mšœ ˜ Mšžœ˜—M˜Mšœ˜—Mšœ˜—šŸœžœ%žœ˜QMšœ€žœ™¨šžœžœžœ˜Mšœ žœ!˜0Mšœ)˜)Mšœ>˜>Mšœ˜Mšœžœ˜Mšœ"™"Mšœhžœ˜}Mšœ?™?Mšœ˜Mšœ<˜šžœ˜Mšœ™Mšœ ˜ Mšœ žœ?˜Ršž˜Mšœ˜Mšœ˜Mšžœžœžœ˜M˜šžœžœvžœ˜”Mšœ ˜ Mšœ˜Mšœ˜—MšžœBžœžœ˜NM˜$Mšœ.˜.M˜Mšžœ˜—Mšœ,˜,Mšœ6˜6M˜——M˜—Mšœ˜—Mšœ˜Mšžœ˜—šžœžœ˜Mšœžœ˜M˜Mšœ˜šžœ!žœ˜)Mšœžœ˜Mšœ"˜"Mšœ˜Mšœ ˜ Mšœ˜—Mšœ˜—M˜Mšžœ˜—Mšžœžœžœ˜;MšœK™KMšœO˜OMšœP˜PMšœ0˜0MšœK˜KMšœe˜eMšœ˜Mšœ1˜1Mšœ˜—šŸœžœ-žœžœ˜mMšœ€™€Mšœ2˜2Mšœ:˜:Mšœžœ˜Mšœ˜Mšœžœ˜&šžœž˜šžœ˜Mšœ3˜3šžœFžœ˜NMšœ™Mšœ'˜'Mšžœ ˜Mšœ˜—Mšœ@˜@Mšœ+˜+Mšœ'˜'Mšœ˜—šžœ˜Mšœ@˜@Mšœ,˜,Mšœ˜——Mšœ&˜&šžœžœ"žœžœ˜JMšœ™Mšœ˜Mšžœ ˜Mšœ˜—Mšœc˜cMšœ˜Mšœ4˜4MšœB˜BM˜-MšœA˜AMšœ,˜,Mšœ˜—šŸœžœ-žœžœ˜lMšœΨ™ΨMšœ2˜2Mšœ:˜:Mšœ8˜8Mšœžœ˜&Mšœžœ˜M˜Mšžœ"žœžœ ˜:M˜9Mšœ;˜;Mšœ<˜