BTreeWrite.mesa
Copyright © 1985, 1986 by Xerox Corporation. All rights reserved.
Operations for inserting new BTree entries and replacing existing ones.
Taft, November 25, 1983 3:08 pm
Russ Atkinson (RRA) April 28, 1986 2:01:30 pm PDT
DIRECTORY
BTree USING [Compare, Entry, EntryFromRecord, EntrySize, EntSize, Error, Key, PageNumber, Record, ReferencePage, ReleasePage, UpdateState, UpdateType],
BTreeInternal USING [AdjustTreeState, AllocatePage, AssignRefESR, BackUpAndRemoveEntry, BackUpOneEntry, BTreeEntry, BTreeEntrySize, BTreePagePtr, Bug, Compare, entry0Offset, entry1Offset, EntryFromRecord, EntryOrdinal, entryOverhead, EntrySize, EntryTable, EntryTableRec, EntSeqRecord, FreePage, freePageMarker, GetDefaultPathStk, GetHeapAndTable, Heap, HeapIndex, InsertRecords, Lock, maxLevelsInTree, nilOffset, nilPage, PathEntryLE, PathStk, PathStkEntry, PathStkIndex, PathStkObject, PathToMaxDescendant, ReferencePage, ReferenceStack, ReleasePage, RemoveEntry, RepairOffsets, ReturnDefaultPathStk, ReturnHeapAndTable, Tree, TreeObject, Unlock, WriteStatePage],
PrincOpsUtils USING [LongCopy, LongMove];
BTreeWrite: PROGRAM
IMPORTS BTree, BTreeInternal, PrincOpsUtils
EXPORTS BTree, BTreeInternal
= { OPEN BTree, BTreeInternal;
CARD: TYPE = LONG CARDINAL;
BTree.
Tree: TYPE = REF TreeObject;
TreeObject: PUBLIC TYPE = BTreeInternal.TreeObject;
PathStk: TYPE = REF PathStkObject;
PathStkObject: PUBLIC TYPE = BTreeInternal.PathStkObject;
PathStkEntryPtr: TYPE = LONG POINTER TO PathStkEntry;
BTreeEntryPtr: TYPE = LONG POINTER TO BTreeEntry;
DeleteKey: PUBLIC SAFE PROC [tree: Tree, key: Key, pathStk: PathStk ← NIL, useExistingPath: BOOL ← FALSE] RETURNS [found: BOOL] = TRUSTED {
FatherMayNeedWork: PROC RETURNS [needsWork: BOOL] = {
This code assumes that the son page is pointed to by the fatherPage[lastOffset].grPage and that this condition is preserved by InsertRecords.
pagePtr, otherPtr: BTreePagePtr;
fatherPSE: PathStkEntryPtr;
fatherFreeWords: CARDINAL;
pse ← @pathStk.path[pathStk.top];
IF pse.eslFront=NIL THEN needsWork ← FALSE ELSE {
tree.InsertRecords[pathStk];
needsWork ← pathStk.top#0 AND pathStk.path[pathStk.top-1].eslFront#NIL;
};
pagePtr ← tree.ReferencePage[pse.pageNumber];
IF pathStk.top=1 AND pagePtr.freeWords=tree.maxFreeWords THEN {
Bye-bye, old root page!
tree.state.rootPage ← pagePtr.minPage;
tree.state.depth ← tree.state.depth-1;
tree.ReleasePage[pse.pageNumber];
tree.FreePage[pse.pageNumber];
RETURN [FALSE];
};
IF pathStk.top=1 OR tree.maxFreeWords-pagePtr.freeWords >= tree.prettyFull THEN {
tree.ReleasePage[pse.pageNumber];
RETURN;
};
Page is not sufficiently full. Try to merge with left or right brother page. This is done by extracting the entire contents of this page (plus one father entry) into the ESL, freeing this page, repositioning to the brother page, and calling InsertRecords. Of course, there may not actually be enough space in the brother page(s), in which case InsertRecords will turn around and allocate a new page. But in any event the overall balance of the tree should be improved.
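For example (illustrative figures only; the real thresholds come from the tree's fullness parameters): if the underfull page holds 120 words of entries and its right brother has 400 free words, InsertRecords can deposit the extracted entries, together with the intervening father entry, into the brother, and the tree loses one page. If the brother cannot absorb them, InsertRecords falls back to allocating a fresh page, so the worst case merely redistributes entries more evenly.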
AppendEntSeqRecord[pse: pse, esr: MakeEntSeqRecord[entSeq: @pagePtr.entries, length: tree.maxFreeWords-pagePtr.freeWords]];
fatherPSE ← @pathStk.path[pathStk.top-1];
otherPtr ← tree.ReferencePage[fatherPSE.pageNumber];
fatherFreeWords ← otherPtr.freeWords;
tree.ReleasePage[fatherPSE.pageNumber];
IF fatherPSE.offset < nilOffset+(tree.state.pageSize-fatherFreeWords) OR fatherPSE.eslFront#NIL
THEN {
the current page has a right brother
rtBroPg: PageNumber;
esr: REF EntSeqRecord;
[esr: esr, grPage: rtBroPg] ← tree.RemoveEntry[fatherPSE];
AppendEntSeqRecord[pse: pse, esr: esr];
otherPtr ← tree.ReferencePage[fatherPSE.pageNumber, write];
IF otherPtr[fatherPSE.lastOffset].grPage # pse.pageNumber THEN
ERROR Bug[mcCreightWasWrong];
otherPtr[fatherPSE.lastOffset].grPage ← rtBroPg;
tree.ReleasePage[fatherPSE.pageNumber];
otherPtr ← tree.ReferencePage[rtBroPg, write];
otherPtr.minPage ← pagePtr.minPage;
tree.ReleasePage[rtBroPg];
tree.ReleasePage[pse.pageNumber];
tree.FreePage[pse.pageNumber];
pse.pageNumber ← rtBroPg;
pse.offset ← entry1Offset;
pse.lastOffset ← entry0Offset;
pse.nextToLastOffset ← nilOffset;
}
ELSE {
the current page surely has a left brother
esr: REF EntSeqRecord;
tree.ReleasePage[pse.pageNumber];
[esr: esr] ← tree.BackUpAndRemoveEntry[fatherPSE];
PushEntSeqRecord[pse: pse, esr: esr];
tree.FreePage[pse.pageNumber];
otherPtr ← tree.ReferencePage[fatherPSE.pageNumber];
pse.pageNumber ← otherPtr[fatherPSE.lastOffset].grPage;
tree.ReleasePage[fatherPSE.pageNumber];
pagePtr ← tree.ReferencePage[pse.pageNumber];
pse.offset ← nilOffset+(tree.state.pageSize-pagePtr.freeWords);
pse.lastOffset ← entry1Offset;
pse.nextToLastOffset ← entry0Offset;
tree.ReleasePage[pse.pageNumber];
tree.RepairOffsets[pse];
};
IF pse.eslFront#NIL THEN tree.InsertRecords[pathStk];
RETURN [TRUE];
};
pathStkWasNil: BOOL ← pathStk=NIL;
pse: PathStkEntryPtr;
tree.Lock[update];
IF pathStkWasNil THEN {
IF useExistingPath THEN ERROR Error[nilPathStk];
pathStk ← tree.GetDefaultPathStk[];
};
{
Extra nesting required so that pathStkWasNil is visible in the catch phrase (yecch)!
ENABLE UNWIND => {
IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk];
tree.Unlock[];
};
origStkTop: PathStkIndex;
pagePtr: BTreePagePtr ← NIL;
descendantPg: PageNumber;
simpleDelete: BOOL;
entry is "simple" to delete if it is in a leaf page and removing it will still leave the page at least "prettyFull".
equal: BOOL ← tree.PathEntryLE[key: key, pathStk: pathStk, useExistingPath: useExistingPath].equal;
IF ~equal THEN {
IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk];
tree.Unlock[];
RETURN [FALSE];
};
origStkTop ← pathStk.top;
[ptr: pagePtr, pse: pse] ← tree.ReferenceStack[pathStk];
descendantPg ← pagePtr[pse.nextToLastOffset].grPage;
simpleDelete ← descendantPg=nilPage AND tree.maxFreeWords - (pagePtr.freeWords + tree.BTreeEntrySize[@pagePtr[pse.lastOffset]]) >= tree.prettyFull;
tree.ReleasePage[pse.pageNumber];
tree.version ← tree.version+1; -- invalidate existing PathStks that refer to this tree
Set up to delete the entry. If it is in a leaf page, we just remove it. If it is in an interior page, we must find a leaf entry to replace it with.
tree.BackUpOneEntry[pse]; -- pse.offset should index deletion victim
IF simpleDelete
THEN {
tree.AdjustTreeState[update: unchanged, deltaEntryCount: -1];
pagePtr ← tree.ReferencePage[pse.pageNumber, write];
[] ← tree.RemoveEntry[pse];
tree.ReleasePage[pse.pageNumber, IF tree.longUpdate THEN unchanged ELSE endOfUpdate];
}
ELSE {
tree.AdjustTreeState[update: startOfUpdate, deltaEntryCount: -1];
The deletion surrogate is the entry with the greatest key less than the victim's.
tree.PathToMaxDescendant[pathStk: pathStk, page: descendantPg];
IF pathStk.top > origStkTop
THEN {
dpse: PathStkEntryPtr = @pathStk.path[pathStk.top];
leafESR: REF EntSeqRecord ← tree.BackUpAndRemoveEntry[dpse].esr;
[grPage: leafESR.entSeqP.grPage] ← tree.RemoveEntry[pse]; -- discard returned ESR
AppendEntSeqRecord[pse: pse, esr: leafESR];
}
ELSE [] ← tree.RemoveEntry[pse];
tree.GetHeapAndTable[pathStk];
DO
needsWork: BOOL ← FatherMayNeedWork[ !
UNWIND => tree.ReturnHeapAndTable[pathStk]];
IF pathStk.top=0 OR (~needsWork AND pathStk.top<=origStkTop) THEN EXIT
ELSE pathStk.top ← pathStk.top-1;
ENDLOOP;
pathStk.top ← 0;
tree.ReturnHeapAndTable[pathStk];
tree.AdjustTreeState[update: endOfUpdate, deltaEntryCount: 0];
};
};
IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk];
tree.Unlock[];
RETURN [TRUE];
};
UpdateRecord: PUBLIC SAFE PROC [tree: Tree, key: Key, pathStk: PathStk ← NIL, useExistingPath: BOOL ← FALSE, record: Record, updateType: UpdateType ← insertOrReplace] = TRUSTED {
ProduceEntry: PROC [entry: Entry] = {
PrincOpsUtils.LongCopy[to: entry, from: tree.EntryFromRecord[record], nwords: words];
};
words: EntSize = tree.EntrySize[tree.EntryFromRecord[record]];
UpdateEntry[tree: tree, key: key, pathStk: pathStk, useExistingPath: useExistingPath, words: words, Proc: ProduceEntry, updateType: updateType];
};
UpdateEntry: PUBLIC PROC [tree: Tree, key: Key, pathStk: PathStk ← NIL, useExistingPath: BOOL ← FALSE, words: EntSize, Proc: UNSAFE PROC [entry: Entry], updateType: UpdateType ← insertOrReplace] = {
CallEntryProc: PROC [entry: Entry] = {
Proc[entry];
IF tree.EntrySize[entry]#words OR tree.Compare[key, entry]#equal THEN
ERROR Error[wrongEntryProduced];
};
pathStkWasNil: BOOL ← pathStk=NIL;
tree.Lock[update];
IF pathStkWasNil THEN {
IF useExistingPath THEN ERROR Error[nilPathStk];
pathStk ← tree.GetDefaultPathStk[];
};
Extra nesting required so that pathStkWasNil is visible in the catch phrase (yecch)!
{
ENABLE UNWIND => {
IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk];
tree.Unlock[];
};
leafStkTop: PathStkIndex;
equal: BOOL;
pse: PathStkEntryPtr;
pagePtr: BTreePagePtr ← NIL;
foundEntSize: CARDINAL ← 0; -- zero means there is not an existing entry with this key
IF CARDINAL[words+entryOverhead] NOT IN [1+entryOverhead..tree.maxFreeWords] THEN
ERROR Error[entrySizesWrong];
[equal: equal, depth: leafStkTop] ← tree.PathEntryLE[key: key, pathStk: pathStk, useExistingPath: useExistingPath];
IF equal
THEN {
IF updateType=insert THEN ERROR Error[wrongUpdateType];
[pse: pse, ptr: pagePtr] ← tree.ReferenceStack[pathStk];
foundEntSize ← tree.EntrySize[@pagePtr[pse.lastOffset].entry];
tree.ReleasePage[pse.pageNumber];
}
ELSE IF updateType=replace THEN ERROR Error[wrongUpdateType];
To minimize average insertion time, perform the update in one of three ways (in increasing order of difficulty, as measured by amount of temporary storage allocated and amount of data copied):
1. If replacing an existing entry of the same size, just overwrite it.
2. If the new entry fits on the page (after removing the old entry if any), just slide up the entries beyond the insertion point and insert the new entry.
3. Otherwise, leave the new entry as an EntSeqRecord at the appropriate stack level, and let InsertRecords cope with the problem.
This code also takes care not to perform the startOfUpdate and endOfUpdate write references to the state page when the update consists of only a single page write.
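For example (illustrative sizes): replacing a 12-word entry by another 12-word entry is case 1 and touches only that page. Replacing it by an 18-word entry on a page with 40 free words is case 2, since 18+entryOverhead <= 40 + (12+entryOverhead). Inserting a brand-new 50-word entry into a leaf page with only 30 free words is case 3: the entry is left on the ESL and InsertRecords splits or spills pages as needed.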
tree.version ← tree.version+1; -- invalidate existing PathStks that refer to this tree
pse ← @pathStk.path[pathStk.top];
IF words=foundEntSize
THEN {
new record same length as old; just copy it over
tree.AdjustTreeState[update: unchanged, deltaEntryCount: 0];
pagePtr ← tree.ReferencePage[pse.pageNumber, write];
CallEntryProc[@pagePtr[pse.lastOffset].entry];
tree.ReleasePage[pse.pageNumber, IF tree.longUpdate THEN unchanged ELSE endOfUpdate];
}
ELSE {
removedEntGrPage: PageNumber ← nilPage;
newEntryFits: BOOL ← FALSE;
IF foundEntSize=0 THEN {
no old entry to remove, and we will insert at the leaf level
pathStk.top ← leafStkTop;
pse ← @pathStk.path[pathStk.top];
};
pathStk.top and pse now designate the page into which to insert the new entry.
IF pathStk.top>0 THEN {
pagePtr ← tree.ReferencePage[pse.pageNumber];
newEntryFits ← CARDINAL[words+entryOverhead] <= CARDINAL[pagePtr.freeWords + (IF foundEntSize=0 THEN 0 ELSE foundEntSize+entryOverhead)];
tree.ReleasePage[pse.pageNumber];
};
tree.AdjustTreeState[update: IF newEntryFits THEN unchanged ELSE startOfUpdate, deltaEntryCount: IF foundEntSize=0 THEN 1 ELSE 0];
IF pathStk.top>0 THEN pagePtr ← tree.ReferencePage[pse.pageNumber, write];
IF foundEntSize#0 THEN
first remove and discard old entry, but save its descendant pointer
[grPage: removedEntGrPage] ← tree.BackUpAndRemoveEntry[pse];
IF newEntryFits
THEN {
new entry fits on the page; slide the greater entries out of the way and drop the new entry in
entPtr: BTreeEntryPtr ← @pagePtr[pse.offset];
PrincOpsUtils.LongMove[to: entPtr+words+entryOverhead, from: entPtr, nwords: nilOffset+(tree.state.pageSize-pagePtr.freeWords)-pse.offset];
CallEntryProc[@entPtr.entry];
entPtr.grPage ← removedEntGrPage;
pagePtr.freeWords ← pagePtr.freeWords - (words+entryOverhead);
tree.ReleasePage[pse.pageNumber, IF tree.longUpdate THEN unchanged ELSE endOfUpdate];
}
ELSE {
new entry does not fit (or there isn't yet a page to fit it into)
esr: REF EntSeqRecord ← NEW[EntSeqRecord[words+entryOverhead]];
esr.entSeqP ← LOOPHOLE[BASE[DESCRIPTOR[esr.entSeq]]];
esr.entSeqLen ← words+entryOverhead;
CallEntryProc[@esr.entSeqP.entry];
esr.entSeqP.grPage ← removedEntGrPage;
AppendEntSeqRecord[pse: pse, esr: esr];
IF pathStk.top>0 THEN tree.ReleasePage[pse.pageNumber];
tree.GetHeapAndTable[pathStk];
WHILE pathStk.path[pathStk.top].eslFront#NIL DO
tree.InsertRecords[pathStk !
UNWIND => tree.ReturnHeapAndTable[pathStk]];
IF pathStk.top=0 THEN EXIT ELSE pathStk.top ← pathStk.top-1;
ENDLOOP;
tree.ReturnHeapAndTable[pathStk];
tree.AdjustTreeState[update: endOfUpdate, deltaEntryCount: 0];
};
};
};
IF pathStkWasNil THEN tree.ReturnDefaultPathStk[pathStk];
tree.Unlock[];
};
SetUpdateInProgress: PUBLIC SAFE PROC [tree: Tree, updateInProgress: BOOL] = TRUSTED {
tree.Lock[update];
tree.longUpdate ← updateInProgress;
tree.AdjustTreeState[update: IF updateInProgress THEN startOfUpdate ELSE endOfUpdate, deltaEntryCount: 0];
tree.Unlock[];
};
BTreeInternal.
AdjustTreeState: PUBLIC PROC [tree: Tree, update: UpdateState, deltaEntryCount: INTEGER] = {
IF tree.maintainRecomputableState
THEN {
normal update
IF tree.state.entryCount#LAST[CARD] THEN
tree.state.entryCount ← tree.state.entryCount+deltaEntryCount;
IF update#unchanged AND ~(tree.state.updateInProgress AND tree.longUpdate) THEN {
tree.state.updateInProgress ← update=startOfUpdate OR tree.longUpdate;
tree.WriteStatePage[update: update];
};
}
ELSE IF deltaEntryCount#0 AND tree.state.entryCount#LAST[CARD] THEN {
remember that the entryCount is no longer being maintained
tree.state.entryCount ← LAST[CARD];
tree.WriteStatePage[];
};
};
InsertRecords: PUBLIC PROC [tree: Tree, pathStk: PathStk] = {
pse: PathStkEntryPtr = @pathStk.path[pathStk.top];
IF pse.eslFront#NIL THEN {
pathStk.entryTable.length ← 0;
pathStk.entryTable.map[0].cumEntSize ← 0;
FOR esr: REF EntSeqRecord ← pse.eslFront, esr.fwdP UNTIL esr=NIL DO
AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr];
ENDLOOP;
IF pathStk.top=0
THEN {
Makes a new root page given a pathStk now at level 0 and with a non-empty ESL.
wordsToInsert: CARDINAL = EntryIntervalSize[pathStk: pathStk, leftFather: 0];
newRootPage: PageNumber = tree.AllocatePage[];
pagePtr: BTreePagePtr = tree.ReferencePage[newRootPage, write];
IF tree.state.depth >= maxLevelsInTree THEN ERROR Error[depthExceeded];
pagePtr.minPage ← pathStk.path[0].leastSon;
tree.ReleasePage[newRootPage];
IF wordsToInsert > tree.maxFreeWords THEN ERROR Bug[newRootOverflow];
WritePage[tree: tree, pse: @pathStk.path[0], number: newRootPage, words: wordsToInsert];
tree.state.rootPage ← newRootPage;
tree.state.depth ← tree.state.depth+1;
}
ELSE {
pagePtr: BTreePagePtr = tree.ReferencePage[pse.pageNumber, write];
tailBlkPtr: BTreeEntryPtr = @pagePtr[pse.offset];
tailBlkLen: CARDINAL = (nilOffset+tree.state.pageSize-pagePtr.freeWords)-pse.offset;
wordsToInsert: CARDINAL = EntryIntervalSize[pathStk: pathStk];
IF wordsToInsert<=pagePtr.freeWords
THEN {
all the entries fit on the current page. Hurrah!
PrincOpsUtils.LongMove[to: tailBlkPtr+wordsToInsert, from: tailBlkPtr, nwords: nilOffset+(tree.state.pageSize-pagePtr.freeWords)-pse.offset];
DepositESL[tree: tree, pse: pse, block: tailBlkPtr, length: wordsToInsert];
pagePtr.freeWords ← pagePtr.freeWords-wordsToInsert;
tree.ReleasePage[pse.pageNumber];
}
ELSE {
not all the entries will fit on the current page. This is getting complex.
rtBroPg1: PageNumber;
esr: REF EntSeqRecord ← MakeEntSeqRecord[@pagePtr[entry1Offset], pse.offset-entry1Offset];
PushEntSeqRecord[pse: pse, esr: esr];
PushEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr];
esr ← MakeEntSeqRecord[tailBlkPtr, tailBlkLen];
AppendEntSeqRecord[pse: pse, esr: esr];
AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr];
tree.ReleasePage[pse.pageNumber];
rtBroPg1 ← ComplexInsertRecords[tree: tree, pathStk: pathStk];
IF rtBroPg1#nilPage THEN HairyInsertRecords[tree: tree, pathStk: pathStk, rtBroPg1: rtBroPg1];
};
};
IF pse.eslFront#NIL THEN ERROR Bug[entriesLeftOver];
};
};
MakeEntSeqRecord: PUBLIC PROC [entSeq: BTreeEntryPtr, length: CARDINAL] RETURNS [esr: REF EntSeqRecord] = {
IF length=0 THEN RETURN [NIL];
esr ← NEW[EntSeqRecord[length]];
esr.entSeqP ← LOOPHOLE[BASE[DESCRIPTOR[esr.entSeq]]];
esr.entSeqLen ← length;
PrincOpsUtils.LongCopy[to: esr.entSeqP, from: entSeq, nwords: length];
};
AppendEntSeqRecord: PUBLIC PROC [pse: PathStkEntryPtr, esr: REF EntSeqRecord] = {
IF esr#NIL THEN {
esr.fwdP ← NIL;
IF pse.eslRear=NIL THEN AssignRefESR[@pse.eslFront, esr] ELSE pse.eslRear.fwdP ← esr;
AssignRefESR[@pse.eslRear, esr];
};
};
PushEntSeqRecord: PUBLIC PROC [pse: PathStkEntryPtr, esr: REF EntSeqRecord] = {
IF esr#NIL THEN {
esr.fwdP ← pse.eslFront;
AssignRefESR[@pse.eslFront, esr];
IF pse.eslRear=NIL THEN AssignRefESR[@pse.eslRear, esr];
};
};
RemoveEntry: PUBLIC PROC [tree: Tree, pse: PathStkEntryPtr, ignoreESL: BOOL ← FALSE]
RETURNS [esr: REF EntSeqRecord ← NIL, grPage: PageNumber] = {
IF ignoreESL OR pse.eslFront=NIL
THEN {
Removes the entry from the BTree page at pse.offset and sets esr to an EntSeqRecord containing it.
pagePtr: BTreePagePtr = tree.ReferencePage[pse.pageNumber, write];
entSize: CARDINAL = tree.BTreeEntrySize[@pagePtr[pse.offset]];
esr ← MakeEntSeqRecord[entSeq: @pagePtr[pse.offset], length: entSize];
pagePtr.freeWords ← pagePtr.freeWords+entSize;
PrincOpsUtils.LongCopy[to: @pagePtr[pse.offset], from: @pagePtr[pse.offset]+entSize, nwords: nilOffset+(tree.state.pageSize-pagePtr.freeWords)-pse.offset];
tree.ReleasePage[pse.pageNumber];
}
ELSE {
Removes the first entry from the ESL at pse.offset (there had better be one) and sets esr to an EntSeqRecord containing it.
entSize: CARDINAL = tree.BTreeEntrySize[pse.eslFront.entSeqP];
esr ← NEW[EntSeqRecord[entSize]];
esr.entSeqP ← LOOPHOLE[BASE[DESCRIPTOR[esr.entSeq]]];
esr.entSeqLen ← entSize;
DepositESL[tree: tree, pse: pse, block: esr.entSeqP, length: entSize];
};
grPage ← esr.entSeqP.grPage;
esr.entSeqP.grPage ← nilPage;
IF grPage # nilPage THEN {
pagePtr: BTreePagePtr = tree.ReferencePage[grPage];
esr.entSeqP.grPage ← pagePtr.minPage;
tree.ReleasePage[grPage];
};
};
BackUpAndRemoveEntry: PUBLIC PROC [tree: Tree, pse: PathStkEntryPtr]
RETURNS [esr: REF EntSeqRecord, grPage: PageNumber] = {
tree.BackUpOneEntry[pse];
[esr: esr, grPage: grPage] ← tree.RemoveEntry[pse: pse, ignoreESL: TRUE];
};
AllocatePage: PUBLIC SAFE PROC [tree: Tree] RETURNS [number: PageNumber] = TRUSTED {
pagePtr: BTreePagePtr;
IF tree.state.firstFreePage=nilPage
THEN {
number ← (tree.state.greatestPage ← tree.state.greatestPage+1);
pagePtr ← tree.ReferencePage[number, new];
}
ELSE {
number ← tree.state.firstFreePage;
pagePtr ← tree.ReferencePage[number, write];
IF pagePtr.freeWords#freePageMarker THEN ERROR Bug[pageNotFree];
tree.state.firstFreePage ← pagePtr.minPage;
};
pagePtr.freeWords ← tree.maxFreeWords;
tree.ReleasePage[number];
};
FreePage: PUBLIC SAFE PROC [tree: Tree, number: PageNumber] = TRUSTED {
pagePtr: BTreePagePtr = tree.ReferencePage[number, write];
IF pagePtr.freeWords=freePageMarker THEN ERROR Bug[pageAlreadyFree];
pagePtr.freeWords ← freePageMarker;
pagePtr.minPage ← tree.state.firstFreePage;
tree.state.firstFreePage ← number;
tree.ReleasePage[number];
};
LongMove: PUBLIC PROC [to, from: LONG POINTER, nWords: CARDINAL] = {
PrincOpsUtils.LongMove[to: to, from: from, nwords: nWords];
};
Private procedures
AppendEntSeqLengths: PROC [tree: Tree, pathStk: PathStk, esr: REF EntSeqRecord] = {
Appends the cumulative lengths of the entries in the EntSeqRecord to the EntryTable held by the pathStk. It is ok for esr to be NIL.
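For example, if the table currently ends at index k with cumEntSize 20, and the EntSeqRecord holds three entries whose total sizes are 4, 6, and 3 words, the appended values at indices k+1, k+2, and k+3 are 24, 30, and 33, and entryTable.length becomes k+3 (illustrative sizes only).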
IF esr#NIL THEN {
entryTable: REF EntryTable ← pathStk.entryTable;
index: EntryOrdinal ← entryTable.length;
wordsLeft: CARDINAL ← esr.entSeqLen;
entry: BTreeEntryPtr ← esr.entSeqP;
WHILE wordsLeft>0 DO
entrySize: CARDINAL = tree.BTreeEntrySize[entry];
index ← index+1;
IF index >= entryTable.maxLength THEN ERROR Bug[tooManyEntriesInPage];
entryTable.map[index].cumEntSize ← entryTable.map[index-1].cumEntSize+entrySize;
entry ← entry+entrySize;
wordsLeft ← wordsLeft-entrySize;
ENDLOOP;
entryTable.length ← index;
};
};
PushEntSeqLengths: PROC [tree: Tree, pathStk: PathStk, esr: REF EntSeqRecord] = {
Inserts the cumulative lengths of the EntSeqRecord's entries at the front of the EntryTable held by the pathStk, and appropriately adjusts the cumulative lengths of the ones already there. This must never be done while we have an active heap, or disaster will ensue. It is ok for esr to be NIL.
IF esr#NIL THEN {
entryTable: REF EntryTable = pathStk.entryTable;
oldLen: EntryOrdinal = entryTable.length;
tempFirstOldIndex: EntryOrdinal = entryTable.maxLength-oldLen;
newLen: EntryOrdinal;
delta: CARDINAL;
Move existing stuff out of the way
PrincOpsUtils.LongMove[to: @entryTable.map[tempFirstOldIndex], from: @entryTable.map[1], nwords: oldLen*SIZE[EntryTableRec]];
Now compute the new lengths as if the old entries weren't there
entryTable.length ← 0;
AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: esr];
newLen ← entryTable.length;
IF newLen >= tempFirstOldIndex THEN ERROR Bug[tooManyEntriesInPage];
entryTable.length ← newLen+oldLen;
Now make the old lengths contiguous with the new ones and adjust the cumulative lengths of the old entries
delta ← entryTable.map[newLen].cumEntSize;
FOR i: EntryOrdinal IN [0..oldLen) DO
entryTable.map[newLen+1+i].cumEntSize ← entryTable.map[tempFirstOldIndex+i].cumEntSize+delta;
ENDLOOP;
};
};
DepositESL: PROC [tree: Tree, pse: PathStkEntryPtr, block: BTreeEntryPtr, length: CARDINAL] = {
Removes entries from pse's ESL and deposits as many as will fit into the storage described by block and length. Raises Bug[depositESL] if the ESL is exhausted before the block is used up or if the end of the block would fall in the middle of an entry.
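For example, with length = 40 and an ESL whose first two records hold 30 and 25 words: the first record is copied whole and unlinked, leaving 10 words of room; the second record is then consumed entry by entry, and if its leading entry is 12 words the block would end in the middle of it, so Bug[depositESL] is raised, while a 7-word leading entry would be copied and the record shortened to 18 words (illustrative sizes only).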
WHILE length#0 AND pse.eslFront#NIL DO
esr: REF EntSeqRecord ← pse.eslFront;
entSeqP: BTreeEntryPtr = esr.entSeqP;
IF esr.entSeqLen <= length
THEN {
PrincOpsUtils.LongCopy[to: block, from: entSeqP, nwords: esr.entSeqLen];
block ← block+esr.entSeqLen;
length ← length-esr.entSeqLen;
AssignRefESR[@pse.eslFront, esr.fwdP];
esr.fwdP ← NIL;
}
ELSE {
firstEntSize: CARDINAL = tree.BTreeEntrySize[entSeqP];
IF firstEntSize <= length
THEN {
PrincOpsUtils.LongCopy[to: block, from: entSeqP, nwords: firstEntSize];
block ← block+firstEntSize;
length ← length-firstEntSize;
esr.entSeqP ← entSeqP+firstEntSize;
esr.entSeqLen ← esr.entSeqLen-firstEntSize;
}
ELSE ERROR Bug[depositESL]; -- block would end in middle of entry
};
ENDLOOP;
IF length#0 THEN ERROR Bug[depositESL]; -- ESL exhausted
IF pse.eslFront=NIL THEN AssignRefESR[@pse.eslRear, NIL];
};
EntryIntervalSize: PROC [pathStk: PathStk, leftFather, rightFather: EntryOrdinal ← 0] RETURNS [words: CARDINAL] = {
Computes the number of words occupied by the ESL entries bounded by the leftFather and rightFather ordinals, not inclusive. Note that it is ok for leftFather and rightFather to designate nonexistent entries, i.e., leftFather = 0 and rightFather = pathStk.entryTable.length+1. If rightFather = 0 then it is defaulted to pathStk.entryTable.length+1.
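For example, if entryTable.length = 3 and map[0..3].cumEntSize = 0, 4, 10, 13, then EntryIntervalSize[leftFather: 1, rightFather: 3] = map[2].cumEntSize - map[1].cumEntSize = 6, the size of entry 2 alone, and EntryIntervalSize[leftFather: 0] (rightFather defaulted to 4) = 13, the size of all three entries (illustrative sizes only).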
IF rightFather=0 THEN rightFather ← pathStk.entryTable.length+1;
RETURN [pathStk.entryTable.map[rightFather-1].cumEntSize - pathStk.entryTable.map[leftFather].cumEntSize];
};
Private "cool" procedures
The following procedures are logically local to InsertRecords and are not called anywhere else. They are separated out because they are infrequently called and might be packaged separately, should we ever decide to package this code at all.
ComplexInsertRecords: PROC [tree: Tree, pathStk: PathStk] RETURNS [rtBroPg1: PageNumber] = {
Called when not all the entries will fit on the current page. All of this page's entries have been extracted into the ESL for this level. Tries to spill over onto the right brother page, or onto the left brother page if there isn't a right brother, or onto a new page if neither brother exists. Returns rtBroPg1=nilPage if this is successful. Otherwise, repositions the current level of the pathStk (if necessary) so that a right brother exists, and returns the right brother's page number. This procedure is responsible for redistributing the entries among the two pages so as to minimize the size of the entry promoted to the father page. Note that this considers only brothers and not cousins or more distant relatives.
pse: PathStkEntryPtr = @pathStk.path[pathStk.top];
fatherPSE: PathStkEntryPtr = @pathStk.path[pathStk.top-1];
entryTable: REF EntryTable = pathStk.entryTable;
oneBrotherEnough: BOOL ← FALSE;
fatherIndex, bestFatherIndex: EntryOrdinal;
bestFatherSize: CARDINAL ← tree.maxFreeWords+1;
fatherPSE.leastSon ← pse.pageNumber; -- in case this is the root page splitting
rtBroPg1 ← nilPage;
IF pathStk.top>1 THEN {
rtBroPg1 ← FindRightBrother[tree: tree, pathStk: pathStk, spaceNeeded: -tree.maxFreeWords];
IF rtBroPg1=nilPage THEN
This may look strange, but see the comment below
rtBroPg1 ← FindLeftBrother[tree: tree, pathStk: pathStk, spaceNeeded: -tree.maxFreeWords];
};
IF rtBroPg1=nilPage THEN rtBroPg1 ← tree.AllocatePage[];
At this point, we have two pages in hand, pse.pageNumber and rtBroPg1. All of their entries have been extracted into the ESL, so they may be considered blank pages. We will use rtBroPg1 as the right brother of the current page regardless of whether it was formerly the right brother, the left brother, or newly allocated.
IF entryTable.length<3 THEN ERROR Bug[tooFewEntries];
there must be at least one entry each from this page, the brother page, and the father page
fatherIndex ← FillLeftPage[tree: tree, pathStk: pathStk];
The idea now is to send the shortest entry into the father page such that the current page remains at least "pretty" full (if we have such a choice).
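For example, if cut points 9, 8, and 7 all leave both pages within bounds and the corresponding candidate father entries are 14, 6, and 11 words, the 6-word entry at cut point 8 is remembered as bestFatherIndex and is the one ultimately promoted (illustrative sizes only).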
DO
pl0, pl1, fatherSize: CARDINAL;
pl1 ← EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex];
IF pl1 > tree.maxFreeWords THEN EXIT;
pl0 ← EntryIntervalSize[pathStk: pathStk, rightFather: fatherIndex];
IF pl0=0 OR pl0+pl1 > tree.maxFreeWords+tree.awfullyFull THEN EXIT;
There is still enough room in the right brother page. See whether this is the shortest father entry so far, and try moving one more entry into the right brother page.
fatherSize ← IndexedEntrySize[pathStk: pathStk, index: fatherIndex];
IF fatherSize<bestFatherSize THEN {
bestFatherIndex ← fatherIndex;
bestFatherSize ← fatherSize;
oneBrotherEnough ← TRUE;
};
fatherIndex ← fatherIndex-1;
ENDLOOP;
IF oneBrotherEnough THEN {
breakSize: CARDINAL = EntryIntervalSize[pathStk: pathStk, rightFather: bestFatherIndex];
totalSize: CARDINAL = EntryIntervalSize[pathStk: pathStk];
WritePage[tree: tree, pse: pse, number: pse.pageNumber, words: breakSize];
PushEntSeqRecord[pse: fatherPSE, esr: WriteRightBrother[tree: tree, pse: pse, rtBroPg: rtBroPg1, words: totalSize-breakSize]];
rtBroPg1 ← nilPage;
};
};
HairyInsertRecords: PROC [tree: Tree, pathStk: PathStk, rtBroPg1: PageNumber] = {
Called when not all the entries will fit on the current page and the right brother page. Pours all the entries into the current and right brother pages and either the second right brother page or the left brother page, creating a new second right brother page if neither exists or there is still not enough space. This procedure is responsible for redistributing the entries among the three pages so as to minimize the sum of the sizes of the entries promoted to the father page. Note that this considers only brothers and not cousins or more distant relatives.
TrickleDown: PROC [emptyIndex: HeapIndex, entry: EntryOrdinal] = {
sonSize: CARDINAL = IndexedEntrySize[pathStk: pathStk, index: entry];
son: HeapIndex ← emptyIndex;
DO
father: HeapIndex ← son/2;
fatherEnt: EntryOrdinal;
IF father<=0 THEN EXIT;
fatherEnt ← heap.entries[father];
IF IndexedEntrySize[pathStk: pathStk, index: fatherEnt] <= sonSize THEN EXIT;
heap.entries[son] ← fatherEnt;
entryTable.map[fatherEnt].heapPos ← son;
son ← father;
ENDLOOP;
heap.entries[son] ← entry;
entryTable.map[entry].heapPos ← son;
};
entryTable: REF EntryTable = pathStk.entryTable;
heap: REF Heap = pathStk.heap;
pse: PathStkEntryPtr = @pathStk.path[pathStk.top];
fatherPSE: PathStkEntryPtr = @pathStk.path[pathStk.top-1]; -- father's pse
rtBroPg2: PageNumber;
fatherIndex, fatherIndex2, bestFatherIndex, bestFatherIndex2: EntryOrdinal;
minFeasIndex, maxFeasIndex: EntryOrdinal;
bestFatherSizeSum: CARDINAL ← 2*tree.maxFreeWords + 1;
twoBrothersEnough: BOOL ← FALSE;
breakSize1, breakSize2, totalSize: CARDINAL;
fatherESR: REF EntSeqRecord;
See how much free space our second brother page would have to contain in order to handle the overflow. This is done by pretending to fill up this page and the first right brother page and seeing what is left over.
fatherIndex ← FillLeftPage[tree: tree, pathStk: pathStk];
fatherIndex2 ← FillLeftPage[tree: tree, pathStk: pathStk, leftFather: fatherIndex];
The current page can't be the root, because one brother would surely have been enough in that case; so we don't have to pussyfoot when calling FindRightBrother.
rtBroPg2 ← FindRightBrother[tree: tree, pathStk: pathStk, spaceNeeded: EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex2] + 2*tree.breathingSpace];
IF rtBroPg2=nilPage THEN {
no luck, try the left brother
fe2: EntryOrdinal = FillRightPage[tree: tree, pathStk: pathStk];
fe: EntryOrdinal = FillRightPage[tree: tree, pathStk: pathStk, rightFather: fe2];
rtBroPg2 ← FindLeftBrother[tree: tree, pathStk: pathStk, spaceNeeded: EntryIntervalSize[pathStk: pathStk, leftFather: 0, rightFather: fe] + 2*tree.breathingSpace];
IF rtBroPg2=nilPage
THEN rtBroPg2 ← tree.AllocatePage[] -- still no luck, allocate new page
ELSE {
left brother had space, but fatherIndexes are now invalid
fatherIndex ← FillLeftPage[tree: tree, pathStk: pathStk];
fatherIndex2 ← FillLeftPage[tree: tree, pathStk: pathStk, leftFather: fatherIndex];
};
};
IF entryTable.length < 5 THEN ERROR Bug[tooFewEntries];
there must be two entries from the father page and at least one entry each from this page and the two brother pages
Now figure out how to divide the entries among the three pages in a way that minimizes the sum of the sizes of the two entries sent to the father page while attempting to keep the pages at least "fairly full". The way this is done is as follows. The left cut point (fatherIndex) is swept leftward from its initial maximum possible value, and all possible right cut points for the initial left cut point are thrown into a heap ordered by entry size. As the left cut point moves left, some possible right cut points are added and some are removed. At each step, the minimum-size entry for the right cut point is on the top of the heap. The sum of that and the entry for the left cut point is computed and the minimum remembered.
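Concretely, the heap is a min-heap keyed by IndexedEntrySize: TrickleDown restores the heap property by moving an entry toward the root when a candidate right cut point is added, the SiftUp branch moves a replacement entry toward the leaves when one is removed from the middle, and heap.entries[1] is therefore always the cheapest feasible right cut point for the current left cut point. Each candidate pair thus costs only a logarithmic amount of heap work rather than a rescan of all feasible right cut points.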
heap.length ← 0;
maxFeasIndex ← fatherIndex2;
WHILE EntryIntervalSize[pathStk: pathStk, leftFather: maxFeasIndex] <= tree.fairlyFull DO
maxFeasIndex ← maxFeasIndex-1;
ENDLOOP;
minFeasIndex ← maxFeasIndex+1;
WHILE EntryIntervalSize[pathStk: pathStk, rightFather: fatherIndex] > (IF twoBrothersEnough THEN tree.prettyFull ELSE 0) DO
WHILE EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex, rightFather: minFeasIndex-1] > 0 AND EntryIntervalSize[pathStk: pathStk, leftFather: minFeasIndex-1] <= tree.maxFreeWords DO
minFeasIndex ← minFeasIndex-1;
IF minFeasIndex <= maxFeasIndex THEN {
AddToHeap
heap.length ← heap.length+1;
TrickleDown[emptyIndex: heap.length, entry: minFeasIndex];
};
ENDLOOP;
WHILE EntryIntervalSize[pathStk: pathStk, leftFather: fatherIndex, rightFather: maxFeasIndex] > tree.maxFreeWords DO
IF maxFeasIndex >= minFeasIndex THEN {
RemoveFromHeap
heapPos: HeapIndex = entryTable.map[maxFeasIndex].heapPos;
heap.length ← heap.length-1;
IF heapPos <= heap.length THEN {
replacementEntry: EntryOrdinal = heap.entries[heap.length+1];
IF IndexedEntrySize[pathStk: pathStk, index: replacementEntry] <= IndexedEntrySize[pathStk: pathStk, index: maxFeasIndex]
THEN TrickleDown[emptyIndex: heapPos, entry: replacementEntry]
ELSE {
SiftUp
emptyIndex: HeapIndex ← heapPos;
entrySize: CARDINAL = IndexedEntrySize[pathStk: pathStk, index: replacementEntry];
DO
son: HeapIndex ← emptyIndex*2;
sonEntry: EntryOrdinal;
IF son > heap.length THEN EXIT;
sonEntry ← heap.entries[son];
IF son < heap.length AND IndexedEntrySize[pathStk: pathStk, index: heap.entries[son+1]] < IndexedEntrySize[pathStk: pathStk, index: sonEntry] THEN {
son ← son+1;
sonEntry ← heap.entries[son];
};
IF IndexedEntrySize[pathStk: pathStk, index: sonEntry] >= entrySize THEN EXIT;
heap.entries[emptyIndex] ← sonEntry;
entryTable.map[sonEntry].heapPos ← emptyIndex;
emptyIndex ← son;
ENDLOOP;
heap.entries[emptyIndex] ← replacementEntry;
entryTable.map[replacementEntry].heapPos ← emptyIndex;
};
};
};
maxFeasIndex ← maxFeasIndex-1;
ENDLOOP;
IF heap.length>0 THEN {
fatherSizeSum: CARDINAL;
fatherIndex2 ← heap.entries[1];
fatherSizeSum ← IndexedEntrySize[pathStk: pathStk, index: fatherIndex] + IndexedEntrySize[pathStk: pathStk, index: fatherIndex2];
IF fatherSizeSum<bestFatherSizeSum THEN {
twoBrothersEnough ← TRUE;
bestFatherSizeSum ← fatherSizeSum;
bestFatherIndex ← fatherIndex;
bestFatherIndex2 ← fatherIndex2;
};
};
fatherIndex ← fatherIndex-1;
ENDLOOP;
IF ~twoBrothersEnough THEN ERROR Bug[twoBrothersNotEnough];
Write the three pages and promote the two father entries to the next level.
breakSize1 ← EntryIntervalSize[pathStk: pathStk, rightFather: bestFatherIndex];
breakSize2 ← EntryIntervalSize[pathStk: pathStk, rightFather: bestFatherIndex2];
totalSize ← EntryIntervalSize[pathStk: pathStk];
WritePage[tree: tree, pse: pse, number: pse.pageNumber, words: breakSize1];
fatherESR ← WriteRightBrother[tree: tree, pse: pse, rtBroPg: rtBroPg1, words: breakSize2-breakSize1];
PushEntSeqRecord[pse: fatherPSE, esr: WriteRightBrother[tree: tree, pse: pse, rtBroPg: rtBroPg2, words: totalSize-breakSize2]];
PushEntSeqRecord[pse: fatherPSE, esr: fatherESR];
};
FindRightBrother: PROC [tree: Tree, pathStk: PathStk, spaceNeeded: INTEGER] RETURNS [rtBroPg: PageNumber] = {
Finds the right brother of the current page, and determines whether it has room for at least spaceNeeded additional words. If so, removes the father entry and all right brother entries and appends them to the ESL for this level. Returns nilPage if there is no right brother or it is too full. Passing a spaceNeeded argument of -tree.maxFreeWords will find the right brother if it exists, regardless of how full it is.
pse: PathStkEntryPtr = @pathStk.path[pathStk.top];
fatherPSE: PathStkEntryPtr = @pathStk.path[pathStk.top-1];
fatherEntSize: CARDINAL;
pagePtr: BTreePagePtr;
fatherESR, rtBroESR: REF EntSeqRecord;
IF fatherPSE.eslFront=NIL
THEN {
pagePtr ← tree.ReferencePage[fatherPSE.pageNumber];
IF fatherPSE.offset = nilOffset+(tree.state.pageSize-pagePtr.freeWords) THEN {
no right brother
tree.ReleasePage[fatherPSE.pageNumber];
RETURN [nilPage];
};
fatherEntSize ← tree.BTreeEntrySize[@pagePtr[fatherPSE.offset]];
rtBroPg ← pagePtr[fatherPSE.offset].grPage;
tree.ReleasePage[fatherPSE.pageNumber];
}
ELSE {
fatherEntSize ← tree.BTreeEntrySize[fatherPSE.eslFront.entSeqP];
rtBroPg ← fatherPSE.eslFront.entSeqP.grPage;
};
pagePtr ← tree.ReferencePage[rtBroPg];
IF LOOPHOLE[pagePtr.freeWords-fatherEntSize, INTEGER] < spaceNeeded THEN {
right brother too full
tree.ReleasePage[rtBroPg];
RETURN [nilPage];
};
rtBroESR ← MakeEntSeqRecord[entSeq: @pagePtr.entries, length: tree.maxFreeWords-pagePtr.freeWords];
tree.ReleasePage[rtBroPg];
[esr: fatherESR] ← tree.RemoveEntry[pse: fatherPSE];
AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: fatherESR];
AppendEntSeqRecord[pse: pse, esr: fatherESR];
AppendEntSeqLengths[tree: tree, pathStk: pathStk, esr: rtBroESR];
AppendEntSeqRecord[pse: pse, esr: rtBroESR];
};
FindLeftBrother: PROC [tree: Tree, pathStk: PathStk, spaceNeeded: INTEGER] RETURNS [ltBroPg: PageNumber] = {
Finds the left brother of the current page, and determines whether it has room for at least spaceNeeded additional words. If so, backs up one entry at the father's level, removes the father entry and all left brother entries, and inserts them at the front of the ESL for this level. Returns nilPage if there is no left brother or it is too full. Passing a spaceNeeded argument of -tree.maxFreeWords will find the left brother if it exists, regardless of how full it is.
pse: PathStkEntryPtr = @pathStk.path[pathStk.top];
fatherPSE: PathStkEntryPtr = @pathStk.path[pathStk.top-1];
fatherPagePtr, ltBroPagePtr, rtBroPagePtr: BTreePagePtr;
fatherESR, ltBroESR: REF EntSeqRecord;
fatherEntSize: CARDINAL;
rtBroOfLtBroPg: PageNumber;
IF fatherPSE.offset <= entry1Offset THEN RETURN [nilPage];
fatherPagePtr ← tree.ReferencePage[fatherPSE.pageNumber];
ltBroPg ← fatherPagePtr[fatherPSE.nextToLastOffset].grPage;
rtBroOfLtBroPg ← fatherPagePtr[fatherPSE.lastOffset].grPage;
fatherEntSize ← tree.BTreeEntrySize[@fatherPagePtr[fatherPSE.lastOffset]];
tree.ReleasePage[fatherPSE.pageNumber];
ltBroPagePtr ← tree.ReferencePage[ltBroPg];
IF LOOPHOLE[ltBroPagePtr.freeWords-fatherEntSize, INTEGER] < spaceNeeded THEN {
tree.ReleasePage[ltBroPg];
RETURN [nilPage];
};
ltBroESR ← MakeEntSeqRecord[entSeq: @ltBroPagePtr.entries, length: tree.maxFreeWords-ltBroPagePtr.freeWords];
fatherPagePtr ← tree.ReferencePage[fatherPSE.pageNumber, write];
fatherPagePtr[fatherPSE.nextToLastOffset].grPage ← rtBroOfLtBroPg;
tree.ReleasePage[fatherPSE.pageNumber];
[esr: fatherESR] ← tree.BackUpAndRemoveEntry[pse: fatherPSE];
rtBroPagePtr ← tree.ReferencePage[rtBroOfLtBroPg, write];
fatherESR.entSeqP.grPage ← rtBroPagePtr.minPage;
rtBroPagePtr.minPage ← ltBroPagePtr.minPage;
tree.ReleasePage[rtBroOfLtBroPg];
tree.ReleasePage[ltBroPg];
PushEntSeqLengths[tree: tree, pathStk: pathStk, esr: fatherESR];
PushEntSeqRecord[pse: pse, esr: fatherESR];
PushEntSeqLengths[tree: tree, pathStk: pathStk, esr: ltBroESR];
PushEntSeqRecord[pse: pse, esr: ltBroESR];
};
WriteRightBrother: PROC [tree: Tree, pse: PathStkEntryPtr, rtBroPg: PageNumber, words: CARDINAL] RETURNS [fatherESR: REF EntSeqRecord] = {
Removes words' worth of entries from the front of the ESL for this level, and writes all but the first entry into rtBroPg. Designates the first entry as the (left) father of rtBroPg, and returns a new ESR containing it. Also sets the page's freeWords and minPage fields appropriately.
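For example (illustrative sizes), if words = 200 and the first ESL entry (the new father entry) is 15 words, the remaining 185 words of entries are deposited into rtBroPg, whose freeWords becomes tree.maxFreeWords-185; the returned 15-word ESR has its grPage set to rtBroPg, and rtBroPg's minPage is set to the grPage the father entry carried before promotion.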
pagePtr: BTreePagePtr;
minPage: PageNumber;
[esr: fatherESR, grPage: minPage] ← tree.RemoveEntry[pse: pse];
words ← words-fatherESR.entSeqLen;
pagePtr ← tree.ReferencePage[rtBroPg, write];
pagePtr.minPage ← minPage;
tree.ReleasePage[rtBroPg];
WritePage[tree: tree, pse: pse, number: rtBroPg, words: words];
fatherESR.entSeqP.grPage ← rtBroPg;
};
WritePage: PROC [tree: Tree, pse: PathStkEntryPtr, number: PageNumber, words: CARDINAL] = {
Removes words' worth of entries from the front of the ESL for this level, and writes them into the page designated by number. Sets the page's freeWords appropriately, but does not touch minPage.
pagePtr: BTreePagePtr = tree.ReferencePage[number, write];
DepositESL[tree: tree, pse: pse, block: @pagePtr.entries, length: words];
pagePtr.freeWords ← tree.maxFreeWords-words;
tree.ReleasePage[number];
};
IndexedEntrySize: PROC [pathStk: PathStk, index: EntryOrdinal] RETURNS [words: CARDINAL] = INLINE {
RETURN [EntryIntervalSize[pathStk: pathStk, leftFather: index-1, rightFather: index+1]];
};
FillLeftPage: PROC [tree: Tree, pathStk: PathStk, leftFather, rightFather: EntryOrdinal ← 0] RETURNS [midFather: EntryOrdinal] = {
Finds the largest entry ordinal in (leftFather .. rightFather) such that all the entries in (leftFather .. midFather) will fit in one BTree page. If rightFather = 0 then it is defaulted to pathStk.entryTable.length+1.
IF rightFather=0 THEN rightFather ← pathStk.entryTable.length+1;
midFather ← leftFather+2;
WHILE midFather<rightFather-2 AND EntryIntervalSize[pathStk: pathStk, leftFather: leftFather, rightFather: midFather+1] <= tree.maxFreeWords DO
midFather ← midFather+1;
ENDLOOP;
};
FillRightPage: PROC [tree: Tree, pathStk: PathStk, leftFather, rightFather: EntryOrdinal ← 0] RETURNS [midFather: EntryOrdinal] = {
Finds the smallest entry ordinal in (leftFather .. rightFather) such that all the entries in (midFather .. rightFather) will fit in one BTree page. If rightFather = 0 then it is defaulted to pathStk.entryTable.length+1.
IF rightFather=0 THEN rightFather ← pathStk.entryTable.length+1;
midFather ← rightFather-2;
WHILE midFather>leftFather+2 AND EntryIntervalSize[pathStk: pathStk, leftFather: midFather-1, rightFather: rightFather] <= tree.maxFreeWords DO
midFather ← midFather-1;
ENDLOOP;
};
}.