DIRECTORY
  Basics USING [BITAND, BITOR, BITSHIFT],
  FS USING [Error, StreamOpen],
  GVPDefs,
  IO USING [Close, GetChar, GetLength, PutChar, PutF, PutFR, PutText, SetIndex, STREAM, UnsafeGetBlock, UnsafePutBlock],
  ObjectDirDefs USING [noObject, ObjectNumber, ObjectType];

GVPProcs: CEDAR PROGRAM
  IMPORTS Basics, FS, GVPDefs, IO
  EXPORTS GVPDefs
  SHARES ObjectDirDefs =
BEGIN OPEN GVPDefs;

gap: ObjectDirDefs.ObjectNumber _ ObjectDirDefs.noObject;
currentObj: ObjectDirDefs.ObjectNumber;

pageBuffer: REF PageByteVec _ NIL;

maxSegments: CARDINAL;
SegmentPtr: TYPE = CARDINAL;
noSegment: SegmentPtr;
externalNoSegment: CARDINAL = LAST[CARDINAL];
emptyCell: CARDINAL = LAST[CARDINAL];

CFHdrSize: CARDINAL = SIZE[LONG INTEGER] + SIZE[SegmentPtr];
ChainFilePage: TYPE = MACHINE DEPENDENT RECORD [
  serialNumber(0): LONG INTEGER,
  chainHead(2): SegmentPtr,
  next(3): ARRAY [CFHdrSize..wordsPerPage) OF CARDINAL]; -- 3..256
ChainPage: TYPE = REF ChainFilePage;
ChainSeq: TYPE = RECORD [SEQUENCE length: CARDINAL OF ChainPage];

MapSeq: TYPE = RECORD [SEQUENCE length: CARDINAL OF MapEntry];
map: REF MapSeq _ NIL;
mapChanged: BOOLEAN _ FALSE;
structureOK: BOOLEAN _ FALSE;
LList, PList: REF WordSeq _ NIL;

bitMapSeq: TYPE = RECORD [SEQUENCE length: CARDINAL OF WORD];
bitMap: REF bitMapSeq _ NIL;

SetBitFromObj: PROC [obj: LONG POINTER TO ObjectHeader] RETURNS [bit: BOOLEAN] = TRUSTED
  BEGIN
  bitIndex: INT = obj.number.page * maxObjNumIndex + obj.number.index;
  bitWord: INT = bitIndex / bitsPerWord;
  bitPos: INT = bitIndex MOD bitsPerWord;
  bitMask: WORD = Basics.BITSHIFT[1, bitPos];
  bit _ Basics.BITAND[bitMap[bitWord], bitMask] # 0;
  bitMap[bitWord] _ Basics.BITOR[bitMap[bitWord], bitMask]
  END;

LookAtThis: SIGNAL = CODE;

GetChain: PROC [h: GVPRef] RETURNS [r: ROPE] =
  BEGIN
  i, j, prevJ: CARDINAL;
  chainStream, mapStream: IO.STREAM _ NIL;
  chainSize: CARDINAL;
  chainPages: REF ChainSeq;
  chainStream _ FS.StreamOpen[fileName: "heap.segments", accessOptions: read
    ! FS.Error => BEGIN r _ "Unable to open heap.segments"; GOTO fail END];
  chainSize _ (chainStream.GetLength[] / bytesPerPage) / 2; -- pairs of pages
  chainPages _ NEW[ChainSeq[chainSize]];
  mapStream _ FS.StreamOpen[fileName: "heap.map", accessOptions: read
    ! FS.Error => BEGIN r _ "Unable to open heap.map"; chainStream.Close[]; GOTO fail END];
  h.heapStream _ FS.StreamOpen[fileName: "heap.data", accessOptions: write
    ! FS.Error => BEGIN
      r _ "Unable to open heap.data"; chainStream.Close[]; mapStream.Close[]; GOTO fail END];
  maxSegments _ (h.heapStream.GetLength[] / pageByteSize) / segSize;
  noSegment _ maxSegments;
  LList _ NEW[WordSeq[maxSegments + 1]];
  PList _ NEW[WordSeq[maxSegments + 1]];
  map _ NEW[MapSeq[maxSegments*segSize]];
  FOR i: CARDINAL IN [0..maxSegments] DO PList[i] _ emptyCell; LList[i] _ emptyCell; ENDLOOP;
  FOR i: CARDINAL IN [0..maxSegments*segSize) DO map[i] _ mapStream.GetChar[] ENDLOOP;
  mapChanged _ FALSE;
  mapStream.Close[];
  chainStream.SetIndex[0];
  FOR i: CARDINAL IN [0..chainSize) -- copy chain file pages to my array
    DO
    chain0: ChainPage = NEW[ChainFilePage];
    chain1: ChainPage = NEW[ChainFilePage];
    TRUSTED {
      [] _ chainStream.UnsafeGetBlock[[LOOPHOLE[chain0], 0, bytesPerPage]];
      [] _ chainStream.UnsafeGetBlock[[LOOPHOLE[chain1], 0, bytesPerPage]]; };
    IF chain0.serialNumber > chain1.serialNumber THEN chainPages[i] _ chain0
    ELSE chainPages[i] _ chain1;
    ENDLOOP;
  chainStream.Close[];
  j _ chainPages[0].chainHead; -- the start of the list
  i _ 0; -- i counts logical segments
  DO
    chainJ: CARDINAL _ IF j = externalNoSegment -- end of list?
      THEN noSegment ELSE j - CFHdrSize * (j / wordsPerPage + 1);
    IF PList[chainJ] # emptyCell THEN SIGNAL LookAtThis;
    LList[i] _ chainJ;
    PList[chainJ] _ i;
    IF chainJ = noSegment THEN EXIT; -- note that PList[noSegment] = number of active segments
    i _ i + 1;
    prevJ _ j;
    j _ chainPages[j / wordsPerPage].next[j MOD wordsPerPage];
    ENDLOOP;
  structureOK _ TRUE;
  FOR i: CARDINAL IN [0..chainSize) DO chainPages[i] _ NIL; ENDLOOP;
  ResetCurrent[h];
  EXITS fail => NULL
  END; -- a mother of a proc!

SetLogical: PUBLIC PROC [h: GVPRef, n: CARDINAL] RETURNS [ok: BOOLEAN _ FALSE] =
  BEGIN
  seg: CARDINAL = n/segSize;
  IF NOT seg IN [0..noSegment) THEN RETURN;
  IF NOT LList[seg] IN [0..noSegment) THEN RETURN;
  h.lPage _ n;
  h.pPage _ LList[seg] * segSize + n MOD segSize;
  ok _ TRUE
  END;

SetPhysical: PUBLIC PROC [h: GVPRef, n: CARDINAL] RETURNS [ok: BOOLEAN _ FALSE] =
  BEGIN
  seg: CARDINAL = n/segSize;
  IF NOT seg IN [0..noSegment) THEN RETURN;
  IF NOT PList[seg] IN [0..noSegment) THEN RETURN;
  h.pPage _ n;
  h.lPage _ PList[seg] * segSize + n MOD segSize;
  ok _ TRUE
  END;

NextPage: PUBLIC PROC [h: GVPRef] RETURNS [ok: BOOLEAN _ FALSE] =
  BEGIN
  IF h.lPage MOD segSize < segSize-1 THEN {h.lPage _ h.lPage+1; h.pPage _ h.pPage+1}
  ELSE BEGIN
    pSeg: SegmentPtr _ LList[h.lPage/segSize + 1]; -- new physical segment
    IF pSeg = noSegment THEN RETURN
    ELSE BEGIN h.pPage _ pSeg * segSize; h.lPage _ h.lPage+1; END;
    END;
  ok _ TRUE
  END;

PrevPage: PUBLIC PROC [h: GVPRef] RETURNS [ok: BOOLEAN _ FALSE] =
  BEGIN
  IF h.lPage MOD segSize > 0 THEN {h.lPage _ h.lPage-1; h.pPage _ h.pPage-1}
  ELSE BEGIN
    IF h.lPage = 0 THEN RETURN
    ELSE BEGIN
      h.lPage _ h.lPage-1;
      h.pPage _ LList[h.lPage/segSize] * segSize + segSize - 1
      END
    END;
  ok _ TRUE
  END;

ResetCurrent: PROC [h: GVPRef] =
  BEGIN h.lPage _ 0; h.pPage _ LList[h.lPage/segSize] * segSize END;

PositionRope: PUBLIC PROC [h: GVPRef] RETURNS [r: ROPE] =
  BEGIN
  r _ IO.PutFR["position %d, page %d", [cardinal[h.lPage]], [cardinal[h.pPage]]]
  END;

ReadHeapPage: PUBLIC PROC [h: GVPRef, page: CARDINAL, pageBuffer: REF PageByteVec]
  RETURNS [r: ROPE _ NIL] =
  BEGIN
  IF GetPageState[page] = emptyPage THEN r _ ReadServerPage[h, page, pageBuffer]
  ELSE r _ ReadLocalPage[h, page, pageBuffer]
  END;

ReadLocalPage: PROC [h: GVPRef, page: CARDINAL, pageBuffer: REF PageByteVec]
  RETURNS [r: ROPE _ NIL] =
  BEGIN
  h.heapStream.SetIndex[LONG[page] * bytesPerPage];
  TRUSTED { [] _ h.heapStream.UnsafeGetBlock[[LOOPHOLE[pageBuffer], 0, bytesPerPage]]; };
  END;

WriteLocalPage: PUBLIC PROC [h: GVPRef, page: CARDINAL, pageBuffer: REF PageByteVec]
  RETURNS [r: ROPE _ NIL] =
  BEGIN
  h.heapStream.SetIndex[LONG[page] * bytesPerPage];
  TRUSTED { h.heapStream.UnsafePutBlock[[LOOPHOLE[pageBuffer], 0, bytesPerPage]]; };
  SetPageState[page, alteredPage]
  END;

WriteHeap: PUBLIC PROC [h: GVPRef] RETURNS [r: ROPE _ NIL] =
  BEGIN
  numSent: CARDINAL _ 0;
  r _ CheckStructure[h];
  IF r#NIL THEN RETURN;
  FOR i: CARDINAL IN [0..map.length) DO
    IF GetPageState[i]#alteredPage THEN LOOP;
    r _ ReadLocalPage[h, i, pageBuffer];
    IF r#NIL THEN RETURN;
    r _ WriteServerPage[h, i, pageBuffer];
    IF r#NIL THEN RETURN;
    SetPageState[i, fullPage];
    numSent _ numSent + 1;
    ENDLOOP;
  IF numSent=0 THEN Flash["No pages written (none changed)"]
  ELSE h.logStream.PutF["\n%lHeap written%l, %d pages updated\n",
    [rope["b"]], [rope[" "]], [cardinal[numSent]]]
  END;

ScanHeap: PUBLIC PROC [h: GVPRef, onlyError: BOOLEAN] RETURNS [r: ROPE _ NIL] = TRUSTED
  BEGIN
  out: IO.STREAM = h.browserStream;
  nextOffset: LONG CARDINAL _ 0;
  word: CARDINAL;
  page, index, type: CARDINAL;
  firstTime: BOOLEAN _ TRUE;
  searchValid: BOOLEAN;
  header: LONG POINTER TO PageHeader = LOOPHOLE[pageBuffer];
  ReportError: PROC [r: ROPE] = TRUSTED
    BEGIN
    out.PutF["ERROR: page %4d, word %3d, %g\n",
      [cardinal[h.pPage]], [cardinal[word]], [rope[r]]]
    END;
  ReportNote: PROC [r: ROPE] = TRUSTED
    BEGIN
    out.PutF["NOTE: page %4d, word %3d, %g\n",
      [cardinal[h.pPage]], [cardinal[word]], [rope[r]]]
    END;
  FOR i: CARDINAL IN [0..bitMap.length) DO bitMap[i] _ 0 ENDLOOP;
  IF NOT onlyError THEN BEGIN
    [page, index, type] _ GetSearchPattern[];
    IF type#lastCard AND NOT type IN [0..15] THEN {r _ "Bad type string"; RETURN}
    END;
  out.PutText["\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n"];
  out.PutF["Scan started at %g\n\n", [rope[PositionRope[h]]]];
  searchValid _ NOT onlyError AND (page#lastCard OR index#lastCard OR type#lastCard);
  currentObj _ gap;
  DO -- for each page
    byte: INT = LONG[h.pPage] * bytesPerPage;
    h.heapStream.SetIndex[byte];
    TRUSTED { [] _ h.heapStream.UnsafeGetBlock[[LOOPHOLE[pageBuffer], 0, bytesPerPage]]; };
    word _ pageHdrSize;
    IF firstTime THEN BEGIN
      firstTime _ FALSE;
      IF h.lPage=0 THEN nextOffset _ 0 ELSE nextOffset _ header.offset;
      END;
    DO -- for each sub-object
      offset: LONG CARDINAL = IF word # pageHdrSize THEN 0 ELSE header.offset;
      objH: LONG POINTER TO ObjectHeader = LOOPHOLE[header, LONG POINTER] + word;
      IF searchValid THEN
        IF (objH.number.page=page OR page=lastCard)
          AND (objH.number.index=index OR index=lastCard)
          AND (LOOPHOLE[objH.number.type, CARDINAL]=type OR type=lastCard) THEN
          out.PutF["SEARCH: page %4d, word %3d, found [p: %3bB, x: %3bB, t: %g]\n",
            [cardinal[h.pPage]], [cardinal[word]],
            [cardinal[objH.number.page]], [cardinal[objH.number.index]],
            [rope[ObjectRope[LOOPHOLE[objH.number.type, CARDINAL], FALSE]]]];
      SELECT TRUE FROM
        objH.number.page>maxObjNumPage OR objH.number.index>maxObjNumIndex OR objH.number.fill#0 =>
          ReportError["Illegal object number"];
        objH.number = gap => NULL;
        objH.number = currentObj => NULL;
        currentObj # gap => ReportError["Garbage found"];
        ENDCASE => NULL;
      IF offset = 0 THEN BEGIN
        currentObj _ objH.number;
        nextOffset _ 0;
        IF objH.number#gap AND SetBitFromObj[objH] THEN
          ReportNote[IO.PutFR["duplicate object p:%3bB x:%3bB",
            [cardinal[objH.number.page]], [cardinal[objH.number.index]]]]
        END;
      IF currentObj # gap AND objH.number = currentObj THEN SELECT TRUE FROM
        offset = nextOffset => nextOffset _ offset + objH.size;
        offset < nextOffset AND nextOffset < offset + objH.size =>
          {nextOffset _ offset + objH.size; ReportNote["Overlap"]};
        offset < nextOffset => {nextOffset _ offset + objH.size; ReportNote["redundant"]};
        offset > nextOffset => {nextOffset _ offset + objH.size; ReportError["Gap"]};
        ENDCASE => ERROR;
      word _ word + objHdrSize + objH.size;
      IF word + objHdrSize > 255 THEN EXIT;
      IF objH.number = currentObj THEN currentObj _ gap;
      ENDLOOP;
    IF h.lPage MOD 10 = 0 THEN
      Set[IO.PutFR["Scanning position %d of %d",
        [cardinal[h.lPage]], [cardinal[PList[maxSegments]*segSize]]]];
    IF StopRequested[] THEN GOTO quit;
    IF NOT NextPage[h] THEN GOTO quit
    ENDLOOP
  EXITS quit => h.browserStream.PutF["\nScan stopped at %g\n", [rope[PositionRope[h]]]]
  END;

GetPageState: PUBLIC PROC [page: CARDINAL] RETURNS [state: MapEntry] = {state _ map[page]};

SetPageState: PUBLIC PROC [page: CARDINAL, state: MapEntry] =
  BEGIN mapChanged _ TRUE; map[page] _ state END;

CheckStructure: PUBLIC PROC [h: GVPRef] RETURNS [r: ROPE _ NIL] =
  BEGIN
  IF NOT structureOK THEN BEGIN
    Set["Initialising data structure"];
    r _ GetChain[h];
    END;
  Set[]
  END;

ProcsInit: PUBLIC PROC =
  BEGIN
  structureOK _ FALSE;
  pageBuffer _ NEW[PageByteVec];
  bitMap _ NEW[bitMapSeq[(maxObjNumPage*maxObjNumIndex) / bitsPerWord]]
  END;

ProcsTidyUp: PUBLIC PROC =
  BEGIN
  h: GVPRef = GetHandle[];
  IF map#NIL AND mapChanged THEN BEGIN
    mapStream: IO.STREAM = FS.StreamOpen[fileName: "heap.map", accessOptions: write];
    FOR i: CARDINAL IN [0..map.length) DO mapStream.PutChar[map[i]] ENDLOOP;
    mapStream.Close[];
    map _ NIL
    END;
  IF h.heapStream#NIL THEN {h.heapStream.Close[]; h.heapStream _ NIL};
  pageBuffer _ NIL;
  bitMap _ NIL;
  LList _ NIL;
  PList _ NIL
  END;

END.

GVPProcs.mesa
HGM, May 24, 1984 3:17:03 am PDT
Steve Temple, November 18, 1982 11:09 am

This module is a collection of procs, most of which manipulate the heap in one way or another. The grubby details of heap file data formatting are handled here and we attempt to hide all this from users of the procs.

gap and currentObj are used while we are scanning the heap.
pageBuffer holds disk pages from the heap.

maxSegments is the number of segments that the heap can hold, and valid segments may be indexed by [0..maxSegments). Thus the index maxSegments is invalid and we use it, as noSegment, for a flag. The server heap uses LAST[CARDINAL] as a flag to mean an invalid segment index and we translate between the two, calling the latter externalNoSegment.

The chain file (heap.segments) contains complete pages of data in pairs, updated alternately on the server. We choose the newer of each pair by its version stamp. Each page has 3 words at its start: 2 in the version stamp and one which is the index of the first segment in the chain of currently active segments. This word is only valid in the first page pair of the file. The remaining 253 words are pointers to other such words on this or one of the other pages. The indexing scheme used is weird; more about this later. A ChainPage is a pointer to such a page in memory, while a ChainSeq is a SEQUENCE of such pointers.

map is the SEQUENCE used to store info on each heap page. It says whether the page has been read from the server to the local heap and whether it has been altered. It is backed up on the file heap.map between GVPatch sessions. mapChanged is set if we alter the map itself and is used when we tidy up. structureOK is set when we have set up our data structures in GetChain.

LList and PList are the data structures derived from heap.segments and detail the ordering of active segments. If LList is indexed by a "logical" segment number it yields the physical position of that segment in the heap file. PList is indexed by a "physical" segment number and yields the corresponding logical segment. Entries which are not on the active list of segments contain emptyCell. These lists are set up in GetChain.

bitMap is a bit map used to detect duplicate object numbers while scanning the heap. The proc SetBitFromObj takes an object number (a pair of [0..256] and [0..85]) and indexes the corresponding bit in the map. It sets the bit and returns the old value.

GetChain opens the files heap.segments (the chain file), heap.data (the heap) and heap.map (the permanent copy of the page map). It creates various SEQUENCES once it knows the size of the heap and reads in the page map. It then sets up LList and PList from the chain file; line-by-line comments are the order of the day here.

Summary so far:
chainSize is the number of (pairs of) pages to process in the chain.
chainPages is a sequence of that many (uninitialised) pointers to in-store page buffers.
maxSegments is the number of segments the heap can hold.
LList and PList are full of emptyCell and have maxSegments+1 entries in them.
The page map has been copied from disk and marked unchanged (in this session).

We are now going to copy the newer of each page pair from the chain file into in-store buffers.

We've now got the newer of each page pair and we're going to set up LList and PList. Each entry on the chain pages is an index whose values may lie in [3..255] or [259..511] or [515..767] etc. The entries form a list whose head is the chainHead field of the first page and which is terminated by externalNoSegment. For each index we must map the above indexing order onto the numbers [0..n]; thus index 18 corresponds to physical segment 15, and index 260 to segment 254. So if the first index (chainHead) is 18, this says that logical segment 0 is to be found at physical segment 15. Furthermore, if the contents of location 18 are 260, then logical segment 1 is to be found at physical segment 254. Get it?
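To make that concrete, here is a minimal sketch of the index arithmetic GetChain performs. ChainIndexToSegment is a made-up name, not a proc in this module; it assumes wordsPerPage = 256 and hence CFHdrSize = 3, as the record layout above suggests.

  ChainIndexToSegment: PROC [j: CARDINAL] RETURNS [seg: CARDINAL] =
    -- discard the CFHdrSize header words of every chain page up to and including the one holding index j
    { seg _ j - CFHdrSize * (j / wordsPerPage + 1) };

  -- ChainIndexToSegment[18]  = 18  - 3*(18/256  + 1) = 15
  -- ChainIndexToSegment[260] = 260 - 3*(260/256 + 1) = 254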
SetLogical and SetPhysical make a given page the current page. The page is first converted to a segment number and, if this is OK to index the appropriate list, we do so. This yields the matching segment number, which is then converted to a page number and placed in the data record. A BOOLEAN is returned to say OK.

NextPage and PrevPage both check to see if we will run off the end of the current segment. If not, it's easy; otherwise we consult the lists to find the new segment and set that up. Again a BOOLEAN is returned to say OK.

ResetCurrent is a simple case of SetLogical.

PositionRope returns a human-readable rope giving the current position.

ReadHeapPage, ReadLocalPage and WriteLocalPage all take a buffer and a page as arguments and do the appropriate thing with them. ReadHeapPage consults the page map to decide where to get the page from (and updates it if the page comes from the server). WriteLocalPage marks the page as altered in the map (it might not have been, but so what).

WriteHeap loops through the page map and sends any page which has been altered to the server. These pages are then marked unchanged in the map, so a second call of the proc immediately after this one wouldn't do anything.

ScanHeap looks through the heap from the current position looking for things. A BOOLEAN argument says do/don't look for pattern matches. An error is something drastically wrong with the structure; a note is something which might be caused by normal operation of the compacter (but could be an error). Note that searching may find many instances of a given object number if the object is broken into sub-objects. The duplicate number checker is smart enough to avoid this.

The next two procs just provide procedural access to the page map. This is useful, since we can record that the map has been changed if everyone comes through here.

CheckStructure is called whenever we try to go into editor or browser mode. If GetChain has already been called we just return; otherwise we call it.

ProcsInit gets the page buffer and storage for the bit map and says we haven't called GetChain yet.

ProcsTidyUp writes the page map to its file if it exists and has been changed. It also frees any store that it can.
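As a footnote to the SetLogical description above: the logical-to-physical page arithmetic is just a segment lookup plus an unchanged offset within the segment. A minimal sketch follows; LogicalToPhysical is a made-up name, not a proc in this module, it omits the bounds checks SetLogical performs, and the sample numbers in the comment are invented (segSize comes from GVPDefs).

  LogicalToPhysical: PROC [n: CARDINAL] RETURNS [p: CARDINAL] =
    -- the segment holding n is translated through LList; the offset within the segment is unchanged
    { p _ LList[n/segSize] * segSize + n MOD segSize };

  -- e.g. if segSize were 16 and LList[2] were 7, logical page 37 (segment 2, offset 5)
  -- would map to physical page 7*16 + 5 = 117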