-- BcdWrite.mesa
-- Last edited by Satterthwaite on August 1, 1983 12:20 pm
-- Last edited by Lewis on 30-Mar-81 16:51:35

-- Writes the binder's output BCD (binary configuration description):
-- relocates the intermediate tables into their final layout, maps names and
-- files, and optionally copies and packs code and symbol segments.

DIRECTORY
  Alloc: TYPE USING [
    AddNotify, Bounds, DropNotify, Handle, Notifier, Top, Trim, Words],
  BcdComData: TYPE USING [
    aborted, bcdName, binderVersion, codeName, copyCode, copySymbols,
    nConfigs, nExports, nImports, nModules, nPages, objectStamp, op,
    outputFile, sourceName, sourceVersion, symbolName, table, typeExported,
    textIndex, zone],
  BcdControlDefs: TYPE USING [],
  BcdDefs: TYPE USING [
    BCD, CTIndex, CTRecord, cttype, cxtype, EVIndex, evtype, EXPIndex,
    EXPRecord, exptype, FPIndex, FPRecord, fptype, FTIndex, FTNull, FTRecord,
    FTSelf, fttype, IMPIndex, IMPNull, imptype, LFNull, lftype, Link, MTIndex,
    MTNull, MTRecord, mttype, Namee, NameRecord, NTIndex, NTRecord, nttype,
    NullName, NullVersion, PackedString, rftype, SegClass, SGIndex, SGNull,
    SGRecord, sgtype, SpaceID, SPIndex, SPRecord, sptype, sstype, sttype,
    tftype, tmtype, treetype, typtype, VersionID, VersionStamp],
  BcdErrorDefs: TYPE USING [ErrorHti, ErrorName, ErrorNameBase, GetSti],
  BcdLiterals: TYPE USING [
    EnterVersionFiles, LitSegSize, UpdateSegments, WriteLiterals],
  BcdOps: TYPE USING [BcdBase, NameString],
  BcdUtilDefs: TYPE USING [
    BcdBases, ContextForTree, EnterExport, EnterFile, EnterImport, EnterName,
    GetGfi, MapName, MergeFile, NameForHti, SetFileVersion],
  Environment: TYPE USING [bytesPerPage, bytesPerWord, wordsPerPage],
  File: TYPE USING [Capability, nullCapability, SetSize],
  FileSegment: TYPE USING [Pages, nullPages],
  FileStream: TYPE USING [FileByteIndex, Create, GetIndex, SetIndex],
  --ExecOps: TYPE USING [CheckForAbort],
  HashOps: TYPE USING [Reset],
  Inline: TYPE USING [LongCOPY, LongMult],
  OSMiscOps: TYPE USING [FileError, FindFile, StampToTime],
  Space: TYPE USING [
    Create, Delete, Handle, Map, LongPointer, Unmap, virtualMemory],
  Stream: TYPE USING [Handle, Delete, PutBlock, PutWord],
  Strings: TYPE USING [
    String, SubString, SubStringDescriptor, AppendChar, AppendString],
  Symbols: TYPE USING [CXIndex, HTIndex, htNull, STIndex, stNull],
  Table: TYPE USING [Base, Selector],
  Tree: TYPE USING [Index, Link, Scan, null],
  TreeOps: TYPE USING [GetNode, ListLength, ScanList];

BcdWrite: PROGRAM
    IMPORTS
      Alloc, BcdErrorDefs, BcdLiterals, BcdUtilDefs, --ExecOps,-- File,
      FileStream, HashOps, Inline, OSMiscOps, Space, Stream, Strings, TreeOps,
      data: BcdComData
    EXPORTS BcdControlDefs = {
  OPEN BcdDefs;

  bytesPerWord: CARDINAL ~ Environment.bytesPerWord;
  nullFile: File.Capability ~ File.nullCapability;
  nullPages: FileSegment.Pages ~ FileSegment.nullPages;

  Alignment: CARDINAL ~ 4; -- Code Segments must start at 0 MOD Alignment

  BcdWriteError: PUBLIC ERROR ~ CODE;
  Error: PROC ~ {ERROR BcdWriteError};

  UserAbort: ERROR ~ CODE; -- raised on ^DEL during code or symbol copying

  -- Allocator handle plus cached bases for each relocatable table; the bases
  -- are refreshed by Notifier whenever the allocator moves a table.
  table: Alloc.Handle;
  tb, stb, ctb, mtb, lfb, etb, itb, sgb, tyb, tmb, ftb, ntb, spb, fpb, cxb:
    Table.Base;
  ssb: BcdOps.NameString;

  -- Alloc notifier: recaches every table base after table motion, and keeps
  -- the copies recorded in bcd^ consistent too.
  Notifier: Alloc.Notifier ~ {
    tb _ base[treetype]; stb _ base[sttype]; ctb _ base[cttype];
    mtb _ base[mttype]; lfb _ base[lftype]; tyb _ base[typtype];
    tmb _ base[tmtype]; etb _ base[exptype]; itb _ base[imptype];
    sgb _ base[sgtype]; ftb _ base[fttype]; spb _ base[sptype];
    fpb _ base[fptype]; ntb _ base[nttype]; ssb _ base[sstype];
    cxb _ base[cxtype];
    IF bcd # NIL THEN {
      bcd.ctb _ ctb; bcd.mtb _ mtb;
      -- when packing, bcd.sgb holds the saved old segment table, so it must
      -- not track the live sgtype base
      IF ~packing THEN bcd.sgb _ sgb;
      bcd.tyb _ tyb; bcd.tmb _ tmb; bcd.spb _ spb; bcd.fpb _ fpb}};

  -- inline utilities

  Copy: PROC [from: LONG POINTER, nwords: CARDINAL, to: LONG POINTER] ~
    Inline.LongCOPY;

  -- Zero l words at p; the overlapping forward copy propagates the first zero.
  Zero: PROC [p: LONG POINTER, l: CARDINAL] ~ INLINE {
    IF l # 0 THEN {p^ _ 0; Inline.LongCOPY[from~p, to~(p+1), nwords~(l-1)]}};

  -- Number of whole pages needed to hold nWords words (rounds up).
  PagesForWords: PROC [nWords: CARDINAL] RETURNS [CARDINAL] ~ INLINE {
    RETURN [(nWords + Environment.wordsPerPage-1)/Environment.wordsPerPage]};

  bcd: LONG POINTER TO BcdUtilDefs.BcdBases _ NIL;
  header: BcdOps.BcdBase; -- points to Bcd header and saved tables
  headerSpace: Space.Handle;

  -- Main entry point: writes the complete BCD for the configuration tree
  -- 'root'; son[2] of the root is the PACK specification, son[3] the
  -- import/export lists (see body on the following lines).
  WriteBcd: PUBLIC PROC [root: Tree.Link] ~ {
saveIndex: CARDINAL ~ data.textIndex;
    node, subNode: Tree.Index;
    table _ data.table; table.AddNotify[Notifier];
    node _ TreeOps.GetNode[root];
    -- packing only happens when a PACK list is present AND code is copied
    packing _ (tb[node].son[2] # Tree.null AND data.copyCode);
    Initialize[];
    IF packing THEN {
      MakePackItem[tb[node].son[2]];
      data.textIndex _ saveIndex;
      FillInSgMap[]};
    CopyConfigs[]; CopyModules[]; CopyTypes[]; CopySpaces[]; CopyFramePacks[];
    subNode _ TreeOps.GetNode[tb[node].son[3]];
    data.textIndex _ tb[subNode].info;
    TreeOps.ScanList[tb[subNode].son[1], CopyImport];
    TreeOps.ScanList[tb[subNode].son[2], CopyExport];
    IF tb[subNode].attrs[$exportsALL] THEN
      ExportCx[BcdUtilDefs.ContextForTree[tb[subNode].son[4]]];
    IF data.copySymbols THEN EnterMissingSymbolFiles[];
    BcdLiterals.EnterVersionFiles[bcd.ftb, FtiForIndex[fileMap.length], MapFile];
    TableOut[];
    CloseOutputFile[];
    Finalize[];
    data.textIndex _ saveIndex;
    table.DropNotify[Notifier]; table _ NIL};

  -- Saves copies of the export/import/file/name/string tables into a freshly
  -- mapped header space, resets those allocator tables for rebuilding, and
  -- initializes the output header and the file/segment maps.
  Initialize: PROC ~ {
    impSize, expSize, sgSize, fSize, nSize, ssSize: CARDINAL;
    nSgis: CARDINAL;
    b: Table.Base;
    desc: Strings.SubStringDescriptor _ [
      base~data.sourceName, offset~0, length~data.sourceName.length];
    IF data.copyCode OR data.copySymbols THEN InitCodeSymbolCopy[];
    impSize _ table.Bounds[imptype].size;
    expSize _ table.Bounds[exptype].size;
    sgSize _ table.Bounds[sgtype].size;
    nSgis _ sgSize/SGRecord.SIZE;
    -- the old segment table is saved (below) only when packing
    IF ~packing THEN sgSize _ 0;
    fSize _ table.Bounds[fttype].size;
    nSize _ table.Bounds[nttype].size;
    ssSize _ table.Bounds[sstype].size;
    bcd _ (data.zone).NEW[BcdUtilDefs.BcdBases];
    fileMap _ (data.zone).NEW[FileMap[fSize/FTRecord.SIZE]];
    FOR i: CARDINAL IN [0..fileMap.length) DO fileMap[i] _ FTNull ENDLOOP;
    headerSpace _ Space.Create[
      size~PagesForWords[
        BCD.SIZE + impSize + expSize + sgSize + fSize + nSize + ssSize],
      parent~Space.virtualMemory];
    headerSpace.Map[];
    header _ LOOPHOLE[headerSpace.LongPointer, BcdOps.BcdBase];
    -- b walks the header space just past the BCD record; each table is copied
    -- there and the live table is then emptied with Trim
    b _ (LOOPHOLE[header, Table.Base] + BCD.SIZE);
    Copy[to~(bcd.etb _ b), from~etb, nwords~expSize];
    b _ b + expSize; table.Trim[exptype,0];
Copy[to~(bcd.itb _ b), from~itb, nwords~impSize];
    b _ b + impSize; table.Trim[imptype,0];
    Copy[to~(bcd.ftb _ b), from~ftb, nwords~fSize];
    b _ b + fSize; table.Trim[fttype,0];
    Copy[to~(bcd.ntb _ b), from~ntb, nwords~nSize];
    b _ b + nSize; table.Trim[nttype,0];
    Copy[to~(bcd.ssb _ b), from~ssb, nwords~ssSize];
    b _ b + ssSize;
    HashOps.Reset[];
    IF packing THEN { -- save old segment table in heap
      Copy[to~(bcd.sgb _ b), from~sgb, nwords~sgSize];
      b _ b + sgSize; table.Trim[sgtype,0]};
    InitHeader[
      header~header, objectVersion~OSMiscOps.StampToTime[data.objectStamp],
      source~BcdUtilDefs.EnterName[@desc], sourceVersion~data.sourceVersion];
    -- cache the bases of the tables that are kept (not rebuilt) in bcd^
    bcd.ctb _ table.Bounds[cttype].base;
    bcd.mtb _ table.Bounds[mttype].base;
    bcd.tyb _ table.Bounds[typtype].base;
    bcd.tmb _ table.Bounds[tmtype].base;
    bcd.spb _ table.Bounds[sptype].base;
    bcd.fpb _ table.Bounds[fptype].base;
    IF data.copyCode OR data.copySymbols THEN {
      MapCodeSymbolFiles[]; InitCopyMap[nSgis]};
    IF packing THEN InitSgMap[nSgis]
    ELSE {
      bcd.sgb _ table.Bounds[sgtype].base;
      -- segments that are not copied keep their file references, which must
      -- still be mapped into the new file table
      IF ~data.copyCode THEN MapSegments[$code];
      IF ~data.copySymbols THEN MapSegments[$symbols]}};

  -- Releases everything Initialize allocated (maps, heap copies, spaces).
  Finalize: PROC ~ {
    IF data.copyCode OR data.copySymbols THEN ReleaseCodeSymbolCopy[];
    (data.zone).FREE[@fileMap];
    (data.zone).FREE[@bcd];
    Space.Delete[headerSpace];
    FreePackItems[];
    IF packing THEN FreeSgMap[];
    IF data.copyCode OR data.copySymbols THEN FreeCopyMap[]};

  -- BCD (re)construction

  -- Copies the name-table entry for 'olditem' (found in the saved table) into
  -- the new name table under 'newitem', mapping the name string.
  CopyName: PROC [olditem, newitem: Namee] ~ {
    newNti: NTIndex ~ table.Words[nttype, NTRecord.SIZE];
    FOR nti: NTIndex _ NTIndex.FIRST, nti+NTRecord.SIZE DO
      OPEN old~~bcd.ntb[nti];
      IF old.item = olditem THEN {
        OPEN new~~ntb[newNti];
        new.item _ newitem;
        new.name _ bcd.MapName[old.name];
        RETURN};
      ENDLOOP};

  CopyConfigs: PROC ~ {
    -- configs are already copied, only map names and files
    cti: CTIndex _ CTIndex.FIRST;
    ctLimit: CTIndex ~ table.Top[cttype];
    UNTIL cti = ctLimit DO
      header.nConfigs _ header.nConfigs + 1;
      ctb[cti].name _ bcd.MapName[ctb[cti].name];
      ctb[cti].file _
MapFile[ctb[cti].file];
      IF ctb[cti].namedInstance THEN CopyName[[config[cti]], [config[cti]]];
      -- config records are variable length: nControls trailing Namees
      cti _ cti + (CTRecord.SIZE + ctb[cti].nControls*Namee.SIZE);
      ENDLOOP};

  CopyModules: PROC ~ {
    -- modules are already copied, only map names and files
    MapOne: PROC [mti: MTIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN m~~mtb[mti];
      header.nModules _ header.nModules + 1;
      m.name _ bcd.MapName[m.name];
      m.file _ MapFile[m.file];
      IF m.namedInstance THEN CopyName[[module[mti]], [module[mti]]]};
    EnumerateModules[MapOne]};

  -- Applies p to each module table entry; stops early if p returns TRUE.
  EnumerateModules: PROC [p: PROC [MTIndex] RETURNS [BOOL]] ~ {
    mti: MTIndex _ MTIndex.FIRST;
    mtLimit: MTIndex ~ table.Top[mttype];
    UNTIL mti = mtLimit DO
      IF p[mti] THEN EXIT;
      mti _ mti + MTRecord.SIZE;
      ENDLOOP};

  CopyTypes: PROC ~ {};
    -- types are already copied, nothing need be done (current typeIds)

  CopySpaces: PROC ~ {
    -- spaces are already copied, only map names (and segments?)
    MapOne: PROC [spi: SPIndex] RETURNS [BOOL _ FALSE] ~ {
      FOR i: CARDINAL IN [0..spb[spi].length) DO
        spb[spi].spaces[i].name _ bcd.MapName[spb[spi].spaces[i].name];
        ENDLOOP};
    EnumerateSpaces[MapOne]};

  -- Applies p to each space table entry; stops early if p returns TRUE.
  EnumerateSpaces: PROC [p: PROC [SPIndex] RETURNS [BOOL]] ~ {
    spi: SPIndex _ SPIndex.FIRST;
    spLimit: SPIndex ~ table.Top[sptype];
    UNTIL spi = spLimit DO
      IF p[spi] THEN EXIT;
      -- space records are variable length: 'length' trailing SpaceIDs
      spi _ spi + SPRecord.SIZE + spb[spi].length*SpaceID.SIZE;
      ENDLOOP};

  CopyFramePacks: PROC ~ {
    -- framepacks are already copied, only map names
    MapOne: PROC [fpi: FPIndex] RETURNS [BOOL _ FALSE] ~ {
      fpb[fpi].name _ bcd.MapName[fpb[fpi].name]};
    EnumerateFramePacks: PROC [p: PROC [FPIndex] RETURNS [BOOL]] ~ {
      fpi: FPIndex _ FPIndex.FIRST;
      fpLimit: FPIndex ~ table.Top[fptype];
      UNTIL fpi = fpLimit DO
        IF p[fpi] THEN RETURN;
        fpi _ fpi + FPRecord.SIZE + fpb[fpi].length*MTIndex.SIZE;
        ENDLOOP};
    EnumerateFramePacks[MapOne]};

  -- Tree.Scan for one IMPORTS list element: enters the import in the new
  -- table and maps its file.
  CopyImport: Tree.Scan ~ {
    OPEN Symbols;
    sti: STIndex _ stNull;
    olditi, iti: IMPIndex;
    WITH t SELECT FROM
      symbol => sti _ index;
      subtree => sti _ NARROW[tb[index].son[1], Tree.Link.symbol].index;
      ENDCASE => Error[];
    olditi
_ stb[sti].impi;
    IF sti = stNull OR olditi = IMPNull THEN RETURN;
    iti _ bcd.EnterImport[olditi, TRUE];
    itb[iti].file _ MapFile[itb[iti].file];
    -- firstdummy records the first gfi used for imported (dummy) frames
    IF header.firstdummy = 0 THEN header.firstdummy _ itb[iti].gfi;
    header.nImports _ header.nImports + 1;
    header.nDummies _ header.nDummies + itb[iti].ngfi};

  -- Tree.Scan for one EXPORTS list element: enters the export (interface or
  -- module) in the new table and maps its file.
  CopyExport: Tree.Scan ~ {
    OPEN Symbols;
    sti: STIndex _ stNull;
    hti: HTIndex _ htNull;
    neweti: EXPIndex;
    oldeti: EXPIndex;
    WITH t SELECT FROM
      symbol => sti _ index;
      subtree => {
        sti _ NARROW[tb[index].son[1], Tree.Link.symbol].index;
        hti _ stb[sti].hti};
      ENDCASE => Error[];
    WITH s~~stb[sti] SELECT FROM
      external =>
        WITH m~~s.map SELECT FROM
          interface => {
            -- NOTE(review): 'new' is opened on etb[neweti] before neweti is
            -- assigned by EnterExport below — presumably the OPEN is evaluated
            -- lazily at each use; confirm against Mesa OPEN semantics.
            OPEN new~~etb[neweti];
            oldeti _ m.expi;
            neweti _ bcd.EnterExport[oldeti, TRUE];
            Copy[from~@bcd.etb[oldeti].links, to~@new.links, nwords~new.size];
            new.file _ MapFile[new.file]};
          module => [] _ NewExportForModule[m.mti, htNull];
          ENDCASE => RETURN;
      ENDCASE => RETURN;
    header.nExports _ header.nExports + 1};

  -- Builds a one-link export entry for a module; if 'name' is given, also
  -- records the instance name in the name table.
  NewExportForModule: PROC [mti: MTIndex, name: Symbols.HTIndex]
      RETURNS [eti: EXPIndex] ~ {
    OPEN Symbols;
    eti _ table.Words[exptype, EXPRecord.SIZE+1*Link.SIZE];
    etb[eti] _ [
      name~mtb[mti].name, size~1, port~$module, namedInstance~name # htNull,
      typeExported~FALSE, file~mtb[mti].file, links~];
    etb[eti].links[0] _ [variable[vgfi~mtb[mti].gfi, var~0, vtag~$var]];
    IF name # htNull THEN {
      nti: NTIndex ~ table.Words[nttype, NTRecord.SIZE];
      ntb[nti] _ [name~BcdUtilDefs.NameForHti[name], item~[module[mti]]]};
    RETURN};

  -- Exports every interface in context cx (EXPORTS ALL), skipping entries
  -- already present in the new export table.
  ExportCx: PROC [cx: Symbols.CXIndex] ~ {
    OPEN Symbols;
    neweti, oldeti: EXPIndex;
    FOR sti: STIndex _ cxb[cx].link, stb[sti].link UNTIL sti = stNull DO {
      IF ~stb[sti].filename THEN
        WITH s~~stb[sti] SELECT FROM
          external =>
            WITH m~~s.map SELECT FROM
              interface => {
                -- NOTE(review): as in CopyExport, old/new are opened before
                -- oldeti/neweti are assigned — verify OPEN evaluation order.
                OPEN old~~bcd.etb[oldeti], new~~etb[neweti];
                -- first make sure that old is not already exported
                existingEti: EXPIndex _ EXPIndex.FIRST;
                etLimit: EXPIndex ~ table.Top[exptype];
                oldeti _ m.expi;
                UNTIL existingEti = etLimit DO
                  IF old = etb[existingEti] THEN GO TO
AlreadyExported;
                  -- export records are variable length: 'size' trailing links
                  existingEti _ existingEti + EXPRecord.SIZE+etb[existingEti].size;
                  ENDLOOP;
                neweti _ bcd.EnterExport[oldeti, TRUE];
                Copy[from~@old.links, to~@new.links, nwords~new.size];
                new.file _ MapFile[new.file];
                header.nExports _ header.nExports + 1};
              ENDCASE;
          ENDCASE;
      EXITS AlreadyExported => NULL};
      ENDLOOP};

  -- file mapping

  -- fileMap[i] gives the new FTIndex for old file-table index i, or FTNull
  -- until the file has been merged into the output file table.
  FileMap: TYPE ~ RECORD [SEQUENCE length: CARDINAL OF FTIndex];
  fileMap: LONG POINTER TO FileMap _ NIL;

  IndexForFti: PROC [fti: FTIndex] RETURNS [CARDINAL] ~ INLINE {
    RETURN [LOOPHOLE[fti,CARDINAL]/FTRecord.SIZE]};

  FtiForIndex: PROC [n: CARDINAL] RETURNS [FTIndex] ~ INLINE {
    RETURN [FTIndex.FIRST + n*FTRecord.SIZE]};

  -- Maps an old file-table index to its index in the new table, merging the
  -- file on first reference; FTNull and FTSelf map to themselves.
  MapFile: PROC [fti: FTIndex] RETURNS [FTIndex] ~ {
    SELECT fti FROM
      FTNull, FTSelf => RETURN [fti];
      ENDCASE => {
        fileIndex: CARDINAL ~ IndexForFti[fti];
        IF fileMap[fileIndex] = FTNull THEN
          fileMap[fileIndex] _ bcd.MergeFile[fti];
        RETURN [fileMap[fileIndex]]}};

  -- header processing

  -- Zeroes and fills in a BCD header record with version and source identity.
  InitHeader: PROC [
      header: BcdOps.BcdBase,
      objectVersion: VersionStamp,
      source: NameRecord _ NullName,
      sourceVersion: VersionStamp _ NullVersion] ~ {
    Zero[header, BcdDefs.BCD.SIZE];
    header.versionIdent _ BcdDefs.VersionID;
    header.version _ objectVersion;
    header.creator _ data.binderVersion;
    header.definitions _ (data.op = $conc);
    header.typeExported _ data.typeExported;
    header.source _ source;
    header.sourceVersion _ sourceVersion;
    header.repackaged _
      table.Bounds[sptype].size # 0 OR table.Bounds[fptype].size # 0;
    header.tableCompiled _ FALSE;
    header.spare1 _ TRUE};

  -- Destination descriptors for copied code and symbol segments.
  codeMap, symbolMap: LONG POINTER TO Map _ NIL;
  Map: TYPE ~ RECORD [
    fti: FTIndex,
    type: SegClass,
    filename: Strings.String,
    filehandle: File.Capability];

  -- Allocates the code/symbol Map records; a NIL filename means the segments
  -- go into the output BCD file itself (FTSelf).
  InitCodeSymbolCopy: PROC ~ {
    Setup: PROC [file: Strings.String, type: SegClass]
        RETURNS [LONG POINTER TO Map] ~ {
      RETURN [(data.zone).NEW[Map _ [
        type~type, filename~file, filehandle~nullFile,
        fti~IF file = NIL THEN FTSelf ELSE BcdUtilDefs.EnterFile[file]]]]};
    IF data.copyCode THEN codeMap _ Setup[data.codeName, $code];
    IF data.copySymbols
THEN symbolMap _ Setup[data.symbolName, $symbols]};

  -- Maps the code/symbol destination files into the new file table.
  MapCodeSymbolFiles: PROC ~ {
    IF data.copyCode THEN codeMap.fti _ MapFile[codeMap.fti];
    IF data.copySymbols THEN symbolMap.fti _ MapFile[symbolMap.fti]};

  -- Drops the file capabilities and frees the Map records.
  ReleaseCodeSymbolCopy: PROC ~ {
    Release: PROC [p: LONG POINTER TO Map] ~ {
      p.filehandle _ nullFile};
    IF codeMap # NIL THEN {Release[codeMap]; (data.zone).FREE[@codeMap]};
    IF symbolMap # NIL THEN {Release[symbolMap]; (data.zone).FREE[@symbolMap]}};

  -- Applies proc to every entry of the (new) segment table.
  EnumerateSegments: PROC [proc: PROC[SGIndex]] ~ {
    sgLimit: SGIndex ~ table.Top[sgtype];
    FOR sgi: SGIndex _ SGIndex.FIRST, sgi + SGRecord.SIZE UNTIL sgi = sgLimit DO
      proc[sgi] ENDLOOP};

  -- Applies proc to every OLD segment: the live table when not packing, or
  -- the saved table (via sgMap indices) when packing.
  EnumerateOldSegments: PROC [proc: PROC[SGIndex]] ~ {
    IF ~packing THEN EnumerateSegments[proc]
    ELSE FOR i: NAT IN [0..sgMap.length) DO proc[SgiForIndex[i]] ENDLOOP};

  -- Maps the file of every segment of the given class into the new file table.
  MapSegments: PROC [type: SegClass] ~ {
    CopySegment: PROC [sgi: SGIndex] ~ {
      IF sgb[sgi].class = type THEN sgb[sgi].file _ MapFile[sgb[sgi].file]};
    EnumerateSegments[CopySegment]};

  -- Creates and sizes a code/symbol output file, writes a header page for it,
  -- and returns a stream positioned after that page.
  InitFile: PROC [p: LONG POINTER TO Map, copiedPages: CARDINAL]
      RETURNS [stream: Stream.Handle, page: CARDINAL] ~ {
    lh: BcdDefs.BCD;
    bcdPages: CARDINAL ~ PagesForWords[BcdDefs.BCD.SIZE];
    -- code and symbol files get versions offset from the object stamp so the
    -- three output files have distinct versions
    version: VersionStamp ~ BumpVersion[
      OSMiscOps.StampToTime[data.objectStamp], (IF p.type=$code THEN 1 ELSE 2)];
    BcdUtilDefs.SetFileVersion[p.fti, version];
    p.filehandle _ OSMiscOps.FindFile[p.filename, $write];
    (p.filehandle).SetSize[1 + bcdPages + copiedPages];
    stream _ FileStream.Create[p.filehandle];
    InitHeader[header~@lh, objectVersion~version];
    lh.version _ ftb[p.fti].version;
    stream.PutBlock[[@lh, 0, BcdDefs.BCD.SIZE*bytesPerWord]];
    page _ 1 + bcdPages};

  BumpVersion: PROC [v: VersionStamp, n: CARDINAL] RETURNS [VersionStamp] ~ {
    v.time _ v.time + n; RETURN [v]};

  -- Positions the stream at the first byte of the given (zero-based) page.
  MoveToPageBoundary: PROC [stream: Stream.Handle, page: CARDINAL] ~ {
    FileStream.SetIndex[stream, Inline.LongMult[page, Environment.bytesPerPage]]};

  -- Code Packing

  -- A PackItem describes one PACK group: the modules whose code is merged
  -- into a single new code segment.
  PackHandle: TYPE ~ LONG POINTER TO PackItem;
  PackItem: TYPE ~ RECORD [
    link: PackHandle,
newsgi: SGIndex, -- in the new table
    item: SEQUENCE count: CARDINAL OF MTIndex];

  packing: BOOL;
  phHead, phTail: PackHandle _ NIL; -- linked list of PACK groups

  MakePackItem: Tree.Scan ~ {
    -- t is Tree.null, a list of ids, or a list of lists of ids
    ph: PackHandle _ NIL;
    i, nsons: CARDINAL;
    -- Processes one id in a PACK group: records the module and maps its old
    -- code segment onto the group's new segment, diagnosing misuse.
    Item: Tree.Scan ~ {
      itemId: Symbols.HTIndex;
      WITH t SELECT FROM
        symbol => {
          itemId _ stb[index].hti;
          WITH stb[index] SELECT FROM
            external =>
              WITH m~~map SELECT FROM
                module => {
                  ph.item[i] _ m.mti;
                  SELECT TRUE FROM
                    ~mtb[m.mti].packageable =>
                      BcdErrorDefs.ErrorNameBase[
                        $error, "is packaged and cannot be PACKed"L,
                        mtb[m.mti].name, bcd.ssb];
                    (ReadSgMap[mtb[m.mti].code.sgi] # SGNull) =>
                      BcdErrorDefs.ErrorNameBase[
                        $error, "cannot be PACKed twice"L,
                        mtb[m.mti].name, bcd.ssb];
                    ENDCASE =>
                      SetSgMap[old~mtb[m.mti].code.sgi, new~ph.newsgi]};
                ENDCASE => GOTO cant;
            ENDCASE => GOTO cant;
          EXITS
            cant =>
              BcdErrorDefs.ErrorHti[$error, "cannot be PACKed"L, itemId !
                BcdErrorDefs.GetSti => {RESUME [errorSti~Symbols.stNull]}]};
        ENDCASE;
      i _ i+1};
    IF t = Tree.null THEN RETURN;
    WITH t SELECT FROM
      subtree => {
        OPEN tt~~tb[index];
        IF tt.name # $list THEN Error[];
        data.textIndex _ tt.info;
        -- a list of lists: recurse over the sublists
        IF tt.son[1].tag = $subtree THEN {
          TreeOps.ScanList[t,MakePackItem]; RETURN}};
      ENDCASE;
    nsons _ TreeOps.ListLength[t];
    ph _ (data.zone).NEW[PackItem[nsons] _ [
      link~NIL, newsgi~table.Words[sgtype, SGRecord.SIZE], item~]];
    FOR j: CARDINAL IN [0..nsons) DO ph.item[j] _ MTNull ENDLOOP;
    sgb[ph.newsgi] _ [class~$code, file~codeMap.fti, base~0, pages~0, extraPages~0];
    i _ 0;
    TreeOps.ScanList[t, Item];
    -- append the group to the pack list
    IF phTail = NIL THEN phHead _ phTail _ ph
    ELSE {phTail.link _ ph; phTail _ ph}};

  FreePackItems: PROC ~ {
    next: PackHandle _ phHead;
    UNTIL next = NIL DO
      p: PackHandle _ next;
      next _ next.link;
      (data.zone).FREE[@p] ENDLOOP;
    phHead _ phTail _ NIL};

  -- Copies 'words' words from the file pages to the stream through a small
  -- buffer space, remapping the buffer window as it advances.
  WriteFromPages: PROC [
      stream: Stream.Handle, pages: FileSegment.Pages, words: CARDINAL] ~ {
    bufferPages: CARDINAL ~ MIN[pages.span.pages, 16];
    bufferSpace: Space.Handle _ Space.Create[size~bufferPages,
parent~Space.virtualMemory];
    base: CARDINAL _ pages.span.base;
    WHILE words # 0 DO
      wordsToTransfer: CARDINAL ~ MIN[words, bufferPages*Environment.wordsPerPage];
      pagesToTransfer: CARDINAL ~ PagesForWords[wordsToTransfer];
      bufferSpace.Map[[file~pages.file, base~base]];
      stream.PutBlock[[bufferSpace.LongPointer, 0, wordsToTransfer*bytesPerWord]];
      base _ (base + pagesToTransfer);
      words _ (words - wordsToTransfer);
      bufferSpace.Unmap[];
      ENDLOOP;
    Space.Delete[bufferSpace]};

  -- Writes each PACK group as one merged code segment starting at 'startpage'
  -- of 'out', fixing up module code offsets; returns the next free page.
  PackCodeSegments: PROC [out: Stream.Handle, startpage: CARDINAL]
      RETURNS [nextpage: CARDINAL] ~ {
    offset, validlength: CARDINAL;
    oldsgi: SGIndex;
    file: File.Capability;
    pages: FileSegment.Pages;
    -- Relocates every module still referencing oldsgi and tracks the largest
    -- used extent of the old segment (validlength).
    FixUpOneModule: PROC [mti: MTIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN module~~mtb[mti];
      length: CARDINAL;
      IF module.code.sgi = oldsgi THEN {
        length _ module.code.offset + module.code.length/2;
        module.code.offset _ module.code.offset + offset;
        module.code.packed _ TRUE;
        IF length > validlength THEN validlength _ length};
      RETURN};
    nextpage _ startpage;
    FOR ph: PackHandle _ phHead, ph.link UNTIL ph = NIL DO
      MoveToPageBoundary[stream~out, page~(nextpage-1)];
      offset _ 0;
      sgb[ph.newsgi].base _ nextpage;
      FOR pi: CARDINAL IN [0..ph.count) DO {
        mti: MTIndex ~ ph.item[pi];
        -- modules wanting code links get them written ahead of their code
        IF mtb[mti].linkLoc = $code AND ~mtb[mti].code.linkspace THEN {
          offset _ (AddLinksToCodeSegment[out, mti, offset, TRUE] + offset);
          GOTO ignore};
        oldsgi _ mtb[mti].code.sgi;
        [file, pages] _ SegmentForOldCodeSgi[oldsgi];
        IF file = nullFile THEN GOTO ignore;
        -- pad to the required code alignment
        IF (offset MOD Alignment) # 0 THEN
          FOR i: CARDINAL IN [(offset MOD Alignment)..Alignment) DO
            out.PutWord[0]; offset _ offset + 1 ENDLOOP;
        validlength _ 0;
        EnumerateModules[FixUpOneModule];
        WriteFromPages[stream~out, pages~pages, words~validlength];
        offset _ offset + validlength;
        EXITS ignore => NULL};
        ENDLOOP;
      sgb[ph.newsgi].pages _ PagesForWords[offset];
      nextpage _ nextpage + sgb[ph.newsgi].pages;
      ENDLOOP;
    RETURN};

  -- Returns the file and page span of an old code segment, or nullFile if it
  -- was already copied, has no file, or the file cannot be opened/verified.
  SegmentForOldCodeSgi: PROC [sgi: SGIndex]
      RETURNS [f: File.Capability _ nullFile, s:
FileSegment.Pages _ nullPages] ~ {
    OPEN seg~~bcd.sgb[sgi];
    IF Copied[sgi] OR seg.file = FTNull THEN RETURN;
    f _ FileForFti[seg.file];
    IF f = nullFile THEN
      BcdErrorDefs.ErrorNameBase[
        class~$error, s~"could not be opened to copy code"L,
        name~bcd.ftb[seg.file].name, base~bcd.ssb]
    ELSE {
      s _ [
        file~f, span~[base~seg.base, pages~(seg.pages + seg.extraPages)]];
      IF WrongOldSegVersion[s, bcd.ftb[seg.file].version] THEN {
        BcdErrorDefs.ErrorNameBase[
          class~$error, s~"on disk has an incorrect version"L,
          name~bcd.ftb[seg.file].name, base~bcd.ssb];
        f _ nullFile; s _ nullPages}};
    -- a failed copy invalidates the whole output BCD
    IF s.file = File.nullCapability THEN header.versionIdent _ 0;
    SetCopied[sgi]};

  -- TRUE if the BCD header on page 1 of s.file does not carry 'version'.
  WrongOldSegVersion: PROC [s: FileSegment.Pages, version: BcdDefs.VersionStamp]
      RETURNS [reply: BOOL] ~ {
    h: BcdOps.BcdBase;
    headerSpace: Space.Handle _ Space.Create[size~1, parent~Space.virtualMemory];
    headerSpace.Map[[file~s.file, base~1]];
    h _ headerSpace.LongPointer;
    reply _ (h.version # version);
    Space.Delete[headerSpace];
    RETURN [reply]};

  -- Segment Mapping

  -- sgMap: old segment index -> new segment index (only while packing).
  -- copyMap: per old segment, whether it has already been copied.
  SGMap: TYPE ~ RECORD [SEQUENCE length: CARDINAL OF SGIndex];
  CopyMap: TYPE ~ RECORD [SEQUENCE length: CARDINAL OF BOOL];
  sgMap: LONG POINTER TO SGMap _ NIL;
  copyMap: LONG POINTER TO CopyMap _ NIL;

  IndexForSgi: PROC [sgi: SGIndex] RETURNS [CARDINAL] ~ INLINE {
    RETURN [LOOPHOLE[sgi,CARDINAL]/SGRecord.SIZE]};

  SgiForIndex: PROC [i: CARDINAL] RETURNS [SGIndex] ~ INLINE {
    RETURN [SGIndex.FIRST + i*SGRecord.SIZE]};

  InitCopyMap: PROC [nsgis: CARDINAL] ~ {
    copyMap _ (data.zone).NEW[CopyMap[nsgis]];
    FOR i: CARDINAL IN [0..nsgis) DO copyMap[i] _ FALSE ENDLOOP};

  FreeCopyMap: PROC ~ {
    IF copyMap # NIL THEN (data.zone).FREE[@copyMap]};

  SetCopied: PROC [sgi: SGIndex] ~ {
    copyMap[IndexForSgi[sgi]] _ TRUE};

  Copied: PROC [sgi: SGIndex] RETURNS [BOOL] ~ {
    RETURN [copyMap[IndexForSgi[sgi]]]};

  InitSgMap: PROC [nsgis: CARDINAL] ~ {
    sgMap _ (data.zone).NEW[SGMap[nsgis]];
    FOR i: CARDINAL IN [0..nsgis) DO sgMap[i] _ BcdDefs.SGNull ENDLOOP};

  FreeSgMap: PROC ~ {
    IF sgMap # NIL THEN
(data.zone).FREE[@sgMap]};

  SetSgMap: PROC [old, new: SGIndex] ~ {
    IF (packing AND old # SGNull) THEN sgMap[IndexForSgi[old]] _ new};

  -- Identity when not packing (or for SGNull); otherwise the new segment
  -- index recorded for 'old'.
  ReadSgMap: PROC [old: SGIndex] RETURNS [SGIndex] ~ {
    RETURN [IF (~packing OR old = SGNull) THEN old ELSE sgMap[IndexForSgi[old]]]};

  FillInSgMap: PROC ~ {
    -- called only when packing (i.e. packing requested AND copyCode = TRUE)
    -- allocates new segment entries for every old segment not claimed by a
    -- PACK group, pointing them at the appropriate output file
    FOR i: CARDINAL IN [0..sgMap.length) DO
      IF sgMap[i] = SGNull THEN {
        oldsgi: SGIndex ~ SgiForIndex[i];
        newsgi: SGIndex ~ table.Words[sgtype, SGRecord.SIZE];
        sgb[newsgi] _ bcd.sgb[oldsgi];
        sgb[newsgi].file _ (
          IF sgb[newsgi].class = $symbols THEN (
            IF data.copySymbols THEN symbolMap.fti
            ELSE MapFile[sgb[newsgi].file])
          ELSE codeMap.fti);
        sgMap[i] _ newsgi};
      ENDLOOP};

  FixAllSgis: PROC ~ {
    -- replace all sgis with ReadSgMap[sgi]
    FixModule: PROC [mti: MTIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN m~~mtb[mti];
      m.code.sgi _ ReadSgMap[m.code.sgi];
      m.sseg _ ReadSgMap[m.sseg]};
    FixSpace: PROC [spi: SPIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN sp~~spb[spi];
      sp.seg _ ReadSgMap[sp.seg]};
    EnumerateModules[FixModule];
    EnumerateSpaces[FixSpace]};

  -- Code Links

  -- Number of links in a module's link fragment (0 if none).
  LinkCount: PROC [mti: MTIndex] RETURNS [CARDINAL] ~ INLINE {
    RETURN [IF mtb[mti].links = LFNull THEN 0 ELSE lfb[mtb[mti].links].length]};

  -- Rounds offset up to the next multiple of Alignment.
  AlignOffset: PROC [offset: CARDINAL] RETURNS [CARDINAL] ~ INLINE {
    RETURN [((offset + (Alignment-1))/Alignment)*Alignment]};

  -- Writes the module's code preceded by reserved (zeroed) link space at the
  -- current 'offset' in the segment being written to 'stream'; relocates all
  -- modules sharing the segment; returns the number of words written.
  AddLinksToCodeSegment: PROC [
      stream: Stream.Handle, mti: MTIndex, offset: CARDINAL, packed: BOOL]
      RETURNS [CARDINAL] ~ {
    sgi: SGIndex ~ mtb[mti].code.sgi;
    codeLength: CARDINAL ~ mtb[mti].code.length/2;
    linkSpace: CARDINAL;
    f: File.Capability;
    s: FileSegment.Pages;
    prefixWords: CARDINAL _ 0;
    -- Marks every module in this segment as having link space and shifts its
    -- code offset.
    FixOffset: PROC [mti: MTIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN c~~mtb[mti].code;
      IF c.sgi = sgi THEN {
        c.linkspace _ TRUE;
        c.offset _ c.offset+offset;
        c.packed _ packed}};
    [f, s] _ SegmentForOldCodeSgi[sgi];
    IF f = nullFile THEN RETURN [0];
    linkSpace _ LinkCount[mti];
    -- first module in the segment: write a one-word prefix recording the
    -- (aligned) link count
    IF offset = 0 AND linkSpace # 0 THEN {
      prefixWords _ 1;
stream.PutWord[linkSpace + Alignment - (linkSpace MOD Alignment)];
      offset _ offset+1};
    -- pad the link area so the code that follows is aligned
    IF (offset+linkSpace) MOD Alignment # 0 THEN
      linkSpace _ linkSpace + Alignment - ((offset+linkSpace) MOD Alignment);
    offset _ offset + linkSpace;
    EnumerateModules[FixOffset];
    FOR i: CARDINAL IN [0..linkSpace) DO stream.PutWord[0] ENDLOOP;
    WriteFromPages[stream~stream, pages~s, words~codeLength];
    RETURN [prefixWords + linkSpace + codeLength]};

  -- code and symbol copying

  EstimateCopiedPages: PROC RETURNS [codePages, symbolPages: CARDINAL _ 0] ~ {
    -- estimates ignore possible packing of code
    packaged: BOOL _ FALSE;
    -- Accumulates the page cost of one module's code (with link space where
    -- requested) and symbols.
    AddModule: PROC [mti: MTIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN m~~mtb[mti];
      IF data.copyCode AND m.code.sgi # SGNull THEN {
        OPEN seg~~bcd.sgb[m.code.sgi];
        IF ~m.packageable THEN packaged _ TRUE
        ELSE {
          IF m.linkLoc = $code AND ~m.code.linkspace THEN {
            nLinks: CARDINAL ~ LinkCount[mti];
            offset: CARDINAL ~ AlignOffset[IF nLinks=0 THEN 0 ELSE 1+nLinks];
            codePages _ codePages + PagesForWords[offset + m.code.length/2]}
          ELSE codePages _ codePages + seg.pages;
          codePages _ codePages + seg.extraPages}};
      IF data.copySymbols AND m.sseg # SGNull THEN {
        OPEN seg~~bcd.sgb[m.sseg];
        symbolPages _ symbolPages + seg.pages + seg.extraPages};
      RETURN};
    -- Adds whole segments referenced by non-packageable (packaged) modules.
    AddSegment: PROC [oldSgi: SGIndex] ~ {
      OPEN seg~~bcd.sgb[oldSgi];
      IF seg.class = $code THEN {
        package: BOOL _ FALSE;
        TestModule: PROC [mti: MTIndex] RETURNS [BOOL] ~ {
          OPEN m~~mtb[mti];
          IF ~m.packageable AND m.code.sgi = oldSgi THEN package _ TRUE;
          RETURN [m.code.sgi = oldSgi]};
        EnumerateModules[TestModule];
        IF package THEN codePages _ codePages + seg.pages + seg.extraPages}};
    IF data.copyCode OR data.copySymbols THEN EnumerateModules[AddModule];
    IF data.copyCode AND packaged THEN EnumerateOldSegments[AddSegment];
    RETURN};

  -- Copies all code segments into the code destination (the output BCD itself
  -- when codeMap.fti = FTSelf), packing groups first, then modules that get
  -- code links, then the remaining segments.
  MoveCodeSegments: PROC [copiedPages: CARDINAL] ~ {
    stream: Stream.Handle;
    nextPage: CARDINAL;
    -- Rewrites one module's code with link space ahead of it.
    AddLinks: PROC [mti: MTIndex] RETURNS [BOOL _ FALSE] ~ {
      OPEN m~~mtb[mti];
      wordsWritten, pagesWritten: CARDINAL;
      newSgi: SGIndex;
      -- IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
      IF m.linkLoc = $code AND ~m.code.linkspace AND m.packageable THEN {
        IF m.code.packed THEN
          BcdErrorDefs.ErrorName[
            $error,
            "was previously PACKed and can not now have code links added"L,
            m.name]
        ELSE {
          MoveToPageBoundary[stream~stream, page~(nextPage-1)];
          wordsWritten _ AddLinksToCodeSegment[
            stream~stream, mti~mti, offset~0, packed~FALSE];
          pagesWritten _ PagesForWords[wordsWritten];
          newSgi _ ReadSgMap[m.code.sgi];
          sgb[newSgi].file _ codeMap.fti;
          sgb[newSgi].base _ nextPage;
          sgb[newSgi].pages _ pagesWritten;
          nextPage _ nextPage + pagesWritten}}};
    -- Copies one remaining old code segment verbatim.
    MoveOne: PROC [oldSgi: SGIndex] ~ {
      OPEN seg~~bcd.sgb[oldSgi];
      -- IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
      IF seg.class = $code THEN {
        f: File.Capability;
        s: FileSegment.Pages;
        [f, s] _ SegmentForOldCodeSgi[oldSgi];
        IF f # nullFile THEN {
          segPages: CARDINAL ~ s.span.pages;
          newSgi: SGIndex ~ ReadSgMap[oldSgi];
          sgb[newSgi].file _ codeMap.fti;
          sgb[newSgi].base _ nextPage;
          MoveToPageBoundary[stream~stream, page~(nextPage-1)];
          WriteFromPages[
            stream~stream, pages~s, words~(segPages * Environment.wordsPerPage)];
          nextPage _ nextPage + segPages}}};
    IF codeMap.fti = FTSelf THEN {stream _ bcdStream; nextPage _ nextBcdPage}
    ELSE [stream, nextPage] _ InitFile[codeMap, copiedPages];
    nextPage _ PackCodeSegments[stream, nextPage];
    EnumerateModules[AddLinks];
    EnumerateOldSegments[MoveOne];
    IF codeMap.fti = FTSelf THEN nextBcdPage _ nextPage
    ELSE Stream.Delete[stream]};

  -- Ensures a file-table entry exists for every symbol segment that will not
  -- be copied, so references remain resolvable.
  EnterMissingSymbolFiles: PROC ~ {
    CheckOneSymbolsFileSeg: PROC [oldSgi: SGIndex] ~ {
      OPEN seg~~bcd.sgb[oldSgi];
      f: File.Capability _ File.nullCapability;
      IF (seg.class = $symbols) AND ~Copied[oldSgi] AND (seg.file # FTNull) THEN
        -- insure that a file entry exists for this file
        [] _ MapFile[bcd.sgb[oldSgi].file]};
    EnumerateOldSegments[CheckOneSymbolsFileSeg]};

  -- Copies all symbol segments into the symbol destination (the output BCD
  -- itself when symbolMap.fti = FTSelf).
  MoveSymbolSegments: PROC [copiedPages: CARDINAL] ~ {
    stream: Stream.Handle;
    nextPage: CARDINAL;
    MoveOne: PROC [oldSgi: SGIndex] ~ {
      OPEN seg~~bcd.sgb[oldSgi];
      f:
File.Capability _ nullFile;
      newSgi: SGIndex;
      -- IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
      IF (seg.class # $symbols) OR Copied[oldSgi] OR (seg.file = FTNull) THEN
        RETURN;
      newSgi _ ReadSgMap[oldSgi];
      f _ FileForFti[seg.file];
      IF f = nullFile THEN {
        -- source unavailable: keep a (mapped) reference to the original file
        BcdErrorDefs.ErrorNameBase[
          class~$warning, s~"could not be opened to copy symbols"L,
          name~bcd.ftb[seg.file].name, base~bcd.ssb];
        sgb[newSgi] _ bcd.sgb[oldSgi];
        sgb[newSgi].file _ MapFile[bcd.sgb[oldSgi].file]}
      ELSE {
        s: FileSegment.Pages ~ [
          file~f, span~[base~seg.base, pages~(seg.pages + seg.extraPages)]];
        IF WrongOldSegVersion[s, bcd.ftb[seg.file].version] THEN {
          BcdErrorDefs.ErrorNameBase[
            class~$error, s~"on disk has incorrect version"L,
            name~bcd.ftb[seg.file].name, base~bcd.ssb];
          header.versionIdent _ 0}
        ELSE {
          segPages: CARDINAL ~ s.span.pages;
          sgb[newSgi].file _ symbolMap.fti;
          sgb[newSgi].base _ nextPage;
          MoveToPageBoundary[stream~stream, page~(nextPage-1)];
          WriteFromPages[
            stream~stream, pages~s, words~(segPages * Environment.wordsPerPage)];
          nextPage _ nextPage + segPages}};
      SetCopied[oldSgi]};
    IF symbolMap.fti = FTSelf THEN {stream _ bcdStream; nextPage _ nextBcdPage}
    ELSE [stream, nextPage] _ InitFile[symbolMap, copiedPages];
    EnumerateOldSegments[MoveOne];
    IF symbolMap.fti = FTSelf THEN nextBcdPage _ nextPage
    ELSE Stream.Delete[stream]};

  -- Opens (read-only) the file named by an old file-table entry, normalizing
  -- the name first; returns nullFile if it cannot be opened.
  FileForFti: PROC [oldFti: BcdDefs.FTIndex] RETURNS [f: File.Capability] ~ {
    name: BcdDefs.NameRecord ~ bcd.ftb[oldFti].name;
    ssd: Strings.SubStringDescriptor _ [
      base~@bcd.ssb.string, offset~name, length~bcd.ssb.size[name]];
    nameStr: Strings.String _ [100];
    f _ nullFile;
    NormalizeFileName[in~@ssd, out~nameStr];
    f _ OSMiscOps.FindFile[nameStr, $read !
      OSMiscOps.FileError => {CONTINUE}];
    RETURN};

  -- Lower-cases the name and appends ".bcd" if no extension is present.
  NormalizeFileName: PROC [in: Strings.SubString, out: Strings.String] ~ {
    char: CHAR;
    dot: BOOL _ FALSE;
    out.length _ 0;
    FOR i: CARDINAL IN [in.offset .. in.offset+in.length) DO
      SELECT (char _ in.base[i]) FROM
        IN ['A..'Z] => char _ VAL[(char.ORD-'A.ORD) + 'a.ORD];
        '.
=> dot _ TRUE;
        ENDCASE;
      Strings.AppendChar[out, char];
      ENDLOOP;
    IF ~dot THEN Strings.AppendString[out, ".bcd"L]};

  -- Bcd Output Routines

  bcdStream: Stream.Handle;
  nextBcdPage: CARDINAL; -- next free page in the output BCD file

  WriteSubTable: PROC [selector: Table.Selector] ~ {
    base: Table.Base;
    size: CARDINAL;
    [base, size] _ table.Bounds[selector];
    bcdStream.PutBlock[[base, 0, size*bytesPerWord]]};

  -- Lays out the header offsets/limits for every sub-table, writes literals,
  -- copies code/symbol segments, then writes the header and all sub-tables
  -- to the output file.
  TableOut: PROC ~ {
    d, s: CARDINAL;
    bcdPages, codePages, symbolPages: CARDINAL;
    basePages: CARDINAL;
    rtPageCount: CARDINAL;
    saveNextPage: CARDINAL;
    saveIndex: FileStream.FileByteIndex;
    rtPageCount _ PagesForWords[BcdLiterals.LitSegSize[]];
    BEGIN
    OPEN header;
    IF firstdummy = 0 THEN firstdummy _ BcdUtilDefs.GetGfi[0];
    -- d accumulates the running word offset of each sub-table in the file
    d _ BCD.SIZE;
    ssOffset _ d; d _ d + (ssLimit _ table.Bounds[sstype].size);
    ctOffset _ d; d _ d + (s _ table.Bounds[cttype].size); ctLimit _ LOOPHOLE[s];
    mtOffset _ d; d _ d + (s _ table.Bounds[mttype].size); mtLimit _ LOOPHOLE[s];
    impOffset _ d; d _ d + (s _ table.Bounds[imptype].size); impLimit _ LOOPHOLE[s];
    expOffset _ d; d _ d + (s _ table.Bounds[exptype].size); expLimit _ LOOPHOLE[s];
    evOffset _ d; d _ d + (s _ table.Bounds[evtype].size);
    evLimit _ LOOPHOLE[s, EVIndex];
    sgOffset _ d; d _ d + (s _ table.Bounds[sgtype].size); sgLimit _ LOOPHOLE[s];
    ftOffset _ d; d _ d + (s _ table.Bounds[fttype].size); ftLimit _ LOOPHOLE[s];
    ntOffset _ d; d _ d + (s _ table.Bounds[nttype].size); ntLimit _ LOOPHOLE[s];
    typOffset _ d; d _ d + (s _ table.Bounds[typtype].size); typLimit _ LOOPHOLE[s];
    tmOffset _ d; d _ d + (s _ table.Bounds[tmtype].size); tmLimit _ LOOPHOLE[s];
    spOffset _ d; d _ d + (s _ table.Bounds[sptype].size); spLimit _ LOOPHOLE[s];
    fpOffset _ d; d _ d + (s _ table.Bounds[fptype].size); fpLimit _ LOOPHOLE[s];
    lfOffset _ d; d _ d + (s _ table.Bounds[lftype].size); lfLimit _ LOOPHOLE[s];
    rfOffset _ d; d _ d + (s _ table.Bounds[rftype].size); rfLimit _ LOOPHOLE[s];
    tfOffset _ d; d _ d + (s _ table.Bounds[tftype].size); tfLimit _ LOOPHOLE[s];
    basePages _ PagesForWords[d];
    rtPages _
[relPageBase~basePages, pages~rtPageCount];
    nPages _ bcdPages _ basePages + rtPageCount;
    END;
    [codePages, symbolPages] _ EstimateCopiedPages[];
    -- segments destined for the output BCD itself enlarge it
    IF data.copyCode AND codeMap.fti = FTSelf THEN
      bcdPages _ bcdPages + codePages;
    IF data.copySymbols AND symbolMap.fti = FTSelf THEN
      bcdPages _ bcdPages + symbolPages;
    OpenOutputFile[1 + bcdPages];
    IF rtPageCount # 0 THEN {
      MoveToPageBoundary[stream~bcdStream, page~basePages];
      IF packing THEN BcdLiterals.UpdateSegments[ReadSgMap];
      BcdLiterals.WriteLiterals[bcdStream];
      saveIndex _ FileStream.GetIndex[bcdStream]};
    saveNextPage _ nextBcdPage _ header.nPages + 1;
    IF data.copyCode THEN
      MoveCodeSegments[codePages ! UserAbort => {GO TO AbortRequested}];
    IF data.copySymbols THEN
      MoveSymbolSegments[symbolPages ! UserAbort => {GO TO AbortRequested}];
    IF packing THEN FixAllSgis[];
    -- rewind and write the header followed by every sub-table, in the same
    -- order as the offsets computed above
    FileStream.SetIndex[bcdStream, 0];
    bcdStream.PutBlock[[header, 0, BCD.SIZE*bytesPerWord]];
    WriteSubTable[sstype];
    WriteSubTable[cttype];
    WriteSubTable[mttype];
    WriteSubTable[imptype];
    WriteSubTable[exptype];
    WriteSubTable[evtype];
    WriteSubTable[sgtype];
    WriteSubTable[fttype];
    WriteSubTable[nttype];
    WriteSubTable[typtype];
    WriteSubTable[tmtype];
    WriteSubTable[sptype];
    WriteSubTable[fptype];
    WriteSubTable[lftype];
    WriteSubTable[rftype];
    WriteSubTable[tftype];
    IF nextBcdPage # saveNextPage THEN
      MoveToPageBoundary[stream~bcdStream, page~(nextBcdPage-1)]
    ELSE IF rtPageCount # 0 THEN FileStream.SetIndex[bcdStream, saveIndex];
    -- report statistics back to the shared binder data
    data.nConfigs _ header.nConfigs;
    data.nModules _ header.nModules;
    data.nImports _ header.nImports;
    data.nExports _ header.nExports;
    data.nPages _ header.nPages;
    EXITS AbortRequested => data.aborted _ TRUE};

  -- Opens (creating if necessary) the output BCD file at the given size and
  -- attaches bcdStream to it.
  OpenOutputFile: PROC [initialPages: CARDINAL] ~ INLINE {
    file: File.Capability _ data.outputFile;
    IF file = File.nullCapability THEN
      file _ data.outputFile _ OSMiscOps.FindFile[data.bcdName, $write];
    file.SetSize[initialPages];
    bcdStream _ FileStream.Create[file]};

  CloseOutputFile: PROC ~ INLINE {Stream.Delete[bcdStream]; bcdStream _ NIL};

  }.