DIRECTORY
Alloc: TYPE USING [AddNotify, Bounds, DropNotify, Handle, Notifier, Top, Trim, Words],
Ascii: TYPE USING [Lower],
Basics: TYPE USING [bytesPerWord, LongMult, RawBytes],
BcdComData: TYPE USING [aborted, binderVersion, codeName, copyCode, copySymbols, literals, nConfigs, nExports, nImports, nModules, nPages, objectStamp, op, outputFile, sourceName, sourceVersion, symbolName, table, typeExported, textIndex],
BcdControlDefs: TYPE USING [],
BcdDefs: TYPE USING [BCD, BcdBase, ControlItem, CTIndex, CTRecord, cttype, cxtype, EVIndex, evtype, EXPIndex, EXPRecord, exptype, FPIndex, FPRecord, fptype, FTIndex, FTNull, FTRecord, FTSelf, fttype, IMPIndex, IMPNull, imptype, LFNull, lftype, Link, MTIndex, MTNull, MTRecord, mttype, Namee, NameRecord, NameString, NTIndex, NTRecord, nttype, NullName, NullVersion, PackedString, PageSize, rftype, SegClass, SGIndex, SGNull, SGRecord, sgtype, SpaceID, SPIndex, SPRecord, sptype, sstype, sttype, tftype, tmtype, treetype, typtype, VersionID, VersionStamp],
BcdErrorDefs: TYPE USING [ErrorHti, ErrorName, ErrorNameBase, GetSti],
BcdLiterals: TYPE USING [EnterVersionFiles, SegmentSize, UpdateSegments, Write],
BcdUtilDefs: TYPE USING [BcdBases, ContextForTree, EnterExport, EnterFile, EnterImport, EnterName, GetGfi, MapName, MergeFile, NameForHti, SetFileVersion, BcdBasePtr],
ConvertUnsafe: TYPE USING [SubString],
FileSegment: TYPE USING [Pages, nullPages],
FS: TYPE USING [Close, Error, nullOpenFile, Open, OpenFile, Read, SetPageCount, StreamFromOpenFile],
HashOps: TYPE USING [Reset],
IO: TYPE USING [Close, GetIndex, GetLength, SetIndex, SetLength, STREAM, UnsafePutBlock],
OSMiscOps: TYPE USING [StampToTime],
PrincOpsUtils: TYPE USING [LongCopy],
Process: TYPE USING [CheckForAbort],
Rope: TYPE USING [Concat, Flatten, FromProc, Length, ROPE],
Symbols: TYPE USING [CXIndex, HTIndex, htNull, STIndex, stNull],
Table: TYPE USING [Base, Selector],
Tree: TYPE USING [Index, Link, Scan, null],
TreeOps: TYPE USING [GetNode, ListLength, ScanList],
VM: TYPE USING [bytesPerPage, wordsPerPage, AddressForPageNumber, Allocate, Free, Interval];
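-- BcdWrite writes the binder's output BCD (binary configuration description), optionally copying code and symbol segments (data.copyCode, data.copySymbols).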
BcdWrite: PROGRAM
IMPORTS Alloc, Ascii, Basics, BcdErrorDefs, BcdLiterals, BcdUtilDefs, FS, IO, HashOps, OSMiscOps, PrincOpsUtils, Process, Rope, TreeOps, VM, data: BcdComData
EXPORTS BcdControlDefs = {
OPEN BcdDefs;
pageSizeCheck: BOOL[TRUE..(VM.wordsPerPage = BcdDefs.PageSize)] = TRUE;
-- a basic assumption of this module is equality of these two page sizes
bytesPerWord: CARDINAL = Basics.bytesPerWord;
nullFile: FS.OpenFile = FS.nullOpenFile;
nullPages: FileSegment.Pages = FileSegment.nullPages;
Alignment: CARDINAL = 4; -- Code Segments must start at 0 MOD Alignment
BcdWriteError: PUBLIC ERROR = CODE;
Error: PROC = {ERROR BcdWriteError};
UserAbort: ERROR = CODE; -- raised on ^DEL during code or symbol copying
table: Alloc.Handle;
tb, stb, ctb, mtb, lfb, etb, itb, sgb, tyb, tmb, ftb, ntb, spb, fpb, cxb: Table.Base;
ssb: BcdDefs.NameString;
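-- Notifier: called by the allocator whenever its tables move; refreshes the cached base pointers above and, when bcd is non-NIL, the copies held in bcd.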
Notifier: Alloc.Notifier = {
tb ← base[treetype]; stb ← base[sttype];
ctb ← base[cttype]; mtb ← base[mttype]; lfb ← base[lftype];
tyb ← base[typtype]; tmb ← base[tmtype];
etb ← base[exptype]; itb ← base[imptype];
sgb ← base[sgtype]; ftb ← base[fttype];
spb ← base[sptype]; fpb ← base[fptype];
ntb ← base[nttype]; ssb ← base[sstype];
cxb ← base[cxtype];
IF bcd # NIL THEN {
  bcd.ctb ← ctb; bcd.mtb ← mtb;
  IF ~packing THEN bcd.sgb ← sgb;
  bcd.tyb ← tyb; bcd.tmb ← tmb; bcd.spb ← spb; bcd.fpb ← fpb}};
Copy: PROC[from: LONG POINTER, nwords: CARDINAL, to: LONG POINTER] = PrincOpsUtils.LongCopy;
Zero: PROC[p: LONG POINTER, l: CARDINAL] = INLINE {
IF l # 0 THEN {p^ ← 0; PrincOpsUtils.LongCopy[from: p, to: (p+1), nwords: (l-1)]}};
PagesForWords: PROC[nWords: CARDINAL] RETURNS[CARDINAL] = INLINE {
RETURN[(nWords + VM.wordsPerPage-1)/VM.wordsPerPage]};
PutWord: PROC[stream: IO.STREAM, word: WORD] = {
  stream.UnsafePutBlock[[
    LOOPHOLE[(@word).LONG, LONG POINTER TO Basics.RawBytes], 0, WORD.SIZE*Basics.bytesPerWord]];
};
bcd: BcdUtilDefs.BcdBasePtr ← NIL;
header: BcdDefs.BcdBase; -- points to Bcd header and saved tables
headerInterval: VM.Interval;
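-- WriteBcd: main entry point; maps configs, modules, types, spaces, frame packs, imports, and exports into the output tables, then calls TableOut and CloseOutputFile.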
WriteBcd: PUBLIC PROC[root: Tree.Link] = {
saveIndex: CARDINAL = data.textIndex;
node, subNode: Tree.Index;
table ← data.table; table.AddNotify[Notifier];
node ← TreeOps.GetNode[root];
packing ← (tb[node].son[2] # Tree.null AND data.copyCode);
Initialize[];
IF packing THEN {
MakePackItem[tb[node].son[2]];
data.textIndex ← saveIndex;
FillInSgMap[]};
CopyConfigs[];
CopyModules[];
CopyTypes[];
CopySpaces[]; CopyFramePacks[];
subNode ← TreeOps.GetNode[tb[node].son[3]];
data.textIndex ← tb[subNode].info;
TreeOps.ScanList[tb[subNode].son[1], CopyImport];
TreeOps.ScanList[tb[subNode].son[2], CopyExport];
IF tb[subNode].attrs[$exportsALL] THEN
ExportCx[BcdUtilDefs.ContextForTree[tb[subNode].son[4]]];
IF data.copySymbols THEN EnterMissingSymbolFiles[];
(data.literals).EnterVersionFiles[bcd.ftb, FtiForIndex[fileMap.length], MapFile];
TableOut[];
CloseOutputFile[];
Finalize[]; data.textIndex ← saveIndex;
table.DropNotify[Notifier]; table ← NIL};
Initialize: PROC = {
impSize, expSize, sgSize, fSize, nSize, ssSize: CARDINAL;
nSgis: CARDINAL;
b: Table.Base;
desc: ConvertUnsafe.SubString;
desc.base ← LOOPHOLE[(data.sourceName).Flatten[]];
desc.offset ← 0;
desc.length ← data.sourceName.Length[];
IF data.copyCode OR data.copySymbols THEN InitCodeSymbolCopy[];
impSize ← table.Bounds[imptype].size;
expSize ← table.Bounds[exptype].size;
sgSize ← table.Bounds[sgtype].size;
nSgis ← sgSize/SGRecord.SIZE;
IF ~packing THEN sgSize ← 0;
fSize ← table.Bounds[fttype].size;
nSize ← table.Bounds[nttype].size;
ssSize ← table.Bounds[sstype].size;
bcd ← NEW[BcdUtilDefs.BcdBases];
fileMap ← NEW[FileMap[fSize/FTRecord.SIZE]];
FOR i: CARDINAL IN [0..fileMap.length) DO fileMap[i] ← FTNull ENDLOOP;
headerInterval ← VM.Allocate[
  PagesForWords[BCD.SIZE + impSize + expSize + sgSize + fSize + nSize + ssSize]];
header ← VM.AddressForPageNumber[headerInterval.page];
b ← (LOOPHOLE[header, Table.Base] + BCD.SIZE);
Copy[to: (bcd.etb ← b), from: etb, nwords: expSize];
b ← b + expSize; table.Trim[exptype,0];
Copy[to: (bcd.itb ← b), from: itb, nwords: impSize];
b ← b + impSize; table.Trim[imptype,0];
Copy[to: (bcd.ftb ← b), from: ftb, nwords: fSize];
b ← b + fSize; table.Trim[fttype,0];
Copy[to: (bcd.ntb ← b), from: ntb, nwords: nSize];
b ← b + nSize; table.Trim[nttype,0];
Copy[to: (bcd.ssb ← b), from: ssb, nwords: ssSize];
b ← b + ssSize;
HashOps.Reset[];
IF packing THEN { -- save old segment table in heap
Copy[to: (bcd.sgb ← b), from: sgb, nwords: sgSize];
b ← b + sgSize; table.Trim[sgtype,0]};
InitHeader[
header: header,
objectVersion: OSMiscOps.StampToTime[data.objectStamp],
source: BcdUtilDefs.EnterName[desc],
sourceVersion: data.sourceVersion];
bcd.ctb ← table.Bounds[cttype].base;
bcd.mtb ← table.Bounds[mttype].base;
bcd.tyb ← table.Bounds[typtype].base;
bcd.tmb ← table.Bounds[tmtype].base;
bcd.spb ← table.Bounds[sptype].base;
bcd.fpb ← table.Bounds[fptype].base;
IF data.copyCode OR data.copySymbols THEN {MapCodeSymbolFiles[]; InitCopyMap[nSgis]};
IF packing THEN InitSgMap[nSgis]
ELSE {
bcd.sgb ← table.Bounds[sgtype].base;
IF ~data.copyCode THEN MapSegments[$code];
IF ~data.copySymbols THEN MapSegments[$symbols]}};
Finalize: PROC = {
IF data.copyCode OR data.copySymbols THEN ReleaseCodeSymbolCopy[];
fileMap ← NIL; bcd ← NIL;
headerInterval.Free[];
FreePackItems[];
IF packing THEN FreeSgMap[];
IF data.copyCode OR data.copySymbols THEN FreeCopyMap[]};
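-- CopyName: finds the saved name-table entry for olditem and creates a corresponding entry for newitem with the name mapped through bcd.MapName.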
CopyName: PROC[olditem, newitem: Namee] = {
  newNti: NTIndex = table.Words[nttype, NTRecord.SIZE];
  FOR nti: NTIndex ← NTIndex.FIRST, nti+NTRecord.SIZE DO
    OPEN old: bcd.ntb[nti];
    IF old.item = olditem THEN {
      OPEN new: ntb[newNti];
      new.item ← newitem; new.name ← bcd.MapName[old.name];
      RETURN};
    ENDLOOP};
CopyConfigs: PROC = {
  -- configs are already copied, only map names and files
  cti: CTIndex ← CTIndex.FIRST;
  ctLimit: CTIndex = table.Top[cttype];
  UNTIL cti = ctLimit DO
    header.nConfigs ← header.nConfigs + 1;
    ctb[cti].name ← bcd.MapName[ctb[cti].name];
    ctb[cti].file ← MapFile[ctb[cti].file];
    IF ctb[cti].namedInstance THEN CopyName[[config[cti]], [config[cti]]];
    cti ← cti + (CTRecord.SIZE + ctb[cti].nControls*ControlItem.SIZE);
    ENDLOOP};
CopyModules: PROC = {
  -- modules are already copied, only map names and files
  MapOne: PROC[mti: MTIndex] RETURNS[BOOL ← FALSE] = {
OPEN m: mtb[mti];
header.nModules ← header.nModules + 1;
m.name ← bcd.MapName[m.name];
m.file ← MapFile[m.file];
IF m.namedInstance THEN CopyName[[module[mti]], [module[mti]]]};
EnumerateModules[MapOne]};
EnumerateModules: PROC[p: PROC[MTIndex] RETURNS[BOOL]] = {
  mti: MTIndex ← MTIndex.FIRST;
  mtLimit: MTIndex = table.Top[mttype];
  UNTIL mti = mtLimit DO
    IF p[mti] THEN EXIT;
    mti ← mti + (WITH m: mtb[mti] SELECT FROM
      direct => MTRecord.direct.SIZE + m.length*Link.SIZE,
      indirect => MTRecord.indirect.SIZE,
      multiple => MTRecord.multiple.SIZE,
      ENDCASE => ERROR);
    ENDLOOP};
CopyTypes: PROC = {}; -- types are already copied, nothing need be done (current typeIds)
CopySpaces: PROC = {
  -- spaces are already copied, only map names (and segments?)
  MapOne: PROC[spi: SPIndex] RETURNS[BOOL ← FALSE] = {
    FOR i: CARDINAL IN [0..spb[spi].length) DO
      spb[spi].spaces[i].name ← bcd.MapName[spb[spi].spaces[i].name];
      ENDLOOP};
EnumerateSpaces[MapOne]};
EnumerateSpaces: PROC[p: PROC[SPIndex] RETURNS[BOOL]] = {
  spi: SPIndex ← SPIndex.FIRST;
  spLimit: SPIndex = table.Top[sptype];
  UNTIL spi = spLimit DO
    IF p[spi] THEN EXIT;
    spi ← spi + SPRecord.SIZE + spb[spi].length*SpaceID.SIZE;
    ENDLOOP};
CopyFramePacks: PROC = {
  -- framepacks are already copied, only map names
  MapOne: PROC[fpi: FPIndex] RETURNS[BOOL ← FALSE] = {
fpb[fpi].name ← bcd.MapName[fpb[fpi].name]};
EnumerateFramePacks[MapOne]};
EnumerateFramePacks: PROC[p: PROC[FPIndex] RETURNS[BOOL]] = {
  fpi: FPIndex ← FPIndex.FIRST;
  fpLimit: FPIndex = table.Top[fptype];
  UNTIL fpi = fpLimit DO
IF p[fpi] THEN RETURN;
fpi ← fpi + FPRecord.SIZE + fpb[fpi].length*MTIndex.SIZE;
ENDLOOP};
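-- CopyImport and CopyExport are Tree.Scan procedures; WriteBcd applies them to the import and export lists of the root node.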
CopyImport: Tree.Scan = {
OPEN Symbols;
sti: STIndex ← stNull;
olditi, iti: IMPIndex;
WITH t SELECT FROM
  symbol => sti ← index;
  subtree =>
    WITH s1: tb[index].son[1] SELECT FROM
      symbol => sti ← s1.index;
      ENDCASE => Error[];
  ENDCASE => Error[];
olditi ← stb[sti].impi;
IF sti = stNull OR olditi = IMPNull THEN RETURN;
iti ← bcd.EnterImport[olditi, TRUE];
itb[iti].file ← MapFile[itb[iti].file];
IF header.firstdummy = 0 THEN header.firstdummy ← itb[iti].gfi;
header.nImports ← header.nImports + 1;
header.nDummies ← header.nDummies + itb[iti].ngfi};
CopyExport: Tree.Scan = {
OPEN Symbols;
sti: STIndex ← stNull;
hti: HTIndex ← htNull;
neweti: EXPIndex;
oldeti: EXPIndex;
WITH t SELECT FROM
  symbol => sti ← index;
  subtree =>
    WITH s1: tb[index].son[1] SELECT FROM
      symbol => {sti ← s1.index; hti ← stb[sti].hti};
      ENDCASE => Error[];
  ENDCASE => Error[];
WITH s: stb[sti] SELECT FROM
  external =>
    WITH m: s.map SELECT FROM
      interface => {
        OPEN new: etb[neweti];
        oldeti ← m.expi;
        neweti ← bcd.EnterExport[oldeti, TRUE];
        Copy[from: @bcd.etb[oldeti].links, to: @new.links, nwords: new.size];
        new.file ← MapFile[new.file]};
      module => [] ← NewExportForModule[m.mti, htNull];
      ENDCASE => RETURN;
  ENDCASE => RETURN;
header.nExports ← header.nExports + 1};
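-- NewExportForModule: builds a one-link export record for a module, adding a name-table entry when a named instance is supplied.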
NewExportForModule: PROC[mti: MTIndex, name: Symbols.HTIndex] RETURNS[eti: EXPIndex] = {
  OPEN Symbols;
eti ← table.Words[exptype, EXPRecord.SIZE+1*Link.SIZE];
etb[eti] ← [
name: mtb[mti].name,
size: 1,
port: $module,
namedInstance: name # htNull,
typeExported: FALSE,
file: mtb[mti].file,
links: ];
etb[eti].links[0] ← [variable[vgfi: mtb[mti].gfi, var: 0, vtag: $var]];
IF name # htNull THEN {
nti: NTIndex = table.Words[nttype, NTRecord.SIZE];
ntb[nti] ← [name: BcdUtilDefs.NameForHti[name], item: [module[mti]]]};
RETURN};
ExportCx: PROC[cx: Symbols.CXIndex] = {
  OPEN Symbols;
  neweti, oldeti: EXPIndex;
  FOR sti: STIndex ← cxb[cx].link, stb[sti].link UNTIL sti = stNull DO {
    IF ~stb[sti].filename THEN
      WITH s: stb[sti] SELECT FROM
        external =>
          WITH m: s.map SELECT FROM
            interface => {
              OPEN old: bcd.etb[oldeti], new: etb[neweti];
              -- first make sure that old is not already exported
              existingEti: EXPIndex ← EXPIndex.FIRST;
              etLimit: EXPIndex = table.Top[exptype];
              oldeti ← m.expi;
              UNTIL existingEti = etLimit DO
                IF old = etb[existingEti] THEN GO TO AlreadyExported;
                existingEti ← existingEti + EXPRecord.SIZE + etb[existingEti].size;
                ENDLOOP;
              neweti ← bcd.EnterExport[oldeti, TRUE];
              Copy[from: @old.links, to: @new.links, nwords: new.size];
              new.file ← MapFile[new.file];
              header.nExports ← header.nExports + 1};
            ENDCASE;
        ENDCASE;
    EXITS AlreadyExported => NULL};
    ENDLOOP};
InitHeader: PROC[
header: BcdDefs.BcdBase,
objectVersion: VersionStamp,
source: NameRecord ← NullName,
sourceVersion: VersionStamp ← NullVersion] = {
Zero[header, BcdDefs.BCD.SIZE];
header.versionIdent ← BcdDefs.VersionID;
header.version ← objectVersion;
header.creator ← data.binderVersion;
header.definitions ← (data.op = $conc);
header.typeExported ← data.typeExported;
header.source ← source; header.sourceVersion ← sourceVersion;
header.repackaged ←
table.Bounds[sptype].size # 0 OR table.Bounds[fptype].size # 0;
header.tableCompiled ← FALSE;
header.spare1 ← TRUE};
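-- State for copying code and symbol segments: one Map per segment class, naming the destination file and its file-table index.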
codeMap, symbolMap: REF Map ← NIL;
Map: TYPE = RECORD[
fti: FTIndex,
type: SegClass,
filename: Rope.ROPE,
filehandle: FS.OpenFile];
InitCodeSymbolCopy: PROC = {
Setup: PROC[file: Rope.ROPE, type: SegClass] RETURNS[REF Map] = {
  RETURN[NEW[Map ← [
    type: type,
    filename: file,
    filehandle: nullFile,
    fti: IF file = NIL THEN FTSelf ELSE BcdUtilDefs.EnterFile[LOOPHOLE[file.Flatten[]]]]]]};
IF data.copyCode THEN codeMap ← Setup[data.codeName, $code];
IF data.copySymbols THEN symbolMap ← Setup[data.symbolName, $symbols]};
MapCodeSymbolFiles: PROC = {
IF data.copyCode THEN codeMap.fti ← MapFile[codeMap.fti];
IF data.copySymbols THEN symbolMap.fti ← MapFile[symbolMap.fti]};
ReleaseCodeSymbolCopy: PROC = {
IF codeMap # NIL THEN {
  codeMap.filehandle ← FSClose[codeMap.filehandle]; codeMap ← NIL};
IF symbolMap # NIL THEN {
  symbolMap.filehandle ← FSClose[symbolMap.filehandle]; symbolMap ← NIL};
};
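-- EnumerateSegments applies proc to each entry of the current segment table; EnumerateOldSegments walks the saved table via sgMap when packing.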
EnumerateSegments: PROC[proc: PROC[SGIndex]] = {
  sgLimit: SGIndex = table.Top[sgtype];
  FOR sgi: SGIndex ← SGIndex.FIRST, sgi + SGRecord.SIZE UNTIL sgi = sgLimit DO
    proc[sgi] ENDLOOP};
EnumerateOldSegments: PROC[proc: PROC[SGIndex]] = {
IF ~packing THEN EnumerateSegments[proc]
ELSE FOR i: NAT IN [0..sgMap.length) DO proc[SgiForIndex[i]] ENDLOOP};
MapSegments: PROC[type: SegClass] = {
  CopySegment: PROC[sgi: SGIndex] = {
IF sgb[sgi].class = type THEN sgb[sgi].file ← MapFile[sgb[sgi].file]};
EnumerateSegments[CopySegment]};
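-- InitFile: opens the copy file described by p, writes its BCD header, and returns the stream together with the first free page number.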
InitFile: PROC[p: REF Map, copiedPages: CARDINAL]
  RETURNS[stream: IO.STREAM, page: CARDINAL] = {
lh: BcdDefs.BCD;
bcdPages: CARDINAL = PagesForWords[BcdDefs.BCD.SIZE];
version: VersionStamp = BumpVersion[
OSMiscOps.StampToTime[data.objectStamp], (IF p.type=$code THEN 1 ELSE 2)];
BcdUtilDefs.SetFileVersion[p.fti, version];
p.filehandle ← FS.Open[p.filename];
p.filehandle.SetPageCount[bcdPages + copiedPages];
stream ← (p.filehandle).StreamFromOpenFile[$write];
InitHeader[header: @lh, objectVersion: version];
lh.version ← ftb[p.fti].version;
stream.UnsafePutBlock[[LOOPHOLE[(@lh).LONG, LONG POINTER TO Basics.RawBytes], 0, BcdDefs.BCD.SIZE*bytesPerWord]];
page ← bcdPages + 1};
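-- BumpVersion gives the code and symbol copy files version stamps distinct from the object stamp (offsets 1 and 2, per InitFile above).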
BumpVersion: PROC[v: VersionStamp, n: CARDINAL] RETURNS[VersionStamp] = {
v.time ← v.time + n; RETURN[v]};
MoveToPageBoundary: PROC[stream: IO.STREAM, page: CARDINAL] = {
  -- ... moves the index to the given page boundary.  However, if the size of the file will not permit this, extend the file via SetLength to allow SetIndex to succeed.
pos: INT = Basics.LongMult[page, VM.bytesPerPage];
IF pos > stream.GetLength THEN stream.SetLength[pos];
stream.SetIndex[pos]};