DIRECTORY
Alloc:
TYPE
USING [
AddNotify, Bounds, DropNotify, Handle, Notifier, Top, Trim, Words],
Ascii: TYPE USING [Lower],
Basics: TYPE USING [bytesPerWord, LongMult],
BcdComData:
TYPE
USING [
aborted, binderVersion, codeName, copyCode, copySymbols, nConfigs, nExports, nImports, nModules, nPages, objectStamp, op, outputFile, sourceName, sourceVersion, symbolName, table, typeExported, textIndex],
BcdControlDefs: TYPE USING [],
BcdDefs:
TYPE
USING [
BCD, BcdBase, ControlItem, CTIndex, CTRecord, cttype, cxtype, EVIndex, evtype, EXPIndex, EXPRecord, exptype, FPIndex, FPRecord, fptype, FTIndex, FTNull, FTRecord, FTSelf, fttype, IMPIndex, IMPNull, imptype, LFNull, lftype, Link, MTIndex, MTNull, MTRecord, mttype, Namee, NameRecord, NameString, NTIndex, NTRecord, nttype, NullName, NullVersion, PackedString, rftype, SegClass, SGIndex, SGNull, SGRecord, sgtype, SpaceID, SPIndex, SPRecord, sptype, sstype, sttype, tftype, tmtype, treetype, typtype, VersionID, VersionStamp],
BcdErrorDefs: TYPE USING [ErrorHti, ErrorName, ErrorNameBase, GetSti],
BcdLiterals:
TYPE
USING [
EnterVersionFiles, LitSegSize, UpdateSegments, WriteLiterals],
BcdUtilDefs:
TYPE
USING [
BcdBases, ContextForTree, EnterExport, EnterFile, EnterImport, EnterName, GetGfi, MapName, MergeFile, NameForHti, SetFileVersion, BcdBasePtr],
ConvertUnsafe: TYPE USING [SubString, SubStringToRope],
ExecOps: TYPE USING [CheckForAbort],
FileSegment: TYPE USING [Pages, nullPages],
FS:
TYPE
USING [
Close, Error, nullOpenFile, Open, OpenFile, Read, SetPageCount, StreamFromOpenFile],
HashOps: TYPE USING [Reset],
IO: TYPE USING [Close, GetIndex, GetLength, SetIndex, SetLength, STREAM, UnsafePutBlock],
OSMiscOps: TYPE USING [StampToTime],
PrincOps: TYPE USING [bytesPerPage, wordsPerPage],
PrincOpsUtils: TYPE USING [LongCOPY],
Rope: TYPE USING [Concat, Flatten, Length, Text, ROPE],
Symbols: TYPE USING [CXIndex, HTIndex, htNull, STIndex, stNull],
Table: TYPE USING [Base, Selector],
Tree: TYPE USING [Index, Link, Scan, null],
TreeOps: TYPE USING [GetNode, ListLength, ScanList],
VM: TYPE USING [AddressForPageNumber, Allocate, Free, Interval];
BcdWrite:
PROGRAM
IMPORTS
Alloc, Ascii, Basics, BcdErrorDefs, BcdLiterals, BcdUtilDefs, ConvertUnsafe,
--ExecOps,-- FS, IO, HashOps, OSMiscOps, PrincOpsUtils, Rope, TreeOps, VM,
data: BcdComData
EXPORTS BcdControlDefs
SHARES Rope = {
OPEN BcdDefs;
bytesPerWord: CARDINAL = Basics.bytesPerWord;
nullFile: FS.OpenFile = FS.nullOpenFile;
nullPages: FileSegment.Pages = FileSegment.nullPages;
Alignment: CARDINAL = 4; -- Code Segments must start at 0 MOD Alignment
BcdWriteError: PUBLIC ERROR = CODE;
Error: PROC = {ERROR BcdWriteError};
UserAbort: ERROR = CODE; -- raised on ^DEL during code or symbol copying
table: Alloc.Handle;
tb, stb, ctb, mtb, lfb, etb, itb, sgb, tyb, tmb, ftb, ntb, spb, fpb, cxb: Table.Base;
ssb: BcdDefs.NameString;
-- Alloc table-relocation notifier: re-caches every table base pointer whenever
-- the table package moves its storage, and refreshes the copies held in the
-- saved bcd record so stale pointers are never dereferenced.
Notifier: Alloc.Notifier = {
tb ← base[treetype]; stb ← base[sttype];
ctb ← base[cttype]; mtb ← base[mttype]; lfb ← base[lftype];
tyb ← base[typtype]; tmb ← base[tmtype];
etb ← base[exptype]; itb ← base[imptype];
sgb ← base[sgtype]; ftb ← base[fttype];
spb ← base[sptype]; fpb ← base[fptype];
ntb ← base[nttype]; ssb ← base[sstype];
cxb ← base[cxtype];
IF bcd #
NIL
THEN {
bcd.ctb ← ctb; bcd.mtb ← mtb;
-- when packing, bcd.sgb holds a private heap copy (see Initialize) and must not be overwritten
IF ~packing THEN bcd.sgb ← sgb;
bcd.tyb ← tyb; bcd.tmb ← tmb; bcd.spb ← spb; bcd.fpb ← fpb}};
-- inline utilities --
-- Copy: raw word-aligned block copy; just a renamed binding of LongCOPY.
Copy:
PROC [from:
LONG
POINTER, nwords:
CARDINAL, to:
LONG
POINTER] =
PrincOpsUtils.LongCOPY;
-- Zero: clears l words starting at p by storing one zero and then copying
-- the block forward over itself one word ahead (overlapping-copy fill idiom).
Zero:
PROC [p:
LONG
POINTER, l:
CARDINAL] =
INLINE {
IF l # 0 THEN {p^ ← 0; PrincOpsUtils.LongCOPY[from: p, to: (p+1), nwords: (l-1)]}};
-- PagesForWords: number of whole pages needed to hold nWords (rounds up; 0 -> 0).
PagesForWords:
PROC [nWords:
CARDINAL]
RETURNS [
CARDINAL] =
INLINE {
RETURN [(nWords + PrincOps.wordsPerPage-1)/PrincOps.wordsPerPage]};
-- PutWord: writes one word to the stream as raw bytes via UnsafePutBlock.
PutWord:
PROC [stream:
IO.
STREAM, word:
WORD] = {
stream.UnsafePutBlock[[
LOOPHOLE[LONG[@word], LONG POINTER TO PACKED ARRAY [0..0) OF CHAR],
0,
SIZE[WORD]*Basics.bytesPerWord]];
};
-- Global state for the BCD under construction.
bcd: BcdUtilDefs.BcdBasePtr ← NIL; -- saved copies of the pre-output tables
header: BcdDefs.BcdBase; -- points to Bcd header and saved tables
headerInterval: VM.Interval; -- VM backing for header; freed in Finalize
-- WriteBcd: top-level driver. Walks the binder's parse tree (root), maps
-- names/files of configs, modules, spaces, frame packs, imports and exports
-- into the output BCD, optionally packs code, then writes everything out.
WriteBcd:
PUBLIC
PROC [root: Tree.Link] = {
saveIndex: CARDINAL = data.textIndex;
node, subNode: Tree.Index;
table ← data.table; table.AddNotify[Notifier];
node ← TreeOps.GetNode[root];
-- son[2] is the PACK list; packing only makes sense when code is copied
packing ← (tb[node].son[2] # Tree.null AND data.copyCode);
Initialize[];
IF packing
THEN {
MakePackItem[tb[node].son[2]];
data.textIndex ← saveIndex;
FillInSgMap[]};
CopyConfigs[];
CopyModules[];
CopyTypes[];
CopySpaces[]; CopyFramePacks[];
-- son[3] holds the import/export lists
subNode ← TreeOps.GetNode[tb[node].son[3]];
data.textIndex ← tb[subNode].info;
TreeOps.ScanList[tb[subNode].son[1], CopyImport];
TreeOps.ScanList[tb[subNode].son[2], CopyExport];
IF tb[subNode].attrs[$exportsALL]
THEN
ExportCx[BcdUtilDefs.ContextForTree[tb[subNode].son[4]]];
IF data.copySymbols THEN EnterMissingSymbolFiles[];
BcdLiterals.EnterVersionFiles[bcd.ftb, FtiForIndex[fileMap.length], MapFile];
TableOut[];
CloseOutputFile[];
Finalize[]; data.textIndex ← saveIndex;
table.DropNotify[Notifier]; table ← NIL};
-- Initialize: snapshots the export/import/file/name/string (and, when packing,
-- segment) tables into a VM block hung off `header`, trims the originals to
-- empty, and primes the bcd record with fresh table bases. The Copy/Trim
-- sequence is order-critical: each base is captured before the next copy.
Initialize:
PROC = {
impSize, expSize, sgSize, fSize, nSize, ssSize: CARDINAL;
nSgis: CARDINAL;
b: Table.Base;
desc: ConvertUnsafe.SubString;
desc.base ← LOOPHOLE[Rope.Flatten[data.sourceName]];
desc.offset ← 0;
desc.length ← data.sourceName.Length[];
IF data.copyCode OR data.copySymbols THEN InitCodeSymbolCopy[];
impSize ← table.Bounds[imptype].size;
expSize ← table.Bounds[exptype].size;
sgSize ← table.Bounds[sgtype].size;
nSgis ← sgSize/SGRecord.SIZE;
-- the segment table is only saved when packing; otherwise no room is reserved
IF ~packing THEN sgSize ← 0;
fSize ← table.Bounds[fttype].size;
nSize ← table.Bounds[nttype].size;
ssSize ← table.Bounds[sstype].size;
bcd ← NEW[BcdUtilDefs.BcdBases];
fileMap ← NEW[FileMap[fSize/FTRecord.SIZE]];
FOR i: CARDINAL IN [0..fileMap.length) DO fileMap[i] ← FTNull ENDLOOP;
headerInterval ←
VM.Allocate[PagesForWords[
BCD.SIZE + impSize + expSize + sgSize + fSize + nSize + ssSize]];
header ← VM.AddressForPageNumber[headerInterval.page];
b ← (LOOPHOLE[header, Table.Base] + BCD.SIZE);
Copy[to: (bcd.etb ← b), from: etb, nwords: expSize];
b ← b + expSize; table.Trim[exptype,0];
Copy[to: (bcd.itb ← b), from: itb, nwords: impSize];
b ← b + impSize; table.Trim[imptype,0];
Copy[to: (bcd.ftb ← b), from: ftb, nwords: fSize];
b ← b + fSize; table.Trim[fttype,0];
Copy[to: (bcd.ntb ← b), from: ntb, nwords: nSize];
b ← b + nSize; table.Trim[nttype,0];
Copy[to: (bcd.ssb ← b), from: ssb, nwords: ssSize];
b ← b + ssSize;
HashOps.Reset[];
IF packing
THEN {
-- save old segment table in heap
Copy[to: (bcd.sgb ← b), from: sgb, nwords: sgSize];
b ← b + sgSize; table.Trim[sgtype,0]};
InitHeader[
header: header,
objectVersion: OSMiscOps.StampToTime[data.objectStamp],
source: BcdUtilDefs.EnterName[desc],
sourceVersion: data.sourceVersion];
bcd.ctb ← table.Bounds[cttype].base;
bcd.mtb ← table.Bounds[mttype].base;
bcd.tyb ← table.Bounds[typtype].base;
bcd.tmb ← table.Bounds[tmtype].base;
bcd.spb ← table.Bounds[sptype].base;
bcd.fpb ← table.Bounds[fptype].base;
IF data.copyCode OR data.copySymbols THEN {MapCodeSymbolFiles[]; InitCopyMap[nSgis]};
IF packing THEN InitSgMap[nSgis]
ELSE {
bcd.sgb ← table.Bounds[sgtype].base;
IF ~data.copyCode THEN MapSegments[$code];
IF ~data.copySymbols THEN MapSegments[$symbols]}};
-- Finalize: releases everything Initialize and the copy phase allocated
-- (file handles, the saved-table VM block, pack items, and the maps).
Finalize:
PROC = {
IF data.copyCode OR data.copySymbols THEN ReleaseCodeSymbolCopy[];
fileMap ← NIL; bcd ← NIL;
VM.Free[headerInterval];
FreePackItems[];
IF packing THEN FreeSgMap[];
IF data.copyCode OR data.copySymbols THEN FreeCopyMap[]};
-- BCD (re)construction --
-- CopyName: allocates a new name-table entry and fills it from the saved
-- (old) name table entry whose item matches olditem, mapping the name string.
-- NOTE(review): the scan has no explicit table bound — it assumes a matching
-- entry always exists in bcd.ntb; confirm callers only pass named items.
CopyName:
PROC [olditem, newitem: Namee] = {
newNti: NTIndex = table.Words[nttype, NTRecord.SIZE];
FOR nti: NTIndex ← NTIndex.
FIRST, nti+NTRecord.
SIZE
DO
OPEN old: bcd.ntb[nti];
IF old.item = olditem
THEN {
OPEN new: ntb[newNti];
new.item ← newitem; new.name ← bcd.MapName[old.name];
RETURN};
ENDLOOP};
-- CopyConfigs: walks the config table in place, counting configs and mapping
-- each name and file through the saved tables; named instances also get a
-- name-table entry. (Restored the lost `--` marker on the original comment.)
CopyConfigs:
PROC = {
-- configs are already copied, only map names and files
cti: CTIndex ← CTIndex.FIRST;
ctLimit: CTIndex = table.Top[cttype];
UNTIL cti = ctLimit
DO
header.nConfigs ← header.nConfigs + 1;
ctb[cti].name ← bcd.MapName[ctb[cti].name];
ctb[cti].file ← MapFile[ctb[cti].file];
IF ctb[cti].namedInstance THEN CopyName[[config[cti]], [config[cti]]];
-- config records are variable-length: skip the trailing control items
cti ← cti + (CTRecord.SIZE + ctb[cti].nControls*ControlItem.SIZE);
ENDLOOP};
-- CopyModules: maps every module's name and file in place and counts them;
-- named instances also get a name-table entry.
-- (Restored the lost `--` marker on the original comment.)
CopyModules:
PROC = {
-- modules are already copied, only map names and files
MapOne:
PROC [mti: MTIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN m: mtb[mti];
header.nModules ← header.nModules + 1;
m.name ← bcd.MapName[m.name];
m.file ← MapFile[m.file];
IF m.namedInstance THEN CopyName[[module[mti]], [module[mti]]]};
EnumerateModules[MapOne]};
-- EnumerateModules: applies p to each module-table entry until p returns TRUE
-- or the table is exhausted; steps by each record's variant-dependent size.
EnumerateModules:
PROC [p:
PROC [MTIndex]
RETURNS [
BOOL]] = {
mti: MTIndex ← MTIndex.FIRST;
mtLimit: MTIndex = table.Top[mttype];
UNTIL mti = mtLimit
DO
IF p[mti] THEN EXIT;
mti ← mti + (
WITH m: mtb[mti]
SELECT
FROM
direct => MTRecord.direct.SIZE + m.length*Link.SIZE,
indirect => MTRecord.indirect.SIZE,
multiple => MTRecord.multiple.SIZE,
ENDCASE => ERROR);
ENDLOOP};
CopyTypes: PROC = {}; -- types are already copied, nothing need be done (current typeIds)
-- CopySpaces: maps the name of every space in every space record in place.
-- (Restored the lost `--` marker on the original comment.)
CopySpaces:
PROC = {
-- spaces are already copied, only map names (and segments?)
MapOne:
PROC [spi: SPIndex]
RETURNS [
BOOL ←
FALSE] = {
FOR i:
CARDINAL
IN [0..spb[spi].length)
DO
spb[spi].spaces[i].name ← bcd.MapName[spb[spi].spaces[i].name];
ENDLOOP};
EnumerateSpaces[MapOne]};
-- EnumerateSpaces: applies p to each space-table record until p returns TRUE;
-- records are variable-length (header plus `length` SpaceIDs).
EnumerateSpaces:
PROC [p:
PROC [SPIndex]
RETURNS [
BOOL]] = {
spi: SPIndex ← SPIndex.FIRST;
spLimit: SPIndex = table.Top[sptype];
UNTIL spi = spLimit
DO
IF p[spi] THEN EXIT;
spi ← spi + SPRecord.SIZE + spb[spi].length*SpaceID.SIZE;
ENDLOOP};
-- CopyFramePacks: maps each frame pack's name in place.
-- (Restored the lost `--` marker on the original comment.)
CopyFramePacks:
PROC = {
-- framepacks are already copied, only map names
MapOne:
PROC [fpi: FPIndex]
RETURNS [
BOOL ←
FALSE] = {
fpb[fpi].name ← bcd.MapName[fpb[fpi].name]};
EnumerateFramePacks[MapOne]};
-- EnumerateFramePacks: applies p to each frame-pack record until p returns
-- TRUE (note: uses RETURN where the sibling enumerators use EXIT — same effect).
EnumerateFramePacks:
PROC [p:
PROC [FPIndex]
RETURNS [
BOOL]] = {
fpi: FPIndex ← FPIndex.FIRST;
fpLimit: FPIndex = table.Top[fptype];
UNTIL fpi = fpLimit
DO
IF p[fpi] THEN RETURN;
fpi ← fpi + FPRecord.SIZE + fpb[fpi].length*MTIndex.SIZE;
ENDLOOP};
-- CopyImport: Tree.Scan over the import list. Resolves the symbol-table entry
-- for each import, enters it in the output import table, maps its file, and
-- accounts for gfi/dummy-gfi usage in the header.
-- FIX: the original read stb[sti].impi BEFORE checking sti = stNull, i.e. it
-- dereferenced the null symbol index; the guard now precedes the read.
CopyImport: Tree.Scan = {
OPEN Symbols;
sti: STIndex ← stNull;
olditi, iti: IMPIndex;
WITH t
SELECT
FROM
symbol => sti ← index;
subtree =>
WITH s1:tb[index].son[1]
SELECT
FROM
symbol => sti ← s1.index;
ENDCASE => Error[];
ENDCASE => Error[];
IF sti = stNull THEN RETURN;
olditi ← stb[sti].impi;
IF olditi = IMPNull THEN RETURN;
iti ← bcd.EnterImport[olditi, TRUE];
itb[iti].file ← MapFile[itb[iti].file];
IF header.firstdummy = 0 THEN header.firstdummy ← itb[iti].gfi;
header.nImports ← header.nImports + 1;
header.nDummies ← header.nDummies + itb[iti].ngfi};
-- CopyExport: Tree.Scan over the export list. Interface exports are copied
-- from the saved export table (links and all) into the output table; module
-- exports get a fresh single-link entry via NewExportForModule.
-- NOTE(review): `OPEN new: etb[neweti]` appears before neweti is assigned —
-- this relies on Mesa OPEN re-evaluating the indexing expression at each use
-- (the same idiom appears in ExportCx); confirm against the language manual.
CopyExport: Tree.Scan = {
OPEN Symbols;
sti: STIndex ← stNull;
hti: HTIndex ← htNull;
neweti: EXPIndex;
oldeti: EXPIndex;
WITH t
SELECT
FROM
symbol => sti ← index;
subtree =>
WITH s1:tb[index].son[1]
SELECT
FROM
symbol => {sti ← s1.index; hti ← stb[sti].hti};
ENDCASE => Error[];
ENDCASE => Error[];
WITH s:stb[sti]
SELECT
FROM
external =>
WITH m:s.map
SELECT
FROM
interface => {
OPEN new: etb[neweti];
oldeti ← m.expi;
neweti ← bcd.EnterExport[oldeti, TRUE];
Copy[from: @bcd.etb[oldeti].links, to: @new.links, nwords: new.size];
new.file ← MapFile[new.file]};
module => [] ← NewExportForModule[m.mti, htNull];
ENDCASE => RETURN;
ENDCASE => RETURN;
header.nExports ← header.nExports + 1};
-- NewExportForModule: builds a one-link export entry for a module (the link
-- is the module's global-frame variable); when a name is supplied, also adds
-- a name-table entry tying the name to the module.
NewExportForModule:
PROC [mti: MTIndex, name: Symbols.HTIndex]
RETURNS [eti: EXPIndex] = {
OPEN Symbols;
eti ← table.Words[exptype, EXPRecord.SIZE+1*Link.SIZE];
etb[eti] ← [
name: mtb[mti].name,
size: 1,
port: $module,
namedInstance: name # htNull,
typeExported: FALSE,
file: mtb[mti].file,
links: ];
etb[eti].links[0] ← [variable[vgfi: mtb[mti].gfi, var: 0, vtag: $var]];
IF name # htNull
THEN {
nti: NTIndex = table.Words[nttype, NTRecord.SIZE];
ntb[nti] ← [name: BcdUtilDefs.NameForHti[name], item: [module[mti]]]};
RETURN};
-- ExportCx: exports every interface bound in context cx (the EXPORTS ALL
-- case), skipping filenames and anything already present in the output
-- export table. (Restored the lost `--` marker on the original comment.)
ExportCx:
PROC [cx: Symbols.CXIndex] = {
OPEN Symbols;
neweti, oldeti: EXPIndex;
FOR sti: STIndex ← cxb[cx].link, stb[sti].link
UNTIL sti = stNull
DO {
IF ~stb[sti].filename
THEN
WITH s: stb[sti]
SELECT
FROM
external =>
WITH m: s.map
SELECT
FROM
interface => {
OPEN old: bcd.etb[oldeti], new: etb[neweti];
-- first make sure that old is not already exported
existingEti: EXPIndex ← EXPIndex.FIRST;
etLimit: EXPIndex = table.Top[exptype];
oldeti ← m.expi;
UNTIL existingEti = etLimit
DO
IF old = etb[existingEti] THEN GO TO AlreadyExported;
existingEti ← existingEti + EXPRecord.SIZE+etb[existingEti].size;
ENDLOOP;
neweti ← bcd.EnterExport[oldeti, TRUE];
Copy[from: @old.links, to: @new.links, nwords: new.size];
new.file ← MapFile[new.file];
header.nExports ← header.nExports + 1};
ENDCASE;
ENDCASE;
EXITS
AlreadyExported => NULL};
ENDLOOP};
-- file mapping --
-- fileMap: per old-file-table-entry memo of its index in the output file table.
FileMap: TYPE = RECORD [SEQUENCE length: CARDINAL OF FTIndex];
fileMap: REF FileMap ← NIL;
-- IndexForFti / FtiForIndex: convert between an FTIndex (a word offset into
-- the file table) and a dense 0-based record index.
IndexForFti:
PROC [fti: FTIndex]
RETURNS [
CARDINAL] =
INLINE {
RETURN [LOOPHOLE[fti,CARDINAL]/FTRecord.SIZE]};
FtiForIndex:
PROC [n:
CARDINAL]
RETURNS [FTIndex] =
INLINE {
RETURN [FTIndex.FIRST + n*FTRecord.SIZE]};
-- MapFile: maps an old file-table index into the output file table, merging
-- the file on first use and memoizing the result; FTNull/FTSelf map to
-- themselves.
MapFile:
PROC [fti: FTIndex]
RETURNS [FTIndex] = {
SELECT fti
FROM
FTNull, FTSelf => RETURN [fti];
ENDCASE => {
fileIndex: CARDINAL = IndexForFti[fti];
IF fileMap[fileIndex] = FTNull THEN fileMap[fileIndex] ← bcd.MergeFile[fti];
RETURN [fileMap[fileIndex]]}};
-- header processing --
-- InitHeader: zeroes a BCD header and fills in the version/source fields;
-- used both for the main output header and for per-file stub headers.
InitHeader:
PROC [
header: BcdDefs.BcdBase,
objectVersion: VersionStamp,
source: NameRecord ← NullName,
sourceVersion: VersionStamp ← NullVersion] = {
Zero[header, BcdDefs.BCD.SIZE];
header.versionIdent ← BcdDefs.VersionID;
header.version ← objectVersion;
header.creator ← data.binderVersion;
header.definitions ← (data.op = $conc);
header.typeExported ← data.typeExported;
header.source ← source; header.sourceVersion ← sourceVersion;
header.repackaged ←
table.Bounds[sptype].size # 0 OR table.Bounds[fptype].size # 0;
header.tableCompiled ← FALSE;
header.spare1 ← TRUE};
-- Per-destination bookkeeping for copied code and symbol segments.
codeMap, symbolMap: REF Map ← NIL;
Map:
TYPE =
RECORD [
fti: FTIndex, -- FTSelf when copying into the output BCD itself
type: SegClass,
filename: Rope.ROPE,
filehandle: FS.OpenFile];
-- InitCodeSymbolCopy: builds the code/symbol Map records; a NIL file name
-- means "copy into the output BCD" (FTSelf), otherwise the named file is
-- entered in the file table.
InitCodeSymbolCopy: PROC = {
Setup:
PROC [file: Rope.
ROPE, type: SegClass]
RETURNS [
REF Map] = {
RETURN [
NEW[Map ← [
type: type,
filename: file,
filehandle: nullFile,
fti:
IF file =
NIL
THEN FTSelf
ELSE BcdUtilDefs.EnterFile[LOOPHOLE[Rope.Flatten[file]]]]]]};
IF data.copyCode THEN codeMap ← Setup[data.codeName, $code];
IF data.copySymbols THEN symbolMap ← Setup[data.symbolName, $symbols]};
-- MapCodeSymbolFiles: maps the code/symbol destination files into the output
-- file table (no-op for FTSelf, handled inside MapFile).
MapCodeSymbolFiles:
PROC = {
IF data.copyCode THEN codeMap.fti ← MapFile[codeMap.fti];
IF data.copySymbols THEN symbolMap.fti ← MapFile[symbolMap.fti]};
-- ReleaseCodeSymbolCopy: closes any open destination files and drops the maps.
ReleaseCodeSymbolCopy: PROC = {
IF codeMap #
NIL
THEN {
codeMap.filehandle ← FSClose[codeMap.filehandle]; codeMap ← NIL};
IF symbolMap #
NIL
THEN {
symbolMap.filehandle ← FSClose[symbolMap.filehandle]; symbolMap ← NIL};
};
-- EnumerateSegments: applies proc to every entry of the current segment table.
EnumerateSegments:
PROC [proc:
PROC[SGIndex]] = {
sgLimit: SGIndex = table.Top[sgtype];
FOR sgi: SGIndex ← SGIndex.
FIRST, sgi + SGRecord.
SIZE
UNTIL sgi = sgLimit
DO
proc[sgi] ENDLOOP};
-- EnumerateOldSegments: same, but when packing the "old" table lives in the
-- saved heap copy, so iterate by sgMap index instead.
EnumerateOldSegments:
PROC [proc:
PROC[SGIndex]] = {
IF ~packing THEN EnumerateSegments[proc]
ELSE FOR i: NAT IN [0..sgMap.length) DO proc[SgiForIndex[i]] ENDLOOP};
-- MapSegments: maps the file of every segment of the given class in place;
-- used only when that class is NOT being copied.
MapSegments:
PROC [type: SegClass] = {
CopySegment:
PROC [sgi: SGIndex] = {
IF sgb[sgi].class = type
THEN
sgb[sgi].file ← MapFile[sgb[sgi].file]};
EnumerateSegments[CopySegment]};
-- InitFile: creates/opens a separate code or symbol destination file, sizes
-- it, writes a stub BCD header carrying a bumped version stamp, and returns
-- a write stream plus the first free page (page 1 is the header page).
InitFile:
PROC [p:
REF Map, copiedPages:
CARDINAL]
RETURNS [stream: IO.STREAM, page: CARDINAL] = {
lh: BcdDefs.BCD;
bcdPages: CARDINAL = PagesForWords[BcdDefs.BCD.SIZE];
-- code and symbol files get distinct versions (+1 / +2) so they never collide
version: VersionStamp = BumpVersion[
OSMiscOps.StampToTime[data.objectStamp], (IF p.type=$code THEN 1 ELSE 2)];
BcdUtilDefs.SetFileVersion[p.fti, version];
p.filehandle ← FS.Open[p.filename];
p.filehandle.SetPageCount[bcdPages + copiedPages];
stream ← FS.StreamFromOpenFile[p.filehandle, $write];
InitHeader[header: @lh, objectVersion: version];
lh.version ← ftb[p.fti].version;
stream.UnsafePutBlock[[LOOPHOLE[LONG[@lh], LONG POINTER TO PACKED ARRAY [0..0) OF CHAR], 0, BcdDefs.BCD.SIZE*bytesPerWord]];
page ← bcdPages + 1;
};
-- BumpVersion: returns v with its time field advanced by n.
BumpVersion:
PROC [v: VersionStamp, n:
CARDINAL]
RETURNS [VersionStamp] = {
v.time ← v.time + n; RETURN [v]};
MoveToPageBoundary:
PROC [stream:
IO.
STREAM, page:
CARDINAL] = {
-- ... moves the index to the given page boundary. However, if the size of the file will not permit this, extend the file via SetLength to allow SetIndex to succeed.
-- (Restored the lost `--` marker on the original doc comment.)
pos: INT = Basics.LongMult[page, PrincOps.bytesPerPage];
IF pos > IO.GetLength[stream] THEN IO.SetLength[stream, pos];
IO.SetIndex[stream, pos];
};
-- Code Packing --
-- Code-packing work list: one PackItem per PACK clause, listing the modules
-- whose code is merged into one new segment (newsgi).
PackHandle: TYPE = REF PackItem;
PackItem:
TYPE =
RECORD [
link: PackHandle,
newsgi: SGIndex, -- in the new table
item: SEQUENCE count: CARDINAL OF MTIndex];
packing: BOOL; -- TRUE iff a PACK list was given AND code is being copied
phHead, phTail: PackHandle ← NIL; -- singly linked queue of pack items
-- MakePackItem: Tree.Scan over a PACK clause. For a list of lists it recurses;
-- for a flat list it allocates a new segment record plus a PackItem naming the
-- modules, and records old-sgi -> new-sgi in the pack map, reporting modules
-- that cannot be packed. (Restored the lost `--` marker on the original comment.)
MakePackItem: Tree.Scan = {
-- t is Tree.null, a list of ids, or a list of lists of ids
ph: PackHandle ← NIL;
i, nsons: CARDINAL;
Item: Tree.Scan = {
itemId: Symbols.HTIndex;
WITH t
SELECT
FROM
symbol => {
itemId ← stb[index].hti;
WITH stb[index]
SELECT
FROM
external =>
WITH m: map
SELECT
FROM
module => {
ph.item[i] ← m.mti;
SELECT
TRUE
FROM
~mtb[m.mti].packageable =>
BcdErrorDefs.ErrorNameBase[
$error, "is packaged and cannot be PACKed"L,
mtb[m.mti].name, bcd.ssb];
(ReadSgMap[mtb[m.mti].code.sgi] # SGNull) =>
BcdErrorDefs.ErrorNameBase[
$error, "cannot be PACKed twice"L,
mtb[m.mti].name, bcd.ssb];
ENDCASE => SetSgMap[old: mtb[m.mti].code.sgi, new: ph.newsgi]};
ENDCASE => GOTO cant;
ENDCASE => GOTO cant;
EXITS
cant =>
BcdErrorDefs.ErrorHti[$error, "cannot be PACKed"L, itemId
! BcdErrorDefs.GetSti => {RESUME [errorSti: Symbols.stNull]}]};
ENDCASE;
i ← i+1};
IF t = Tree.null THEN RETURN;
WITH t
SELECT
FROM
subtree => {
OPEN tt: tb[index];
IF tt.name # $list THEN Error[];
data.textIndex ← tt.info;
-- a list of lists: recurse once per sublist
IF tt.son[1].tag = $subtree THEN {TreeOps.ScanList[t,MakePackItem]; RETURN}};
ENDCASE;
nsons ← TreeOps.ListLength[t];
ph ←
NEW[PackItem[nsons] ← [
link: NIL, newsgi: table.Words[sgtype, SGRecord.SIZE], item:]];
FOR j: CARDINAL IN [0..nsons) DO ph.item[j] ← MTNull ENDLOOP;
sgb[ph.newsgi] ← [class: $code, file: codeMap.fti, base:0, pages:0, extraPages:0];
i ← 0; TreeOps.ScanList[t, Item];
IF phTail =
NIL
THEN phHead ← phTail ← ph
ELSE {phTail.link ← ph; phTail ← ph}};
-- FreePackItems: drops the pack-item queue (storage reclaimed by the collector).
FreePackItems:
PROC = {
phHead ← phTail ← NIL};
-- WriteFromPages: streams `words` words from a file segment to `stream`,
-- staging through a bounded (<=16 page) VM buffer; frees the buffer on UNWIND.
WriteFromPages:
PROC [
stream: IO.STREAM, pages: FileSegment.Pages, words: CARDINAL] = {
bufferPages: CARDINAL = MIN[pages.span.pages, 16];
bufferInterval: VM.Interval ← VM.Allocate[bufferPages];
{
ENABLE
UNWIND =>
VM.Free[bufferInterval];
base: CARDINAL ← pages.span.base;
pointer: LONG POINTER ← VM.AddressForPageNumber[bufferInterval.page];
WHILE words # 0
DO
wordsToTransfer: CARDINAL = MIN[words, bufferPages*PrincOps.wordsPerPage];
pagesToTransfer: CARDINAL = PagesForWords[wordsToTransfer];
FS.Read[file: [pages.file], from: base, nPages: pagesToTransfer, to: pointer];
stream.UnsafePutBlock[[pointer, 0, wordsToTransfer*bytesPerWord]];
base ← (base + pagesToTransfer);
words ← (words - wordsToTransfer);
ENDLOOP;
};
VM.Free[bufferInterval];
};
-- PackCodeSegments: writes each PackItem's modules contiguously into its new
-- segment, fixing up per-module code offsets (FixUpOneModule) and padding to
-- the code-segment Alignment between modules; returns the next free page.
-- FIX: the loop body contained `GOTO ignore` with no matching EXITS clause and
-- an unclosed block (extraction damage) — restored `EXITS ignore => NULL};`
-- mirroring the identical idiom in ExportCx.
PackCodeSegments:
PROC [out:
IO.
STREAM, startpage:
CARDINAL]
RETURNS [nextpage: CARDINAL] = {
offset, validlength: CARDINAL;
oldsgi: SGIndex;
file: FS.OpenFile;
pages: FileSegment.Pages;
-- FixUpOneModule: for every module still pointing at oldsgi, rebase its code
-- offset into the packed segment and track the highest word used (validlength).
FixUpOneModule:
PROC [mti: MTIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN module: mtb[mti];
length: CARDINAL;
IF module.code.sgi = oldsgi
THEN {
length ← module.code.offset + module.code.length/2;
module.code.offset ← module.code.offset + offset;
module.code.packed ← TRUE;
IF length > validlength THEN validlength ← length};
RETURN};
nextpage ← startpage;
FOR ph: PackHandle ← phHead, ph.link
UNTIL ph =
NIL
DO
MoveToPageBoundary[stream: out, page: (nextpage-1)];
offset ← 0;
sgb[ph.newsgi].base ← nextpage;
FOR pi:
CARDINAL
IN [0..ph.count)
DO {
mti: MTIndex = ph.item[pi];
IF mtb[mti].linkLoc = $code
AND ~mtb[mti].code.linkspace
THEN {
offset ← (AddLinksToCodeSegment[out, mti, offset, TRUE] + offset);
GOTO ignore};
oldsgi ← mtb[mti].code.sgi;
[file, pages] ← SegmentForOldCodeSgi[oldsgi];
IF file = nullFile THEN GOTO ignore;
-- pad with zero words up to the next Alignment boundary
IF (offset
MOD Alignment) # 0
THEN
FOR i:
CARDINAL
IN [(offset
MOD Alignment)..Alignment)
DO
PutWord[out, 0]; offset ← offset + 1
ENDLOOP;
validlength ← 0; EnumerateModules[FixUpOneModule];
WriteFromPages[stream: out, pages: pages, words: validlength];
offset ← offset + validlength;
file ← FSClose[file];
EXITS
ignore => NULL};
ENDLOOP;
sgb[ph.newsgi].pages ← PagesForWords[offset];
nextpage ← nextpage + sgb[ph.newsgi].pages;
ENDLOOP;
RETURN};
-- SegmentForOldCodeSgi: opens the file holding an old code segment and
-- returns it plus the segment's page span; returns nulls (after reporting)
-- when the segment was already copied, has no file, cannot be opened, or the
-- on-disk version is wrong. Marks the segment copied in all cases.
SegmentForOldCodeSgi:
PROC [sgi: SGIndex]
RETURNS [f: FS.OpenFile ← nullFile, s: FileSegment.Pages ← nullPages] = {
OPEN seg: bcd.sgb[sgi];
IF Copied[sgi] OR seg.file = FTNull THEN RETURN;
f ← FileForFti[seg.file];
IF f = nullFile
THEN
BcdErrorDefs.ErrorNameBase[
class: $error, s: "could not be opened to copy code"L,
name: bcd.ftb[seg.file].name, base: bcd.ssb]
ELSE {
-- seg.base is 1-origin; FileSegment spans are 0-origin
s ← [
file: f,
span: [base: seg.base-1, pages: (seg.pages + seg.extraPages)]];
IF WrongOldSegVersion[s, bcd.ftb[seg.file].version]
THEN {
BcdErrorDefs.ErrorNameBase[
class: $error, s: "on disk has an incorrect version"L,
name: bcd.ftb[seg.file].name, base: bcd.ssb];
f ← FSClose[f];
s ← nullPages}};
-- a failed copy invalidates the output BCD so loaders reject it
IF s.file = FS.nullOpenFile THEN header.versionIdent ← 0;
SetCopied[sgi]};
-- WrongOldSegVersion: reads page 0 of the segment's file (a BCD header) into
-- a one-page scratch interval and reports TRUE when its version stamp
-- disagrees with the expected one; frees the scratch page on UNWIND too.
WrongOldSegVersion:
PROC [s: FileSegment.Pages, version: BcdDefs.VersionStamp]
RETURNS [reply: BOOL] = {
h: BcdDefs.BcdBase;
headerInterval: VM.Interval ← VM.Allocate[1];
{
ENABLE
UNWIND =>
VM.Free[headerInterval];
h ← VM.AddressForPageNumber[headerInterval.page];
FS.Read[file: [s.file], from: 0, nPages: 1, to: h];
reply ← (h.version # version);
};
VM.Free[headerInterval];
RETURN [reply]};
-- Segment Mapping --
-- sgMap: old segment index -> new (packed) segment index, one slot per old entry.
-- copyMap: per old segment, whether its contents have already been copied.
SGMap: TYPE = RECORD [SEQUENCE length: CARDINAL OF SGIndex];
CopyMap: TYPE = RECORD [SEQUENCE length: CARDINAL OF BOOL];
sgMap: REF SGMap ← NIL;
copyMap: REF CopyMap ← NIL;
-- IndexForSgi / SgiForIndex: convert between an SGIndex (a word offset into
-- the segment table) and a dense 0-based record index.
IndexForSgi:
PROC [sgi: SGIndex]
RETURNS [
CARDINAL] =
INLINE {
RETURN [LOOPHOLE[sgi,CARDINAL]/SGRecord.SIZE]};
SgiForIndex:
PROC [i:
CARDINAL]
RETURNS [SGIndex] =
INLINE {
RETURN [SGIndex.FIRST + i*SGRecord.SIZE]};
-- InitCopyMap: allocates the copied-flags map, all FALSE.
InitCopyMap:
PROC [nsgis:
CARDINAL] = {
copyMap ← NEW[CopyMap[nsgis]];
FOR i: CARDINAL IN [0..nsgis) DO copyMap[i] ← FALSE ENDLOOP};
FreeCopyMap:
PROC = {
copyMap ← NIL};
-- SetCopied / Copied: mark and test the per-segment copied flag.
SetCopied:
PROC [sgi: SGIndex] = {
copyMap[IndexForSgi[sgi]] ← TRUE};
Copied:
PROC [sgi: SGIndex]
RETURNS [
BOOL] = {
RETURN [copyMap[IndexForSgi[sgi]]]};
-- InitSgMap: allocates the old->new segment map, all SGNull (unassigned).
InitSgMap:
PROC [nsgis:
CARDINAL] = {
sgMap ← NEW[SGMap[nsgis]];
FOR i: CARDINAL IN [0..nsgis) DO sgMap[i] ← BcdDefs.SGNull ENDLOOP};
FreeSgMap: PROC = {sgMap ← NIL};
-- SetSgMap: records old -> new in the pack map; no-op unless packing and old
-- is a real segment.
SetSgMap:
PROC [old, new: SGIndex] = {
IF (packing AND old # SGNull) THEN sgMap[IndexForSgi[old]] ← new};
-- ReadSgMap: translates an old segment index through the packing map;
-- identity when not packing or when the index is null.
ReadSgMap:
PROC [old: SGIndex]
RETURNS [SGIndex] = {
IF packing AND old # SGNull THEN RETURN [sgMap[IndexForSgi[old]]];
RETURN [old]};
-- FillInSgMap: after MakePackItem has claimed the PACKed segments, gives every
-- remaining old segment a fresh entry in the new table, retargeting its file
-- to the code/symbol destination as appropriate.
-- (Restored the lost `--` marker on the original comment.)
FillInSgMap:
PROC = {
-- called only when packing (i.e. packing requested AND copyCode = TRUE)
FOR i:
CARDINAL
IN [0..sgMap.length)
DO
IF sgMap[i] = SGNull
THEN {
oldsgi: SGIndex = SgiForIndex[i];
newsgi: SGIndex = table.Words[sgtype, SGRecord.SIZE];
sgb[newsgi] ← bcd.sgb[oldsgi];
sgb[newsgi].file ←
(
IF sgb[newsgi].class = $symbols
THEN
(
IF data.copySymbols
THEN symbolMap.fti
ELSE MapFile[sgb[newsgi].file])
ELSE codeMap.fti);
sgMap[i] ← newsgi};
ENDLOOP};
-- FixAllSgis: rewrites every segment reference held by modules and spaces
-- through the packing map. (Restored the lost `--` marker on the original comment.)
FixAllSgis:
PROC = {
-- replace all sgis with ReadSgMap[sgi]
FixModule:
PROC [mti: MTIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN m: mtb[mti];
m.code.sgi ← ReadSgMap[m.code.sgi];
m.sseg ← ReadSgMap[m.sseg]};
FixSpace:
PROC [spi: SPIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN sp: spb[spi];
sp.seg ← ReadSgMap[sp.seg]};
EnumerateModules[FixModule];
EnumerateSpaces[FixSpace]};
-- Code Links --
-- LinkCount: number of links a module's frame needs, per its link-fragment
-- variant (direct carries the count inline; indirect/multiple point at a
-- link fragment whose length is used, LFNull meaning zero).
LinkCount:
PROC [mti: MTIndex]
RETURNS [
CARDINAL] =
INLINE {
RETURN [
WITH m: mtb[mti]
SELECT
FROM
direct => m.length,
indirect => IF m.links = LFNull THEN 0 ELSE lfb[m.links].length,
multiple => IF m.links = LFNull THEN 0 ELSE lfb[m.links].length,
ENDCASE => ERROR]};
-- AlignOffset: rounds offset up to the next multiple of Alignment.
AlignOffset:
PROC [offset:
CARDINAL]
RETURNS [
CARDINAL] =
INLINE {
RETURN [((offset + (Alignment-1))/Alignment)*Alignment]};
-- AddLinksToCodeSegment: writes a module's code-links area (a count word when
-- starting a segment, then zeroed link slots padded to Alignment) followed by
-- the module's code, updating every module that shares the segment via
-- FixOffset; returns the total words written (0 if the source is unavailable).
AddLinksToCodeSegment:
PROC [
stream: IO.STREAM, mti: MTIndex, offset: CARDINAL, packed: BOOL]
RETURNS [CARDINAL] = {
sgi: SGIndex = mtb[mti].code.sgi;
codeLength: CARDINAL = mtb[mti].code.length/2;
linkSpace: CARDINAL;
f: FS.OpenFile;
s: FileSegment.Pages;
prefixWords: CARDINAL ← 0;
-- FixOffset: rebase the code offset of every module in this segment and
-- mark it as having link space.
FixOffset:
PROC [mti: MTIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN c: mtb[mti].code;
IF c.sgi = sgi
THEN {
c.linkspace ← TRUE; c.offset ← c.offset+offset; c.packed ← packed}};
[f, s] ← SegmentForOldCodeSgi[sgi];
IF f = nullFile THEN RETURN [0];
linkSpace ← LinkCount[mti];
IF offset = 0
AND linkSpace # 0
THEN {
-- first module in the segment: emit the rounded-up link count word
prefixWords ← 1;
PutWord[stream, linkSpace + Alignment - (linkSpace MOD Alignment)];
offset ← offset+1};
IF (offset+linkSpace)
MOD Alignment # 0
THEN
linkSpace ← linkSpace + Alignment - ((offset+linkSpace) MOD Alignment);
offset ← offset + linkSpace;
EnumerateModules[FixOffset];
FOR i: CARDINAL IN [0..linkSpace) DO PutWord[stream, 0] ENDLOOP;
WriteFromPages[stream: stream, pages: s, words: codeLength];
f ← FSClose[f];
RETURN [prefixWords + linkSpace + codeLength]};
-- code and symbol copying --
-- EstimateCopiedPages: upper-bounds the page counts needed for copied code
-- and symbols, sizing non-packageable modules by whole old segments.
-- (Restored the lost `--` marker on the original comment.)
EstimateCopiedPages:
PROC
RETURNS [codePages, symbolPages:
CARDINAL ← 0] = {
-- estimates ignore possible packing of code
packaged: BOOL ← FALSE;
AddModule:
PROC [mti: MTIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN m: mtb[mti];
IF data.copyCode
AND m.code.sgi # SGNull
THEN {
OPEN seg: bcd.sgb[m.code.sgi];
IF ~m.packageable THEN packaged ← TRUE
ELSE {
IF m.linkLoc = $code
AND ~m.code.linkspace
THEN {
-- links will be prepended: count word + links, aligned, plus the code
nLinks: CARDINAL = LinkCount[mti];
offset: CARDINAL = AlignOffset[IF nLinks=0 THEN 0 ELSE 1+nLinks];
codePages ← codePages + PagesForWords[offset + m.code.length/2]}
ELSE codePages ← codePages + seg.pages;
codePages ← codePages + seg.extraPages}};
IF data.copySymbols
AND m.sseg # SGNull
THEN {
OPEN seg: bcd.sgb[m.sseg];
symbolPages ← symbolPages + seg.pages + seg.extraPages};
RETURN};
-- AddSegment: count whole code segments referenced by non-packageable modules.
AddSegment:
PROC [oldSgi: SGIndex] = {
OPEN seg: bcd.sgb[oldSgi];
IF seg.class = $code
THEN {
package: BOOL ← FALSE;
TestModule:
PROC [mti: MTIndex]
RETURNS [
BOOL] = {
OPEN m: mtb[mti];
IF ~m.packageable AND m.code.sgi = oldSgi THEN package ← TRUE;
RETURN [m.code.sgi = oldSgi]};
EnumerateModules[TestModule];
IF package THEN codePages ← codePages + seg.pages + seg.extraPages}};
IF data.copyCode
OR data.copySymbols
THEN
EnumerateModules[AddModule];
IF data.copyCode
AND packaged
THEN
EnumerateOldSegments[AddSegment];
RETURN};
-- MoveCodeSegments: copies all code into the destination (the output BCD
-- itself when codeMap.fti = FTSelf, else a separate code file): first PACKed
-- segments, then modules needing code links, then remaining segments verbatim.
-- FIXES: restored the lost `--` marker on the "START ... HERE" comment, and
-- restored the missing `};` closing MoveOne's body (brace audit: the closes at
-- the end matched only the two inner IF blocks, leaving the PROC body open).
-- NOTE(review): ExecOps is commented out of the module IMPORTS list (L48) yet
-- ExecOps.CheckForAbort is called here — confirm how this module compiles.
MoveCodeSegments:
PROC [copiedPages:
CARDINAL] = {
stream: IO.STREAM;
nextPage: CARDINAL;
-- AddLinks: for each packageable module wanting code links, rewrite its code
-- with a fresh link area into a new segment at nextPage.
AddLinks:
PROC [mti: MTIndex]
RETURNS [
BOOL ←
FALSE] = {
OPEN m: mtb[mti];
wordsWritten, pagesWritten: CARDINAL;
newSgi: SGIndex;
IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
IF m.linkLoc = $code
AND ~m.code.linkspace
AND m.packageable
THEN {
IF m.code.packed
THEN BcdErrorDefs.ErrorName[
$error, "was previously PACKed and can not now have code links added"L, m.name]
ELSE {
MoveToPageBoundary[stream: stream, page: (nextPage-1)];
wordsWritten ← AddLinksToCodeSegment[
stream: stream, mti: mti, offset: 0, packed: FALSE];
pagesWritten ← PagesForWords[wordsWritten];
newSgi ← ReadSgMap[m.code.sgi];
sgb[newSgi].file ← codeMap.fti;
sgb[newSgi].base ← nextPage;
sgb[newSgi].pages ← pagesWritten;
nextPage ← nextPage + pagesWritten;
};
};
}; -- end AddLinks
-- MoveOne: copy one not-yet-copied code segment verbatim to nextPage.
MoveOne:
PROC [oldSgi: SGIndex] = {
OPEN seg: bcd.sgb[oldSgi];
IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
IF seg.class = $code
THEN {
f: FS.OpenFile;
s: FileSegment.Pages;
[f, s] ← SegmentForOldCodeSgi[oldSgi];
IF f # nullFile
THEN {
segPages: CARDINAL = s.span.pages;
newSgi: SGIndex = ReadSgMap[oldSgi];
sgb[newSgi].file ← codeMap.fti;
sgb[newSgi].base ← nextPage;
MoveToPageBoundary[stream: stream, page: (nextPage-1)];
WriteFromPages[
stream: stream, pages: s,
words: (segPages * PrincOps.wordsPerPage)];
nextPage ← nextPage + segPages;
f ← FSClose[f];
};
};
}; -- end MoveOne
-- START MoveCodeSegments HERE --
IF codeMap.fti = FTSelf
THEN {stream ← bcdStream; nextPage ← nextBcdPage}
ELSE [stream, nextPage] ← InitFile[codeMap, copiedPages];
nextPage ← PackCodeSegments[stream, nextPage];
EnumerateModules[AddLinks];
EnumerateOldSegments[MoveOne];
IF codeMap.fti = FTSelf THEN nextBcdPage ← nextPage ELSE IO.Close[stream];
}; -- end MoveCodeSegments
-- EnterMissingSymbolFiles: makes sure every referenced-but-uncopied symbol
-- file has an entry in the output file table.
-- (Restored the lost `--` marker on the original comment.)
EnterMissingSymbolFiles: PROC = {
CheckOneSymbolsFileSeg:
PROC [oldSgi: SGIndex] = {
OPEN seg: bcd.sgb[oldSgi];
IF (seg.class = $symbols)
AND ~Copied[oldSgi]
AND (seg.file # FTNull)
THEN
-- insure that a file entry exists for this file
[] ← MapFile[bcd.sgb[oldSgi].file]};
EnumerateOldSegments[CheckOneSymbolsFileSeg]};
-- MoveSymbolSegments: copies every uncopied symbol segment to the symbol
-- destination (the output BCD itself when symbolMap.fti = FTSelf, else a
-- separate symbols file), verifying on-disk versions and falling back to a
-- file reference (with a warning) when a source file cannot be opened.
MoveSymbolSegments:
PROC [copiedPages:
CARDINAL] = {
stream: IO.STREAM;
nextPage: CARDINAL;
MoveOne:
PROC [oldSgi: SGIndex] = {
OPEN seg: bcd.sgb[oldSgi];
f: FS.OpenFile ← nullFile;
newSgi: SGIndex;
IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
IF (seg.class # $symbols) OR Copied[oldSgi] OR (seg.file = FTNull) THEN RETURN;
newSgi ← ReadSgMap[oldSgi];
f ← FileForFti[seg.file];
IF f = nullFile
THEN {
-- can't open the source: keep a (mapped) reference to the original file
BcdErrorDefs.ErrorNameBase[
class: $warning, s: "could not be opened to copy symbols"L,
name: bcd.ftb[seg.file].name, base: bcd.ssb];
sgb[newSgi] ← bcd.sgb[oldSgi];
sgb[newSgi].file ← MapFile[bcd.sgb[oldSgi].file]}
ELSE {
-- seg.base is 1-origin; FileSegment spans are 0-origin
s: FileSegment.Pages = [
file: f,
span: [base: seg.base-1, pages: (seg.pages + seg.extraPages)]];
IF WrongOldSegVersion[s, bcd.ftb[seg.file].version]
THEN {
BcdErrorDefs.ErrorNameBase[
class: $error, s: "on disk has incorrect version"L,
name: bcd.ftb[seg.file].name, base: bcd.ssb];
-- invalidate the output BCD so loaders reject it
header.versionIdent ← 0}
ELSE {
segPages: CARDINAL = s.span.pages;
sgb[newSgi].file ← symbolMap.fti;
sgb[newSgi].base ← nextPage;
MoveToPageBoundary[stream: stream, page: (nextPage-1)];
WriteFromPages[
stream: stream, pages: s,
words: (segPages * PrincOps.wordsPerPage)];
nextPage ← nextPage + segPages};
f ← FSClose[f];
};
SetCopied[oldSgi]};
IF symbolMap.fti = FTSelf
THEN {stream ← bcdStream; nextPage ← nextBcdPage}
ELSE [stream, nextPage] ← InitFile[symbolMap, copiedPages];
EnumerateOldSegments[MoveOne];
IF symbolMap.fti = FTSelf THEN nextBcdPage ← nextPage ELSE IO.Close[stream];
};
-- FileForFti: opens the disk file named by an old file-table entry, returning
-- nullFile when the open fails (FS.Error is swallowed).
FileForFti:
PROC [oldFti: BcdDefs.FTIndex]
RETURNS [f:
FS.OpenFile] = {
name: BcdDefs.NameRecord = bcd.ftb[oldFti].name;
ssd: ConvertUnsafe.SubString ← [
base: @bcd.ssb.string, offset: name, length: bcd.ssb.size[name]];
f ← nullFile;
f ← FS.Open[NormalizeFileName[ssd] ! FS.Error => TRUSTED {CONTINUE}];
RETURN};
-- NormalizeFileName: lower-cases the name and appends ".bcd" when it has no
-- extension (no '.' anywhere in the name).
NormalizeFileName:
PROC [in: ConvertUnsafe.SubString]
RETURNS[Rope.
ROPE] = {
dot: BOOL ← FALSE;
flat: Rope.Text ← Rope.Flatten[ConvertUnsafe.SubStringToRope[in]];
FOR i:
CARDINAL
IN [0..flat.length)
DO
IF flat.text[i] IN ['A..'Z] THEN flat.text[i] ← Ascii.Lower[flat.text[i]];
IF flat.text[i] = '. THEN dot ← TRUE;
ENDLOOP;
RETURN[IF dot THEN flat ELSE Rope.Concat[flat, ".bcd"]]};
-- Bcd Output Routines --
bcdStream: IO.STREAM; -- stream on the output BCD file (opened by OpenOutputFile)
nextBcdPage: CARDINAL; -- next free page when copying into the output BCD itself
-- WriteSubTable: writes one table's raw words to the output stream.
WriteSubTable:
PROC [selector: Table.Selector] = {
base: Table.Base;
size: CARDINAL;
[base, size] ← table.Bounds[selector];
bcdStream.UnsafePutBlock[[base, 0, size*bytesPerWord]]};
-- TableOut: lays out and writes the final BCD — computes each sub-table's
-- offset/limit in the header (the d accumulator is order-critical and must
-- match the WriteSubTable sequence below), writes literals, copies code and
-- symbols, then rewrites the header followed by all tables. Sets data.aborted
-- instead of propagating when the user aborts the copy.
TableOut:
PROC = {
d, s: CARDINAL;
bcdPages, codePages, symbolPages: CARDINAL;
basePages: CARDINAL;
rtPageCount: CARDINAL;
saveNextPage: CARDINAL;
saveIndex: INT; -- FileStream.FileByteIndex
rtPageCount ← PagesForWords[BcdLiterals.LitSegSize[]];
BEGIN OPEN header;
IF firstdummy = 0 THEN firstdummy ← BcdUtilDefs.GetGfi[0];
d ← BCD.SIZE;
ssOffset ← d; d ← d + (ssLimit ← table.Bounds[sstype].size);
ctOffset ← d; d ← d + (s ← table.Bounds[cttype].size);
ctLimit ← LOOPHOLE[s];
mtOffset ← d; d ← d + (s ← table.Bounds[mttype].size);
mtLimit ← LOOPHOLE[s];
impOffset ← d; d ← d + (s ← table.Bounds[imptype].size);
impLimit ← LOOPHOLE[s];
expOffset ← d; d ← d + (s ← table.Bounds[exptype].size);
expLimit ← LOOPHOLE[s];
evOffset ← d; d ← d + (s ← table.Bounds[evtype].size);
evLimit ← LOOPHOLE[s, EVIndex];
sgOffset ← d; d ← d + (s ← table.Bounds[sgtype].size);
sgLimit ← LOOPHOLE[s];
ftOffset ← d; d ← d + (s ← table.Bounds[fttype].size);
ftLimit ← LOOPHOLE[s];
ntOffset ← d; d ← d + (s ← table.Bounds[nttype].size);
ntLimit ← LOOPHOLE[s];
typOffset ← d; d ← d + (s ← table.Bounds[typtype].size);
typLimit ← LOOPHOLE[s];
tmOffset ← d; d ← d + (s ← table.Bounds[tmtype].size);
tmLimit ← LOOPHOLE[s];
spOffset ← d; d ← d + (s ← table.Bounds[sptype].size);
spLimit ← LOOPHOLE[s];
fpOffset ← d; d ← d + (s ← table.Bounds[fptype].size);
fpLimit ← LOOPHOLE[s];
lfOffset ← d; d ← d + (s ← table.Bounds[lftype].size);
lfLimit ← LOOPHOLE[s];
rfOffset ← d; d ← d + (s ← table.Bounds[rftype].size);
rfLimit ← LOOPHOLE[s];
tfOffset ← d; d ← d + (s ← table.Bounds[tftype].size);
tfLimit ← LOOPHOLE[s];
basePages ← PagesForWords[d];
rtPages ← [relPageBase: basePages, pages: rtPageCount];
extended ← TRUE;
nPages ← bcdPages ← basePages + rtPageCount;
END;
[codePages, symbolPages] ← EstimateCopiedPages[];
IF data.copyCode
AND codeMap.fti = FTSelf
THEN
bcdPages ← bcdPages + codePages;
IF data.copySymbols
AND symbolMap.fti = FTSelf
THEN
bcdPages ← bcdPages + symbolPages;
OpenOutputFile[1 + bcdPages];
IF rtPageCount # 0
THEN {
-- literals land on the pages reserved right after the tables
MoveToPageBoundary[stream: bcdStream, page: basePages];
IF packing THEN BcdLiterals.UpdateSegments[ReadSgMap];
BcdLiterals.WriteLiterals[bcdStream];
saveIndex ← IO.GetIndex[bcdStream]};
saveNextPage ← nextBcdPage ← header.nPages + 1;
IF data.copyCode
THEN MoveCodeSegments[codePages
! UserAbort => {GO TO AbortRequested}];
IF data.copySymbols
THEN MoveSymbolSegments[symbolPages
! UserAbort => {GO TO AbortRequested}];
IF packing THEN FixAllSgis[];
-- rewind and emit the header followed by the tables, in layout order
IO.SetIndex[bcdStream, 0];
bcdStream.UnsafePutBlock[[LOOPHOLE[header, LONG POINTER TO PACKED ARRAY [0..0) OF CHAR], 0, BCD.SIZE*bytesPerWord]];
WriteSubTable[sstype];
WriteSubTable[cttype];
WriteSubTable[mttype];
WriteSubTable[imptype];
WriteSubTable[exptype];
WriteSubTable[evtype];
WriteSubTable[sgtype];
WriteSubTable[fttype];
WriteSubTable[nttype];
WriteSubTable[typtype];
WriteSubTable[tmtype];
WriteSubTable[sptype];
WriteSubTable[fptype];
WriteSubTable[lftype];
WriteSubTable[rftype];
WriteSubTable[tftype];
IF nextBcdPage # saveNextPage
THEN
MoveToPageBoundary[stream: bcdStream, page: (nextBcdPage-1)]
ELSE IF rtPageCount # 0 THEN IO.SetIndex[bcdStream, saveIndex];
data.nConfigs ← header.nConfigs; data.nModules ← header.nModules;
data.nImports ← header.nImports; data.nExports ← header.nExports;
data.nPages ← header.nPages;
EXITS
AbortRequested => data.aborted ← TRUE};
-- OpenOutputFile: sizes the caller-supplied output file and opens bcdStream on it.
OpenOutputFile:
PROC [initialPages:
CARDINAL] =
INLINE {
file: FS.OpenFile ← data.outputFile;
IF file = FS.nullOpenFile THEN ERROR;
file.SetPageCount[initialPages];
bcdStream ← FS.StreamFromOpenFile[file, $write]};
CloseOutputFile: PROC = INLINE {IO.Close[bcdStream]; bcdStream ← NIL};
-- FSClose: closes fh if open (tolerating an already-invalid handle) and
-- returns the null handle so callers can overwrite their variable in one step.
FSClose:
PROC [fh:
FS.OpenFile]
RETURNS [
FS.OpenFile] = {
IF fh #
FS.nullOpenFile
THEN
FS.Close[fh ! FS.Error => IF error.code = $invalidOpenFile THEN CONTINUE];
RETURN [FS.nullOpenFile];
};
}.