-- CIFSBcdWrite.mesa
-- Last edited by Satterthwaite on September 15, 1982 9:48 am
-- Last edited by Paul Rovner on April 12, 1983 8:35 pm

DIRECTORY
  Alloc: TYPE USING [
    AddNotify, Bounds, DropNotify, Handle, Notifier, Top, Trim, Words],
  BcdComData: TYPE USING [
    aborted, binderVersion, codeName, copyCode, copySymbols,
    nConfigs, nExports, nImports, nModules, nPages, objectStamp, op,
    outputFile, sourceName, sourceVersion, symbolName, table,
    typeExported, textIndex, zone],
  BcdControlDefs: TYPE USING [],
  BcdDefs: TYPE USING [
    BCD, ControlItem, CTIndex, CTRecord, cttype, cxtype, EVIndex, evtype,
    EXPIndex, EXPRecord, exptype, FPIndex, FPRecord, fptype,
    FTIndex, FTNull, FTRecord, FTSelf, fttype,
    IMPIndex, IMPNull, imptype, LFNull, lftype, Link,
    MTIndex, MTNull, MTRecord, mttype, Namee, NameRecord, NTIndex, NTRecord, nttype,
    NullName, NullVersion, PackedString, rftype, SegClass, SGIndex, SGNull, SGRecord, sgtype,
    SpaceID, SPIndex, SPRecord, sptype, sstype, sttype, tftype, tmtype, 
    treetype, typtype, VersionID, VersionStamp],
  BcdErrorDefs: TYPE USING [ErrorHti, ErrorName, ErrorNameBase, GetSti],
  BcdLiterals: TYPE USING [
    EnterVersionFiles, LitSegSize, UpdateSegments, WriteLiterals],
  BcdOps: TYPE USING [BcdBase, NameString],
  BcdUtilDefs: TYPE USING [
    BcdBases, ContextForTree, EnterExport, EnterFile, EnterImport, EnterName, 
    GetGfi, MapName, MergeFile, NameForHti, SetFileVersion],
  CIFS: TYPE USING [OpenFile, Close, Error, GetFC, Open, create, read, replace, write],
  ConvertUnsafe: TYPE USING [ToRope],
  Environment: TYPE USING [bytesPerPage, bytesPerWord, wordsPerPage],
  File: TYPE USING [Capability, nullCapability, SetSize],
  FileSegment: TYPE USING [Pages, nullPages],
  FileStream: TYPE USING [FileByteIndex, Create, GetIndex, SetIndex],
--  ExecOps: TYPE USING [CheckForAbort],
  Inline: TYPE USING [LongCOPY, LongMult],
  OSMiscOps: TYPE USING [StampToTime],
  Space: TYPE USING [
    Create, Delete, Handle, Map, LongPointer, Unmap, virtualMemory],
  Stream: TYPE USING [Handle, Delete, PutBlock, PutWord],
  Strings: TYPE USING [
    String, SubString, SubStringDescriptor, AppendChar, AppendString],
  Symbols: TYPE USING [CXIndex, HTIndex, HTNull, STIndex, STNull],
  SymbolOps: TYPE USING [Reset],
  Table: TYPE USING [Base, Selector],
  Tree: TYPE USING [Index, Link, Scan, Null],
  TreeOps: TYPE USING [GetNode, ListLength, ScanList];

CIFSBcdWrite: PROGRAM
    IMPORTS
      Alloc, BcdErrorDefs, BcdLiterals, BcdUtilDefs, CIFS, ConvertUnsafe,
      --ExecOps,-- File, FileStream, Inline, OSMiscOps, Space, Stream, Strings,
      SymbolOps, TreeOps,
      data: BcdComData
    EXPORTS BcdControlDefs = {
  OPEN BcdDefs;

  bytesPerWord: CARDINAL = Environment.bytesPerWord;
  nullFile: CIFS.OpenFile = NIL;
  nullPages: FileSegment.Pages = FileSegment.nullPages;
    
  Alignment: CARDINAL = 4;  -- Code Segments must start at 0 MOD Alignment

  BcdWriteError: PUBLIC ERROR = CODE;
  Error: PROC = {ERROR BcdWriteError};
  UserAbort: ERROR = CODE;  -- raised on ↑DEL during code or symbol copying
  

  table: Alloc.Handle;
  tb, stb, ctb, mtb, lfb, etb, itb, sgb, tyb, tmb, ftb, ntb, spb, fpb, cxb: Table.Base;
  ssb: BcdOps.NameString;

  Notifier: Alloc.Notifier = {
    tb  ← base[treetype];  stb ← base[sttype];
    ctb ← base[cttype];    mtb ← base[mttype];  lfb ← base[lftype];
    tyb ← base[typtype];   tmb ← base[tmtype];
    etb ← base[exptype];   itb ← base[imptype];
    sgb ← base[sgtype];    ftb ← base[fttype];
    spb ← base[sptype];    fpb ← base[fptype];
    ntb ← base[nttype];    ssb ← base[sstype];
    cxb ← base[cxtype];
    IF bcd # NIL THEN {
      bcd.ctb ← ctb;  bcd.mtb ← mtb;
      IF ~packing THEN bcd.sgb ← sgb;
      bcd.tyb ← tyb; bcd.tmb ← tmb;  bcd.spb ← spb; bcd.fpb ← fpb}};


 -- inline utilities
 
  Copy: PROC [from: LONG POINTER, nwords: CARDINAL, to: LONG POINTER] =
    Inline.LongCOPY;
    
  Zero: PROC [p: LONG POINTER, l: CARDINAL] = INLINE {
    IF l # 0 THEN {p↑ ← 0; Inline.LongCOPY[from: p, to: (p+1), nwords: (l-1)]}};

  PagesForWords: PROC [nWords: CARDINAL] RETURNS [CARDINAL] = INLINE {
    RETURN [(nWords + Environment.wordsPerPage-1)/Environment.wordsPerPage]};
    
    
  bcd: LONG POINTER TO BcdUtilDefs.BcdBases ← NIL;

  header: BcdOps.BcdBase;  -- points to Bcd header and saved tables
  headerSpace: Space.Handle;

  WriteBcd: PUBLIC PROC [root: Tree.Link] = {
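    -- main entry point: rewrites the binder's tables as the output BCD, mapping names and
    -- files, copying imports and exports, and optionally packing and copying code and symbols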
    saveIndex: CARDINAL = data.textIndex;
    node, subNode: Tree.Index;
    table ← data.table; table.AddNotify[Notifier];
    node ← TreeOps.GetNode[root];
    packing ← (tb[node].son[2] # Tree.Null AND data.copyCode);
    Initialize[];
    IF packing THEN {
      MakePackItem[tb[node].son[2]];
      data.textIndex ← saveIndex;
      FillInSgMap[]};
    CopyConfigs[];
    CopyModules[];
    CopyTypes[];
    CopySpaces[];  CopyFramePacks[];
    subNode ← TreeOps.GetNode[tb[node].son[3]];
    data.textIndex ← tb[subNode].info;
    TreeOps.ScanList[tb[subNode].son[1], CopyImport];
    TreeOps.ScanList[tb[subNode].son[2], CopyExport];
    IF tb[subNode].attr2 THEN  -- EXPORTS ALL
      ExportCx[BcdUtilDefs.ContextForTree[tb[subNode].son[4]]];
    IF data.copySymbols THEN EnterMissingSymbolFiles[];
    BcdLiterals.EnterVersionFiles[bcd.ftb, FtiForIndex[fileMap.length], MapFile];
    TableOut[];
    CloseOutputFile[];
    Finalize[];  data.textIndex ← saveIndex;
    table.DropNotify[Notifier];  table ← NIL};


  Initialize: PROC = {
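    -- saves the old export, import, file, name, and string tables (and, when packing, the
    -- old segment table) in headerSpace, trims them for rebuilding, and initializes the
    -- output header, file map, and copy maps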
    impSize, expSize, sgSize, fSize, nSize, ssSize: CARDINAL;
    nSgis: CARDINAL;
    b: Table.Base;
    desc: Strings.SubStringDescriptor ←
      [base: data.sourceName, offset: 0, length: data.sourceName.length];
    IF data.copyCode OR data.copySymbols THEN InitCodeSymbolCopy[];
    impSize ← table.Bounds[imptype].size;
    expSize ← table.Bounds[exptype].size;
    sgSize ← table.Bounds[sgtype].size;
    nSgis ← sgSize/SGRecord.SIZE;
    IF ~packing THEN sgSize ← 0;
    fSize ← table.Bounds[fttype].size;
    nSize ← table.Bounds[nttype].size;
    ssSize ← table.Bounds[sstype].size;
    bcd ← (data.zone).NEW[BcdUtilDefs.BcdBases];
    fileMap ← (data.zone).NEW[FileMap[fSize/FTRecord.SIZE]];
    FOR i: CARDINAL IN [0..fileMap.length) DO fileMap[i] ← FTNull ENDLOOP;
    headerSpace ← Space.Create[ 
      size: PagesForWords[
        BCD.SIZE + impSize + expSize + sgSize + fSize + nSize + ssSize],
      parent: Space.virtualMemory];
    headerSpace.Map[];
    header ← LOOPHOLE[headerSpace.LongPointer, BcdOps.BcdBase];
    b ← (LOOPHOLE[header, Table.Base] + BCD.SIZE);
    Copy[to: (bcd.etb ← b), from: etb, nwords: expSize];
      b ← b + expSize;  table.Trim[exptype,0];
    Copy[to: (bcd.itb ← b), from: itb, nwords: impSize];
      b ← b + impSize;  table.Trim[imptype,0];
    Copy[to: (bcd.ftb ← b), from: ftb, nwords: fSize];
      b ← b + fSize;  table.Trim[fttype,0];
    Copy[to: (bcd.ntb ← b), from: ntb, nwords: nSize];
      b ← b + nSize;  table.Trim[nttype,0];
    Copy[to: (bcd.ssb ← b), from: ssb, nwords: ssSize];
      b ← b + ssSize;
    SymbolOps.Reset[];
    IF packing THEN {  -- save old segment table in heap
      Copy[to: (bcd.sgb ← b), from: sgb, nwords: sgSize];
      b ← b + sgSize;  table.Trim[sgtype,0]};
    InitHeader[
      header: header, 
      objectVersion: OSMiscOps.StampToTime[data.objectStamp], 
      source: BcdUtilDefs.EnterName[@desc], 
      sourceVersion: data.sourceVersion];
    bcd.ctb ← table.Bounds[cttype].base;
    bcd.mtb ← table.Bounds[mttype].base;
    bcd.tyb ← table.Bounds[typtype].base;
    bcd.tmb ← table.Bounds[tmtype].base;
    bcd.spb ← table.Bounds[sptype].base;
    bcd.fpb ← table.Bounds[fptype].base;
    IF data.copyCode OR data.copySymbols THEN {MapCodeSymbolFiles[]; InitCopyMap[nSgis]};
    IF packing THEN InitSgMap[nSgis]
    ELSE {
      bcd.sgb ← table.Bounds[sgtype].base;
      IF ~data.copyCode THEN MapSegments[code];
      IF ~data.copySymbols THEN MapSegments[symbols]}};

  Finalize: PROC = {
    IF data.copyCode OR data.copySymbols THEN ReleaseCodeSymbolCopy[];
    (data.zone).FREE[@fileMap];  (data.zone).FREE[@bcd];
    Space.Delete[headerSpace];
    FreePackItems[];
    IF packing THEN FreeSgMap[];
    IF data.copyCode OR data.copySymbols THEN FreeCopyMap[]};


 -- BCD (re)construction
 
 
  CopyName: PROC [olditem, newitem: Namee] = {
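    -- allocates a new name-table entry for newitem, copying (and mapping) the name that
    -- the saved name table associates with olditem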
    newNti: NTIndex = table.Words[nttype, NTRecord.SIZE];
    FOR nti: NTIndex ← NTIndex.FIRST, nti+NTRecord.SIZE DO
      OPEN old: bcd.ntb[nti];
      IF old.item = olditem THEN {
        OPEN new: ntb[newNti];
        new.item ← newitem; new.name ← bcd.MapName[old.name];
        RETURN};
      ENDLOOP};


  CopyConfigs: PROC = {
    -- configs are already copied; only names and files need mapping
    cti: CTIndex ← CTIndex.FIRST;
    ctLimit: CTIndex = table.Top[cttype];
    UNTIL cti = ctLimit DO
      header.nConfigs ← header.nConfigs + 1;
      ctb[cti].name ← bcd.MapName[ctb[cti].name];
      ctb[cti].file ← MapFile[ctb[cti].file];
      IF ctb[cti].namedInstance THEN CopyName[[config[cti]], [config[cti]]];
      cti ← cti + (CTRecord.SIZE + ctb[cti].nControls*ControlItem.SIZE);
      ENDLOOP};


  CopyModules: PROC = {
    -- modules are already copied; only names and files need mapping

    MapOne: PROC [mti: MTIndex] RETURNS [BOOL ← FALSE] = {
      OPEN m: mtb[mti];
      header.nModules ← header.nModules + 1;
      m.name ← bcd.MapName[m.name];
      m.file ← MapFile[m.file];
      IF m.namedInstance THEN CopyName[[module[mti]], [module[mti]]]};

    EnumerateModules[MapOne]};

  EnumerateModules: PROC [p: PROC [MTIndex] RETURNS [BOOL]] = {
    mti: MTIndex ← MTIndex.FIRST;
    mtLimit: MTIndex = table.Top[mttype];
    UNTIL mti = mtLimit DO
      IF p[mti] THEN EXIT;
      mti ← mti + (WITH m: mtb[mti] SELECT FROM
	      direct => MTRecord.direct.SIZE + m.length*Link.SIZE,
	      indirect => MTRecord.indirect.SIZE,
	      multiple => MTRecord.multiple.SIZE,
	      ENDCASE => ERROR);
      ENDLOOP};


  CopyTypes: PROC = {};  -- types are already copied; nothing needs to be done (typeIds are already current)


  CopySpaces: PROC = {
    -- spaces are already copied; only names need mapping (segment indices are fixed by FixAllSgis when packing)

    MapOne: PROC [spi: SPIndex] RETURNS [BOOL ← FALSE] = {
      FOR i: CARDINAL IN [0..spb[spi].length) DO
	spb[spi].spaces[i].name ← bcd.MapName[spb[spi].spaces[i].name];
	ENDLOOP};

    EnumerateSpaces[MapOne]};

  EnumerateSpaces: PROC [p: PROC [SPIndex] RETURNS [BOOL]] = {
    spi: SPIndex ← SPIndex.FIRST; 
    spLimit: SPIndex = table.Top[sptype];
    UNTIL spi = spLimit DO
      IF p[spi] THEN EXIT;
      spi ← spi + SPRecord.SIZE + spb[spi].length*SpaceID.SIZE;
      ENDLOOP};


  CopyFramePacks: PROC = {
    -- framepacks are already copied; only names need mapping

    MapOne: PROC [fpi: FPIndex] RETURNS [BOOL ← FALSE] = {
      fpb[fpi].name ← bcd.MapName[fpb[fpi].name]};

    EnumerateFramePacks[MapOne]};

  EnumerateFramePacks: PROC [p: PROC [FPIndex] RETURNS [BOOL]] = {
    fpi: FPIndex ← FPIndex.FIRST;
    fpLimit: FPIndex = table.Top[fptype];
    UNTIL fpi = fpLimit DO
      IF p[fpi] THEN RETURN;
      fpi ← fpi + FPRecord.SIZE + fpb[fpi].length*MTIndex.SIZE;
      ENDLOOP};


  CopyImport: Tree.Scan = {
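    -- re-enters one imported interface in the new import table, maps its file, and updates
    -- the header's import and dummy-gfi counts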
    OPEN Symbols;
    sti: STIndex ← STNull;
    olditi, iti: IMPIndex;
    WITH t SELECT FROM
      symbol => sti ← index;
      subtree =>
        WITH s1:tb[index].son[1] SELECT FROM
          symbol => sti ← s1.index;
          ENDCASE => Error[];
      ENDCASE => Error[];
    olditi ← stb[sti].impi;
    IF sti = STNull OR olditi = IMPNull THEN RETURN;
    iti ← bcd.EnterImport[olditi, TRUE];
    itb[iti].file ← MapFile[itb[iti].file];
    IF header.firstdummy = 0 THEN header.firstdummy ← itb[iti].gfi;
    header.nImports ← header.nImports + 1;
    header.nDummies ← header.nDummies + itb[iti].ngfi};

  CopyExport: Tree.Scan = {
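    -- enters one exported item in the new export table: an exported interface is re-entered
    -- with its links copied and its file mapped; an exported module gets a fresh export record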
    OPEN Symbols;
    sti: STIndex ← STNull;
    hti: HTIndex ← HTNull;
    neweti: EXPIndex;
    oldeti: EXPIndex;
    WITH t SELECT FROM
      symbol => sti ← index;
      subtree =>
	WITH s1:tb[index].son[1] SELECT FROM
	  symbol => {sti ← s1.index; hti ← stb[sti].hti};
	  ENDCASE => Error[];
      ENDCASE => Error[];
    WITH s:stb[sti] SELECT FROM
      external => 
        WITH m:s.map SELECT FROM
          interface => {
            OPEN new: etb[neweti];
            oldeti ← m.expi;
            neweti ← bcd.EnterExport[oldeti, TRUE];
            Copy[from: @bcd.etb[oldeti].links, to: @new.links, nwords: new.size];
            new.file ← MapFile[new.file]};
          module => [] ← NewExportForModule[m.mti, HTNull];
          ENDCASE => RETURN;
      ENDCASE => RETURN;
    header.nExports ← header.nExports + 1};

  NewExportForModule: PROC [mti: MTIndex, name: Symbols.HTIndex]
      RETURNS [eti: EXPIndex] = {
    OPEN Symbols;
    eti ← table.Words[exptype, EXPRecord.SIZE+1*Link.SIZE];
    etb[eti] ← [
      name: mtb[mti].name,
      size: 1,
      port: module,
      namedInstance: name # HTNull,
      typeExported: FALSE,
      file: mtb[mti].file,
      links: ];
    etb[eti].links[0] ← [variable[vgfi: mtb[mti].gfi, var: 0, vtag: var]];
    IF name # HTNull THEN {
      nti: NTIndex = table.Words[nttype, NTRecord.SIZE];
      ntb[nti] ← [name: BcdUtilDefs.NameForHti[name], item: [module[mti]]]};
    RETURN};

  ExportCx: PROC [cx: Symbols.CXIndex] = {
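    -- handles EXPORTS ALL: exports every interface bound in the context, skipping any whose
    -- export record is already present in the new export table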
    OPEN Symbols;
    neweti, oldeti: EXPIndex;
    FOR sti: STIndex ← cxb[cx].link, stb[sti].link UNTIL sti = STNull DO {
      IF ~stb[sti].filename THEN 
	WITH s: stb[sti] SELECT FROM
	  external =>
	    WITH m: s.map SELECT FROM
	      interface => {  
		OPEN old: bcd.etb[oldeti], new: etb[neweti];
		-- first make sure that old is not already exported
		existingEti: EXPIndex ← EXPIndex.FIRST;
		etLimit: EXPIndex = table.Top[exptype];
		oldeti ← m.expi;
		UNTIL existingEti = etLimit DO
		  IF old = etb[existingEti] THEN GO TO AlreadyExported;
		  existingEti ← existingEti + EXPRecord.SIZE+etb[existingEti].size;
		  ENDLOOP;  
		neweti ← bcd.EnterExport[oldeti, TRUE];
		Copy[from: @old.links, to: @new.links, nwords: new.size];
		new.file ← MapFile[new.file];
		header.nExports ← header.nExports + 1};
	      ENDCASE;
	  ENDCASE;
      EXITS
	AlreadyExported => NULL}; 
      ENDLOOP};


 -- file mapping
 
  FileMap: TYPE = RECORD [SEQUENCE length: CARDINAL OF FTIndex];
  fileMap: LONG POINTER TO FileMap ← NIL;

  IndexForFti: PROC [fti: FTIndex] RETURNS [CARDINAL] = INLINE {
    RETURN [LOOPHOLE[fti,CARDINAL]/FTRecord.SIZE]};

  FtiForIndex: PROC [n: CARDINAL] RETURNS [FTIndex] = INLINE {
    RETURN [FTIndex.FIRST + n*FTRecord.SIZE]};

  MapFile: PROC [fti: FTIndex] RETURNS [FTIndex] = {
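    -- maps an old file-table index into the new file table, merging the file on first reference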
    SELECT fti FROM
      FTNull, FTSelf => RETURN [fti];
      ENDCASE => {
	fileIndex: CARDINAL = IndexForFti[fti];
	IF fileMap[fileIndex] = FTNull THEN fileMap[fileIndex] ← bcd.MergeFile[fti];
	RETURN [fileMap[fileIndex]]}};

 -- header processing
 
   InitHeader: PROC [
      header: BcdOps.BcdBase,
      objectVersion: VersionStamp,
      source: NameRecord ← NullName,
      sourceVersion: VersionStamp ← NullVersion] = {
    Zero[header, BcdDefs.BCD.SIZE];
    header.versionIdent ← BcdDefs.VersionID;
    header.version ← objectVersion;
    header.creator ← data.binderVersion;
    header.definitions ← (data.op = conc);
    header.typeExported ← data.typeExported;
    header.source ← source;  header.sourceVersion ← sourceVersion;
    header.repackaged ← 
      table.Bounds[sptype].size # 0 OR table.Bounds[fptype].size # 0;
    header.tableCompiled ← FALSE;
    header.spare1 ← TRUE};

    
  codeMap, symbolMap: REF Map ← NIL;

  Map: TYPE = RECORD [
    fti: FTIndex,
    type: SegClass,
    filename: Strings.String,
    filehandle: CIFS.OpenFile];

  InitCodeSymbolCopy: PROC = {
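    -- builds the code and symbol copy descriptors; a NIL file name means the segments are
    -- copied into the output BCD itself (FTSelf)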

    Setup: PROC [file: Strings.String, type: SegClass] RETURNS [REF Map] = {
      RETURN [--(data.zone).--NEW[Map ← [
	type: type,
	filename: file,
	filehandle: nullFile,
	fti: IF file = NIL THEN FTSelf ELSE BcdUtilDefs.EnterFile[file]]]]};

    IF data.copyCode THEN codeMap ← Setup[data.codeName, code];
    IF data.copySymbols THEN symbolMap ← Setup[data.symbolName, symbols]};

  MapCodeSymbolFiles: PROC = {
    IF data.copyCode THEN codeMap.fti ← MapFile[codeMap.fti];
    IF data.copySymbols THEN symbolMap.fti ← MapFile[symbolMap.fti]};

  ReleaseCodeSymbolCopy: PROC = {

    Release: PROC [p: REF Map] = {
      IF p.filehandle # nullFile THEN {
        CIFS.Close[p.filehandle];  p.filehandle ← nullFile}};
	
    IF codeMap # NIL THEN {Release[codeMap]; --(data.zone).--FREE[@codeMap]};
    IF symbolMap # NIL THEN {Release[symbolMap]; --(data.zone).--FREE[@symbolMap]}};


  EnumerateSegments: PROC [proc: PROC[SGIndex]] = {
    sgLimit: SGIndex = table.Top[sgtype];
    FOR sgi: SGIndex ← SGIndex.FIRST, sgi + SGRecord.SIZE UNTIL sgi = sgLimit DO 
      proc[sgi] ENDLOOP};

  EnumerateOldSegments: PROC [proc: PROC[SGIndex]] = {
    IF ~packing THEN EnumerateSegments[proc]
    ELSE FOR i: NAT IN [0..sgMap.length) DO proc[SgiForIndex[i]] ENDLOOP};

  MapSegments: PROC [type: SegClass] = {
    CopySegment: PROC [sgi: SGIndex] = {
      IF sgb[sgi].class = type THEN 
        sgb[sgi].file ← MapFile[sgb[sgi].file]};
    EnumerateSegments[CopySegment]};


  InitFile: PROC [p: REF Map, copiedPages: CARDINAL]
      RETURNS [stream: Stream.Handle, page: CARDINAL] = {
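    -- creates the separate code or symbol output file: stamps its file entry with a bumped
    -- version, writes a minimal BCD header, and returns a stream plus the first page
    -- available for segment data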
    lh: BcdDefs.BCD;
    bcdPages: CARDINAL = PagesForWords[BcdDefs.BCD.SIZE];
    file: File.Capability;
    version: VersionStamp = BumpVersion[
      OSMiscOps.StampToTime[data.objectStamp], (IF p.type=code THEN 1 ELSE 2)];
    BcdUtilDefs.SetFileVersion[p.fti, version];
    p.filehandle ← CIFS.Open[
	ConvertUnsafe.ToRope[p.filename], CIFS.create+CIFS.replace+CIFS.write];
    file ← CIFS.GetFC[p.filehandle];
    file.SetSize[1 + bcdPages + copiedPages];
    stream ← FileStream.Create[file];
    InitHeader[header: @lh, objectVersion: version];
    lh.version ← ftb[p.fti].version;
    stream.PutBlock[[@lh, 0, BcdDefs.BCD.SIZE*bytesPerWord]];
    page ← 1 + bcdPages};

  BumpVersion: PROC [v: VersionStamp, n: CARDINAL] RETURNS [VersionStamp] = {
    v.time ← v.time + n;  RETURN [v]};

  MoveToPageBoundary: PROC [stream: Stream.Handle, page: CARDINAL] = {
    FileStream.SetIndex[stream, Inline.LongMult[page, Environment.bytesPerPage]]};


 -- Code Packing

  PackHandle: TYPE = LONG POINTER TO PackItem;

  PackItem: TYPE = RECORD [
    link: PackHandle,
    newsgi: SGIndex,  -- in the new table
    item: SEQUENCE count: CARDINAL OF MTIndex];

  packing: BOOL;
  phHead, phTail: PackHandle ← NIL;


  MakePackItem: Tree.Scan = {
    -- t is Tree.Null, a list of ids, or a list of lists of ids
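    -- builds one PackItem per list of ids, allocating a new (initially empty) code segment
    -- for the pack and recording the old-to-new segment assignment in sgMap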
    ph: PackHandle ← NIL;
    i, nsons: CARDINAL;

    Item: Tree.Scan = {
      itemId: Symbols.HTIndex;
      WITH t SELECT FROM
	symbol => {
	  itemId ← stb[index].hti;
	  WITH stb[index] SELECT FROM
	    external =>
	      WITH m: map SELECT FROM
		module => {
		  ph.item[i] ← m.mti;
		  SELECT TRUE FROM
		    ~mtb[m.mti].packageable =>
			BcdErrorDefs.ErrorNameBase[
				error, "is packaged and cannot be PACKed"L,
				mtb[m.mti].name, bcd.ssb];
		    (ReadSgMap[mtb[m.mti].code.sgi] # SGNull) =>
			BcdErrorDefs.ErrorNameBase[
				error, "cannot be PACKed twice"L,
				mtb[m.mti].name, bcd.ssb];
		    ENDCASE => SetSgMap[old: mtb[m.mti].code.sgi, new: ph.newsgi]};
		ENDCASE => GOTO cant;
	    ENDCASE => GOTO cant;
	  EXITS
	    cant => 
		BcdErrorDefs.ErrorHti[error, "cannot be PACKed"L, itemId
		  ! BcdErrorDefs.GetSti => {RESUME [errorSti: Symbols.STNull]}]};
        ENDCASE;
      i ← i+1};

    IF t = Tree.Null THEN RETURN;
    WITH t SELECT FROM
      subtree => {
	OPEN tt: tb[index];
	IF tt.name # list THEN Error[];
	data.textIndex ← tt.info;
	IF tt.son[1].tag = subtree THEN {TreeOps.ScanList[t,MakePackItem]; RETURN}};
      ENDCASE;
    nsons ← TreeOps.ListLength[t];
    ph ← (data.zone).NEW[PackItem[nsons] ← [ 
           link: NIL, newsgi: table.Words[sgtype, SGRecord.SIZE], item:]];
    FOR j: CARDINAL IN [0..nsons) DO ph.item[j] ← MTNull ENDLOOP;
    sgb[ph.newsgi] ← [class: code, file: codeMap.fti, base:0, pages:0, extraPages:0];
    i ← 0;  TreeOps.ScanList[t, Item];
    IF phTail = NIL THEN phHead ← phTail ← ph
		    ELSE {phTail.link ← ph; phTail ← ph}};

  FreePackItems: PROC = {
    next: PackHandle ← phHead;
    UNTIL next = NIL DO
      p: PackHandle ← next;
      next ← next.link; (data.zone).FREE[@p]
      ENDLOOP;
    phHead ← phTail ← NIL};


  WriteFromPages: PROC [
      stream: Stream.Handle, pages: FileSegment.Pages, words: CARDINAL] = {
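    -- copies 'words' words from the file segment to the stream, buffering through a
    -- temporary space of at most 16 pages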
    bufferPages: CARDINAL = MIN[pages.span.pages, 16];
    bufferSpace: Space.Handle ← Space.Create[size: bufferPages, parent: Space.virtualMemory];
    base: CARDINAL ← pages.span.base;
    WHILE words # 0 DO
      wordsToTransfer: CARDINAL = MIN[words, bufferPages*Environment.wordsPerPage];
      pagesToTransfer: CARDINAL = PagesForWords[wordsToTransfer];
      bufferSpace.Map[[file: pages.file, base: base]];
      stream.PutBlock[[bufferSpace.LongPointer, 0, wordsToTransfer*bytesPerWord]];
      base ← (base + pagesToTransfer);
      words ← (words - wordsToTransfer);
      bufferSpace.Unmap[];
      ENDLOOP;
    Space.Delete[bufferSpace]};
    
      
  PackCodeSegments: PROC [out: Stream.Handle, startpage: CARDINAL]
      RETURNS [nextpage: CARDINAL] = {
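    -- writes each pack as a single new code segment, concatenating the member modules'
    -- code (adding code links where required, padding to Alignment) and updating their
    -- code descriptors; returns the next free page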
    offset, validlength: CARDINAL;
    oldsgi: SGIndex;
    file: CIFS.OpenFile;
    pages: FileSegment.Pages;

    FixUpOneModule: PROC [mti: MTIndex] RETURNS [BOOL ← FALSE] = {
      OPEN module: mtb[mti];
      length: CARDINAL;
      IF module.code.sgi = oldsgi THEN {
	length ← module.code.offset + module.code.length/2;
	module.code.offset ← module.code.offset + offset;
	module.code.packed ← TRUE;
	IF length > validlength THEN validlength ← length};
      RETURN};

    nextpage ← startpage;
    FOR ph: PackHandle ← phHead, ph.link UNTIL ph = NIL DO
      MoveToPageBoundary[stream: out, page: (nextpage-1)];
      offset ← 0;
      sgb[ph.newsgi].base ← nextpage;
      FOR pi: CARDINAL IN [0..ph.count) DO {
	mti: MTIndex = ph.item[pi];
	IF mtb[mti].linkLoc = code AND ~mtb[mti].code.linkspace THEN {
	  offset ← (AddLinksToCodeSegment[out, mti, offset, TRUE] + offset);
	  GOTO ignore};
        oldsgi ← mtb[mti].code.sgi;
        [file, pages] ← SegmentForOldCodeSgi[oldsgi];
	IF file = nullFile THEN GOTO ignore;
	IF (offset MOD Alignment) # 0 THEN
	  FOR i: CARDINAL IN [(offset MOD Alignment)..Alignment) DO
	    out.PutWord[0];  offset ← offset + 1 
	    ENDLOOP;
	validlength ← 0;  EnumerateModules[FixUpOneModule];
        WriteFromPages[stream: out, pages: pages, words: validlength];
        offset ← offset + validlength;
        CIFS.Close[file];
	EXITS
	  ignore => NULL};
        ENDLOOP;
      sgb[ph.newsgi].pages ← PagesForWords[offset]; 
      nextpage ← nextpage + sgb[ph.newsgi].pages;
      ENDLOOP;
    RETURN};

  SegmentForOldCodeSgi: PROC [sgi: SGIndex]
      RETURNS [f: CIFS.OpenFile ← nullFile, s: FileSegment.Pages ← nullPages] = {
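    -- locates the file and pages of an old code segment; returns nullFile if the segment was
    -- already copied, has no file, cannot be opened, or has the wrong version (open and
    -- version failures also invalidate the output header); the segment is then marked copied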
    OPEN seg: bcd.sgb[sgi];
    IF Copied[sgi] OR seg.file = FTNull THEN RETURN;
    f ← FileForFti[seg.file];
    IF f = nullFile THEN 
      BcdErrorDefs.ErrorNameBase[
	class: error, s: "could not be opened to copy code"L, 
	name: bcd.ftb[seg.file].name, base: bcd.ssb]
    ELSE {
      s ← [
	file: CIFS.GetFC[f],
	span: [base: seg.base, pages: (seg.pages + seg.extraPages)]];
      IF WrongOldSegVersion[s, bcd.ftb[seg.file].version] THEN {
	BcdErrorDefs.ErrorNameBase[
	  class: error, s: "on disk has an incorrect version"L,
      	  name: bcd.ftb[seg.file].name, base: bcd.ssb];
        CIFS.Close[f];
	f ← nullFile;  s ← nullPages}};
    IF s.file = File.nullCapability THEN header.versionIdent ← 0;
    SetCopied[sgi]};

  WrongOldSegVersion: PROC [s: FileSegment.Pages, version: BcdDefs.VersionStamp] 
        RETURNS [reply: BOOL] = {
    h: BcdOps.BcdBase;
    headerSpace: Space.Handle ← Space.Create[size: 1, parent: Space.virtualMemory];
    headerSpace.Map[[file: s.file, base: 1]];
    h ← headerSpace.LongPointer;
    reply ← (h.version # version);
    Space.Delete[headerSpace];
    RETURN [reply]};


 -- Segment Mapping

  SGMap: TYPE = RECORD [SEQUENCE length: CARDINAL OF SGIndex];
  CopyMap: TYPE = RECORD [SEQUENCE length: CARDINAL OF BOOL];
  
  sgMap:   LONG POINTER TO SGMap ← NIL;
  copyMap: LONG POINTER TO CopyMap ← NIL;

  IndexForSgi: PROC [sgi: SGIndex] RETURNS [CARDINAL] = INLINE {
    RETURN [LOOPHOLE[sgi,CARDINAL]/SGRecord.SIZE]};

  SgiForIndex: PROC [i: CARDINAL] RETURNS [SGIndex] = INLINE {
    RETURN [SGIndex.FIRST + i*SGRecord.SIZE]};

  InitCopyMap: PROC [nsgis: CARDINAL] = {
    copyMap ← (data.zone).NEW[CopyMap[nsgis]];
    FOR i: CARDINAL IN [0..nsgis) DO copyMap[i] ← FALSE ENDLOOP};

  FreeCopyMap: PROC = {
    IF copyMap # NIL THEN (data.zone).FREE[@copyMap]};

  SetCopied: PROC [sgi: SGIndex] = {
    copyMap[IndexForSgi[sgi]] ← TRUE};

  Copied: PROC [sgi: SGIndex] RETURNS [BOOL] = {
    RETURN [copyMap[IndexForSgi[sgi]]]};


  InitSgMap: PROC [nsgis: CARDINAL] = {
    sgMap ← (data.zone).NEW[SGMap[nsgis]];
    FOR i: CARDINAL IN [0..nsgis) DO sgMap[i] ← BcdDefs.SGNull ENDLOOP};

  FreeSgMap: PROC = {IF sgMap # NIL THEN (data.zone).FREE[@sgMap]};

  SetSgMap: PROC [old, new: SGIndex] = {
    IF (packing AND old # SGNull) THEN sgMap[IndexForSgi[old]] ← new};

  ReadSgMap: PROC [old: SGIndex] RETURNS [SGIndex] = {
    RETURN [IF (~packing OR old = SGNull) THEN old ELSE sgMap[IndexForSgi[old]]]};


  FillInSgMap: PROC = {
    -- called only when packing (i.e. packing requested AND copyCode = TRUE)
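    -- allocates a new segment record for every old segment not claimed by a pack, copying
    -- the old record and retargeting its file (code segments to the code file; symbol
    -- segments to the symbol file or to the mapped original file)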
    FOR i: CARDINAL IN [0..sgMap.length) DO
      IF sgMap[i] = SGNull THEN {
	oldsgi: SGIndex = SgiForIndex[i];
	newsgi: SGIndex = table.Words[sgtype, SGRecord.SIZE];
	sgb[newsgi] ← bcd.sgb[oldsgi];
	sgb[newsgi].file ←
	  (IF sgb[newsgi].class = symbols THEN 
             (IF data.copySymbols THEN symbolMap.fti
	      ELSE MapFile[sgb[newsgi].file])
	   ELSE codeMap.fti);
	sgMap[i] ← newsgi};
      ENDLOOP};

  FixAllSgis: PROC = {
    -- replace all sgis with ReadSgMap[sgi]

    FixModule: PROC [mti: MTIndex] RETURNS [BOOL ← FALSE] = {
      OPEN m: mtb[mti];
      m.code.sgi ← ReadSgMap[m.code.sgi];
      m.sseg ← ReadSgMap[m.sseg]};

    FixSpace: PROC [spi: SPIndex] RETURNS [BOOL ← FALSE] = {
      OPEN sp: spb[spi];
      sp.seg ← ReadSgMap[sp.seg]};

    EnumerateModules[FixModule];
    EnumerateSpaces[FixSpace]};


 -- Code Links

  LinkCount: PROC [mti: MTIndex] RETURNS [CARDINAL] = INLINE {
    RETURN [WITH m: mtb[mti] SELECT FROM
      direct => m.length,
      indirect => IF m.links = LFNull THEN 0 ELSE lfb[m.links].length,
      multiple => IF m.links = LFNull THEN 0 ELSE lfb[m.links].length,
      ENDCASE => ERROR]};
      
  AlignOffset: PROC [offset: CARDINAL] RETURNS [CARDINAL] = INLINE {
    RETURN [((offset + (Alignment-1))/Alignment)*Alignment]};
    
    
  AddLinksToCodeSegment: PROC [
        stream: Stream.Handle, mti: MTIndex, offset: CARDINAL, packed: BOOL]
      RETURNS [CARDINAL] = {
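    -- writes one module's code preceded by zero-filled link space (with a one-word prefix
    -- when links are present, padded to Alignment), fixing the code descriptors of all
    -- modules sharing the segment; returns the number of words written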
    sgi: SGIndex = mtb[mti].code.sgi;
    codeLength: CARDINAL = mtb[mti].code.length/2;
    linkSpace: CARDINAL;
    f: CIFS.OpenFile;
    s: FileSegment.Pages;
    prefixWords: CARDINAL ← 0;

    FixOffset: PROC [mti: MTIndex] RETURNS [BOOL ← FALSE] = {
      OPEN c: mtb[mti].code;
      IF c.sgi = sgi THEN { 
        c.linkspace ← TRUE; c.offset ← c.offset+offset; c.packed ← packed}};

    [f, s] ← SegmentForOldCodeSgi[sgi];
    IF f = nullFile THEN RETURN [0];
    linkSpace ← LinkCount[mti];
    IF offset = 0 AND linkSpace # 0 THEN {
      prefixWords ← 1;
      stream.PutWord[linkSpace + Alignment - (linkSpace MOD Alignment)];
      offset ← offset+1};
    IF (offset+linkSpace) MOD Alignment # 0 THEN
      linkSpace ← linkSpace + Alignment - ((offset+linkSpace) MOD Alignment);
    offset ← offset + linkSpace;
    EnumerateModules[FixOffset];
    FOR i: CARDINAL IN [0..linkSpace) DO stream.PutWord[0] ENDLOOP;
    WriteFromPages[stream: stream, pages: s, words: codeLength];
    CIFS.Close[f];
    RETURN [prefixWords + linkSpace + codeLength]};


 -- code and symbol copying
 
  EstimateCopiedPages: PROC RETURNS [codePages, symbolPages: CARDINAL ← 0] = {
    -- estimates ignore possible packing of code
    packaged: BOOL ← FALSE;
    
    AddModule: PROC [mti: MTIndex] RETURNS [BOOL ← FALSE] = {
      OPEN m: mtb[mti];
      IF data.copyCode AND m.code.sgi # SGNull THEN {
        OPEN seg: bcd.sgb[m.code.sgi];
        IF ~m.packageable THEN packaged ← TRUE
        ELSE {
          IF m.linkLoc = code AND ~m.code.linkspace THEN {
	    nLinks: CARDINAL = LinkCount[mti];
	    offset: CARDINAL = AlignOffset[IF nLinks=0 THEN 0 ELSE 1+nLinks];
	    codePages ← codePages + PagesForWords[offset + m.code.length/2]}
	  ELSE codePages ← codePages + seg.pages;
	codePages ← codePages + seg.extraPages}};
      IF data.copySymbols AND m.sseg # SGNull THEN {
        OPEN seg: bcd.sgb[m.sseg];
        symbolPages ← symbolPages + seg.pages + seg.extraPages};
      RETURN};
      
    AddSegment: PROC [oldSgi: SGIndex] = {
      OPEN seg: bcd.sgb[oldSgi];
      IF seg.class = code THEN {
        package: BOOL ← FALSE;
        
        TestModule: PROC [mti: MTIndex] RETURNS [BOOL] = {
          OPEN m: mtb[mti];
          IF ~m.packageable AND m.code.sgi = oldSgi THEN package ← TRUE;
          RETURN [m.code.sgi = oldSgi]};
          
        EnumerateModules[TestModule];
        IF package THEN codePages ← codePages + seg.pages + seg.extraPages}};
        
    IF data.copyCode OR data.copySymbols THEN
      EnumerateModules[AddModule];
    IF data.copyCode AND packaged THEN
      EnumerateOldSegments[AddSegment];
    RETURN};
    
    
  MoveCodeSegments: PROC [copiedPages: CARDINAL] = {
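    -- copies code into the output BCD (when codeMap.fti = FTSelf) or into the separate code
    -- file: packed segments first, then segments rewritten with code links, then the
    -- remaining old segments verbatim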
    stream: Stream.Handle;
    nextPage: CARDINAL;

    AddLinks: PROC [mti: MTIndex] RETURNS [BOOL ← FALSE] = {
      OPEN m: mtb[mti];
      wordsWritten, pagesWritten: CARDINAL;
      newSgi: SGIndex;
--      IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
      IF m.linkLoc = code AND ~m.code.linkspace AND m.packageable THEN {
	IF m.code.packed THEN BcdErrorDefs.ErrorName[
	  error, "was previously PACKed and can not now have code links added"L, m.name]
	ELSE {
  	  MoveToPageBoundary[stream: stream, page: (nextPage-1)];
          wordsWritten ← AddLinksToCodeSegment[
  	    stream: stream, mti: mti, offset: 0, packed: FALSE];
          pagesWritten ← PagesForWords[wordsWritten];
          newSgi ← ReadSgMap[m.code.sgi];
          sgb[newSgi].file ← codeMap.fti;
          sgb[newSgi].base ← nextPage;
          sgb[newSgi].pages ← pagesWritten;
          nextPage ← nextPage + pagesWritten}}};

    MoveOne: PROC [oldSgi: SGIndex] = { 
      OPEN seg: bcd.sgb[oldSgi];
--      IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
      IF seg.class = code THEN {
	f: CIFS.OpenFile;
	s: FileSegment.Pages;
        [f, s] ← SegmentForOldCodeSgi[oldSgi];
	IF f # nullFile THEN {
	  segPages: CARDINAL = s.span.pages;
	  newSgi: SGIndex = ReadSgMap[oldSgi];
	  sgb[newSgi].file ← codeMap.fti;
	  sgb[newSgi].base ← nextPage;
	  MoveToPageBoundary[stream: stream, page: (nextPage-1)];
	  WriteFromPages[
	    stream: stream, pages: s, 
            words: (segPages * Environment.wordsPerPage)];
          nextPage ← nextPage + segPages;
	  CIFS.Close[f]}}};

    IF codeMap.fti = FTSelf THEN {stream ← bcdStream; nextPage ← nextBcdPage}
    ELSE [stream, nextPage] ← InitFile[codeMap, copiedPages];
    nextPage ← PackCodeSegments[stream, nextPage];
    EnumerateModules[AddLinks];
    EnumerateOldSegments[MoveOne];
    IF codeMap.fti = FTSelf THEN nextBcdPage ← nextPage
    ELSE Stream.Delete[stream]};


  EnterMissingSymbolFiles: PROC = {

    CheckOneSymbolsFileSeg: PROC [oldSgi: SGIndex] = { 
      OPEN seg: bcd.sgb[oldSgi];
      IF (seg.class = symbols) AND ~Copied[oldSgi] AND (seg.file # FTNull) THEN
	-- ensure that a file entry exists for this file
	[] ← MapFile[bcd.sgb[oldSgi].file]};

    EnumerateOldSegments[CheckOneSymbolsFileSeg]};

  MoveSymbolSegments: PROC [copiedPages: CARDINAL] = {
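    -- copies symbol segments into the output BCD (when symbolMap.fti = FTSelf) or into the
    -- separate symbol file; segments whose files cannot be opened keep mapped references
    -- to their original files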
    stream: Stream.Handle;
    nextPage: CARDINAL;

    MoveOne: PROC [oldSgi: SGIndex] = { 
      OPEN seg: bcd.sgb[oldSgi];
      f: CIFS.OpenFile ← nullFile;
      newSgi: SGIndex;
--      IF ExecOps.CheckForAbort[] THEN ERROR UserAbort;
      IF (seg.class # symbols) OR Copied[oldSgi] OR (seg.file = FTNull) THEN RETURN;
      newSgi ← ReadSgMap[oldSgi];
      f ← FileForFti[seg.file];
      IF f = nullFile THEN {
        BcdErrorDefs.ErrorNameBase[
	  class: warning, s: "could not be opened to copy symbols"L, 
	  name: bcd.ftb[seg.file].name, base: bcd.ssb];
        sgb[newSgi] ← bcd.sgb[oldSgi];
        sgb[newSgi].file ← MapFile[bcd.sgb[oldSgi].file]}
      ELSE {
        s: FileSegment.Pages = [
	  file: CIFS.GetFC[f],
	  span: [base: seg.base, pages: (seg.pages + seg.extraPages)]];
        IF WrongOldSegVersion[s, bcd.ftb[seg.file].version] THEN {
	  BcdErrorDefs.ErrorNameBase[
	    class: error, s: "on disk has incorrect version"L,
      	    name: bcd.ftb[seg.file].name, base: bcd.ssb];
          header.versionIdent ← 0}
        ELSE {
	  segPages: CARDINAL = s.span.pages;
	  sgb[newSgi].file ← symbolMap.fti;
          sgb[newSgi].base ← nextPage;
          MoveToPageBoundary[stream: stream, page: (nextPage-1)];
          WriteFromPages[
            stream: stream, pages: s, 
            words: (segPages * Environment.wordsPerPage)];
          nextPage ← nextPage + segPages};
	CIFS.Close[f]};
      SetCopied[oldSgi]};

    IF symbolMap.fti = FTSelf THEN {stream ← bcdStream; nextPage ← nextBcdPage}
    ELSE [stream, nextPage] ← InitFile[symbolMap, copiedPages];
    EnumerateOldSegments[MoveOne];
    IF symbolMap.fti = FTSelf THEN nextBcdPage ← nextPage
    ELSE Stream.Delete[stream]};


  FileForFti: PROC [oldFti: BcdDefs.FTIndex] RETURNS [f: CIFS.OpenFile] = {
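    -- opens, for reading, the file named by the saved file-table entry; returns nullFile if
    -- it cannot be opened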
    name: BcdDefs.NameRecord = bcd.ftb[oldFti].name;
    ssd: Strings.SubStringDescriptor ← [
      base: @bcd.ssb.string, offset: name, length: bcd.ssb.size[name]];
    nameStr: Strings.String ← [100];
    f ← nullFile;
    NormalizeFileName[in: @ssd, out: nameStr];
    f ← CIFS.Open[ConvertUnsafe.ToRope[nameStr], CIFS.read
	  ! CIFS.Error => TRUSTED {CONTINUE}];
    RETURN};

  NormalizeFileName: PROC [in: Strings.SubString, out: Strings.String] = {
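    -- lower-cases the name and appends a ".bcd" extension if the name contains no dot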
    char: CHAR;
    dot: BOOL ← FALSE;
    out.length ← 0;
    FOR i: CARDINAL IN [in.offset .. in.offset+in.length) DO
      SELECT (char ← in.base[i]) FROM
	IN ['A..'Z] =>  char ← char + ('a-'A);
	'. =>  dot ← TRUE;
	ENDCASE;
      Strings.AppendChar[out, char];
      ENDLOOP;
    IF ~dot THEN Strings.AppendString[out, ".bcd"L]};
    
  -- Bcd Output Routines

  bcdStream: Stream.Handle;
  nextBcdPage: CARDINAL;

  WriteSubTable: PROC [selector: Table.Selector] = {
    base: Table.Base;
    size: CARDINAL;
    [base, size] ← table.Bounds[selector];
    bcdStream.PutBlock[[base, 0, size*bytesPerWord]]};

  TableOut: PROC = { 
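    -- lays out and writes the output BCD: fills in the header's section offsets and limits,
    -- writes the literal segment, copies code and symbols as requested, then writes the
    -- header followed by each sub-table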
    d, s: CARDINAL;
    bcdPages, codePages, symbolPages: CARDINAL;
    basePages: CARDINAL;
    rtPageCount: CARDINAL;
    saveNextPage: CARDINAL;
    saveIndex: FileStream.FileByteIndex;
    rtPageCount ← PagesForWords[BcdLiterals.LitSegSize[]];
      BEGIN OPEN header;
      IF firstdummy = 0 THEN firstdummy ← BcdUtilDefs.GetGfi[0];
      d ← BCD.SIZE;
      ssOffset ← d;  d ← d + (ssLimit ← table.Bounds[sstype].size);
      ctOffset ← d;  d ← d + (s ← table.Bounds[cttype].size);
      ctLimit ← LOOPHOLE[s];
      mtOffset ← d;  d ← d + (s ← table.Bounds[mttype].size);  
      mtLimit ← LOOPHOLE[s];  
      impOffset ← d;  d ← d + (s ← table.Bounds[imptype].size);
      impLimit ← LOOPHOLE[s];
      expOffset ← d;  d ← d + (s ← table.Bounds[exptype].size);
      expLimit ← LOOPHOLE[s];
      evOffset ← d;  d ← d + (s ← table.Bounds[evtype].size);
      evLimit ← LOOPHOLE[s, EVIndex];
      sgOffset ← d;  d ← d + (s ← table.Bounds[sgtype].size);
      sgLimit ← LOOPHOLE[s];
      ftOffset ← d;  d ← d + (s ← table.Bounds[fttype].size);
      ftLimit ← LOOPHOLE[s];
      ntOffset ← d;  d ← d + (s ← table.Bounds[nttype].size);
      ntLimit ← LOOPHOLE[s];
      typOffset ← d;  d ← d + (s ← table.Bounds[typtype].size);
      typLimit ← LOOPHOLE[s];
      tmOffset ← d;  d ← d + (s ← table.Bounds[tmtype].size);
      tmLimit ← LOOPHOLE[s];
      spOffset ← d;  d ← d + (s ← table.Bounds[sptype].size);
      spLimit ← LOOPHOLE[s];
      fpOffset ← d;  d ← d + (s ← table.Bounds[fptype].size);
      fpLimit ← LOOPHOLE[s];
      lfOffset ← d;  d ← d + (s ← table.Bounds[lftype].size);
      lfLimit ← LOOPHOLE[s];
      rfOffset ← d;  d ← d + (s ← table.Bounds[rftype].size);
      rfLimit ← LOOPHOLE[s];
      tfOffset ← d;  d ← d + (s ← table.Bounds[tftype].size);
      tfLimit ← LOOPHOLE[s];
      basePages ← PagesForWords[d];
      rtPages ← [relPageBase: basePages, pages: rtPageCount];
      extended ← TRUE;
      nPages ← bcdPages ← basePages + rtPageCount;
      END;
    [codePages, symbolPages] ← EstimateCopiedPages[];
    IF data.copyCode AND codeMap.fti = FTSelf THEN
      bcdPages ← bcdPages + codePages;
    IF data.copySymbols AND symbolMap.fti = FTSelf THEN
      bcdPages ← bcdPages + symbolPages;
    OpenOutputFile[1 + bcdPages];
    IF rtPageCount # 0 THEN {
      MoveToPageBoundary[stream: bcdStream, page: basePages];
      IF packing THEN BcdLiterals.UpdateSegments[ReadSgMap];
      BcdLiterals.WriteLiterals[bcdStream];
      saveIndex ← FileStream.GetIndex[bcdStream]};
    saveNextPage ← nextBcdPage ← header.nPages + 1;
    IF data.copyCode THEN MoveCodeSegments[codePages
      ! UserAbort => {GO TO AbortRequested}];
    IF data.copySymbols THEN MoveSymbolSegments[symbolPages
      ! UserAbort => {GO TO AbortRequested}];
    IF packing THEN FixAllSgis[];
    FileStream.SetIndex[bcdStream, 0];
    bcdStream.PutBlock[[header, 0, BCD.SIZE*bytesPerWord]];
    WriteSubTable[sstype];
    WriteSubTable[cttype];
    WriteSubTable[mttype];
    WriteSubTable[imptype];
    WriteSubTable[exptype];
    WriteSubTable[evtype];
    WriteSubTable[sgtype];
    WriteSubTable[fttype];
    WriteSubTable[nttype];
    WriteSubTable[typtype];
    WriteSubTable[tmtype];
    WriteSubTable[sptype];
    WriteSubTable[fptype];
    WriteSubTable[lftype];
    WriteSubTable[rftype];
    WriteSubTable[tftype];
    IF nextBcdPage # saveNextPage THEN
      MoveToPageBoundary[stream: bcdStream, page: (nextBcdPage-1)]
    ELSE IF rtPageCount # 0 THEN FileStream.SetIndex[bcdStream, saveIndex];
    data.nConfigs ← header.nConfigs; data.nModules ← header.nModules;
    data.nImports ← header.nImports; data.nExports ← header.nExports;
    data.nPages ← header.nPages;
    EXITS
      AbortRequested => data.aborted ← TRUE};


  OpenOutputFile: PROC [initialPages: CARDINAL] = INLINE {
    file: File.Capability ← data.outputFile;
    IF file = File.nullCapability THEN ERROR;
    file.SetSize[initialPages];
    bcdStream ← FileStream.Create[file]};

  CloseOutputFile: PROC = INLINE {Stream.Delete[bcdStream]; bcdStream ← NIL};

  }.