-- BcdWrite.mesa: writes the binder's output BCD (binary configuration description) file.
-- Interface imports: each DIRECTORY entry names exactly the items this module uses.
DIRECTORY
  Alloc: TYPE USING [AddNotify, Bounds, DropNotify, Handle, Notifier, Top, Trim, Words],
  Ascii: TYPE USING [Lower],
  Basics: TYPE USING [bytesPerWord, LongMult, RawBytes],
  BcdComData: TYPE USING [
    aborted, binderVersion, codeName, copyCode, copySymbols, literals, nConfigs,
    nExports, nImports, nModules, nPages, objectStamp, op, outputFile, sourceName,
    sourceVersion, symbolName, table, typeExported, textIndex],
  BcdControlDefs: TYPE USING [],
  BcdDefs: TYPE USING [
    BCD, BcdBase, ControlItem, CTIndex, CTRecord, cttype, cxtype, EVIndex, evtype,
    EXPIndex, EXPRecord, exptype, FPIndex, FPRecord, fptype, FTIndex, FTNull,
    FTRecord, FTSelf, fttype, IMPIndex, IMPNull, imptype, LFNull, lftype, Link,
    MTIndex, MTNull, MTRecord, mttype, Namee, NameRecord, NameString, NTIndex,
    NTRecord, nttype, NullName, NullVersion, PackedString, PageSize, rftype,
    SegClass, SGIndex, SGNull, SGRecord, sgtype, SpaceID, SPIndex, SPRecord,
    sptype, sstype, sttype, tftype, tmtype, treetype, typtype, VersionID, VersionStamp],
  BcdErrorDefs: TYPE USING [ErrorHti, ErrorName, ErrorNameBase, GetSti],
  BcdLiterals: TYPE USING [EnterVersionFiles, SegmentSize, UpdateSegments, Write],
  BcdUtilDefs: TYPE USING [
    BcdBases, ContextForTree, EnterExport, EnterFile, EnterImport, EnterName,
    GetGfi, MapName, MergeFile, NameForHti, SetFileVersion, BcdBasePtr],
  ConvertUnsafe: TYPE USING [SubString],
  FileSegment: TYPE USING [Pages, nullPages],
  FS: TYPE USING [Close, Error, nullOpenFile, Open, OpenFile, Read, SetPageCount, StreamFromOpenFile],
  HashOps: TYPE USING [Reset],
  IO: TYPE USING [Close, GetIndex, GetLength, SetIndex, SetLength, STREAM, UnsafePutBlock],
  OSMiscOps: TYPE USING [StampToTime],
  PrincOpsUtils: TYPE USING [LongCopy],
  Process: TYPE USING [CheckForAbort],
  Rope: TYPE USING [Concat, Flatten, FromProc, Length, ROPE],
  Symbols: TYPE USING [CXIndex, HTIndex, htNull, STIndex, stNull],
  Table: TYPE USING [Base, Selector],
  Tree: TYPE USING [Index, Link, Scan, null],
  TreeOps: TYPE USING [GetNode, ListLength, ScanList],
  VM: TYPE USING [bytesPerPage, wordsPerPage, AddressForPageNumber, Allocate, Free, Interval];

BcdWrite: PROGRAM
  IMPORTS
    Alloc, Ascii, Basics, BcdErrorDefs, BcdLiterals, BcdUtilDefs, FS, IO, HashOps,
    OSMiscOps, PrincOpsUtils, Process, Rope, TreeOps, VM, data: BcdComData
  EXPORTS BcdControlDefs = {
  OPEN BcdDefs;

  -- Compile-time assertion: a basic assumption of this module is the equality of the
  -- VM page size and the BCD page size (the subrange is empty unless the equality holds).
  pageSizeCheck: BOOL[TRUE..(VM.wordsPerPage=BcdDefs.PageSize)] = TRUE;

  bytesPerWord: CARDINAL = Basics.bytesPerWord;
  nullFile: FS.OpenFile = FS.nullOpenFile;
  nullPages: FileSegment.Pages = FileSegment.nullPages;
  Alignment: CARDINAL = 4; -- Code Segments must start at 0 MOD Alignment

  BcdWriteError: PUBLIC ERROR = CODE;
  Error: PROC = {ERROR BcdWriteError}; -- uniform way to raise the module's error
  UserAbort: ERROR = CODE; -- raised on ^DEL during code or symbol copying

  -- The allocator handle for all binder tables, and cached base pointers for each
  -- table region; refreshed by Notifier whenever the allocator relocates them.
  table: Alloc.Handle;
  tb, stb, ctb, mtb, lfb, etb, itb, sgb, tyb, tmb, ftb, ntb, spb, fpb, cxb: Table.Base;
  ssb: BcdDefs.NameString;

  -- Called by the allocator when table bases move; re-caches every base pointer and,
  -- if the saved-table copy in bcd exists, updates the bases it shares with us.
  -- (bcd.sgb is left alone while packing: it then points at a private saved copy.)
  Notifier: Alloc.Notifier = {
    tb _ base[treetype]; stb _ base[sttype]; ctb _ base[cttype]; mtb _ base[mttype];
    lfb _ base[lftype]; tyb _ base[typtype]; tmb _ base[tmtype]; etb _ base[exptype];
    itb _ base[imptype]; sgb _ base[sgtype]; ftb _ base[fttype]; spb _ base[sptype];
    fpb _ base[fptype]; ntb _ base[nttype]; ssb _ base[sstype]; cxb _ base[cxtype];
    IF bcd # NIL THEN {
      bcd.ctb _ ctb; bcd.mtb _ mtb;
      IF ~packing THEN bcd.sgb _ sgb;
      bcd.tyb _ tyb; bcd.tmb _ tmb; bcd.spb _ spb; bcd.fpb _ fpb}
    };

  -- inline utilities

  -- Word-wise block copy (argument-order adapter for PrincOpsUtils.LongCopy).
  Copy: PROC[from: LONG POINTER, nwords: CARDINAL, to: LONG POINTER] = PrincOpsUtils.LongCopy;

  -- Zero l words starting at p by seeding the first word and copying it forward.
  Zero: PROC[p: LONG POINTER, l: CARDINAL] = INLINE {
    IF l # 0 THEN {p^ _ 0; PrincOpsUtils.LongCopy[from: p, to: (p+1), nwords: (l-1)]}};

  -- Pages needed to hold nWords (rounding up).
  PagesForWords: PROC[nWords: CARDINAL] RETURNS[CARDINAL] = INLINE {
    RETURN[(nWords + VM.wordsPerPage-1)/VM.wordsPerPage]};

  -- Write a single word to the stream as raw bytes.
  PutWord: PROC[stream: IO.STREAM, word: WORD] = {
    stream.UnsafePutBlock[[
      LOOPHOLE[(@word).LONG, LONG POINTER TO Basics.RawBytes], 0,
      WORD.SIZE*Basics.bytesPerWord]];
    };

  -- Saved copies of selected tables plus the output header, built in Initialize.
  bcd: BcdUtilDefs.BcdBasePtr _ NIL;
  header: BcdDefs.BcdBase; -- points to Bcd header and saved tables
  headerInterval: VM.Interval; -- VM backing the header + saved tables; freed in Finalize

  -- Main entry point: drives the whole BCD-writing pass over the binder's tree.
  -- Copies/maps all tables, optionally packs code and copies code/symbol segments,
  -- then writes the output file.
  WriteBcd: PUBLIC PROC[root: Tree.Link] = {
    saveIndex: CARDINAL = data.textIndex;
    node, subNode: Tree.Index;
    table _ data.table; table.AddNotify[Notifier];
    node _ TreeOps.GetNode[root];
    -- pack only when a PACK list is present AND code is being copied
    packing _ (tb[node].son[2] # Tree.null AND data.copyCode);
    Initialize[];
    IF packing THEN {
      MakePackItem[tb[node].son[2]];
      data.textIndex _ saveIndex;
      FillInSgMap[]};
    CopyConfigs[]; CopyModules[]; CopyTypes[]; CopySpaces[]; CopyFramePacks[];
    subNode _ TreeOps.GetNode[tb[node].son[3]];
    data.textIndex _ tb[subNode].info;
    TreeOps.ScanList[tb[subNode].son[1], CopyImport];
    TreeOps.ScanList[tb[subNode].son[2], CopyExport];
    IF tb[subNode].attrs[$exportsALL] THEN
      ExportCx[BcdUtilDefs.ContextForTree[tb[subNode].son[4]]];
    IF data.copySymbols THEN EnterMissingSymbolFiles[];
    (data.literals).EnterVersionFiles[bcd.ftb, FtiForIndex[fileMap.length], MapFile];
    TableOut[];
    CloseOutputFile[];
    Finalize[];
    data.textIndex _ saveIndex;
    table.DropNotify[Notifier]; table _ NIL};

  -- BCD (re)construction

  -- Allocates the header area, saves the export/import/file/name/string (and, when
  -- packing, segment) tables into it, resets the live copies, and fills in header
  -- fields and saved base pointers.
  Initialize: PROC = {
    impSize, expSize, sgSize, fSize, nSize, ssSize: CARDINAL;
    nSgis: CARDINAL;
    b: Table.Base;
    desc: ConvertUnsafe.SubString;
    desc.base _ LOOPHOLE[(data.sourceName).Flatten[]];
    desc.offset _ 0;
    desc.length _ data.sourceName.Length[];
    IF data.copyCode OR data.copySymbols THEN InitCodeSymbolCopy[];
    impSize _ table.Bounds[imptype].size;
    expSize _ table.Bounds[exptype].size;
    sgSize _ table.Bounds[sgtype].size;
    nSgis _ sgSize/SGRecord.SIZE;
    IF ~packing THEN sgSize _ 0; -- segment table is only saved when packing
    fSize _ table.Bounds[fttype].size;
    nSize _ table.Bounds[nttype].size;
    ssSize _ table.Bounds[sstype].size;
    bcd _ NEW[BcdUtilDefs.BcdBases];
    fileMap _ NEW[FileMap[fSize/FTRecord.SIZE]];
    FOR i: CARDINAL IN [0..fileMap.length) DO fileMap[i] _ FTNull ENDLOOP;
    headerInterval _ VM.Allocate[PagesForWords[
      BCD.SIZE + impSize + expSize + sgSize + fSize + nSize + ssSize]];
    header _ VM.AddressForPageNumber[headerInterval.page];
    b _ (LOOPHOLE[header, Table.Base] + BCD.SIZE);
    -- copy each table into the saved area, then trim the live table to empty
    Copy[to: (bcd.etb _ b), from: etb, nwords: expSize]; b _ b + expSize;
    table.Trim[exptype,0];
    Copy[to: (bcd.itb _ b), from: itb, nwords: impSize]; b _ b + impSize;
    table.Trim[imptype,0];
    Copy[to: (bcd.ftb _ b), from: ftb, nwords: fSize]; b _ b + fSize;
    table.Trim[fttype,0];
    Copy[to: (bcd.ntb _ b), from: ntb, nwords: nSize]; b _ b + nSize;
    table.Trim[nttype,0];
    Copy[to: (bcd.ssb _ b), from: ssb, nwords: ssSize]; b _ b + ssSize;
    HashOps.Reset[];
    IF packing THEN { -- save old segment table in heap
      Copy[to: (bcd.sgb _ b), from: sgb, nwords: sgSize]; b _ b + sgSize;
      table.Trim[sgtype,0]};
    InitHeader[
      header: header,
      objectVersion: OSMiscOps.StampToTime[data.objectStamp],
      source: BcdUtilDefs.EnterName[desc],
      sourceVersion: data.sourceVersion];
    bcd.ctb _ table.Bounds[cttype].base;
    bcd.mtb _ table.Bounds[mttype].base;
    bcd.tyb _ table.Bounds[typtype].base;
    bcd.tmb _ table.Bounds[tmtype].base;
    bcd.spb _ table.Bounds[sptype].base;
    bcd.fpb _ table.Bounds[fptype].base;
    IF data.copyCode OR data.copySymbols THEN {MapCodeSymbolFiles[]; InitCopyMap[nSgis]};
    IF packing THEN InitSgMap[nSgis]
    ELSE {
      bcd.sgb _ table.Bounds[sgtype].base;
      IF ~data.copyCode THEN MapSegments[$code];
      IF ~data.copySymbols THEN MapSegments[$symbols]}
    };

  -- Releases everything Initialize and the copy phase allocated.
  Finalize: PROC = {
    IF data.copyCode OR data.copySymbols THEN ReleaseCodeSymbolCopy[];
    fileMap _ NIL; bcd _ NIL;
    headerInterval.Free[];
    FreePackItems[];
    IF packing THEN FreeSgMap[];
    IF data.copyCode OR data.copySymbols THEN FreeCopyMap[]};

  -- Copies the (first) name-table entry for olditem from the saved table into the
  -- new name table under newitem, mapping its name string.
  CopyName: PROC[olditem, newitem: Namee] = {
    newNti: NTIndex = table.Words[nttype, NTRecord.SIZE];
    FOR nti: NTIndex _ NTIndex.FIRST, nti+NTRecord.SIZE DO
      OPEN old: bcd.ntb[nti];
      IF old.item = olditem THEN {
        OPEN new: ntb[newNti];
        new.item _ newitem; new.name _ bcd.MapName[old.name];
        RETURN};
      ENDLOOP
    };

  -- configs are already copied; only map names and files
  CopyConfigs: PROC = {
    cti: CTIndex _ CTIndex.FIRST;
    ctLimit: CTIndex = table.Top[cttype];
    UNTIL cti = ctLimit DO
      header.nConfigs _ header.nConfigs + 1;
      ctb[cti].name _ bcd.MapName[ctb[cti].name];
      ctb[cti].file _ MapFile[ctb[cti].file];
      IF ctb[cti].namedInstance THEN CopyName[[config[cti]], [config[cti]]];
      -- records are variable-length: step over the trailing control items
      cti _ cti + (CTRecord.SIZE + ctb[cti].nControls*ControlItem.SIZE);
      ENDLOOP
    };

  -- modules are already copied; only map names and files
  CopyModules: PROC = {
    MapOne: PROC[mti: MTIndex] RETURNS[BOOL _ FALSE] = {
      OPEN m: mtb[mti];
      header.nModules _ header.nModules + 1;
      m.name _ bcd.MapName[m.name];
      m.file _ MapFile[m.file];
      IF m.namedInstance THEN CopyName[[module[mti]], [module[mti]]]};
    EnumerateModules[MapOne]};

  -- Applies p to each module-table entry until p returns TRUE; steps by the
  -- variant-dependent record size.
  EnumerateModules: PROC[p: PROC[MTIndex] RETURNS[BOOL]] = {
    mti: MTIndex _ MTIndex.FIRST;
    mtLimit: MTIndex = table.Top[mttype];
    UNTIL mti = mtLimit DO
      IF p[mti] THEN EXIT;
      mti _ mti + (WITH m: mtb[mti] SELECT FROM
        direct => MTRecord.direct.SIZE + m.length*Link.SIZE,
        indirect => MTRecord.indirect.SIZE,
        multiple => MTRecord.multiple.SIZE,
        ENDCASE => ERROR);
      ENDLOOP
    };

  CopyTypes: PROC = {}; -- types are already copied, nothing need be done (current typeIds)

  -- spaces are already copied; only map names
  CopySpaces: PROC = {
    MapOne: PROC[spi: SPIndex] RETURNS[BOOL _ FALSE] = {
      FOR i: CARDINAL IN [0..spb[spi].length) DO
        spb[spi].spaces[i].name _ bcd.MapName[spb[spi].spaces[i].name];
        ENDLOOP
      };
    EnumerateSpaces[MapOne]};

  -- Applies p to each space-table entry until p returns TRUE.
  EnumerateSpaces: PROC[p: PROC[SPIndex] RETURNS[BOOL]] = {
    spi: SPIndex _ SPIndex.FIRST;
    spLimit: SPIndex = table.Top[sptype];
    UNTIL spi = spLimit DO
      IF p[spi] THEN EXIT;
      spi _ spi + SPRecord.SIZE + spb[spi].length*SpaceID.SIZE;
      ENDLOOP
    };

  -- framepacks are already copied; only map names
  CopyFramePacks: PROC = {
    MapOne: PROC[fpi: FPIndex] RETURNS[BOOL _ FALSE] = {
      fpb[fpi].name _ bcd.MapName[fpb[fpi].name]};
    EnumerateFramePacks[MapOne]};

  -- Applies p to each framepack-table entry; stops early when p returns TRUE.
  EnumerateFramePacks: PROC[p: PROC[FPIndex] RETURNS[BOOL]] = {
    fpi: FPIndex _ FPIndex.FIRST;
    fpLimit: FPIndex = table.Top[fptype];
    UNTIL fpi = fpLimit DO
      IF p[fpi] THEN RETURN;
      fpi _ fpi + FPRecord.SIZE + fpb[fpi].length*MTIndex.SIZE;
      ENDLOOP};

  -- Tree-scan callback: re-enters one import into the new import table and
  -- accumulates gfi/dummy counts in the header.
  CopyImport: Tree.Scan = {
    OPEN Symbols;
    sti: STIndex _ stNull;
    olditi, iti: IMPIndex;
    WITH t SELECT FROM
      symbol => sti _ index;
      subtree =>
        WITH s1:tb[index].son[1] SELECT FROM
          symbol => sti _ s1.index;
          ENDCASE => Error[];
      ENDCASE => Error[];
    olditi _ stb[sti].impi;
    IF sti = stNull OR olditi = IMPNull THEN RETURN;
    iti _ bcd.EnterImport[olditi, TRUE];
    itb[iti].file _ MapFile[itb[iti].file];
    IF header.firstdummy = 0 THEN header.firstdummy _ itb[iti].gfi;
    header.nImports _ header.nImports + 1;
    header.nDummies _ header.nDummies + itb[iti].ngfi};

  -- Tree-scan callback: re-enters one export (interface or module) into the new
  -- export table, copying its link words from the saved table.
  CopyExport: Tree.Scan = {
    OPEN Symbols;
    sti: STIndex _ stNull;
    hti: HTIndex _ htNull;
    neweti: EXPIndex;
    oldeti: EXPIndex;
    WITH t SELECT FROM
      symbol => sti _ index;
      subtree =>
        WITH s1:tb[index].son[1] SELECT FROM
          symbol => {sti _ s1.index; hti _ stb[sti].hti};
          ENDCASE => Error[];
      ENDCASE => Error[];
    WITH s:stb[sti] SELECT FROM
      external =>
        WITH m:s.map SELECT FROM
          interface => {
            OPEN new: etb[neweti];
            oldeti _ m.expi;
            neweti _ bcd.EnterExport[oldeti, TRUE];
            Copy[from: @bcd.etb[oldeti].links, to: @new.links, nwords: new.size];
            new.file _ MapFile[new.file]};
          module => [] _ NewExportForModule[m.mti, htNull];
          ENDCASE => RETURN;
      ENDCASE => RETURN;
    header.nExports _ header.nExports + 1};

  -- Builds a one-link export record for a module; if a name is supplied, also adds
  -- a name-table entry for it.
  NewExportForModule: PROC[mti: MTIndex, name: Symbols.HTIndex] RETURNS[eti: EXPIndex] = {
    OPEN Symbols;
    eti _ table.Words[exptype, EXPRecord.SIZE+1*Link.SIZE];
    etb[eti] _ [
      name: mtb[mti].name, size: 1, port: $module, namedInstance: name # htNull,
      typeExported: FALSE, file: mtb[mti].file, links: ];
    etb[eti].links[0] _ [variable[vgfi: mtb[mti].gfi, var: 0, vtag: $var]];
    IF name # htNull THEN {
      nti: NTIndex = table.Words[nttype, NTRecord.SIZE];
      ntb[nti] _ [name: BcdUtilDefs.NameForHti[name], item: [module[mti]]]};
    RETURN};

  -- Exports every interface bound in context cx (the EXPORTS ALL case); first makes
  -- sure that the old export is not already present in the new table.
  ExportCx: PROC[cx: Symbols.CXIndex] = {
    OPEN Symbols;
    neweti, oldeti: EXPIndex;
    FOR sti: STIndex _ cxb[cx].link, stb[sti].link UNTIL sti = stNull DO {
      IF ~stb[sti].filename THEN
        WITH s: stb[sti] SELECT FROM
          external =>
            WITH m: s.map SELECT FROM
              interface => {
                OPEN old: bcd.etb[oldeti], new: etb[neweti];
                existingEti: EXPIndex _ EXPIndex.FIRST;
                etLimit: EXPIndex = table.Top[exptype];
                oldeti _ m.expi;
                UNTIL existingEti = etLimit DO
                  IF old = etb[existingEti] THEN GO TO AlreadyExported;
                  existingEti _ existingEti + EXPRecord.SIZE+etb[existingEti].size;
                  ENDLOOP;
                neweti _ bcd.EnterExport[oldeti, TRUE];
                Copy[from: @old.links, to: @new.links, nwords: new.size];
                new.file _ MapFile[new.file];
                header.nExports _ header.nExports + 1};
              ENDCASE;
          ENDCASE;
      EXITS AlreadyExported => NULL};
      ENDLOOP
    };

  -- file mapping

  -- fileMap[i] caches the new-table FTIndex for old file-table index i (FTNull until merged).
  FileMap: TYPE = RECORD[SEQUENCE length: CARDINAL OF FTIndex];
  fileMap: REF FileMap _ NIL;

  IndexForFti: PROC[fti: FTIndex] RETURNS[CARDINAL] = INLINE {
    RETURN[LOOPHOLE[fti,CARDINAL]/FTRecord.SIZE]};

  FtiForIndex: PROC[n: CARDINAL] RETURNS[FTIndex] = INLINE {
    RETURN[FTIndex.FIRST + n*FTRecord.SIZE]};

  -- Maps an old file-table index to its entry in the new file table, merging it on
  -- first use; FTNull and FTSelf pass through unchanged.
  MapFile: PROC[fti: FTIndex] RETURNS[FTIndex] = {
    SELECT fti FROM
      FTNull, FTSelf => RETURN[fti];
      ENDCASE => {
        fileIndex: CARDINAL = IndexForFti[fti];
        IF fileMap[fileIndex] = FTNull THEN fileMap[fileIndex] _ bcd.MergeFile[fti];
        RETURN[fileMap[fileIndex]]}
    };

  -- header processing

  -- Zeroes and fills in the fixed fields of a BCD header.
  InitHeader: PROC[
      header: BcdDefs.BcdBase,
      objectVersion: VersionStamp,
      source: NameRecord _ NullName,
      sourceVersion: VersionStamp _ NullVersion] = {
    Zero[header, BcdDefs.BCD.SIZE];
    header.versionIdent _ BcdDefs.VersionID;
    header.version _ objectVersion;
    header.creator _ data.binderVersion;
    header.definitions _ (data.op = $conc);
    header.typeExported _ data.typeExported;
    header.source _ source;
    header.sourceVersion _ sourceVersion;
    header.repackaged _ table.Bounds[sptype].size # 0 OR table.Bounds[fptype].size # 0;
    header.tableCompiled _ FALSE;
    header.spare1 _ TRUE};

  -- code and symbol copying

  -- Destination descriptors for the copied-code and copied-symbols output files.
  codeMap, symbolMap: REF Map _ NIL;
  Map: TYPE = RECORD[
    fti: FTIndex,
    type: SegClass,
    filename: Rope.ROPE,
    filehandle: FS.OpenFile];

  -- Builds codeMap/symbolMap; a NIL file name means "copy into the BCD itself" (FTSelf).
  InitCodeSymbolCopy: PROC = {
    Setup: PROC[file: Rope.ROPE, type: SegClass] RETURNS[REF Map] = {
      RETURN[NEW[Map _ [
        type: type, filename: file, filehandle: nullFile,
        fti: IF file = NIL THEN FTSelf ELSE BcdUtilDefs.EnterFile[LOOPHOLE[file.Flatten[]]]]]]
      };
    IF data.copyCode THEN codeMap _ Setup[data.codeName, $code];
    IF data.copySymbols THEN symbolMap _ Setup[data.symbolName, $symbols]};

  -- Runs the ftis of the copy destinations through MapFile.
  MapCodeSymbolFiles: PROC = {
    IF data.copyCode THEN codeMap.fti _ MapFile[codeMap.fti];
    IF data.copySymbols THEN symbolMap.fti _ MapFile[symbolMap.fti]};

  -- Closes any open copy-destination files and drops the maps.
  ReleaseCodeSymbolCopy: PROC = {
    IF codeMap # NIL THEN {
      codeMap.filehandle _ FSClose[codeMap.filehandle]; codeMap _ NIL};
    IF symbolMap # NIL THEN {
      symbolMap.filehandle _ FSClose[symbolMap.filehandle]; symbolMap _ NIL};
    };

  -- Applies proc to every entry of the (new) segment table.
  EnumerateSegments: PROC[proc: PROC[SGIndex]] = {
    sgLimit: SGIndex = table.Top[sgtype];
    FOR sgi: SGIndex _ SGIndex.FIRST, sgi + SGRecord.SIZE UNTIL sgi = sgLimit DO
      proc[sgi] ENDLOOP
    };

  -- Applies proc to every OLD segment index; when packing, the old table has been
  -- saved away and is enumerated via the sgMap indices.
  EnumerateOldSegments: PROC[proc: PROC[SGIndex]] = {
    IF ~packing THEN EnumerateSegments[proc]
    ELSE FOR i: NAT IN [0..sgMap.length) DO proc[SgiForIndex[i]] ENDLOOP};

  -- Maps the file of every segment of the given class (used when that class is NOT
  -- being copied).
  MapSegments: PROC[type: SegClass] = {
    CopySegment: PROC[sgi: SGIndex] = {
      IF sgb[sgi].class = type THEN sgb[sgi].file _ MapFile[sgb[sgi].file]};
    EnumerateSegments[CopySegment]};

  -- Creates/opens a copy-destination file, gives it a fresh version stamp, writes a
  -- minimal BCD header, and returns its stream and the first free page.
  InitFile: PROC[p: REF Map, copiedPages: CARDINAL]
      RETURNS[stream: IO.STREAM, page: CARDINAL] = {
    lh: BcdDefs.BCD;
    bcdPages: CARDINAL = PagesForWords[BcdDefs.BCD.SIZE];
    version: VersionStamp = BumpVersion[
      OSMiscOps.StampToTime[data.objectStamp], (IF p.type=$code THEN 1 ELSE 2)];
    BcdUtilDefs.SetFileVersion[p.fti, version];
    p.filehandle _ FS.Open[p.filename];
    p.filehandle.SetPageCount[bcdPages + copiedPages];
    stream _ (p.filehandle).StreamFromOpenFile[$write];
    InitHeader[header: @lh, objectVersion: version];
    lh.version _ ftb[p.fti].version;
    stream.UnsafePutBlock[[
      LOOPHOLE[(@lh).LONG, LONG POINTER TO Basics.RawBytes], 0,
      BcdDefs.BCD.SIZE*bytesPerWord]];
    page _ bcdPages + 1};

  -- Returns v with its time field advanced by n (distinct stamps for code vs symbols).
  BumpVersion: PROC[v: VersionStamp, n: CARDINAL] RETURNS[VersionStamp] = {
    v.time _ v.time + n; RETURN[v]};

  -- Moves the index to the given page boundary; if the size of the file will not
  -- permit this, extend the file via SetLength to allow SetIndex to succeed.
  MoveToPageBoundary: PROC[stream: IO.STREAM, page: CARDINAL] = {
    pos: INT = Basics.LongMult[page, VM.bytesPerPage];
    IF pos > stream.GetLength THEN stream.SetLength[pos];
    stream.SetIndex[pos]};

  -- Code Packing

  -- One PACK group: the modules to be merged into a single new code segment.
  PackHandle: TYPE = REF PackItem;
  PackItem: TYPE = RECORD[
    link: PackHandle,
    newsgi: SGIndex, -- in the new table
    item: SEQUENCE count: CARDINAL OF MTIndex];
  packing: BOOL; -- TRUE iff a PACK list was given and code is being copied
  phHead, phTail: PackHandle _ NIL; -- singly-linked list of pack groups

  -- Tree-scan callback building PackItems from the PACK clause.
  -- t is Tree.null, a list of ids, or a list of lists of ids.
  MakePackItem: Tree.Scan = {
    ph: PackHandle _ NIL;
    i, nsons: CARDINAL;
    Item: Tree.Scan = {
      itemId: Symbols.HTIndex;
      WITH t SELECT FROM
        symbol => {
          itemId _ stb[index].hti;
          WITH stb[index] SELECT FROM
            external =>
              WITH m: map SELECT FROM
                module => {
                  ph.item[i] _ m.mti;
                  SELECT TRUE FROM
                    ~mtb[m.mti].packageable =>
                      BcdErrorDefs.ErrorNameBase[
                        $error, "is packaged and cannot be PACKed"L,
                        mtb[m.mti].name, bcd.ssb];
                    (ReadSgMap[mtb[m.mti].code.sgi] # SGNull) =>
                      BcdErrorDefs.ErrorNameBase[
                        $error, "cannot be PACKed twice"L,
                        mtb[m.mti].name, bcd.ssb];
                    ENDCASE => SetSgMap[old: mtb[m.mti].code.sgi, new: ph.newsgi]};
                ENDCASE => GOTO cant;
            ENDCASE => GOTO cant;
          EXITS cant =>
            BcdErrorDefs.ErrorHti[$error, "cannot be PACKed"L, itemId
              ! BcdErrorDefs.GetSti => {RESUME [errorSti: Symbols.stNull]}]};
        ENDCASE;
      i _ i+1};
    IF t = Tree.null THEN RETURN;
    WITH t SELECT FROM
      subtree => {
        OPEN tt: tb[index];
        IF tt.name # $list THEN Error[];
        data.textIndex _ tt.info;
        -- a list of lists: recurse over each sublist
        IF tt.son[1].tag = $subtree THEN {TreeOps.ScanList[t,MakePackItem]; RETURN}};
      ENDCASE;
    nsons _ TreeOps.ListLength[t];
    ph _ NEW[PackItem[nsons] _ [
      link: NIL, newsgi: table.Words[sgtype, SGRecord.SIZE], item:]];
    FOR j: CARDINAL IN [0..nsons) DO ph.item[j] _ MTNull ENDLOOP;
    sgb[ph.newsgi] _ [class: $code, file: codeMap.fti, base:0, pages:0, extraPages:0];
    i _ 0;
    TreeOps.ScanList[t, Item];
    IF phTail = NIL THEN phHead _ phTail _ ph ELSE {phTail.link _ ph; phTail _ ph}};

  FreePackItems: PROC = {phHead _ phTail _ NIL};

  -- Copies `words` words of the given file pages to the stream through a bounded
  -- VM buffer (at most 16 pages at a time); the buffer is freed even on UNWIND.
  WriteFromPages: PROC[
      stream: IO.STREAM, pages: FileSegment.Pages, words: CARDINAL] = {
    bufferPages: CARDINAL = MIN[pages.span.pages, 16];
    bufferInterval: VM.Interval _ VM.Allocate[bufferPages];
    { ENABLE UNWIND => bufferInterval.Free[];
      base: CARDINAL _ pages.span.base;
      pointer: LONG POINTER _ VM.AddressForPageNumber[bufferInterval.page];
      WHILE words # 0 DO
        wordsToTransfer: CARDINAL = MIN[words, bufferPages*VM.wordsPerPage];
        pagesToTransfer: CARDINAL = PagesForWords[wordsToTransfer];
        FS.Read[file: [pages.file], from: base, nPages: pagesToTransfer, to: pointer];
        stream.UnsafePutBlock[[pointer, 0, wordsToTransfer*bytesPerWord]];
        base _ (base + pagesToTransfer);
        words _ (words - wordsToTransfer);
        ENDLOOP;
      };
    bufferInterval.Free[]};

  -- Writes each pack group's modules contiguously into one new code segment,
  -- padding to Alignment between modules and fixing up module code offsets.
  PackCodeSegments: PROC[out: IO.STREAM, startpage: CARDINAL]
      RETURNS[nextpage: CARDINAL] = {
    offset, validlength: CARDINAL;
    oldsgi: SGIndex;
    file: FS.OpenFile;
    pages: FileSegment.Pages;
    -- Rebase every module that lived in oldsgi and track the valid data length.
    FixUpOneModule: PROC[mti: MTIndex] RETURNS[BOOL _ FALSE] = {
      OPEN module: mtb[mti];
      length: CARDINAL;
      IF module.code.sgi = oldsgi THEN {
        length _ module.code.offset + module.code.length/2;
        module.code.offset _ module.code.offset + offset;
        module.code.packed _ TRUE;
        IF length > validlength THEN validlength _ length};
      RETURN};
    nextpage _ startpage;
    FOR ph: PackHandle _ phHead, ph.link UNTIL ph = NIL DO
      MoveToPageBoundary[stream: out, page: (nextpage-1)];
      offset _ 0;
      sgb[ph.newsgi].base _ nextpage;
      FOR pi: CARDINAL IN [0..ph.count) DO {
        mti: MTIndex = ph.item[pi];
        IF mtb[mti].linkLoc = $code AND ~mtb[mti].code.linkspace THEN {
          -- module wants code links but has no linkspace: emit links inline
          offset _ (AddLinksToCodeSegment[out, mti, offset, TRUE] + offset);
          GOTO ignore};
        oldsgi _ mtb[mti].code.sgi;
        [file, pages] _ SegmentForOldCodeSgi[oldsgi];
        IF file = nullFile THEN GOTO ignore;
        IF (offset MOD Alignment) # 0 THEN
          FOR i: CARDINAL IN [(offset MOD Alignment)..Alignment) DO
            PutWord[out, 0]; offset _ offset + 1 ENDLOOP;
        validlength _ 0;
        EnumerateModules[FixUpOneModule];
        WriteFromPages[stream: out, pages: pages, words: validlength];
        offset _ offset + validlength;
        file _ FSClose[file];
        EXITS ignore => NULL};
        ENDLOOP;
      sgb[ph.newsgi].pages _ PagesForWords[offset];
      nextpage _ nextpage + sgb[ph.newsgi].pages;
      ENDLOOP;
    RETURN};

  -- Opens the file holding an old code segment and returns its page span; reports
  -- errors (unopenable file, version mismatch) and marks the segment copied.
  SegmentForOldCodeSgi: PROC[sgi: SGIndex]
      RETURNS[f: FS.OpenFile _ nullFile, s: FileSegment.Pages _ nullPages] = {
    OPEN seg: bcd.sgb[sgi];
    IF Copied[sgi] OR seg.file = FTNull THEN RETURN;
    f _ FileForFti[seg.file];
    IF f = nullFile THEN
      BcdErrorDefs.ErrorNameBase[
        class: $error, s: "could not be opened to copy code"L,
        name: bcd.ftb[seg.file].name, base: bcd.ssb]
    ELSE {
      s _ [
        file: f,
        span: [base: seg.base-1, pages: (seg.pages + seg.extraPages)]];
      IF WrongOldSegVersion[s, bcd.ftb[seg.file].version] THEN {
        BcdErrorDefs.ErrorNameBase[
          class: $error, s: "on disk has an incorrect version"L,
          name: bcd.ftb[seg.file].name, base: bcd.ssb];
        f _ FSClose[f]; s _ nullPages}};
    -- invalidate the output header if this segment could not be fetched
    IF s.file = FS.nullOpenFile THEN header.versionIdent _ 0;
    SetCopied[sgi]};

  -- Reads page 0 of the segment's file and compares the BCD version stamp there
  -- against the expected one; the one-page buffer is freed even on UNWIND.
  WrongOldSegVersion: PROC[s: FileSegment.Pages, version: BcdDefs.VersionStamp]
      RETURNS[reply: BOOL] = {
    h: BcdDefs.BcdBase;
    headerInterval: VM.Interval _ VM.Allocate[1];
    { ENABLE UNWIND => headerInterval.Free[];
      h _ VM.AddressForPageNumber[headerInterval.page];
      FS.Read[file: [s.file], from: 0, nPages: 1, to: h];
      reply _ (h.version # version);
      };
    headerInterval.Free[];
    RETURN[reply]};

  -- Segment Mapping

  -- sgMap: old segment index -> new segment index (used only when packing).
  -- copyMap: old segment index -> already copied?
  SGMap: TYPE = RECORD[SEQUENCE length: CARDINAL OF SGIndex];
  CopyMap: TYPE = RECORD[SEQUENCE length: CARDINAL OF BOOL];
  sgMap: REF SGMap _ NIL;
  copyMap: REF CopyMap _ NIL;

  IndexForSgi: PROC[sgi: SGIndex] RETURNS[CARDINAL] = INLINE {
    RETURN[LOOPHOLE[sgi,CARDINAL]/SGRecord.SIZE]};

  SgiForIndex: PROC[i: CARDINAL] RETURNS[SGIndex] = INLINE {
    RETURN[SGIndex.FIRST + i*SGRecord.SIZE]};

  InitCopyMap: PROC[nsgis: CARDINAL] = {
    copyMap _ NEW[CopyMap[nsgis]];
    FOR i: CARDINAL IN [0..nsgis) DO copyMap[i] _ FALSE ENDLOOP};

  FreeCopyMap: PROC = {copyMap _ NIL};

  SetCopied: PROC[sgi: SGIndex] = {copyMap[IndexForSgi[sgi]] _ TRUE};

  Copied: PROC[sgi: SGIndex] RETURNS[BOOL] = {
    RETURN[copyMap[IndexForSgi[sgi]]]};

  InitSgMap: PROC[nsgis: CARDINAL] = {
    sgMap _ NEW[SGMap[nsgis]];
    FOR i: CARDINAL IN [0..nsgis) DO sgMap[i] _ BcdDefs.SGNull ENDLOOP};

  FreeSgMap: PROC = {sgMap _ NIL};

  SetSgMap: PROC[old, new: SGIndex] = {
    IF (packing AND old # SGNull) THEN sgMap[IndexForSgi[old]] _ new};

  -- Identity when not packing (or for SGNull); otherwise the mapped new index.
  ReadSgMap: PROC[old: SGIndex] RETURNS[SGIndex] = {
    RETURN[IF (~packing OR old = SGNull) THEN old ELSE sgMap[IndexForSgi[old]]]};

  -- Gives every old segment not claimed by a PACK group a fresh new-table entry,
  -- choosing the destination file by segment class and copy options.
  FillInSgMap: PROC = {
    FOR i: CARDINAL IN [0..sgMap.length) DO
      IF sgMap[i] = SGNull THEN {
        oldsgi: SGIndex = SgiForIndex[i];
        newsgi: SGIndex = table.Words[sgtype, SGRecord.SIZE];
        sgb[newsgi] _ bcd.sgb[oldsgi];
        sgb[newsgi].file _ (
          IF sgb[newsgi].class = $symbols
            THEN (IF data.copySymbols THEN symbolMap.fti ELSE MapFile[sgb[newsgi].file])
            ELSE codeMap.fti);
        sgMap[i] _ newsgi};
      ENDLOOP
    };

  -- replace all sgis with ReadSgMap[sgi]; called only when packing
  FixAllSgis: PROC = {
    FixModule: PROC[mti: MTIndex] RETURNS[BOOL _ FALSE] = {
      OPEN m: mtb[mti];
      m.code.sgi _ ReadSgMap[m.code.sgi];
      m.sseg _ ReadSgMap[m.sseg]};
    FixSpace: PROC[spi: SPIndex] RETURNS[BOOL _ FALSE] = {
      OPEN sp: spb[spi];
      sp.seg _ ReadSgMap[sp.seg]};
    EnumerateModules[FixModule];
    EnumerateSpaces[FixSpace]};

  -- Code Links

  -- Number of link words for a module, by module-record variant.
  LinkCount: PROC[mti: MTIndex] RETURNS[CARDINAL] = INLINE {
    RETURN[WITH m: mtb[mti] SELECT FROM
      direct => m.length,
      indirect => IF m.links = LFNull THEN 0 ELSE lfb[m.links].length,
      multiple => IF m.links = LFNull THEN 0 ELSE lfb[m.links].length,
      ENDCASE => ERROR]};

  -- Rounds offset up to the next multiple of Alignment.
  AlignOffset: PROC[offset: CARDINAL] RETURNS[CARDINAL] = INLINE {
    RETURN[((offset + (Alignment-1))/Alignment)*Alignment]};

  -- Writes a module's link words (padded to Alignment, preceded by a count word at
  -- offset 0) followed by its code; fixes up offsets of all modules sharing the
  -- segment. Returns the number of words written, or 0 if the segment is unavailable.
  AddLinksToCodeSegment: PROC[
      stream: IO.STREAM, mti: MTIndex, offset: CARDINAL, packed: BOOL]
      RETURNS[CARDINAL] = {
    sgi: SGIndex = mtb[mti].code.sgi;
    codeLength: CARDINAL = mtb[mti].code.length/2;
    linkSpace: CARDINAL;
    f: FS.OpenFile;
    s: FileSegment.Pages;
    prefixWords: CARDINAL _ 0;
    FixOffset: PROC[mti: MTIndex] RETURNS[BOOL _ FALSE] = {
      OPEN c: mtb[mti].code;
      IF c.sgi = sgi THEN {
        c.linkspace _ TRUE; c.offset _ c.offset+offset; c.packed _ packed}};
    [f, s] _ SegmentForOldCodeSgi[sgi];
    IF f = nullFile THEN RETURN[0];
    linkSpace _ LinkCount[mti];
    IF offset = 0 AND linkSpace # 0 THEN {
      prefixWords _ 1;
      PutWord[stream, linkSpace + Alignment - (linkSpace MOD Alignment)];
      offset _ offset+1};
    IF (offset+linkSpace) MOD Alignment # 0 THEN
      linkSpace _ linkSpace + Alignment - ((offset+linkSpace) MOD Alignment);
    offset _ offset + linkSpace;
    EnumerateModules[FixOffset];
    FOR i: CARDINAL IN [0..linkSpace) DO PutWord[stream, 0] ENDLOOP;
    WriteFromPages[stream: stream, pages: s, words: codeLength];
    f _ FSClose[f];
    RETURN[prefixWords + linkSpace + codeLength]};

  -- copying estimates; ignore possible packing of code
  EstimateCopiedPages: PROC RETURNS[codePages, symbolPages: CARDINAL _ 0] = {
    packaged: BOOL _ FALSE;
    AddModule: PROC[mti: MTIndex] RETURNS[BOOL _ FALSE] = {
      OPEN m: mtb[mti];
      IF data.copyCode AND m.code.sgi # SGNull THEN {
        OPEN seg: bcd.sgb[m.code.sgi];
        IF ~m.packageable THEN packaged _ TRUE
        ELSE {
          IF m.linkLoc = $code AND ~m.code.linkspace THEN {
            nLinks: CARDINAL = LinkCount[mti];
            offset: CARDINAL = AlignOffset[IF nLinks=0 THEN 0 ELSE 1+nLinks];
            codePages _ codePages + PagesForWords[offset + m.code.length/2]}
          ELSE codePages _ codePages + seg.pages;
          codePages _ codePages + seg.extraPages}};
      IF data.copySymbols AND m.sseg # SGNull THEN {
        OPEN seg: bcd.sgb[m.sseg];
        symbolPages _ symbolPages + seg.pages + seg.extraPages};
      RETURN};
    -- count each non-packageable (packaged) code segment once, by segment
    AddSegment: PROC[oldSgi: SGIndex] = {
      OPEN seg: bcd.sgb[oldSgi];
      IF seg.class = $code THEN {
        package: BOOL _ FALSE;
        TestModule: PROC[mti: MTIndex] RETURNS[BOOL] = {
          OPEN m: mtb[mti];
          IF ~m.packageable AND m.code.sgi = oldSgi THEN package _ TRUE;
          RETURN[m.code.sgi = oldSgi]};
        EnumerateModules[TestModule];
        IF package THEN codePages _ codePages + seg.pages + seg.extraPages}};
    IF data.copyCode OR data.copySymbols THEN EnumerateModules[AddModule];
    IF data.copyCode AND packaged THEN EnumerateOldSegments[AddSegment];
    RETURN};

  -- START MoveCodeSegments HERE
  -- Copies all code segments to their destination (the BCD itself or a separate
  -- code file): first packed groups, then modules needing inline code links, then
  -- the remaining old segments.
  MoveCodeSegments: PROC[copiedPages: CARDINAL] = {
    stream: IO.STREAM;
    nextPage: CARDINAL;
    AddLinks: PROC[mti: MTIndex] RETURNS[BOOL _ FALSE] = {
      OPEN m: mtb[mti];
      wordsWritten, pagesWritten: CARDINAL;
      newSgi: SGIndex;
      Process.CheckForAbort[];
      IF m.linkLoc = $code AND ~m.code.linkspace AND m.packageable THEN {
        IF m.code.packed THEN
          BcdErrorDefs.ErrorName[
            $error,
            "was previously PACKed and can not now have code links added"L,
            m.name]
        ELSE {
          MoveToPageBoundary[stream: stream, page: (nextPage-1)];
          wordsWritten _ AddLinksToCodeSegment[
            stream: stream, mti: mti, offset: 0, packed: FALSE];
          pagesWritten _ PagesForWords[wordsWritten];
          newSgi _ ReadSgMap[m.code.sgi];
          sgb[newSgi].file _ codeMap.fti;
          sgb[newSgi].base _ nextPage;
          sgb[newSgi].pages _ pagesWritten;
          nextPage _ nextPage + pagesWritten};
        };
      }; -- end AddLinks
    MoveOne: PROC[oldSgi: SGIndex] = {
      OPEN seg: bcd.sgb[oldSgi];
      Process.CheckForAbort[];
      IF seg.class = $code THEN {
        f: FS.OpenFile;
        s: FileSegment.Pages;
        [f, s] _ SegmentForOldCodeSgi[oldSgi];
        IF f # nullFile THEN {
          segPages: CARDINAL = s.span.pages;
          newSgi: SGIndex = ReadSgMap[oldSgi];
          sgb[newSgi].file _ codeMap.fti;
          sgb[newSgi].base _ nextPage;
          MoveToPageBoundary[stream: stream, page: (nextPage-1)];
          WriteFromPages[
            stream: stream, pages: s, words: (segPages * BcdDefs.PageSize)];
          nextPage _ nextPage + segPages;
          f _ FSClose[f]};
        };
      }; -- end MoveOne
    IF codeMap.fti = FTSelf THEN {stream _ bcdStream; nextPage _ nextBcdPage}
    ELSE [stream, nextPage] _ InitFile[codeMap, copiedPages];
    nextPage _ PackCodeSegments[stream, nextPage];
    EnumerateModules[AddLinks];
    EnumerateOldSegments[MoveOne];
    IF codeMap.fti = FTSelf THEN nextBcdPage _ nextPage ELSE stream.Close[]};

  -- insure that a file entry exists for each file still referenced by an
  -- uncopied symbol segment
  EnterMissingSymbolFiles: PROC = {
    CheckOneSymbolsFileSeg: PROC[oldSgi: SGIndex] = {
      OPEN seg: bcd.sgb[oldSgi];
      IF (seg.class = $symbols) AND ~Copied[oldSgi] AND (seg.file # FTNull) THEN
        [] _ MapFile[bcd.sgb[oldSgi].file]
      };
    EnumerateOldSegments[CheckOneSymbolsFileSeg]};

  -- Copies all symbol segments to their destination; on open failure only warns
  -- and leaves the segment pointing at its (mapped) original file.
  MoveSymbolSegments: PROC[copiedPages: CARDINAL] = {
    stream: IO.STREAM;
    nextPage: CARDINAL;
    MoveOne: PROC[oldSgi: SGIndex] = {
      OPEN seg: bcd.sgb[oldSgi];
      f: FS.OpenFile _ nullFile;
      newSgi: SGIndex;
      Process.CheckForAbort[];
      IF (seg.class # $symbols) OR Copied[oldSgi] OR (seg.file = FTNull) THEN RETURN;
      newSgi _ ReadSgMap[oldSgi];
      f _ FileForFti[seg.file];
      IF f = nullFile THEN {
        BcdErrorDefs.ErrorNameBase[
          class: $warning, s: "could not be opened to copy symbols"L,
          name: bcd.ftb[seg.file].name, base: bcd.ssb];
        sgb[newSgi] _ bcd.sgb[oldSgi];
        sgb[newSgi].file _ MapFile[bcd.sgb[oldSgi].file]}
      ELSE {
        s: FileSegment.Pages = [
          file: f,
          span: [base: seg.base-1, pages: (seg.pages + seg.extraPages)]];
        IF WrongOldSegVersion[s, bcd.ftb[seg.file].version] THEN {
          BcdErrorDefs.ErrorNameBase[
            class: $error, s: "on disk has incorrect version"L,
            name: bcd.ftb[seg.file].name, base: bcd.ssb];
          header.versionIdent _ 0}
        ELSE {
          segPages: CARDINAL = s.span.pages;
          sgb[newSgi].file _ symbolMap.fti;
          sgb[newSgi].base _ nextPage;
          MoveToPageBoundary[stream: stream, page: (nextPage-1)];
          WriteFromPages[
            stream: stream, pages: s, words: (segPages * BcdDefs.PageSize)];
          nextPage _ nextPage + segPages};
        f _ FSClose[f];
        };
      SetCopied[oldSgi]};
    IF symbolMap.fti = FTSelf THEN {stream _ bcdStream; nextPage _ nextBcdPage}
    ELSE [stream, nextPage] _ InitFile[symbolMap, copiedPages];
    EnumerateOldSegments[MoveOne];
    IF symbolMap.fti = FTSelf THEN nextBcdPage _ nextPage ELSE stream.Close[]};

  -- Opens the file named by an old file-table entry; returns nullFile on FS.Error.
  FileForFti: PROC[oldFti: BcdDefs.FTIndex] RETURNS[f: FS.OpenFile] = {
    name: BcdDefs.NameRecord = bcd.ftb[oldFti].name;
    ssd: ConvertUnsafe.SubString _ [
      base: @bcd.ssb.string, offset: name, length: bcd.ssb.size[name]];
    f _ nullFile;
    f _ FS.Open[NormalizeFileName[ssd] ! FS.Error => TRUSTED {CONTINUE}];
    RETURN};

  -- Lower-cases the name and appends ".bcd" when it contains no dot.
  NormalizeFileName: PROC[in: ConvertUnsafe.SubString] RETURNS[Rope.ROPE] = {
    dot: BOOL _ FALSE;
    i: CARDINAL _ in.offset;
    EachChar: SAFE PROC RETURNS[c: CHAR] ~ TRUSTED {
      c _ in.base[i];
      i _ i + 1;
      SELECT c FROM
        IN ['A..'Z] => c _ Ascii.Lower[c];
        '. => dot _ TRUE;
        ENDCASE;
      RETURN};
    name: Rope.ROPE = Rope.FromProc[in.length, EachChar];
    RETURN[IF ~dot THEN name.Concat[".bcd"] ELSE name]};

  -- Bcd Output Routines

  bcdStream: IO.STREAM; -- stream on the output BCD file
  nextBcdPage: CARDINAL; -- next free page in the output BCD

  -- Writes one table region verbatim to the output stream.
  WriteSubTable: PROC[selector: Table.Selector] = {
    base: Table.Base;
    size: CARDINAL;
    [base, size] _ table.Bounds[selector];
    bcdStream.UnsafePutBlock[[base, 0, size*bytesPerWord]]};

  -- Lays out all table offsets/limits in the header, copies literals and
  -- code/symbol segments, then writes the header and every sub-table.
  TableOut: PROC = {
    d, s: CARDINAL;
    bcdPages, codePages, symbolPages: CARDINAL;
    basePages: CARDINAL;
    rtPageCount: CARDINAL;
    saveNextPage: CARDINAL;
    saveIndex: INT; -- FileStream.FileByteIndex
    rtPageCount _ PagesForWords[(data.literals).SegmentSize[]];
    BEGIN OPEN header;
    IF firstdummy = 0 THEN firstdummy _ BcdUtilDefs.GetGfi[0];
    -- d accumulates the word offset of each successive table in the output
    d _ BCD.SIZE;
    ssOffset _ d; d _ d + (ssLimit _ table.Bounds[sstype].size);
    ctOffset _ d; d _ d + (s _ table.Bounds[cttype].size); ctLimit _ LOOPHOLE[s];
    mtOffset _ d; d _ d + (s _ table.Bounds[mttype].size); mtLimit _ LOOPHOLE[s];
    impOffset _ d; d _ d + (s _ table.Bounds[imptype].size); impLimit _ LOOPHOLE[s];
    expOffset _ d; d _ d + (s _ table.Bounds[exptype].size); expLimit _ LOOPHOLE[s];
    evOffset _ d; d _ d + (s _ table.Bounds[evtype].size); evLimit _ LOOPHOLE[s, EVIndex];
    sgOffset _ d; d _ d + (s _ table.Bounds[sgtype].size); sgLimit _ LOOPHOLE[s];
    ftOffset _ d; d _ d + (s _ table.Bounds[fttype].size); ftLimit _ LOOPHOLE[s];
    ntOffset _ d; d _ d + (s _ table.Bounds[nttype].size); ntLimit _ LOOPHOLE[s];
    typOffset _ d; d _ d + (s _ table.Bounds[typtype].size); typLimit _ LOOPHOLE[s];
    tmOffset _ d; d _ d + (s _ table.Bounds[tmtype].size); tmLimit _ LOOPHOLE[s];
    spOffset _ d; d _ d + (s _ table.Bounds[sptype].size); spLimit _ LOOPHOLE[s];
    fpOffset _ d; d _ d + (s _ table.Bounds[fptype].size); fpLimit _ LOOPHOLE[s];
    lfOffset _ d; d _ d + (s _ table.Bounds[lftype].size); lfLimit _ LOOPHOLE[s];
    rfOffset _ d; d _ d + (s _ table.Bounds[rftype].size); rfLimit _ LOOPHOLE[s];
    tfOffset _ d; d _ d + (s _ table.Bounds[tftype].size); tfLimit _ LOOPHOLE[s];
    basePages _ PagesForWords[d];
    rtPages _ [relPageBase: basePages, pages: rtPageCount];
    extended _ TRUE;
    nPages _ bcdPages _ basePages + rtPageCount;
    END;
    [codePages, symbolPages] _ EstimateCopiedPages[];
    IF data.copyCode AND codeMap.fti = FTSelf THEN bcdPages _ bcdPages + codePages;
    IF data.copySymbols AND symbolMap.fti = FTSelf THEN bcdPages _ bcdPages + symbolPages;
    OpenOutputFile[1 + bcdPages];
    IF rtPageCount # 0 THEN {
      MoveToPageBoundary[stream: bcdStream, page: basePages];
      IF packing THEN (data.literals).UpdateSegments[ReadSgMap];
      (data.literals).Write[bcdStream];
      saveIndex _ bcdStream.GetIndex};
    saveNextPage _ nextBcdPage _ header.nPages + 1;
    IF data.copyCode THEN
      MoveCodeSegments[codePages ! UserAbort => {GO TO AbortRequested}];
    IF data.copySymbols THEN
      MoveSymbolSegments[symbolPages ! UserAbort => {GO TO AbortRequested}];
    IF packing THEN FixAllSgis[];
    -- rewind and write the completed header followed by every table
    bcdStream.SetIndex[0];
    bcdStream.UnsafePutBlock[[
      LOOPHOLE[header, LONG POINTER TO Basics.RawBytes], 0, BCD.SIZE*bytesPerWord]];
    WriteSubTable[sstype];
    WriteSubTable[cttype];
    WriteSubTable[mttype];
    WriteSubTable[imptype];
    WriteSubTable[exptype];
    WriteSubTable[evtype];
    WriteSubTable[sgtype];
    WriteSubTable[fttype];
    WriteSubTable[nttype];
    WriteSubTable[typtype];
    WriteSubTable[tmtype];
    WriteSubTable[sptype];
    WriteSubTable[fptype];
    WriteSubTable[lftype];
    WriteSubTable[rftype];
    WriteSubTable[tftype];
    IF nextBcdPage # saveNextPage THEN
      MoveToPageBoundary[stream: bcdStream, page: (nextBcdPage-1)]
    ELSE IF rtPageCount # 0 THEN bcdStream.SetIndex[saveIndex];
    -- report statistics back through the shared data
    data.nConfigs _ header.nConfigs;
    data.nModules _ header.nModules;
    data.nImports _ header.nImports;
    data.nExports _ header.nExports;
    data.nPages _ header.nPages;
    EXITS AbortRequested => data.aborted _ TRUE
    };

  -- Opens the caller-supplied output file and sizes it for the estimated pages.
  OpenOutputFile: PROC[initialPages: CARDINAL] = INLINE {
    file: FS.OpenFile _ data.outputFile;
    IF file = FS.nullOpenFile THEN ERROR;
    file.SetPageCount[initialPages];
    bcdStream _ file.StreamFromOpenFile[$write]};

  CloseOutputFile: PROC = INLINE {bcdStream.Close[]; bcdStream _ NIL};

  -- Closes fh (tolerating $invalidOpenFile) and returns nullOpenFile so callers
  -- can write fh _ FSClose[fh].
  FSClose: PROC[fh: FS.OpenFile] RETURNS[FS.OpenFile] = {
    IF fh # FS.nullOpenFile THEN
      fh.Close[ ! FS.Error => IF error.code = $invalidOpenFile THEN CONTINUE];
    RETURN[FS.nullOpenFile]};

  }.
”BcdWrite.mesa Copyright c 1985 by Xerox Corporation. All rights reserved. Satterthwaite on April 18, 1986 11:57:26 am PST Maxwell, August 11, 1983 2:40 pm Russ Atkinson, March 7, 1985 1:01:21 am PST Paul Rovner, October 7, 1983 3:19 pm a basic assumption of this module is equality of these two page sizes inline utilities BCD (re)construction configs are already copied, only map names and files modules are already copied, only map names and files spaces are already copied, only map names (and segments?) framepacks are already copied, only map names first make sure that old is not already exported file mapping header processing ... moves the index to the given page boundary. However, if the size of the file will not permit this, extend the file via SetLength to allow SetIndex to succeed. Code Packing t is Tree.null, a list of ids, or a list of lists of ids Segment Mapping called only when packing (i.e.
packing requested AND copyCode = TRUE) replace all sgis with ReadSgMap[sgi] Code Links code and symbol copying estimates ignore possible packing of code START MoveCodeSegments HERE insure that a file entry exists for this file Bcd Output Routines Κ%˜codešœ ™ Kšœ Οmœ1™˜Kšœžœ˜?—Kšœžœ˜Kšœžœ˜K˜K˜—Kšœžœžœ˜"K˜šœžœžœ˜K˜ K˜Kšœžœ˜Kšœ žœ ˜K˜—Kš œžœ˜˜š  œžœ žœžœžœ ˜Ašžœžœ˜K˜ K˜K˜Kš œžœžœžœžœžœ˜V—šœ˜K˜——Kšžœžœ'˜K˜Kšœ˜šž˜Kšœ žœ˜—Kšžœ˜—K˜.K˜+Kšžœ˜—Kšžœ˜K˜—š œžœ˜(Kšžœžœ;˜HKšžœ˜Kšžœ žœžœžœ˜0K˜šžœžœ˜˜K˜7K˜,——šžœ˜˜Kšœ˜K˜?—šžœ2žœ˜:˜˜6K˜-——Kšœ˜K˜——Kšžœ žœžœ˜9K˜K˜—š œžœ6˜NKšžœžœ˜K˜Kšœžœ žœ ˜-šœžœžœ˜)Kšœžœ+˜1Kšžœ1˜3K˜—K˜Kšœ˜Kšžœ ˜K˜K˜——Kšœ™˜Kš œžœžœžœ žœžœ ˜;Kš œ žœžœžœ žœžœžœ˜:K˜Kšœ žœ žœ˜Kšœ žœ žœ˜K˜š   œžœžœžœžœ˜Kšžœ˜K˜—K˜Kšžœ žœ6˜EK˜——Kšžœžœžœ˜FKšžœžœ žœ"˜DKšžœ˜K˜K˜—š œžœžœ˜1Kšœžœžœ˜Kšœ žœ˜K˜š  œžœžœžœžœ˜6Kšžœ ˜Kšœžœ˜%K˜Kšœ˜šžœžœžœžœ˜Cšžœžœ˜-K˜O—šžœ˜K˜7˜%Kšœ-žœ˜4—K˜+K˜K˜K˜K˜!K˜$—K˜—KšœŸ˜—K˜š œžœ˜#Kšžœ˜Kšœ˜šžœžœ˜Kšœžœ ˜K˜K˜&šžœžœ˜Kšœ žœ˜"K˜$K˜K˜K˜7˜K˜K˜&—K˜Kšœ˜—Kšœ˜—šœŸ˜K˜——Kšœ™šžœ˜Kšžœ-˜1Kšžœ5˜9—K˜.K˜K˜Kšžœžœžœ˜IK˜K˜—Kš œžœ˜!˜š œžœ˜2Kšžœ˜šžœžœžœž˜JKšœ.™.K˜"—˜K˜——K˜.K˜—š œžœžœ˜3Kšœžœžœ˜Kšœ žœ˜K˜š œžœ˜#Kšžœ˜Kšœžœ˜K˜Kšœ˜Kš žœžœžœžœžœ˜OK˜K˜šžœžœ˜˜K˜