<> <> <> DIRECTORY BcdOps: TYPE USING[BcdBase], BTreeDefs: TYPE USING [BTreeHandle, CreateAndInitializeBTree, Desc, Insert, KeyNotFound, Lookup, ReleaseBTree, TestKeys], BTreeSupportExtraDefs: TYPE USING[CloseFile, OpenFile], ConvertUnsafe: TYPE USING[ToRope], CS: TYPE USING[MakeTS, PTimeStamp], CWF: TYPE USING [SWF1, SWF2, SWF3, WF0, WF1, WF2, WF3, WF4, WFCR], DateAndTimeUnsafe: TYPE USING[Parse], DFSubr: TYPE USING [AllocateDFSeq, DF, DFSeq, FreeDFSeq, LookupDF, NextDF, ParseStream, StripLongName, WriteOut], Directory: TYPE USING[Error, Lookup, ignore], Environment: TYPE USING [bytesPerPage], File: TYPE USING[Capability, nullCapability], FileIO: TYPE USING[Open], FQ: TYPE USING[FileQueryBangH, Result], Heap: TYPE USING [Error], Inline: TYPE USING [BITNOT, BITOR, LowHalf], IO: TYPE USING [card, Close, Handle, Flush, GetLength, PFCodeProc, Put, PutChar, PutF, PutFR, rope, SetPFCodeProc, SetIndex, string, UserAbort], LongString: TYPE USING [EquivalentString, StringToDecimal], ReleaseSupport: TYPE USING [], Rope: TYPE USING[Length, Lower, Fetch, Flatten, ROPE, Text], RTBcd: TYPE USING[VersionID], Space: TYPE USING [Create, Delete, Handle, Kill, LongPointer, Map, nullHandle, virtualMemory], UnsafeSTP: TYPE USING [Connect, CreateRemoteStream, DesiredProperties,Destroy, Error, FileInfo, GetFileInfo, Handle, SetDesiredProperties], UnsafeSTPOps: TYPE USING [Handle, SetPListItem], STPSubr: TYPE USING [CachedOpen, HandleSTPError, MakeSTPHandle, StopSTP, StpStateRecord], Stream: TYPE USING [Delete, EndOfStream, GetBlock, Handle, PutBlock], Subr: TYPE USING [AbortMyself, AllocateString, CopyString, CursorInWindow, EndsIn, FileError, FreeString, GetLine, GetNameandPassword, LongZone, NewFile, NewStream, Prefix, Read, ReadWrite, strcpy, StripLeadingBlanks, SubStrCopy, TTYProcs], TimeStamp: TYPE USING[Stamp], UserTerminal: TYPE USING [cursor, CursorArray, GetCursorPattern, SetCursorPattern], VerifyDFInterface: TYPE USING [VerifyBcds]; Release23Impl: PROGRAM IMPORTS BTreeDefs, BTreeSupportExtraDefs, ConvertUnsafe, CS, CWF, DateAndTimeUnsafe, DFSubr, Directory, FileIO, FQ, Heap, Inline, IO, LongString, Rope, Space, STP: UnsafeSTP, STPOps: UnsafeSTPOps, STPSubr, Stream, Subr, UserTerminal, VerifyDFInterface EXPORTS ReleaseSupport SHARES IO = { <> MAXFILES: CARDINAL = 500; <> <> NPAGESTOCOPY: CARDINAL = 127; <> <<(all page counts are in IFS pages, not Pilot pages)>> bytesPerIFSPage: CARDINAL = 2048; DFMapSeqRecord: TYPE = RECORD[ size: CARDINAL _ 0, zone: UNCOUNTED ZONE _ NULL, -- zone for strings below body: SEQUENCE maxsize: CARDINAL OF RECORD[ shortname: LONG STRING _ NIL, -- e.g. "Rigging.DF" lhsHost: LONG STRING _ NIL, -- the released position we are overriding lhsDirectory: LONG STRING _ NIL, rhsHost: LONG STRING _ NIL, -- the working posn we want it to refer to rhsDirectory: LONG STRING _ NIL ] ]; Rw: TYPE = {read, write, none}; Global: TYPE = RECORD[ nDFFilesStored: CARDINAL _ 0, -- the # actually written nFilesToRelease: CARDINAL _ 0, -- the number that are being ReleaseAs'd nPagesToRelease: LONG CARDINAL _ 0, -- the number of above nFilesStored: CARDINAL _ 0, -- the number actually copied this time nPagesStored: LONG CARDINAL _ 0, -- pages for above nFilesNotStored: CARDINAL _ 0, -- the number that would have been copied nPagesNotStored: LONG CARDINAL _ 0, -- pages for above nFilesSkipped: CARDINAL _ 0, -- number not being released (e.g. 
CameFrom) <<>> copySpace: Space.Handle _ Space.nullHandle, <> oldPhase3FileCacheExists: BOOL _ FALSE, useOldPhase3FileCache: BOOL _ FALSE, updateBTree: BOOL _ TRUE, phase3BTreeHandle: BTreeDefs.BTreeHandle _ NULL, phase3BTreeCap: File.Capability _ File.nullCapability, dfmap: LONG POINTER TO DFMapSeqRecord _ NIL, in, out: IO.Handle _ NIL, verbose: REF BOOL _ NIL, stp: ARRAY Rw OF STP.Handle _ ALL[NIL], stpHost: ARRAY Rw OF Rope.Text _ ALL[NIL], connectName: LONG STRING _ NIL, connectPassword: LONG STRING _ NIL, versionMapPrefix: Rope.Text _ NIL, versionMapFile: IO.Handle _ NIL, dfseqIndex: ARRAY CHAR['a .. 'z] OF CARDINAL _ ALL[0] ]; <> <> <> <> <> <> <> <> g: REF Global _ NEW[Global _ [versionMapPrefix: "[Indigo]"]]; <> VerifySufficiency: PUBLIC PROC[topdffilename: LONG STRING, h: Subr.TTYProcs, outhandle: IO.Handle, checkForOverwrite: BOOL] = { <> dfseq: DFSubr.DFSeq _ NIL; sh: Stream.Handle; df: DFSubr.DF; {ENABLE UNWIND => DFSubr.FreeDFSeq[@dfseq]; stpStateRecord: STPSubr.StpStateRecord _ [checkForOverwrite: checkForOverwrite]; g.out _ outhandle; CWF.WF1["Opening %s.\n"L, topdffilename]; [sh] _ STPSubr.CachedOpen[host: NIL, directory: NIL, shortname: topdffilename, version: 0, wantcreatetime: 0, h: h, wantExplicitVersion: FALSE, onlyOne: TRUE, stpState: @stpStateRecord ! Subr.FileError => GOTO notfound]; dfseq _ DFSubr.AllocateDFSeq[maxEntries: MAXFILES, zoneType: shared]; DFSubr.ParseStream[sh, dfseq, topdffilename, NIL, FALSE, FALSE, FALSE, h]; Stream.Delete[sh]; FOR i: CARDINAL IN [0 .. dfseq.size) DO IF outhandle.UserAbort[] THEN SIGNAL Subr.AbortMyself; df _ @dfseq[i]; IF df.atsign AND NOT df.readonly AND NOT df.cameFrom THEN VerifyThisPackage[df.host, df.directory, df.shortname, df.version, df.createtime, h, checkForOverwrite, df.criterion = none]; ENDLOOP; STPSubr.StopSTP[]; -- may have timed out DFSubr.FreeDFSeq[@dfseq]; EXITS notfound => CWF.WF1["Error - can't open %s.\n"L, topdffilename]; }}; VerifyThisPackage: PROC[host, directory, shortname: LONG STRING, version: CARDINAL, createtime: LONG CARDINAL, h: Subr.TTYProcs, checkForOverwrite, wantExplicitVersion: BOOL] = { <<... is called once for each of the first level of the tree below the root; that is, for each of the Includes of the root DF for the release; host and directory may be NIL>> dfseq: DFSubr.DFSeq _ NIL; sh: Stream.Handle; plus, nonLeaf: BOOL _ FALSE; df: DFSubr.DF; stpStateRecord: STPSubr.StpStateRecord _ [checkForOverwrite: checkForOverwrite]; {ENABLE UNWIND => DFSubr.FreeDFSeq[@dfseq]; Flush[]; CWF.WF1["\nOpening %s.\n"L, shortname]; [sh] _ STPSubr.CachedOpen[ host: host, directory: directory, shortname: shortname, version: version, wantcreatetime: createtime, h: h, wantExplicitVersion: wantExplicitVersion, onlyOne: TRUE, stpState: @stpStateRecord ! Subr.FileError => GOTO notfound]; dfseq _ DFSubr.AllocateDFSeq[maxEntries: MAXFILES, zoneType: shared]; DFSubr.ParseStream[sh, dfseq, shortname, NIL, FALSE, FALSE, FALSE, h]; Stream.Delete[sh]; FOR i: CARDINAL IN [0 .. dfseq.size) DO df _ @dfseq[i]; IF df.atsign AND NOT df.readonly AND NOT df.cameFrom THEN nonLeaf _ TRUE; ENDLOOP; <> plus _ FALSE; FOR i: CARDINAL IN [0 .. 
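-- the loop below looks for a "+" (topmark) entry; a DF with no + files that Includes no other DF files is reported and skipped rather than run through VerifyDF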
dfseq.size) DO IF dfseq[i].topmark THEN plus _ TRUE; ENDLOOP; DFSubr.FreeDFSeq[@dfseq]; IF NOT nonLeaf AND NOT plus THEN { CWF.WF1["No + files in %s, a file that Includes no other DF files.\n"L, shortname]; RETURN; }; CWF.WF1["VerifyDF of %s started.\n"L, shortname]; Flush[]; VerifyDFInterface.VerifyBcds[bcdfilename: NIL, dffilename: shortname, h: h, checkForOverwrite: checkForOverwrite, printFlattened: FALSE, useHugeZone: FALSE, wantRTVersionID: RTBcd.VersionID ! Heap.Error => { CWF.WF0["Error - Heap.Error!!!!!.\n"L]; CONTINUE; } ]; CWF.WF1["VerifyDF of %s complete.\n"L, shortname]; STPSubr.StopSTP[]; -- may have timed out EXITS notfound => { IF host ~= NIL THEN CWF.WF3["Error - can't open [%s]<%s>%s\n"L, host, directory, shortname] ELSE CWF.WF1["Error - can't open %s.\n"L, shortname]; }; }}; TransferFiles: PUBLIC PROC[topdffilename: LONG STRING, dfseqall: DFSubr.DFSeq, h: Subr.TTYProcs, inhandle, outhandle, logFileHandle: IO.Handle, checkForOverwrite, usePhase3BTree, updateBTree: BOOL, verbosePtr: REF BOOL] = { <> <> <> <> <> <<>> <> dfseq: DFSubr.DFSeq _ NIL; outofspace: BOOL; df: DFSubr.DF; connectName: LONG STRING _ Subr.AllocateString[50]; connectPassword: LONG STRING _ Subr.AllocateString[50]; Cleanup: PROC = { g.out.PutF["Summary for phase 3: (page counts are in 2048 bytes)\n"]; g.out.PutF["\t%d non-DF files being released, %d pages in those files,\n", IO.card[g.nFilesToRelease], IO.card[g.nPagesToRelease]]; g.out.PutF["\t%d non-DF files actually copied, %d pages in those files,\n", IO.card[g.nFilesStored], IO.card[g.nPagesStored]]; g.out.PutF["\t%d non-DF files in release position, %d pages in those files,\n", IO.card[g.nFilesNotStored], IO.card[g.nPagesNotStored]]; g.out.PutF["\t%d DF files written, %d files skipped entirely.\n", IO.card[g.nDFFilesStored], IO.card[g.nFilesSkipped]]; IF g.copySpace ~= Space.nullHandle THEN Space.Delete[g.copySpace]; g.copySpace _ Space.nullHandle; Flush[]; [] _ Close[read]; [] _ Close[write]; IF g.versionMapFile ~= NIL THEN g.versionMapFile.Close[]; g.versionMapFile _ NIL; STPSubr.StopSTP[]; CleanupBTree[]; DFSubr.FreeDFSeq[@dfseq]; Subr.FreeString[connectName]; Subr.FreeString[connectPassword]; }; {ENABLE UNWIND => Cleanup[]; notFound: BOOL _ FALSE; g.verbose _ verbosePtr; g.in _ inhandle; g.out _ outhandle; g.updateBTree _ updateBTree; g.useOldPhase3FileCache _ usePhase3BTree; IF usePhase3BTree OR updateBTree THEN MakeBTree[]; g.nPagesStored _ g.nPagesNotStored _ g.nPagesToRelease _ 0; g.nFilesStored _ g.nFilesNotStored _ g.nFilesToRelease _ g.nFilesSkipped _ g.nDFFilesStored _ 0; IF dfseqall = NIL THEN { CWF.WF0["Error - Phase 1 must precede Phase 3 without a Reset in between.\n"L]; GO TO return; }; BuildIndex[dfseqall]; -- build search index used by phase 3 IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself; dfseq _ DFSubr.AllocateDFSeq[maxEntries: 1, zoneType: shared]; df _ DFSubr.NextDF[dfseq]; df.shortname _ Subr.CopyString[topdffilename, dfseq.dfzone]; df.atsign _ TRUE; Flush[]; CWF.WF0["For Indigo, Enter Probable Connect "L]; -- supply "Cedar", no password Subr.GetNameandPassword[connect, connectName, connectPassword, h]; g.connectName _ Subr.CopyString[connectName]; g.connectPassword _ Subr.CopyString[connectPassword]; g.out.Put[IO.string["Appending version map file for bcds on 'Release.VersionMapFile$'.\n"L]]; [] _ Directory.Lookup[ fileName: "Release.VersionMapFile$"L, permissions: Directory.ignore ! 
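-- catch phrase: Directory.Error means no version map file exists yet; note that, so the file is created fresh (with the prefix line) rather than appended to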
Directory.Error => { notFound _ TRUE; CONTINUE; }]; g.versionMapFile _ FileIO.Open[ "Release.VersionMapFile$", IF notFound THEN overwrite ELSE write]; g.versionMapFile.SetPFCodeProc['a, PrintACode]; IF notFound THEN g.versionMapFile.PutF["%s\n", IO.rope[g.versionMapPrefix]] ELSE g.versionMapFile.SetIndex[g.versionMapFile.GetLength[]]; -- sets to end outofspace _ RecursiveStoreDF[dfseq, NIL, dfseqall, h, checkForOverwrite]; EXITS return => {}; }; Cleanup[]; }; ConnectionClosedError: ERROR[rw: Rw] = CODE; <> ConnectCredentialsError: ERROR[rw: Rw] = CODE; <> RecursiveStoreDF: PROC [dfseqouter: DFSubr.DFSeq, topdfouter: DFSubr.DF, dfseqall: DFSubr.DFSeq, h: Subr.TTYProcs, checkForOverwrite: BOOL] RETURNS[outofspace: BOOL] = { <> sh: Stream.Handle; dfouter: DFSubr.DF; stpStateRecord: STPSubr.StpStateRecord _ [checkForOverwrite: checkForOverwrite]; outofspace _ FALSE; FOR i: CARDINAL IN [0 .. dfseqouter.size) DO dfouter _ @dfseqouter[i]; IF dfouter.atsign AND NOT dfouter.readonly AND (dfouter.releaseDirectory = NIL OR NOT dfouter.cameFrom) THEN { <> dfseqinner: DFSubr.DFSeq _ NIL; o: BOOL; {ENABLE UNWIND => DFSubr.FreeDFSeq[@dfseqinner]; <> <> <> <> IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself; dfseqinner _ DFSubr.AllocateDFSeq[maxEntries: MAXFILES, zoneType: shared]; <> [sh] _ STPSubr.CachedOpen[host: dfouter.host, directory: dfouter.directory, shortname: dfouter.shortname, version: dfouter.version, wantcreatetime: dfouter.createtime, h: h, wantExplicitVersion: dfouter.criterion = none, onlyOne: TRUE, stpState: @stpStateRecord ! Subr.FileError => GOTO err]; CWF.WF1["Opening %s.\n"L, dfouter.shortname]; IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself; DFSubr.ParseStream[sh, dfseqinner, dfouter.shortname, NIL, FALSE, FALSE, FALSE, h]; Stream.Delete[sh]; o _ RecursiveStoreDF[dfseqinner, dfouter, dfseqall, h, checkForOverwrite]; SetCreateDateAndVersionFromPhaseOne[dfouter, dfseqall]; outofspace _ outofspace OR o; IF dfouter.releaseDirectory = NIL AND topdfouter ~= NIL THEN { CWF.WF2["No release directory for %s in %s.\n"L, dfouter.shortname, topdfouter.shortname]; DFSubr.FreeDFSeq[@dfseqinner]; LOOP; }; <> IF NOT outofspace THEN StoreDFFile[dfouter, dfseqinner, h]; DFSubr.FreeDFSeq[@dfseqinner]; Flush[]; EXITS err => { IF dfouter.host ~= NIL THEN CWF.WF3["Error - can't open [%s]<%s>%s"L, dfouter.host, dfouter.directory, dfouter.shortname] ELSE CWF.WF1["Error - can't open %s.\n"L, dfouter.shortname]; }; }; LOOP; }; IF dfouter.atsign AND dfouter.readonly AND Subr.EndsIn[dfouter.shortname, ".df"L] THEN <> <> CoerceDFLocToAnother[dfouter, dfseqouter]; IF dfouter.cameFrom AND dfouter.releaseDirectory ~= NIL THEN { IF g.verbose^ THEN CWF.WF4["Leaving [%s]<%s>%s in %s alone.\n"L, dfouter.host, dfouter.directory, dfouter.shortname, topdfouter.shortname]; g.nFilesSkipped _ g.nFilesSkipped + 1; Flush[]; } ELSE { -- is either a readonly df file or not a DF file SetCreateDateAndVersionFromPhaseOne[dfouter, dfseqall]; IF dfouter.releaseDirectory = NIL THEN { IF NOT dfouter.readonly THEN CWF.WF2["Error - no release directory specified for %s in %s.\n"L, dfouter.shortname, topdfouter.shortname] ELSE FixupEntriesWithoutReleaseAs[dfouter, dfseqouter, dfseqall]; Flush[]; } <> ELSE IF NOT LongString.EquivalentString[dfouter.shortname, topdfouter.shortname] THEN { smashrw: Rw _ none; <> <> DO CopyRemoteFile[dfouter, h ! 
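-- catch phrase: a timed-out connection records which side (read or write) must be closed so the copy can be retried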
ConnectionClosedError => { CWF.WF1["Connection to %s timed out.\n"L, dfouter.host]; smashrw _ rw; CONTINUE; }; ]; <> IF smashrw ~= none THEN smashrw _ Close[smashrw] ELSE EXIT; ENDLOOP; Flush[]; } ELSE { <> <> SwapSides[dfouter]; }; }; ENDLOOP; }; StoreDFFile: PROC[dfouter: DFSubr.DF, dfseqinner: DFSubr.DFSeq, h: Subr.TTYProcs] = { <> <> dfinner: DFSubr.DF; smashrw: Rw _ none; host: LONG STRING _ NIL; sfnnew: LONG STRING _ Subr.AllocateString[100]; {ENABLE UNWIND => {Subr.FreeString[sfnnew]}; dfouter.version _ 0; SwapSides[dfouter]; -- beware, this call must be executed exactly once <> <> dfinner _ DFSubr.LookupDF[dfseqinner, dfouter.shortname]; IF dfinner ~= NIL THEN { IF dfouter.createtime = 0 THEN dfouter.createtime _ dfinner.createtime ELSE dfinner.createtime _ dfouter.createtime; dfinner.version _ 0; IF dfouter.host ~= NIL THEN { Subr.FreeString[dfinner.host, dfseqinner.dfzone]; dfinner.host _ Subr.CopyString[dfouter.host, dfseqinner.dfzone]; Subr.FreeString[dfinner.directory, dfseqinner.dfzone]; dfinner.directory _ Subr.CopyString[dfouter.directory, dfseqinner.dfzone]; Subr.FreeString[dfinner.releaseHost, dfseqinner.dfzone]; dfinner.releaseHost _ Subr.CopyString[dfouter.releaseHost, dfseqinner.dfzone]; Subr.FreeString[dfinner.releaseDirectory, dfseqinner.dfzone]; dfinner.releaseDirectory _ Subr.CopyString[dfouter.releaseDirectory, dfseqinner.dfzone]; dfinner.cameFrom _ dfouter.cameFrom; } <> <> }; IF dfouter.host ~= NIL THEN { host _ dfouter.host; CWF.SWF2[sfnnew, "<%s>%s"L, dfouter.directory, dfouter.shortname] } ELSE IF dfinner ~= NIL THEN { host _ dfinner.host; CWF.SWF2[sfnnew, "<%s>%s"L, dfinner.directory, dfinner.shortname] } ELSE { CWF.WF1["Error - don't know where to store %s.\n"L, dfouter.shortname]; GO TO return; }; DO dfouter.version _ ReallyStoreDFFile[host, sfnnew, dfouter.createtime, dfseqinner, h ! ConnectionClosedError => { CWF.WF1["Connection to %s timed out.\n"L, dfouter.host]; smashrw _ rw; CONTINUE; }; ConnectCredentialsError => { <> CWF.WFCR[]; LOOP } ]; <> IF smashrw ~= none THEN smashrw _ Close[smashrw] ELSE EXIT; ENDLOOP; g.nDFFilesStored _ g.nDFFilesStored + 1; EXITS return => {}; }; -- of ENABLE UNWIND Subr.FreeString[sfnnew]; }; ReallyStoreDFFile: PROC [host, filename: LONG STRING, createtime: LONG CARDINAL, dfseqinner: DFSubr.DFSeq, h: Subr.TTYProcs] RETURNS[version: CARDINAL] = { <> sh: Stream.Handle _ NIL; info: STP.FileInfo; shortFileName: LONG STRING _ Subr.AllocateString[100]; desiredProperties: STP.DesiredProperties _ ALL[FALSE]; <> {ENABLE { ConnectionClosedError => { IF sh ~= NIL THEN Stream.Delete[sh]; sh _ NIL; Subr.FreeString[shortFileName]; }; STP.Error => SELECT code FROM noSuchFile => { CWF.WF2["Error - %s: %s.\n\n"L, filename, error]; Subr.FreeString[shortFileName]; ERROR Subr.FileError[notFound]; }; connectionClosed => { Subr.FreeString[shortFileName]; ERROR ConnectionClosedError[write]; }; illegalConnectName, illegalConnectPassword, accessDenied => { <> <> [] _ STPSubr.HandleSTPError[g.stp[write], code, error, h]; Subr.FreeString[shortFileName]; ERROR ConnectCredentialsError[write]; }; ENDCASE; }; Subr.strcpy[shortFileName, filename]; version _ 0; CWF.WF1["Storing %s "L, filename]; <> Open[host, write, h]; desiredProperties[directory] _ TRUE; desiredProperties[nameBody] _ TRUE; desiredProperties[version] _ TRUE; STP.SetDesiredProperties[g.stp[write], desiredProperties]; sh _ STP.CreateRemoteStream[ stp: g.stp[write], file: shortFileName, access: write, fileType: text, creation: LOOPHOLE[createtime] ! 
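-- catch phrase: STPSubr.HandleSTPError gets first look at STP errors; RETRY the store if it asks for that, otherwise the error propagates to the outer ENABLE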
STP.Error => IF STPSubr.HandleSTPError[g.stp[write], code, error, h] THEN RETRY]; DFSubr.WriteOut[dfseq: dfseqinner, topLevelFile: NIL, outputStream: sh, print: FALSE]; Stream.Delete[sh]; sh _ NIL; info _ STP.GetFileInfo[g.stp[write]]; version _ LongString.StringToDecimal[info.version]; CWF.WF1["!%s\n"L, info.version]; Subr.FreeString[shortFileName]; }}; CopyRemoteFile: PROC[dfouter: DFSubr.DF, h: Subr.TTYProcs] = { nIfsPages: CARDINAL; ok, inCache: BOOL; vers: CARDINAL; [ok, inCache, vers, nIfsPages] _ AlreadyExistsInCorrectVersion[ dfouter.releaseHost, dfouter.releaseDirectory, dfouter.shortname, dfouter.createtime, h]; IF ok THEN { IF g.verbose^ THEN { CWF.WF4["Correct version of [%s]<%s>%s already stored, %u pages "L, dfouter.releaseHost, dfouter.releaseDirectory, dfouter.shortname, @nIfsPages]; CWF.WF1["%s\n"L, IF inCache THEN " (In cache)"L ELSE ""L]; }; SwapSides[dfouter]; dfouter.version _ vers; g.nFilesNotStored _ g.nFilesNotStored + 1; g.nPagesNotStored _ g.nPagesNotStored + nIfsPages; g.nFilesToRelease _ g.nFilesToRelease + 1; g.nPagesToRelease _ g.nPagesToRelease + nIfsPages; RETURN; }; DO nIfsPages _ CopyRemoteFilesUsingSTP[dfouter, h ! ConnectCredentialsError => { CWF.WFCR[]; LOOP }]; EXIT; ENDLOOP; g.nFilesStored _ g.nFilesStored + 1; g.nPagesStored _ g.nPagesStored + nIfsPages; g.nFilesToRelease _ g.nFilesToRelease + 1; g.nPagesToRelease _ g.nPagesToRelease + nIfsPages; }; CopyRemoteFilesUsingSTP: PROC [dfouter: DFSubr.DF, h: Subr.TTYProcs] RETURNS[nIfsPages: CARDINAL] = { <> buffer: LONG POINTER; nxfer: CARDINAL; stopit: BOOL _ FALSE; shin, shout: Stream.Handle _ NIL; info: STP.FileInfo; ca: UserTerminal.CursorArray _ ALL[0]; cursorX, cursorY: INTEGER; flip: BOOL _ FALSE; sfnold: LONG STRING _ Subr.AllocateString[100]; sfnnew: LONG STRING _ Subr.AllocateString[100]; nbytes: LONG CARDINAL _ 0; ftp: UserTerminal.CursorArray _ [ 177400B, 177400B, 177400B, 177400B, 177400B, 177400B, 177400B, 177400B, 000377B, 000377B, 000377B, 000377B, 000377B, 000377B, 000377B, 000377B]; bcdBase: BcdOps.BcdBase; versionStamp: TimeStamp.Stamp; desiredProperties: STP.DesiredProperties _ ALL[FALSE]; Cleanup: PROC = { <> <> IF shin ~= NIL THEN Stream.Delete[shin]; shin _ NIL; IF shout ~= NIL THEN Stream.Delete[shout]; shout _ NIL; IF flip THEN UserTerminal.SetCursorPattern[ca]; IF sfnold # NIL THEN {Subr.FreeString[sfnold]; sfnold _ NIL}; IF sfnnew # NIL THEN {Subr.FreeString[sfnnew]; sfnnew _ NIL}; }; nIfsPages _ 0; IF dfouter.version = 0 THEN CWF.SWF2[sfnold, "<%s>%s!H"L, dfouter.directory, dfouter.shortname] ELSE CWF.SWF3[sfnold, "<%s>%s!%u"L, dfouter.directory, dfouter.shortname, @dfouter.version]; CWF.SWF2[sfnnew, "<%s>%s"L, dfouter.releaseDirectory, dfouter.shortname]; IF g.verbose^ THEN CWF.WF1["Copy %s"L, sfnold]; Open[dfouter.host, read, h]; desiredProperties _ ALL[FALSE]; desiredProperties[directory] _ TRUE; desiredProperties[nameBody] _ TRUE; desiredProperties[version] _ TRUE; desiredProperties[createDate] _ TRUE; desiredProperties[size] _ TRUE; STP.SetDesiredProperties[g.stp[read], desiredProperties]; shin _ STP.CreateRemoteStream[stp: g.stp[read], file: sfnold, access: read ! 
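-- catch phrase: noSuchFile and connectionClosed become local errors; anything else goes to STPSubr.HandleSTPError and may be retried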
STP.Error => IF code = noSuchFile THEN { CWF.WF2["Error - %s: %s.\n\n"L, sfnold, error]; ERROR Subr.FileError[notFound]; } ELSE IF code = connectionClosed THEN ERROR ConnectionClosedError[read] ELSE IF STPSubr.HandleSTPError[g.stp[read], code, error, h] THEN RETRY ]; IF g.verbose^ THEN CWF.WF0["\n\tto "L]; Open[dfouter.releaseHost, write, h]; desiredProperties _ ALL[FALSE]; desiredProperties[directory] _ TRUE; desiredProperties[nameBody] _ TRUE; desiredProperties[version] _ TRUE; STP.SetDesiredProperties[g.stp[write], desiredProperties]; shout _ STP.CreateRemoteStream[stp: g.stp[write], file: sfnnew, access: write, fileType: text, creation: LOOPHOLE[dfouter.createtime] ! STP.Error => SELECT code FROM noSuchFile => { CWF.WF2["Error - %s: %s.\n\n"L, sfnnew, error]; ERROR Subr.FileError[notFound]; }; connectionClosed => ERROR ConnectionClosedError[write]; ENDCASE => IF STPSubr.HandleSTPError[g.stp[write], code, error, h] THEN RETRY ]; CWF.WF1["%s ... "L, sfnnew]; IF g.copySpace = Space.nullHandle THEN { g.copySpace _ Space.Create[NPAGESTOCOPY, Space.virtualMemory]; Space.Map[g.copySpace]; }; IF Subr.CursorInWindow[h] THEN { [cursorX, cursorY] _ UserTerminal.cursor^; ca _ UserTerminal.GetCursorPattern[]; UserTerminal.SetCursorPattern[ftp]; flip _ TRUE; }; buffer _ Space.LongPointer[g.copySpace]; bcdBase _ buffer; {ENABLE UNWIND => Cleanup[]; WHILE NOT stopit DO [bytesTransferred: nxfer] _ Stream.GetBlock[shin, [buffer, 0, NPAGESTOCOPY*Environment.bytesPerPage] ! STP.Error => IF code = noSuchFile THEN { CWF.WF1["\n\tError - %s not found.\n"L, sfnold]; GOTO out; } ELSE IF code = connectionClosed THEN ERROR ConnectionClosedError[read]; Stream.EndOfStream => { stopit _ TRUE; nxfer _ nextIndex; CONTINUE } ]; IF nbytes = 0 THEN { lstr: LONG STRING _ NIL; info _ STP.GetFileInfo[g.stp[read]]; IF Subr.EndsIn[dfouter.shortname, ".Bcd"L] OR Subr.EndsIn[dfouter.shortname, ".Symbols"L] THEN versionStamp _ bcdBase.version ELSE -- use create time versionStamp _ [net: 0, host: 0, time: DateAndTimeUnsafe.Parse[info.create].dt]; lstr _ Subr.AllocateString[100]; {ENABLE UNWIND => {Subr.FreeString[lstr]}; CWF.SWF1[lstr, "%lu"L, @info.size]; STPOps.SetPListItem[LOOPHOLE[g.stp[write], STPOps.Handle].plist, "Size"L, lstr]; }; -- of ENABLE UNWIND Subr.FreeString[lstr]; }; Stream.PutBlock[shout, [buffer, 0, nxfer] ! STP.Error => IF code = connectionClosed THEN ERROR ConnectionClosedError[write] ELSE IF code = illegalConnectName OR code = illegalConnectPassword OR code = accessDenied THEN { <> <> [] _ STPSubr.HandleSTPError[g.stp[write], code, error, h]; ERROR ConnectCredentialsError[write]; }; ]; nbytes _ nbytes + nxfer; <> IF flip AND cursorX = UserTerminal.cursor^.x AND cursorY = UserTerminal.cursor^.y THEN { <> bits: UserTerminal.CursorArray _ UserTerminal.GetCursorPattern[]; FOR i: CARDINAL IN [0..16) DO bits[i] _ Inline.BITNOT[bits[i]]; ENDLOOP; UserTerminal.SetCursorPattern[bits]; }; ENDLOOP; Space.Kill[g.copySpace]; info _ STP.GetFileInfo[g.stp[write]]; IF info.version ~= NIL THEN { -- if there is a version number dfouter.version _ LongString.StringToDecimal[info.version]; CWF.WF1["!%s"L, info.version]; } ELSE dfouter.version _ 0; Cleanup[]; -- up here in case Twinkle runs out of space and the Stream.Delete gens. 
error CWF.WF1[", %lu bytes.\n"L, @nbytes]; nIfsPages _ Inline.LowHalf[(nbytes/bytesPerIFSPage)+2]; SwapSides[dfouter]; IF g.updateBTree THEN InsertIntoCache[ dfouter.host, dfouter.directory, dfouter.shortname, dfouter.version, dfouter.createtime, nIfsPages]; -- record existence AddToVersionMap[ dfouter.host, dfouter.directory, dfouter.shortname, dfouter.version, versionStamp]; EXITS out => Cleanup[]; }}; AddToVersionMap: PROC [host, directory, shortname: LONG STRING, version: CARDINAL, bcdVers: TimeStamp.Stamp] = { i: INT; file: Rope.ROPE _ IO.PutFR[ "[%s]<%s>%s!%d", IO.string[host], IO.string[directory], IO.string[shortname], IO.card[version]]; IF file.Length[] < g.versionMapPrefix.Length[] THEN { g.versionMapFile.PutF["%a %s\n", CS.MakeTS[bcdVers], IO.rope[file]]; RETURN; }; i _ 0; WHILE i < g.versionMapPrefix.Length[] DO IF Rope.Lower[file.Fetch[i]] ~= Rope.Lower[g.versionMapPrefix.Fetch[i]] THEN { g.versionMapFile.PutF["%a %s\n", CS.MakeTS[bcdVers], IO.rope[file]]; RETURN; }; i _ i + 1; ENDLOOP; file _ Rope.Flatten[file, i]; g.versionMapFile.PutF["%a %s\n", CS.MakeTS[bcdVers], IO.rope[file]]; }; AlreadyExistsInCorrectVersion: PROC [host, directory, shortname: LONG STRING, createtime: LONG CARDINAL, h: Subr.TTYProcs] RETURNS [foundonremote, inCache: BOOL, remoteVersion, nIfsPages: CARDINAL] = { fres: FQ.Result; remoteByteLength: LONG CARDINAL; foundonremote _ FALSE; IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself; <> [inCache, remoteVersion, nIfsPages] _ LookupInOldFileCache[host, directory, shortname, createtime]; IF inCache THEN RETURN[TRUE, TRUE, remoteVersion, nIfsPages]; <> [fres: fres, remoteVersion: remoteVersion, remoteByteLength: remoteByteLength] _ FQ.FileQueryBangH[host, directory, shortname, createtime, h]; SELECT fres FROM foundCorrectVersion => { -- found with right create time foundonremote _ TRUE; nIfsPages _ (remoteByteLength/bytesPerIFSPage) + 2; IF g.updateBTree THEN -- record existence in cache InsertIntoCache[host, directory, shortname, remoteVersion, createtime, nIfsPages]; }; notFound, foundWrongVersion => NULL; -- not found ENDCASE => ERROR; }; SetCreateDateAndVersionFromPhaseOne: PROC[df: DFSubr.DF, dfseqall: DFSubr.DFSeq] = { <> <> <> dfall: DFSubr.DF; start, stopPlusOne: CARDINAL; [start, stopPlusOne] _ ObtainStartAndStopIndicesFromDFSeq[df.shortname, dfseqall]; FOR i: CARDINAL IN [start .. stopPlusOne) DO dfall _ @dfseqall[i]; IF dfall.createtime ~= 0 AND LongString.EquivalentString[dfall.shortname, df.shortname] AND LongString.EquivalentString[dfall.directory, df.directory] AND LongString.EquivalentString[dfall.host, df.host] THEN { df.createtime _ dfall.createtime; df.version _ dfall.version; RETURN; }; ENDLOOP; df.version _ 0; <> g.out.PutF["Warning- can't find create date for %s.\n", IO.string[df.shortname]]; }; FixupEntriesWithoutReleaseAs: PROC[df: DFSubr.DF, dfseq, dfseqall: DFSubr.DFSeq] = { <> <> dfall: DFSubr.DF; start, stopPlusOne: CARDINAL; [start, stopPlusOne] _ ObtainStartAndStopIndicesFromDFSeq[df.shortname, dfseqall]; FOR i: CARDINAL IN [start .. 
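-- scan the phase-1 entries for this shortname, looking for one that does carry a ReleaseAs (and is not a CameFrom) whose release position this entry can borrow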
stopPlusOne) DO dfall _ @dfseqall[i]; IF dfall.releaseDirectory ~= NIL AND NOT dfall.cameFrom AND LongString.EquivalentString[dfall.shortname, df.shortname] AND LongString.EquivalentString[dfall.directory, df.directory] AND LongString.EquivalentString[dfall.host, df.host] AND NOT LongString.EquivalentString[dfall.shortname, dfall.recorder] THEN { df.releaseHost _ Subr.CopyString[dfall.releaseHost, dfseq.dfzone]; df.releaseDirectory _ Subr.CopyString[dfall.releaseDirectory, dfseq.dfzone]; df.createtime _ dfall.createtime; df.version _ dfall.version; SwapSides[df]; RETURN; }; ENDLOOP; <> <> g.out.PutF["Warning- appears %s is not being released.\n", IO.string[df.shortname]]; }; ObtainStartAndStopIndicesFromDFSeq: PROC [shortname: LONG STRING, dfseqall: DFSubr.DFSeq] RETURNS[start, stopPlusOne: CARDINAL] = { ch: CHAR _ Rope.Lower[shortname[0]]; IF ch NOT IN ['a .. 'z] THEN { g.out.PutF["Cant find index for %s\n", IO.string[shortname]]; <> RETURN[0, dfseqall.size]; }; start _ g.dfseqIndex[ch]; stopPlusOne _ IF ch = 'z THEN dfseqall.size ELSE g.dfseqIndex[ch+1]; IF stopPlusOne = 0 THEN { g.out.PutF["Cant find upper bound\n"]; stopPlusOne _ dfseqall.size; }; }; BuildIndex: PROC[dfseqall: DFSubr.DFSeq] = { dfall: DFSubr.DF; ch, newch: CHAR _ 'a; g.dfseqIndex['a] _ 0; FOR i: CARDINAL IN [0 .. dfseqall.size) DO dfall _ @dfseqall[i]; newch _ Rope.Lower[dfall.shortname[0]]; IF newch < ch THEN { g.out.PutF["Warning - Bad sort order for %s\n", IO.string[dfall.shortname]]; LOOP; }; IF newch > ch THEN { FOR c: CHAR['a .. 'z] IN (ch .. newch] DO -- in case of gaps g.dfseqIndex[c] _ i; ENDLOOP; ch _ newch; }; ENDLOOP; FOR c: CHAR['a .. 'z] IN (ch .. 'z] DO g.dfseqIndex[c] _ dfseqall.size; ENDLOOP; }; SwapSides: PROC[df: DFSubr.DF] = { s: LONG STRING; IF df.cameFrom THEN ERROR; -- should never be called if is CameFrom df.cameFrom _ NOT df.cameFrom; s _ df.releaseHost; df.releaseHost _ df.host; df.host _ s; s _ df.directory; df.directory _ df.releaseDirectory; df.releaseDirectory _ s; }; BVal: TYPE = RECORD[ version: CARDINAL _ 0, nIfsPages: CARDINAL _ 0 ]; LookupInOldFileCache: PROC [host, directory, shortname: LONG STRING, createtime: LONG CARDINAL] RETURNS[inCache: BOOL _ FALSE, version, nIfsPages: CARDINAL _ 0] = { bval: BVal _ []; len: CARDINAL; IF createtime = 0 OR NOT g.oldPhase3FileCacheExists OR NOT g.useOldPhase3FileCache THEN RETURN; {-- block for actual lookup specialPrefix: STRING _ "[Indigo]"L; sfn: LONG STRING _ Subr.AllocateString[100]; file: LONG STRING _ Subr.AllocateString[100]; {ENABLE UNWIND => {Subr.FreeString[file]; Subr.FreeString[sfn]}; <> CWF.SWF3[file, "[%s]<%s>%s"L, host, directory, shortname]; IF Subr.Prefix[file, specialPrefix] THEN Subr.SubStrCopy[file, file, specialPrefix.length]; CWF.SWF2[sfn, "%lu\000%s"L, @createtime, file]; len _ BTreeDefs.Lookup[ g.phase3BTreeHandle, MakeBTreeDesc[sfn], DESCRIPTOR[@bval, SIZE[BVal]]]; }; -- of ENABLE UNWIND Subr.FreeString[file]; Subr.FreeString[sfn]}; IF len = BTreeDefs.KeyNotFound THEN RETURN; RETURN[TRUE, bval.version, bval.nIfsPages]; }; InsertIntoCache: PROC[host, directory, shortname: LONG STRING, version: CARDINAL, createtime: LONG CARDINAL, nIfsPages: CARDINAL] = { bval: BVal _ [version: version, nIfsPages: nIfsPages]; sfn: LONG STRING _ Subr.AllocateString[100]; file: LONG STRING _ Subr.AllocateString[100]; specialPrefix: STRING _ "[Indigo]"L; {ENABLE UNWIND => {Subr.FreeString[sfn]; Subr.FreeString[file]}; CWF.SWF3[file, "[%s]<%s>%s"L, host, directory, shortname]; IF Subr.Prefix[file, specialPrefix] THEN Subr.SubStrCopy[file, 
file, specialPrefix.length]; CWF.SWF2[sfn, "%lu\000%s"L, @createtime, file]; BTreeDefs.Insert[g.phase3BTreeHandle, MakeBTreeDesc[sfn], DESCRIPTOR[@bval, SIZE[BVal]]]; }; -- of ENABLE UNWIND Subr.FreeString[sfn]; Subr.FreeString[file]; }; <> InitialNumberOfPhase3BTreePages: CARDINAL = 1000; MakeBTree: PROC = { g.oldPhase3FileCacheExists _ TRUE; g.phase3BTreeCap _ Directory.Lookup[fileName: "ReleaseTool.Phase3BTreeFile$"L, permissions: Directory.ignore ! Directory.Error => { g.oldPhase3FileCacheExists _ FALSE; CONTINUE; }]; IF NOT g.oldPhase3FileCacheExists THEN g.phase3BTreeCap _ Subr.NewFile["ReleaseTool.Phase3BTreeFile$"L, Subr.ReadWrite, InitialNumberOfPhase3BTreePages]; g.phase3BTreeHandle _ BTreeDefs.CreateAndInitializeBTree[ fileH: BTreeSupportExtraDefs.OpenFile[g.phase3BTreeCap], initializeFile: NOT g.oldPhase3FileCacheExists, isFirstGreaterOrEqual: IsFirstGEQ, areTheyEqual: AreTheyEQ]; }; CleanupBTree: PROC = { IF g.phase3BTreeCap ~= File.nullCapability THEN { BTreeSupportExtraDefs.CloseFile[BTreeDefs.ReleaseBTree[g.phase3BTreeHandle]]; g.phase3BTreeCap _ File.nullCapability; g.oldPhase3FileCacheExists _ FALSE; }; }; MakeBTreeDesc: PROC [s: LONG STRING] RETURNS [d: BTreeDefs.Desc] = { RETURN[DESCRIPTOR[LOOPHOLE[s, LONG POINTER], (s.length + 1)/2 + 2]]}; IsFirstGEQ: BTreeDefs.TestKeys = { aS: LONG STRING = LOOPHOLE[BASE[a]]; bS: LONG STRING = LOOPHOLE[BASE[b]]; FOR i: CARDINAL IN [0..MIN[aS.length, bS.length]) DO aC: CHAR = Inline.BITOR[aS[i], 40B]; bC: CHAR = Inline.BITOR[bS[i], 40B]; SELECT aC FROM > bC => RETURN [TRUE]; < bC => RETURN [FALSE]; ENDCASE; ENDLOOP; RETURN [aS.length >= bS.length] }; AreTheyEQ: BTreeDefs.TestKeys = { aS: LONG STRING = LOOPHOLE[BASE[a]]; bS: LONG STRING = LOOPHOLE[BASE[b]]; IF aS.length ~= bS.length THEN RETURN [FALSE]; FOR i: CARDINAL IN [0..aS.length) DO IF Inline.BITOR[aS[i], 40B] ~= Inline.BITOR[bS[i], 40B] THEN RETURN [FALSE]; ENDLOOP; RETURN [TRUE] }; <<(entry must be readonly)>> <> <Top>>> <> <Top>>> <> <Top>> <> <> <<[Indigo]Top>X.df [Indigo]Top>X.df>> <<>> CoerceDFLocToAnother: PROC[df: DFSubr.DF, dfseq: DFSubr.DFSeq] = { IF g.dfmap = NIL THEN ReadInAndParseDFMap[]; <> IF NOT df.readonly THEN ERROR; FOR i: CARDINAL IN [0 .. 
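-- look this DF up in the Release.DFLocations map; on a match the reference is rewritten to the map's right-hand-side host and directory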
g.dfmap.size) DO IF LongString.EquivalentString[g.dfmap[i].shortname, df.shortname] AND LongString.EquivalentString[g.dfmap[i].lhsHost, df.host] AND LongString.EquivalentString[g.dfmap[i].lhsDirectory, df.directory] THEN { CWF.WF3["Mapping reference to [%s]<%s>%s\n"L, df.host, df.directory, df.shortname]; CWF.WF3["\tinto a reference to [%s]<%s>%s.\n"L, g.dfmap[i].rhsHost, g.dfmap[i].rhsDirectory, df.shortname]; Subr.FreeString[df.host, dfseq.dfzone]; Subr.FreeString[df.directory, dfseq.dfzone]; df.host _ Subr.CopyString[g.dfmap[i].rhsHost, dfseq.dfzone]; df.directory _ Subr.CopyString[g.dfmap[i].rhsDirectory, dfseq.dfzone]; <> df.cameFrom _ FALSE; Subr.FreeString[df.releaseHost, dfseq.dfzone]; -- if present Subr.FreeString[df.releaseDirectory, dfseq.dfzone]; df.releaseHost _ df.releaseDirectory _ NIL; RETURN; }; ENDLOOP; }; NMAPENTRIES: CARDINAL = 100; ReadInAndParseDFMap: PROC = { i: CARDINAL; sh: Stream.Handle _ NIL; stemp: LONG STRING _ Subr.AllocateString[100]; line: LONG STRING _ Subr.AllocateString[100]; host: LONG STRING _ Subr.AllocateString[100]; directory: LONG STRING _ Subr.AllocateString[100]; shortname: LONG STRING _ Subr.AllocateString[100]; longzone: UNCOUNTED ZONE _ Subr.LongZone[]; Cleanup: PROC = { Subr.FreeString[stemp]; Subr.FreeString[line]; Subr.FreeString[host]; Subr.FreeString[directory]; Subr.FreeString[shortname]; IF sh # NIL THEN {Stream.Delete[sh]; sh _ NIL}; }; {ENABLE UNWIND => Cleanup[]; g.dfmap _ longzone.NEW[DFMapSeqRecord[NMAPENTRIES]]; g.dfmap.zone _ longzone; sh _ Subr.NewStream["Release.DFLocations"L, Subr.Read ! Subr.FileError => { CWF.WF0["No mapping of locations - Cannot open Release.DFLocations.\n"L]; GOTO return }]; CWF.WF0["Reading DF mapping from file Release.DFLocations.\n"L]; WHILE Subr.GetLine[sh, line] DO IF line.length = 0 OR Subr.Prefix[line, "//"L] OR Subr.Prefix[line, "--"L] THEN LOOP; IF g.dfmap.size > g.dfmap.maxsize THEN { CWF.WF0["Error - too many DFLocations.\n"L]; EXIT; }; i _ 0; WHILE i < line.length AND line[i] ~= ' AND line[i] ~= '\t DO i _ i + 1; ENDLOOP; IF i >= line.length THEN { CWF.WF1["Error - this line needs two file names on it: %s.\n"L, line]; EXIT; }; Subr.strcpy[stemp, line]; stemp.length _ i; Subr.SubStrCopy[line, line, i]; Subr.StripLeadingBlanks[line]; [] _ DFSubr.StripLongName[stemp, host, directory, shortname, FALSE]; g.dfmap[g.dfmap.size].shortname _ Subr.CopyString[shortname, longzone]; g.dfmap[g.dfmap.size].lhsHost _ Subr.CopyString[host, longzone]; g.dfmap[g.dfmap.size].lhsDirectory _ Subr.CopyString[directory, longzone]; [] _ DFSubr.StripLongName[line, host, directory, shortname, FALSE]; IF NOT LongString.EquivalentString[shortname, g.dfmap[g.dfmap.size].shortname] THEN { CWF.WF1["Error - line including %s does not have shortnames that match.\n"L, line]; LOOP; }; g.dfmap[g.dfmap.size].rhsHost _ Subr.CopyString[host, longzone]; g.dfmap[g.dfmap.size].rhsDirectory _ Subr.CopyString[directory, longzone]; g.dfmap.size _ g.dfmap.size + 1; ENDLOOP; EXITS return => {}; }; -- of ENABLE UNWIND Cleanup[]; }; Open: PROC[host: LONG STRING, rw: Rw, h: Subr.TTYProcs] = { IF g.stp[rw] ~= NIL THEN STP.SetDesiredProperties[g.stp[rw], ALL[FALSE]]; IF g.stp[rw] = NIL OR NOT LongString.EquivalentString[LOOPHOLE[g.stpHost[rw]], host] THEN { IF g.stp[rw] ~= NIL THEN [] _ Close[rw]; g.stp[rw] _ STPSubr.MakeSTPHandle[host, h]; g.stpHost[rw] _ ConvertUnsafe.ToRope[host]; <> IF rw = write AND g.connectName ~= NIL AND LongString.EquivalentString[host, "Indigo"L] THEN { shortConnectName: LONG STRING _ Subr.AllocateString[100]; 
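-- scratch copies of the saved connect name and password are handed to STP.Connect; both are freed once the connection is made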
      shortConnectPassword: LONG STRING _ Subr.AllocateString[100];
      {ENABLE UNWIND => {
        Subr.FreeString[shortConnectName];
        Subr.FreeString[shortConnectPassword]};
      Subr.strcpy[shortConnectName, g.connectName];
      IF g.connectPassword ~= NIL THEN
        Subr.strcpy[shortConnectPassword, g.connectPassword];
      STP.Connect[g.stp[rw], shortConnectName, shortConnectPassword];
      }; -- of ENABLE UNWIND
      Subr.FreeString[shortConnectName];
      Subr.FreeString[shortConnectPassword];
      };
    };
  };

Close: PROC[rw: Rw] RETURNS[alwaysNone: Rw] = {
  IF g.stp[rw] ~= NIL THEN {
    g.out.PutF["Closing connection to %s\n", IO.rope[g.stpHost[rw]]];
    g.stp[rw] _ STP.Destroy[g.stp[rw] ! STP.Error => CONTINUE];
    g.stpHost[rw] _ NIL;
    };
  RETURN[none];
  };

Flush: PROC = {
  g.out.Flush[];
  };

PrintACode: IO.PFCodeProc = TRUSTED {
  WITH v: val SELECT FROM
    refAny => {
      pts: CS.PTimeStamp _ NARROW[LOOPHOLE[v.value, REF ANY]];
      hex: PACKED ARRAY [0 .. 12) OF [0 .. 16) _ LOOPHOLE[pts^];
      FOR i: CARDINAL IN [0 .. 12) DO
        IF hex[i] IN [0 .. 9] THEN stream.PutChar['0 + hex[i]]
        ELSE stream.PutChar['A + (hex[i] - 10)];
        ENDLOOP;
      };
    ENDCASE => ERROR;
  };

}.