-- Release23Impl.Mesa
-- last edit March 17, 1983 10:16 am
-- last edit June 9, 1983 12:16 pm, Russ Atkinson (short STRING to LONG STRING)
DIRECTORY
BcdOps: TYPE USING[BcdBase],
BTreeDefs: TYPE USING [BTreeHandle, CreateAndInitializeBTree, Desc, Insert, KeyNotFound, Lookup, ReleaseBTree, TestKeys],
BTreeSupportExtraDefs: TYPE USING[CloseFile, OpenFile],
ConvertUnsafe: TYPE USING[ToRope],
CS: TYPE USING[MakeTS, PTimeStamp],
CWF: TYPE USING [SWF1, SWF2, SWF3, WF0, WF1, WF2, WF3, WF4, WFCR],
DateAndTimeUnsafe: TYPE USING[Parse],
DFSubr: TYPE USING [AllocateDFSeq, DF, DFSeq, FreeDFSeq, LookupDF, NextDF, ParseStream, StripLongName, WriteOut],
Directory: TYPE USING[Error, Lookup, ignore],
Environment: TYPE USING [bytesPerPage],
File: TYPE USING[Capability, nullCapability],
FileIO: TYPE USING[Open],
FQ: TYPE USING[FileQueryBangH, Result],
Heap: TYPE USING [Error],
Inline: TYPE USING [BITNOT, BITOR, LowHalf],
IO: TYPE USING [card, Close, Handle, Flush, GetLength, PFCodeProc, Put, PutChar, PutF, PutFR, rope, SetPFCodeProc, SetIndex, string, UserAbort],
LongString: TYPE USING [EquivalentString, StringToDecimal],
ReleaseSupport: TYPE USING [],
Rope: TYPE USING[Length, Lower, Fetch, Flatten, ROPE, Text],
RTBcd: TYPE USING[VersionID],
Space: TYPE USING [Create, Delete, Handle, Kill, LongPointer, Map, nullHandle, virtualMemory],
UnsafeSTP: TYPE USING [Connect, CreateRemoteStream, DesiredProperties, Destroy, Error, FileInfo, GetFileInfo, Handle, SetDesiredProperties],
UnsafeSTPOps: TYPE USING [Handle, SetPListItem],
STPSubr: TYPE USING [CachedOpen, HandleSTPError, MakeSTPHandle, StopSTP, StpStateRecord],
Stream: TYPE USING [Delete, EndOfStream, GetBlock, Handle, PutBlock],
Subr: TYPE USING [AbortMyself, AllocateString, CopyString, CursorInWindow, EndsIn, FileError, FreeString, GetLine, GetNameandPassword, LongZone, NewFile, NewStream, Prefix, Read, ReadWrite, strcpy, StripLeadingBlanks, SubStrCopy, TTYProcs],
TimeStamp: TYPE USING[Stamp],
UserTerminal: TYPE USING [cursor, CursorArray, GetCursorPattern, SetCursorPattern],
VerifyDFInterface: TYPE USING [VerifyBcds];
Release23Impl: PROGRAM
  IMPORTS BTreeDefs, BTreeSupportExtraDefs, ConvertUnsafe, CS, CWF, DateAndTimeUnsafe, DFSubr, Directory, FileIO, FQ, Heap, Inline, IO, LongString, Rope, Space, STP: UnsafeSTP, STPOps: UnsafeSTPOps, STPSubr, Stream, Subr, UserTerminal, VerifyDFInterface
  = {
-- max number of entries in a single df file
MAXFILES: CARDINAL = 500;
-- in phase 3, # pages in the buffer used for the remote copy-to-copy;
-- cannot be larger than 128, since 512*128 = 64K = LAST[CARDINAL]+1
NPAGESTOCOPY: CARDINAL = 127;
-- number of bytes in an IFS page
-- (all page counts are in IFS pages, not Pilot pages)
bytesPerIFSPage: CARDINAL = 2048;
DFMapSeqRecord: TYPE = RECORD[
  size: CARDINAL ← 0,
  zone: UNCOUNTED ZONE ← NULL, -- zone for strings below
  body: SEQUENCE maxsize: CARDINAL OF RECORD[
    shortname: LONG STRING ← NIL, -- e.g. "Rigging.DF"
    lhsHost: LONG STRING ← NIL, -- the released position we are overriding
    lhsDirectory: LONG STRING ← NIL,
    rhsHost: LONG STRING ← NIL, -- the working posn we want it to refer to
    rhsDirectory: LONG STRING ← NIL
    ]
  ];
Rw: TYPE = {read, write, none};
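-- Rw selects which of the two STP connections an operation uses: the read side (source host) or the write side (release host)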
Global: TYPE = RECORD[
  nDFFilesStored: CARDINAL ← 0, -- the # actually written
  nFilesToRelease: CARDINAL ← 0, -- the number that are being ReleaseAs'd
  nPagesToRelease: LONG CARDINAL ← 0, -- pages for above
  nFilesStored: CARDINAL ← 0, -- the number actually copied this time
  nPagesStored: LONG CARDINAL ← 0, -- pages for above
  nFilesNotStored: CARDINAL ← 0, -- the number that would have been copied
  nPagesNotStored: LONG CARDINAL ← 0, -- pages for above
  nFilesSkipped: CARDINAL ← 0, -- number not being released (e.g. CameFrom)
  copySpace: Space.Handle ← Space.nullHandle,
  -- BTree stuff
  oldPhase3FileCacheExists: BOOL ← FALSE,
  useOldPhase3FileCache: BOOL ← FALSE,
  updateBTree: BOOL ← TRUE,
  phase3BTreeHandle: BTreeDefs.BTreeHandle ← NULL,
  phase3BTreeCap: File.Capability ← File.nullCapability,
  dfmap: LONG POINTER TO DFMapSeqRecord ← NIL,
  in, out: IO.Handle ← NIL,
  verbose: REF BOOL ← NIL,
  stp: ARRAY Rw OF STP.Handle ← ALL[NIL],
  stpHost: ARRAY Rw OF Rope.Text ← ALL[NIL],
  connectName: LONG STRING ← NIL,
  connectPassword: LONG STRING ← NIL,
  versionMapPrefix: Rope.Text ← NIL,
  versionMapFile: IO.Handle ← NIL,
  dfseqIndex: ARRAY CHAR['a .. 'z] OF CARDINAL ← ALL[0]
  ];
-- the btree caches work as follows:
--   if there was one, oldPhase3FileCacheExists is TRUE
--   if the user wants lookups from the btree, useOldPhase3FileCache is TRUE
--   if the user wants insertions into the btree, updateBTree is TRUE
--   lookups only use the btree if both oldPhase3FileCacheExists and
--   useOldPhase3FileCache are TRUE
--   insertions only occur if updateBTree is TRUE
-- MDS usage
g: REF Global ← NEW[Global ← [versionMapPrefix: "[Indigo]<Cedar>"]];
-- end of MDS usage
VerifySufficiency: PUBLIC PROC[topdffilename: LONG STRING, h: Subr.TTYProcs,
  outhandle: IO.Handle, checkForOverwrite: BOOL] = {
-- Phase 2
dfseq: DFSubr.DFSeq ← NIL;
sh: Stream.Handle;
df: DFSubr.DF;
{ENABLE UNWIND => DFSubr.FreeDFSeq[@dfseq];
stpStateRecord: STPSubr.StpStateRecord ← [checkForOverwrite: checkForOverwrite];
g.out ← outhandle;
CWF.WF1["Opening %s.\n"L, topdffilename];
[sh] ← STPSubr.CachedOpen[host: NIL, directory: NIL, shortname: topdffilename, version: 0,
  wantcreatetime: 0, h: h, wantExplicitVersion: FALSE,
  onlyOne: TRUE, stpState: @stpStateRecord
! Subr.FileError => GOTO notfound];
dfseq ← DFSubr.AllocateDFSeq[maxEntries: MAXFILES, zoneType: shared];
DFSubr.ParseStream[sh, dfseq, topdffilename, NIL, FALSE, FALSE, FALSE, h];
Stream.Delete[sh];
FOR i: CARDINAL IN [0 .. dfseq.size) DO
IF outhandle.UserAbort[] THEN SIGNAL Subr.AbortMyself;
df ← @dfseq[i];
IF df.atsign AND NOT df.readonly AND NOT df.cameFrom THEN
VerifyThisPackage[df.host, df.directory, df.shortname,
df.version, df.createtime, h, checkForOverwrite, df.criterion = none];
ENDLOOP;
STPSubr.StopSTP[]; -- may have timed out
DFSubr.FreeDFSeq[@dfseq];
EXITS
notfound => CWF.WF1["Error - can't open %s.\n"L, topdffilename];
}};
VerifyThisPackage: PROC[host, directory, shortname: LONG STRING,
version: CARDINAL, createtime: LONG CARDINAL, h: Subr.TTYProcs,
checkForOverwrite, wantExplicitVersion: BOOL] = {
-- ... is called once for each of the first level of the tree below the root, that is, for each of the Includes of the root DF for the release; host and directory may be NIL
dfseq: DFSubr.DFSeq ← NIL;
sh: Stream.Handle;
plus, nonLeaf: BOOL ← FALSE;
df: DFSubr.DF;
stpStateRecord: STPSubr.StpStateRecord ← [checkForOverwrite: checkForOverwrite];
{ENABLE UNWIND => DFSubr.FreeDFSeq[@dfseq];
Flush[];
CWF.WF1["\nOpening %s.\n"L, shortname];
[sh] ← STPSubr.CachedOpen[
host: host, directory: directory,
shortname: shortname, version: version,
wantcreatetime: createtime, h: h, wantExplicitVersion: wantExplicitVersion,
onlyOne: TRUE, stpState: @stpStateRecord
! Subr.FileError => GOTO notfound];
dfseq ← DFSubr.AllocateDFSeq[maxEntries: MAXFILES, zoneType: shared];
DFSubr.ParseStream[sh, dfseq, shortname, NIL, FALSE, FALSE, FALSE, h];
Stream.Delete[sh];
FOR i: CARDINAL IN [0 .. dfseq.size) DO
df ← @dfseq[i];
IF df.atsign AND NOT df.readonly AND NOT df.cameFrom THEN nonLeaf ← TRUE;
ENDLOOP;
-- at this point all nested DF files have been verified
plus ← FALSE;
FOR i: CARDINAL IN [0 .. dfseq.size) DO
IF dfseq[i].topmark THEN plus ← TRUE;
ENDLOOP;
DFSubr.FreeDFSeq[@dfseq];
IF NOT nonLeaf AND NOT plus THEN {
CWF.WF1["No + files in %s, a file that Includes no other DF files.\n"L, shortname];
RETURN;
};
CWF.WF1["VerifyDF of %s started.\n"L, shortname];
Flush[];
VerifyDFInterface.VerifyBcds[bcdfilename: NIL, dffilename: shortname,
h: h, checkForOverwrite: checkForOverwrite, printFlattened: FALSE,
useHugeZone: FALSE, wantRTVersionID: RTBcd.VersionID
! Heap.Error => {
CWF.WF0["Error - Heap.Error!!!!!.\n"L];
CONTINUE;
}
];
CWF.WF1["VerifyDF of %s complete.\n"L, shortname];
STPSubr.StopSTP[]; -- may have timed out
EXITS
notfound => {
IF host ~= NIL THEN
  CWF.WF3["Error - can't open [%s]<%s>%s\n"L, host, directory, shortname]
ELSE CWF.WF1["Error - can't open %s.\n"L, shortname];
};
}};
TransferFiles: PUBLIC PROC[topdffilename: LONG STRING, dfseqall: DFSubr.DFSeq,
h: Subr.TTYProcs, inhandle, outhandle, logFileHandle: IO.Handle,
checkForOverwrite, usePhase3BTree, updateBTree: BOOL, verbosePtr: REF BOOL] = {
-- Phase 3
-- at this point we know all the files exist and are consistent,
-- thus we go ahead and store the files using STP in their
-- new release directories, producing new DF files as we go;
-- this is done bottom-up recursively, roughly as SModel does it
-- ASSUMES the dfseqall is sorted by shortname
dfseq: DFSubr.DFSeq ← NIL;
outofspace: BOOL;
df: DFSubr.DF;
connectName: LONG STRING ← Subr.AllocateString[50];
connectPassword: LONG STRING ← Subr.AllocateString[50];
Cleanup: PROC = {
g.out.PutF["Summary for phase 3: (page counts are in 2048 bytes)\n"];
g.out.PutF["\t%d non-DF files being released, %d pages in those files,\n",
IO.card[g.nFilesToRelease], IO.card[g.nPagesToRelease]];
g.out.PutF["\t%d non-DF files actually copied, %d pages in those files,\n",
IO.card[g.nFilesStored], IO.card[g.nPagesStored]];
g.out.PutF["\t%d non-DF files in release position, %d pages in those files,\n",
IO.card[g.nFilesNotStored], IO.card[g.nPagesNotStored]];
g.out.PutF["\t%d DF files written, %d files skipped entirely.\n",
IO.card[g.nDFFilesStored], IO.card[g.nFilesSkipped]];
IF g.copySpace ~= Space.nullHandle THEN Space.Delete[g.copySpace];
g.copySpace ← Space.nullHandle;
Flush[];
[] ← Close[read];
[] ← Close[write];
IF g.versionMapFile ~= NIL THEN g.versionMapFile.Close[];
g.versionMapFile ← NIL;
STPSubr.StopSTP[];
CleanupBTree[];
DFSubr.FreeDFSeq[@dfseq];
Subr.FreeString[connectName];
Subr.FreeString[connectPassword];
};
{ENABLE UNWIND => Cleanup[];
notFound: BOOL ← FALSE;
g.verbose ← verbosePtr;
g.in ← inhandle;
g.out ← outhandle;
g.updateBTree ← updateBTree;
g.useOldPhase3FileCache ← usePhase3BTree;
IF usePhase3BTree OR updateBTree THEN MakeBTree[];
g.nPagesStored ← g.nPagesNotStored ← g.nPagesToRelease ← 0;
g.nFilesStored ← g.nFilesNotStored ← g.nFilesToRelease ← g.nFilesSkipped ← g.nDFFilesStored ← 0;
IF dfseqall = NIL THEN {
CWF.WF0["Error - Phase 1 must precede Phase 3 without a Reset in between.\n"L];
GO TO return;
};
BuildIndex[dfseqall]; -- build search index used by phase 3
IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself;
dfseq ← DFSubr.AllocateDFSeq[maxEntries: 1, zoneType: shared];
df ← DFSubr.NextDF[dfseq];
df.shortname ← Subr.CopyString[topdffilename, dfseq.dfzone];
df.atsign ← TRUE;
Flush[];
CWF.WF0["For Indigo, Enter Probable Connect "L]; -- supply "Cedar", no password
Subr.GetNameandPassword[connect, connectName, connectPassword, h];
g.connectName ← Subr.CopyString[connectName];
g.connectPassword ← Subr.CopyString[connectPassword];
g.out.Put[IO.string["Appending version map file for bcds on 'Release.VersionMapFile$'.\n"L]];
[] ← Directory.Lookup[
fileName: "Release.VersionMapFile$"L, permissions: Directory.ignore
! Directory.Error => {
notFound ← TRUE;
CONTINUE;
}];
g.versionMapFile ← FileIO.Open["Release.VersionMapFile$", IF notFound THEN overwrite ELSE write];
g.versionMapFile.SetPFCodeProc['a, PrintACode];
IF notFound
THEN g.versionMapFile.PutF["%s\n", IO.rope[g.versionMapPrefix]]
ELSE g.versionMapFile.SetIndex[g.versionMapFile.GetLength[]]; -- sets to end
outofspace ← RecursiveStoreDF[dfseq, NIL, dfseqall, h, checkForOverwrite];
EXITS return => {};
};
Cleanup[];
};
ConnectionClosedError: ERROR[rw: Rw] = CODE; -- raised when a connection has timed out
ConnectCredentialsError: ERROR[rw: Rw] = CODE; -- raised when conn. password is needed
RecursiveStoreDF: PROC[dfseqouter: DFSubr.DFSeq, topdfouter: DFSubr.DF, dfseqall: DFSubr.DFSeq,
  h: Subr.TTYProcs, checkForOverwrite: BOOL] RETURNS[outofspace: BOOL] = {
-- topdfouter may be NIL
sh: Stream.Handle;
dfouter: DFSubr.DF;
stpStateRecord: STPSubr.StpStateRecord ← [checkForOverwrite: checkForOverwrite];
outofspace ← FALSE;
FOR i: CARDINAL IN [0 .. dfseqouter.size) DO
dfouter ← @dfseqouter[i];
IF dfouter.atsign AND NOT dfouter.readonly
  AND (dfouter.releaseDirectory = NIL OR NOT dfouter.cameFrom) THEN {
  -- is a non-readonly DF file (may be a CameFrom DF file)
dfseqinner: DFSubr.DFSeq ← NIL;
o: BOOL;
{ENABLE UNWIND => DFSubr.FreeDFSeq[@dfseqinner];
-- recur on the lower DF file;
-- once it returns, we know the files are all stored and
-- the release directories have been swapped;
-- now this level (parent) can store the DF file
IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself;
dfseqinner ← DFSubr.AllocateDFSeq[maxEntries: MAXFILES, zoneType: shared];
-- this call may get connectionClosed, but will catch it internally
[sh] ← STPSubr.CachedOpen[host: dfouter.host, directory: dfouter.directory,
shortname: dfouter.shortname, version: dfouter.version,
wantcreatetime: dfouter.createtime, h: h,
wantExplicitVersion: dfouter.criterion = none,
onlyOne: TRUE, stpState: @stpStateRecord
! Subr.FileError => GOTO err];
CWF.WF1["Opening %s.\n"L, dfouter.shortname];
IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself;
DFSubr.ParseStream[sh, dfseqinner, dfouter.shortname, NIL, FALSE, FALSE, FALSE, h];
Stream.Delete[sh];
o ← RecursiveStoreDF[dfseqinner, dfouter, dfseqall, h, checkForOverwrite];
SetCreateDateAndVersionFromPhaseOne[dfouter, dfseqall];
outofspace ← outofspace OR o;
IF dfouter.releaseDirectory = NIL AND topdfouter ~= NIL THEN {
CWF.WF2["No release directory for %s in %s.\n"L,
dfouter.shortname, topdfouter.shortname];
DFSubr.FreeDFSeq[@dfseqinner];
LOOP;
};
-- ConnectionClosedError and connect-passwd errors are caught at an inner level
IF NOT outofspace THEN StoreDFFile[dfouter, dfseqinner, h];
DFSubr.FreeDFSeq[@dfseqinner];
Flush[];
EXITS
err => {
IF dfouter.host ~= NIL THEN
  CWF.WF3["Error - can't open [%s]<%s>%s\n"L,
    dfouter.host, dfouter.directory, dfouter.shortname]
ELSE CWF.WF1["Error - can't open %s.\n"L, dfouter.shortname];
};
};
LOOP;
};
IF dfouter.atsign AND dfouter.readonly AND Subr.EndsIn[dfouter.shortname, ".df"L] THEN
  -- only works for ReadOnly entries;
  -- handle case where special map to working directory is needed
CoerceDFLocToAnother[dfouter, dfseqouter];
IF dfouter.cameFrom AND dfouter.releaseDirectory ~= NIL THEN {
IF g.verbose^ THEN
CWF.WF4["Leaving [%s]<%s>%s in %s alone.\n"L,
dfouter.host, dfouter.directory, dfouter.shortname, topdfouter.shortname];
g.nFilesSkipped ← g.nFilesSkipped + 1;
Flush[];
}
ELSE {
-- is either a readonly df file or not a DF file
SetCreateDateAndVersionFromPhaseOne[dfouter, dfseqall];
IF dfouter.releaseDirectory = NIL THEN {
IF NOT dfouter.readonly THEN
CWF.WF2["Error - no release directory specified for %s in %s.\n"L,
dfouter.shortname, topdfouter.shortname]
ELSE FixupEntriesWithoutReleaseAs[dfouter, dfseqouter, dfseqall];
Flush[];
}
-- skipping self reference to DF file
ELSE IF NOT LongString.EquivalentString[dfouter.shortname, topdfouter.shortname] THEN {
smashrw: Rw ← none;
-- handles the case where there is more than one connection
-- and one of the connections times out (connectionClosed)
DO
CopyRemoteFile[dfouter, h
! ConnectionClosedError => {
CWF.WF1["Connection to %s timed out.\n"L, dfouter.host];
smashrw ← rw;
CONTINUE;
};
];
-- have to do this outside STP monitor lock
IF smashrw ~= none THEN smashrw ← Close[smashrw] ELSE EXIT;
ENDLOOP;
Flush[];
}
ELSE {
-- if it is a self reference, then
-- swap directory entries even though it is not correct
SwapSides[dfouter];
};
};
ENDLOOP;
};
StoreDFFile: PROC[dfouter: DFSubr.DF, dfseqinner: DFSubr.DFSeq, h: Subr.TTYProcs] = {
-- looks up the create time from the phase 1 data;
-- handles ConnectionClosedError and ConnectCredentialsError internally
dfinner: DFSubr.DF;
smashrw: Rw ← none;
host: LONG STRING ← NIL;
sfnnew: LONG STRING ← Subr.AllocateString[100];
{ENABLE UNWIND => {Subr.FreeString[sfnnew]};
dfouter.version ← 0;
SwapSides[dfouter]; -- beware, this call must be executed exactly once
-- get the entry for the inner DF file;
-- this is usually a self entry
dfinner ← DFSubr.LookupDF[dfseqinner, dfouter.shortname];
IF dfinner ~= NIL THEN {
IF dfouter.createtime = 0 THEN dfouter.createtime ← dfinner.createtime
ELSE dfinner.createtime ← dfouter.createtime;
dfinner.version ← 0;
IF dfouter.host ~= NIL THEN {
Subr.FreeString[dfinner.host, dfseqinner.dfzone];
dfinner.host ← Subr.CopyString[dfouter.host, dfseqinner.dfzone];
Subr.FreeString[dfinner.directory, dfseqinner.dfzone];
dfinner.directory ← Subr.CopyString[dfouter.directory,
dfseqinner.dfzone];
Subr.FreeString[dfinner.releaseHost, dfseqinner.dfzone];
dfinner.releaseHost ← Subr.CopyString[dfouter.releaseHost,
dfseqinner.dfzone];
Subr.FreeString[dfinner.releaseDirectory, dfseqinner.dfzone];
dfinner.releaseDirectory ← Subr.CopyString[dfouter.releaseDirectory,
dfseqinner.dfzone];
dfinner.cameFrom ← dfouter.cameFrom;
}
-- dfinner has already had its sides swapped (SwapSides[])
-- by the nested RecursiveStoreDF[]
};
IF dfouter.host ~= NIL THEN {
host ← dfouter.host;
CWF.SWF2[sfnnew, "<%s>%s"L, dfouter.directory, dfouter.shortname]
}
ELSE IF dfinner ~= NIL THEN {
host ← dfinner.host;
CWF.SWF2[sfnnew, "<%s>%s"L, dfinner.directory, dfinner.shortname]
}
ELSE {
CWF.WF1["Error - don't know where to store %s.\n"L, dfouter.shortname];
GO TO return;
};
DO
dfouter.version ← ReallyStoreDFFile[host, sfnnew, dfouter.createtime, dfseqinner, h
! ConnectionClosedError => {
CWF.WF1["Connection to %s timed out.\n"L, dfouter.host];
smashrw ← rw;
CONTINUE;
};
ConnectCredentialsError => {
-- does NOT close the connection, simply re-starts
CWF.WFCR[];
LOOP
}
];
-- have to do this outside STP monitor lock
IF smashrw ~= none
THEN smashrw ← Close[smashrw]
ELSE EXIT;
ENDLOOP;
g.nDFFilesStored ← g.nDFFilesStored + 1;
EXITS return => {};
}; -- of ENABLE UNWIND
Subr.FreeString[sfnnew];
};
ReallyStoreDFFile: PROC[host, filename: LONG STRING, createtime: LONG CARDINAL,
  dfseqinner: DFSubr.DFSeq, h: Subr.TTYProcs] RETURNS[version: CARDINAL] = {
-- may raise ConnectionClosedError or ConnectCredentialsError
sh: Stream.Handle ← NIL;
info: STP.FileInfo;
shortFileName: LONG STRING ← Subr.AllocateString[100];
desiredProperties: STP.DesiredProperties ← ALL[FALSE];
-- can't catch UNWIND (RRA: WHY?)
{
ENABLE {
ConnectionClosedError => {
IF sh ~= NIL THEN Stream.Delete[sh];
sh ← NIL;
Subr.FreeString[shortFileName];
};
STP.Error => SELECT code FROM
noSuchFile => {
CWF.WF2["Error - %s: %s.\n\n"L, filename, error];
Subr.FreeString[shortFileName];
ERROR Subr.FileError[notFound];
};
connectionClosed => {
Subr.FreeString[shortFileName];
ERROR ConnectionClosedError[write];
};
illegalConnectName, illegalConnectPassword, accessDenied => {
-- can't just attach RETRY here, must go back
-- to the point where the remote file was opened
[] ← STPSubr.HandleSTPError[g.stp[write], code, error, h];
Subr.FreeString[shortFileName];
ERROR ConnectCredentialsError[write];
};
ENDCASE;
};
Subr.strcpy[shortFileName, filename];
version ← 0;
CWF.WF1["Storing %s "L, filename];
-- STP.Error is caught above
Open[host, write, h];
desiredProperties[directory] ← TRUE;
desiredProperties[nameBody] ← TRUE;
desiredProperties[version] ← TRUE;
STP.SetDesiredProperties[g.stp[write], desiredProperties];
sh ← STP.CreateRemoteStream[
stp: g.stp[write], file: shortFileName, access: write,
fileType: text, creation: LOOPHOLE[createtime]
! STP.Error => IF STPSubr.HandleSTPError[g.stp[write], code, error, h] THEN RETRY];
DFSubr.WriteOut[dfseq: dfseqinner, topLevelFile: NIL, outputStream: sh, print: FALSE];
Stream.Delete[sh];
sh ← NIL;
info ← STP.GetFileInfo[g.stp[write]];
version ← LongString.StringToDecimal[info.version];
CWF.WF1["!%s\n"L, info.version];
Subr.FreeString[shortFileName];
}};
CopyRemoteFile: PROC[dfouter: DFSubr.DF, h: Subr.TTYProcs] = {
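-- copies one non-DF file to its release position unless the correct version is already there; updates the file and page counters in g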
nIfsPages: CARDINAL;
ok, inCache: BOOL;
vers: CARDINAL;
[ok, inCache, vers, nIfsPages] ← AlreadyExistsInCorrectVersion[
dfouter.releaseHost, dfouter.releaseDirectory, dfouter.shortname, dfouter.createtime, h];
IF ok THEN {
  IF g.verbose^ THEN {
CWF.WF4["Correct version of [%s]<%s>%s already stored, %u pages "L,
dfouter.releaseHost, dfouter.releaseDirectory,
dfouter.shortname, @nIfsPages];
CWF.WF1["%s\n"L, IF inCache THEN " (In cache)"L ELSE ""L];
};
SwapSides[dfouter];
dfouter.version ← vers;
g.nFilesNotStored ← g.nFilesNotStored + 1;
g.nPagesNotStored ← g.nPagesNotStored + nIfsPages;
g.nFilesToRelease ← g.nFilesToRelease + 1;
g.nPagesToRelease ← g.nPagesToRelease + nIfsPages;
RETURN;
};
DO
nIfsPages ← CopyRemoteFilesUsingSTP[dfouter, h
! ConnectCredentialsError => {
CWF.WFCR[];
LOOP
}];
EXIT;
ENDLOOP;
g.nFilesStored ← g.nFilesStored + 1;
g.nPagesStored ← g.nPagesStored + nIfsPages;
g.nFilesToRelease ← g.nFilesToRelease + 1;
g.nPagesToRelease ← g.nPagesToRelease + nIfsPages;
};
CopyRemoteFilesUsingSTP: PROC[dfouter: DFSubr.DF, h: Subr.TTYProcs] RETURNS[nIfsPages: CARDINAL] = {
-- may raise ConnectCredentialsError
buffer: LONG POINTER;
nxfer: CARDINAL;
stopit: BOOL ← FALSE;
shin, shout: Stream.Handle ← NIL;
info: STP.FileInfo;
ca: UserTerminal.CursorArray ← ALL[0];
cursorX, cursorY: INTEGER;
flip: BOOL ← FALSE;
sfnold: LONG STRING ← Subr.AllocateString[100];
sfnnew: LONG STRING ← Subr.AllocateString[100];
nbytes: LONG CARDINAL ← 0;
ftp: UserTerminal.CursorArray ← [
177400B, 177400B, 177400B, 177400B,
177400B, 177400B, 177400B, 177400B,
000377B, 000377B, 000377B, 000377B,
000377B, 000377B, 000377B, 000377B];
bcdBase: BcdOps.BcdBase;
versionStamp: TimeStamp.Stamp;
desiredProperties: STP.DesiredProperties ← ALL[FALSE];
Cleanup: PROC = {
  -- called on unwind and when exiting normally,
  -- but not for ConnectCredentialsError
IF shin ~= NIL THEN Stream.Delete[shin];
shin ← NIL;
IF shout ~= NIL THEN Stream.Delete[shout];
shout ← NIL;
IF flip THEN UserTerminal.SetCursorPattern[ca];
IF sfnold # NIL THEN {Subr.FreeString[sfnold]; sfnold ← NIL};
IF sfnnew # NIL THEN {Subr.FreeString[sfnnew]; sfnnew ← NIL};
};
nIfsPages ← 0;
IF dfouter.version = 0
  THEN CWF.SWF2[sfnold, "<%s>%s!H"L, dfouter.directory, dfouter.shortname]
  ELSE CWF.SWF3[sfnold, "<%s>%s!%u"L, dfouter.directory, dfouter.shortname, @dfouter.version];
CWF.SWF2[sfnnew, "<%s>%s"L, dfouter.releaseDirectory, dfouter.shortname];
IF g.verbose^ THEN CWF.WF1["Copy %s"L, sfnold];
Open[dfouter.host, read, h];
desiredProperties ← ALL[FALSE];
desiredProperties[directory] ← TRUE;
desiredProperties[nameBody] ← TRUE;
desiredProperties[version] ← TRUE;
desiredProperties[createDate] ← TRUE;
desiredProperties[size] ← TRUE;
STP.SetDesiredProperties[g.stp[read], desiredProperties];
shin ← STP.CreateRemoteStream[stp: g.stp[read], file: sfnold, access: read
  ! STP.Error =>
    IF code = noSuchFile THEN {
      CWF.WF2["Error - %s: %s.\n\n"L, sfnold, error];
      ERROR Subr.FileError[notFound];
      }
    ELSE IF code = connectionClosed THEN ERROR ConnectionClosedError[read]
    ELSE IF STPSubr.HandleSTPError[g.stp[read], code, error, h] THEN RETRY
  ];
IF g.verbose^ THEN CWF.WF0["\n\tto "L];
Open[dfouter.releaseHost, write, h];
desiredProperties ← ALL[FALSE];
desiredProperties[directory] ← TRUE;
desiredProperties[nameBody] ← TRUE;
desiredProperties[version] ← TRUE;
STP.SetDesiredProperties[g.stp[write], desiredProperties];
shout ← STP.CreateRemoteStream[stp: g.stp[write], file: sfnnew, access: write,
  fileType: text, creation: LOOPHOLE[dfouter.createtime]
  ! STP.Error => SELECT code FROM
    noSuchFile => {
      CWF.WF2["Error - %s: %s.\n\n"L, sfnnew, error];
      ERROR Subr.FileError[notFound];
      };
    connectionClosed => ERROR ConnectionClosedError[write];
    ENDCASE => IF STPSubr.HandleSTPError[g.stp[write], code, error, h] THEN RETRY
  ];
CWF.WF1["%s ... "L, sfnnew];
IF g.copySpace = Space.nullHandle THEN {
g.copySpace ← Space.Create[NPAGESTOCOPY, Space.virtualMemory];
Space.Map[g.copySpace];
};
IF Subr.CursorInWindow[h] THEN {
[cursorX, cursorY] ← UserTerminal.cursor^;
ca ← UserTerminal.GetCursorPattern[];
UserTerminal.SetCursorPattern[ftp];
flip ← TRUE;
};
buffer ← Space.LongPointer[g.copySpace];
bcdBase ← buffer;
{ENABLE UNWIND => Cleanup[];
WHILE NOT stopit DO
[bytesTransferred: nxfer] ← Stream.GetBlock[shin,
[buffer, 0, NPAGESTOCOPY*Environment.bytesPerPage]
! STP.Error =>
    IF code = noSuchFile THEN {
      CWF.WF1["\n\tError - %s not found.\n"L, sfnold];
      GOTO out;
      }
    ELSE IF code = connectionClosed THEN ERROR ConnectionClosedError[read];
  Stream.EndOfStream => {
    stopit ← TRUE;
    nxfer ← nextIndex;
    CONTINUE
    }
];
IF nbytes = 0 THEN {
lstr: LONG STRING ← NIL;
info ← STP.GetFileInfo[g.stp[read]];
IF Subr.EndsIn[dfouter.shortname, ".Bcd"L] OR Subr.EndsIn[dfouter.shortname, ".Symbols"L]
  THEN versionStamp ← bcdBase.version
  ELSE -- use create time
    versionStamp ← [net: 0, host: 0, time: DateAndTimeUnsafe.Parse[info.create].dt];
lstr ← Subr.AllocateString[100];
{
ENABLE
UNWIND => {Subr.FreeString[lstr]};
CWF.SWF1[lstr, "%lu"L, @info.size];
STPOps.SetPListItem[LOOPHOLE[g.stp[write], STPOps.Handle].plist, "Size"L, lstr];
}; -- of ENABLE UNWIND
Subr.FreeString[lstr];
};
Stream.PutBlock[shout, [buffer, 0, nxfer]
! STP.Error =>
    IF code = connectionClosed THEN ERROR ConnectionClosedError[write]
    ELSE IF code = illegalConnectName OR code = illegalConnectPassword OR code = accessDenied THEN {
      -- can't just attach RETRY here, must go back
      -- to the point where the remote file was opened
      [] ← STPSubr.HandleSTPError[g.stp[write], code, error, h];
      ERROR ConnectCredentialsError[write];
      };
  ];
nbytes ← nbytes + nxfer;
-- only flips if not moved
IF flip AND cursorX = UserTerminal.cursor^.x AND cursorY = UserTerminal.cursor^.y THEN {
  -- code from Cursor.Invert
bits: UserTerminal.CursorArray ← UserTerminal.GetCursorPattern[];
FOR i: CARDINAL IN [0..16) DO
bits[i] ← Inline.BITNOT[bits[i]];
ENDLOOP;
UserTerminal.SetCursorPattern[bits];
};
ENDLOOP;
Space.Kill[g.copySpace];
info ← STP.GetFileInfo[g.stp[write]];
IF info.version ~= NIL THEN { -- if there is a version number
dfouter.version ← LongString.StringToDecimal[info.version];
CWF.WF1["!%s"L, info.version];
}
ELSE dfouter.version ← 0;
Cleanup[]; -- up here in case Twinkle runs out of space and the Stream.Delete gens. error
CWF.WF1[", %lu bytes.\n"L, @nbytes];
nIfsPages ← Inline.LowHalf[(nbytes/bytesPerIFSPage)+2];
SwapSides[dfouter];
IF g.updateBTree THEN
  InsertIntoCache[
dfouter.host, dfouter.directory, dfouter.shortname,
dfouter.version, dfouter.createtime, nIfsPages]; -- record existence
AddToVersionMap[
dfouter.host, dfouter.directory, dfouter.shortname,
dfouter.version, versionStamp];
EXITS out => Cleanup[];
}};
AddToVersionMap: PROC[host, directory, shortname: LONG STRING, version: CARDINAL, bcdVers: TimeStamp.Stamp] = {
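-- appends an entry for [host]<directory>shortname!version to the version map file, keyed by the bcd version stamp; the common g.versionMapPrefix is stripped from the name when it matches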
i: INT;
file: Rope.ROPE ← IO.PutFR["[%s]<%s>%s!%d",
  IO.string[host], IO.string[directory], IO.string[shortname], IO.card[version]];
IF file.Length[] < g.versionMapPrefix.Length[] THEN {
g.versionMapFile.PutF["%a %s\n", CS.MakeTS[bcdVers], IO.rope[file]];
RETURN;
};
i ← 0;
WHILE i < g.versionMapPrefix.Length[] DO
IF Rope.Lower[file.Fetch[i]] ~= Rope.Lower[g.versionMapPrefix.Fetch[i]] THEN {
g.versionMapFile.PutF["%a %s\n", CS.MakeTS[bcdVers], IO.rope[file]];
RETURN;
};
i ← i + 1;
ENDLOOP;
file ← Rope.Flatten[file, i];
g.versionMapFile.PutF["%a %s\n", CS.MakeTS[bcdVers], IO.rope[file]];
};
AlreadyExistsInCorrectVersion: PROC[host, directory, shortname: LONG STRING, createtime: LONG CARDINAL, h: Subr.TTYProcs]
RETURNS [foundonremote, inCache: BOOL, remoteVersion, nIfsPages: CARDINAL] = {
fres: FQ.Result;
remoteByteLength: LONG CARDINAL;
foundonremote ← FALSE;
IF g.in.UserAbort[] THEN SIGNAL Subr.AbortMyself;
-- first look in BTree
[inCache, remoteVersion, nIfsPages] ←
LookupInOldFileCache[host, directory, shortname, createtime];
IF inCache THEN RETURN[TRUE, TRUE, remoteVersion, nIfsPages];
-- now look on server
[fres: fres, remoteVersion: remoteVersion, remoteByteLength: remoteByteLength] ←
FQ.FileQueryBangH[host, directory, shortname, createtime, h];
SELECT fres FROM
foundCorrectVersion => {
-- found with right create time
foundonremote ← TRUE;
nIfsPages ← (remoteByteLength/bytesPerIFSPage) + 2;
IF g.updateBTree THEN
  -- record existence in cache
InsertIntoCache[host, directory, shortname, remoteVersion, createtime, nIfsPages];
};
notFound, foundWrongVersion => NULL; -- not found
ENDCASE => ERROR;
};
SetCreateDateAndVersionFromPhaseOne: PROC[df: DFSubr.DF, dfseqall: DFSubr.DFSeq] = {
-- these two procedures, SetCreateDateAndVersionFromPhaseOne
-- and FixupEntriesWithoutReleaseAs, are the procedures that search
-- the phase 1 data structure, dfseqall
dfall: DFSubr.DF;
start, stopPlusOne: CARDINAL;
[start, stopPlusOne] ← ObtainStartAndStopIndicesFromDFSeq[df.shortname, dfseqall];
FOR i: CARDINAL IN [start .. stopPlusOne) DO
dfall ← @dfseqall[i];
IF dfall.createtime ~= 0
AND LongString.EquivalentString[dfall.shortname, df.shortname]
AND LongString.EquivalentString[dfall.directory, df.directory]
AND LongString.EquivalentString[dfall.host, df.host]
THEN {
df.createtime ← dfall.createtime;
df.version ← dfall.version;
RETURN;
};
ENDLOOP;
df.version ← 0;
-- this is not an error
g.out.PutF["Warning- can't find create date for %s.\n", IO.string[df.shortname]];
};
FixupEntriesWithoutReleaseAs: PROC[df: DFSubr.DF, dfseq, dfseqall: DFSubr.DFSeq] = {
-- note that it doesn't check create times;
-- called when we don't know where this will be stored
dfall: DFSubr.DF;
start, stopPlusOne: CARDINAL;
[start, stopPlusOne] ← ObtainStartAndStopIndicesFromDFSeq[df.shortname, dfseqall];
FOR i: CARDINAL IN [start .. stopPlusOne) DO
dfall ← @dfseqall[i];
IF dfall.releaseDirectory ~= NIL
AND NOT dfall.cameFrom
AND LongString.EquivalentString[dfall.shortname, df.shortname]
AND LongString.EquivalentString[dfall.directory, df.directory]
AND LongString.EquivalentString[dfall.host, df.host]
AND NOT LongString.EquivalentString[dfall.shortname, dfall.recorder]
THEN {
df.releaseHost ← Subr.CopyString[dfall.releaseHost, dfseq.dfzone];
df.releaseDirectory ← Subr.CopyString[dfall.releaseDirectory, dfseq.dfzone];
df.createtime ← dfall.createtime;
df.version ← dfall.version;
SwapSides[df];
RETURN;
};
ENDLOOP;
-- not found, leave alone;
-- this is not an error
g.out.PutF["Warning- appears %s is not being released.\n", IO.string[df.shortname]];
};
ObtainStartAndStopIndicesFromDFSeq: PROC[shortname: LONG STRING, dfseqall: DFSubr.DFSeq]
  RETURNS[start, stopPlusOne: CARDINAL] = {
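-- returns the range of indices in dfseqall (sorted by shortname) whose entries start with the same letter as shortname, using the index built by BuildIndex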
ch: CHAR ← Rope.Lower[shortname[0]];
IF ch NOT IN ['a .. 'z] THEN {
g.out.PutF["Cant find index for %s\n", IO.string[shortname]];
this is a perfect default value to return, is not an error
RETURN[0, dfseqall.size];
};
start ← g.dfseqIndex[ch];
stopPlusOne ← IF ch = 'z THEN dfseqall.size ELSE g.dfseqIndex[ch+1];
IF stopPlusOne = 0 THEN {
  g.out.PutF["Can't find upper bound\n"];
stopPlusOne ← dfseqall.size;
};
};
BuildIndex: PROC[dfseqall: DFSubr.DFSeq] = {
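-- fills in g.dfseqIndex: for each letter, the index of the first entry in dfseqall (sorted by shortname) whose shortname begins with that letter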
dfall: DFSubr.DF;
ch, newch: CHAR ← 'a;
g.dfseqIndex['a] ← 0;
FOR i: CARDINAL IN [0 .. dfseqall.size) DO
dfall ← @dfseqall[i];
newch ← Rope.Lower[dfall.shortname[0]];
IF newch < ch THEN {
g.out.PutF["Warning - Bad sort order for %s\n", IO.string[dfall.shortname]];
LOOP;
};
IF newch > ch THEN {
FOR c: CHAR['a .. 'z] IN (ch .. newch] DO -- in case of gaps
g.dfseqIndex[c] ← i;
ENDLOOP;
ch ← newch;
};
ENDLOOP;
FOR c: CHAR['a .. 'z] IN (ch .. 'z] DO
  g.dfseqIndex[c] ← dfseqall.size;
  ENDLOOP;
};
SwapSides: PROC[df: DFSubr.DF] = {
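-- exchanges the working (host, directory) and release (releaseHost, releaseDirectory) fields of df and marks it cameFrom; must not be called on an entry that is already cameFrom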
s: LONG STRING;
IF df.cameFrom THEN ERROR; -- should never be called if is CameFrom
df.cameFrom ← NOT df.cameFrom;
s ← df.releaseHost;
df.releaseHost ← df.host;
df.host ← s;
s ← df.directory;
df.directory ← df.releaseDirectory;
df.releaseDirectory ← s;
};
BVal: TYPE = RECORD[
  version: CARDINAL ← 0,
  nIfsPages: CARDINAL ← 0
  ];
LookupInOldFileCache: PROC[host, directory, shortname: LONG STRING, createtime: LONG CARDINAL]
RETURNS[inCache: BOOL ← FALSE, version, nIfsPages: CARDINAL ← 0] = {
bval: BVal ← [];
len: CARDINAL;
IF createtime = 0
OR NOT g.oldPhase3FileCacheExists
OR NOT g.useOldPhase3FileCache THEN RETURN;
{
-- block for actual lookup
specialPrefix: STRING ← "[Indigo]<Cedar>"L;
sfn: LONG STRING ← Subr.AllocateString[100];
file: LONG STRING ← Subr.AllocateString[100];
{ENABLE UNWIND => {Subr.FreeString[file]; Subr.FreeString[sfn]};
-- the version # is written onto version
CWF.SWF3[file, "[%s]<%s>%s"L, host, directory, shortname];
IF Subr.Prefix[file, specialPrefix] THEN Subr.SubStrCopy[file, file, specialPrefix.length];
CWF.SWF2[sfn, "%lu\000%s"L, @createtime, file];
len ← BTreeDefs.Lookup[
g.phase3BTreeHandle, MakeBTreeDesc[sfn], DESCRIPTOR[@bval, SIZE[BVal]]];
}; -- of ENABLE UNWIND
Subr.FreeString[file]; Subr.FreeString[sfn]};
IF len = BTreeDefs.KeyNotFound THEN RETURN;
RETURN[TRUE, bval.version, bval.nIfsPages];
};
InsertIntoCache: PROC[host, directory, shortname: LONG STRING, version: CARDINAL,
createtime: LONG CARDINAL, nIfsPages: CARDINAL] = {
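-- records in the phase 3 BTree that [host]<directory>shortname!version with this create time already exists in its release position, occupying nIfsPages IFS pages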
bval: BVal ← [version: version, nIfsPages: nIfsPages];
sfn: LONG STRING ← Subr.AllocateString[100];
file: LONG STRING ← Subr.AllocateString[100];
specialPrefix: STRING ← "[Indigo]<Cedar>"L;
{ENABLE UNWIND => {Subr.FreeString[sfn]; Subr.FreeString[file]};
CWF.SWF3[file, "[%s]<%s>%s"L, host, directory, shortname];
IF Subr.Prefix[file, specialPrefix] THEN Subr.SubStrCopy[file, file, specialPrefix.length];
CWF.SWF2[sfn, "%lu\000%s"L, @createtime, file];
BTreeDefs.Insert[g.phase3BTreeHandle, MakeBTreeDesc[sfn], DESCRIPTOR[@bval, SIZE[BVal]]];
}; -- of ENABLE UNWIND
Subr.FreeString[sfn]; Subr.FreeString[file];
};
-- size in pages for btree (used to be 100)
InitialNumberOfPhase3BTreePages: CARDINAL = 1000;
MakeBTree: PROC = {
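-- opens the phase 3 BTree file used as a cache of files already stored in their release positions, creating and initializing it if it does not yet exist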
g.oldPhase3FileCacheExists ← TRUE;
g.phase3BTreeCap ← Directory.Lookup[fileName: "ReleaseTool.Phase3BTreeFile$"L, permissions: Directory.ignore
! Directory.Error => {
g.oldPhase3FileCacheExists ← FALSE;
CONTINUE;
}];
IF NOT g.oldPhase3FileCacheExists THEN
g.phase3BTreeCap ← Subr.NewFile["ReleaseTool.Phase3BTreeFile$"L, Subr.ReadWrite,
InitialNumberOfPhase3BTreePages];
g.phase3BTreeHandle ← BTreeDefs.CreateAndInitializeBTree[
fileH: BTreeSupportExtraDefs.OpenFile[g.phase3BTreeCap],
initializeFile: NOT g.oldPhase3FileCacheExists,
isFirstGreaterOrEqual: IsFirstGEQ, areTheyEqual: AreTheyEQ];
};
CleanupBTree: PROC = {
IF g.phase3BTreeCap ~= File.nullCapability THEN {
BTreeSupportExtraDefs.CloseFile[BTreeDefs.ReleaseBTree[g.phase3BTreeHandle]];
g.phase3BTreeCap ← File.nullCapability;
g.oldPhase3FileCacheExists ← FALSE;
};
};
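-- MakeBTreeDesc builds a BTree key descriptor covering the whole string body: two header words (length, maxlength) plus (length+1)/2 words of characters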
MakeBTreeDesc: PROC [s: LONG STRING] RETURNS [d: BTreeDefs.Desc] = {
  RETURN[DESCRIPTOR[LOOPHOLE[s, LONG POINTER], (s.length + 1)/2 + 2]]};
IsFirstGEQ: BTreeDefs.TestKeys = {
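-- case-insensitive comparison of two string keys; ORing 40B into a character folds upper-case ASCII letters to lower case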
aS: LONG STRING = LOOPHOLE[BASE[a]];
bS: LONG STRING = LOOPHOLE[BASE[b]];
FOR i: CARDINAL IN [0..MIN[aS.length, bS.length]) DO
DO
aC: CHAR = Inline.BITOR[aS[i], 40B];
bC: CHAR = Inline.BITOR[bS[i], 40B];
SELECT aC FROM
> bC => RETURN [TRUE];
< bC => RETURN [FALSE];
ENDCASE;
ENDLOOP;
RETURN [aS.length >= bS.length]
};
AreTheyEQ: BTreeDefs.TestKeys = {
aS: LONG STRING = LOOPHOLE[BASE[a]];
bS: LONG STRING = LOOPHOLE[BASE[b]];
IF aS.length ~= bS.length THEN RETURN [FALSE];
FOR i: CARDINAL IN [0..aS.length) DO
IF Inline.BITOR[aS[i], 40B] ~= Inline.BITOR[bS[i], 40B] THEN RETURN [FALSE];
ENDLOOP;
RETURN [TRUE]
};
-- (entry must be readonly)
-- this maps lines like
--   Directory [Indigo]<Cedar>Top>
-- to be
--   Directory [Indigo]<PreCedar>Top>
-- for selected DF files.  This working directory is then mapped back to the
-- new version on Cedar>Top.
-- these are in the file "Release.DFLocations"
-- format:
--   [Indigo]<Cedar>Top>X.df [Indigo]<PreCedar>Top>X.df
CoerceDFLocToAnother: PROC[df: DFSubr.DF, dfseq: DFSubr.DFSeq] = {
IF g.dfmap = NIL THEN ReadInAndParseDFMap[];
-- if dfmap.size = 0, then couldn't open Release.DFLocations
IF NOT df.readonly THEN ERROR;
FOR i: CARDINAL IN [0 .. g.dfmap.size) DO
IF LongString.EquivalentString[g.dfmap[i].shortname, df.shortname]
AND LongString.EquivalentString[g.dfmap[i].lhsHost, df.host]
AND LongString.EquivalentString[g.dfmap[i].lhsDirectory, df.directory]
THEN {
CWF.WF3["Mapping reference to [%s]<%s>%s\n"L,
df.host, df.directory, df.shortname];
CWF.WF3["\tinto a reference to [%s]<%s>%s.\n"L,
g.dfmap[i].rhsHost, g.dfmap[i].rhsDirectory, df.shortname];
Subr.FreeString[df.host, dfseq.dfzone];
Subr.FreeString[df.directory, dfseq.dfzone];
df.host ← Subr.CopyString[g.dfmap[i].rhsHost, dfseq.dfzone];
df.directory ← Subr.CopyString[g.dfmap[i].rhsDirectory, dfseq.dfzone];
-- in case a CameFrom is hanging on it
df.cameFrom ← FALSE;
Subr.FreeString[df.releaseHost, dfseq.dfzone]; -- if present
Subr.FreeString[df.releaseDirectory, dfseq.dfzone];
df.releaseHost ← df.releaseDirectory ← NIL;
RETURN;
};
ENDLOOP;
};
NMAPENTRIES: CARDINAL = 100;
ReadInAndParseDFMap: PROC = {
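-- reads Release.DFLocations into g.dfmap; each line gives a released DF location and the working location that references to it should be mapped to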
i: CARDINAL;
sh: Stream.Handle ← NIL;
stemp: LONG STRING ← Subr.AllocateString[100];
line: LONG STRING ← Subr.AllocateString[100];
host: LONG STRING ← Subr.AllocateString[100];
directory: LONG STRING ← Subr.AllocateString[100];
shortname: LONG STRING ← Subr.AllocateString[100];
longzone: UNCOUNTED ZONE ← Subr.LongZone[];
Cleanup: PROC = {
Subr.FreeString[stemp]; Subr.FreeString[line];
Subr.FreeString[host]; Subr.FreeString[directory];
Subr.FreeString[shortname];
IF sh # NIL THEN {Stream.Delete[sh]; sh ← NIL};
};
{ENABLE UNWIND => Cleanup[];
g.dfmap ← longzone.NEW[DFMapSeqRecord[NMAPENTRIES]];
g.dfmap.zone ← longzone;
sh ← Subr.NewStream["Release.DFLocations"L, Subr.Read
! Subr.FileError => {
CWF.WF0["No mapping of locations - Cannot open Release.DFLocations.\n"L];
GOTO return
}];
CWF.WF0["Reading DF mapping from file Release.DFLocations.\n"L];
WHILE Subr.GetLine[sh, line] DO
IF line.length = 0
OR Subr.Prefix[line, "//"L]
OR Subr.Prefix[line, "--"L] THEN LOOP;
IF g.dfmap.size >= g.dfmap.maxsize THEN {
CWF.WF0["Error - too many DFLocations.\n"L];
EXIT;
};
i ← 0;
WHILE i < line.length AND line[i] ~= ' AND line[i] ~= '\t DO
i ← i + 1;
ENDLOOP;
IF i >= line.length THEN {
CWF.WF1["Error - this line needs two file names on it: %s.\n"L, line];
EXIT;
};
Subr.strcpy[stemp, line];
stemp.length ← i;
Subr.SubStrCopy[line, line, i];
Subr.StripLeadingBlanks[line];
[] ← DFSubr.StripLongName[stemp, host, directory, shortname, FALSE];
g.dfmap[g.dfmap.size].shortname ← Subr.CopyString[shortname, longzone];
g.dfmap[g.dfmap.size].lhsHost ← Subr.CopyString[host, longzone];
g.dfmap[g.dfmap.size].lhsDirectory ← Subr.CopyString[directory, longzone];
[] ← DFSubr.StripLongName[line, host, directory, shortname, FALSE];
IF NOT LongString.EquivalentString[shortname, g.dfmap[g.dfmap.size].shortname] THEN {
CWF.WF1["Error - line including %s does not have shortnames that match.\n"L, line];
LOOP;
};
g.dfmap[g.dfmap.size].rhsHost ← Subr.CopyString[host, longzone];
g.dfmap[g.dfmap.size].rhsDirectory ← Subr.CopyString[directory, longzone];
g.dfmap.size ← g.dfmap.size + 1;
ENDLOOP;
EXITS return => {};
}; -- of ENABLE UNWIND
Cleanup[];
};
Open: PROC[host: LONG STRING, rw: Rw, h: Subr.TTYProcs] = {
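-- ensures g.stp[rw] is an open STP connection to host, closing any connection to a different host and creating a new one if necessary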
IF g.stp[rw] ~= NIL THEN STP.SetDesiredProperties[g.stp[rw], ALL[FALSE]];
IF g.stp[rw] = NIL
  OR NOT LongString.EquivalentString[LOOPHOLE[g.stpHost[rw]], host] THEN {
IF g.stp[rw] ~= NIL THEN [] ← Close[rw];
g.stp[rw] ← STPSubr.MakeSTPHandle[host, h];
g.stpHost[rw] ← ConvertUnsafe.ToRope[host];
-- only connects if releasing to Indigo, this is a crock
IF rw = write AND g.connectName ~= NIL
  AND LongString.EquivalentString[host, "Indigo"L] THEN {
shortConnectName: LONG STRING ← Subr.AllocateString[100];
shortConnectPassword: LONG STRING ← Subr.AllocateString[100];
{ENABLE UNWIND => {
  Subr.FreeString[shortConnectName]; Subr.FreeString[shortConnectPassword]};
Subr.strcpy[shortConnectName, g.connectName];
IF g.connectPassword ~= NIL THEN Subr.strcpy[shortConnectPassword, g.connectPassword];
STP.Connect[g.stp[rw], shortConnectName, shortConnectPassword];
}; -- of ENABLE UNWIND
Subr.FreeString[shortConnectName]; Subr.FreeString[shortConnectPassword];
};
};
};
Close: PROC[rw: Rw] RETURNS[alwaysNone: Rw] = {
IF g.stp[rw] ~= NIL THEN {
g.out.PutF["Closing connection to %s\n", IO.rope[g.stpHost[rw]]];
g.stp[rw] ← STP.Destroy[g.stp[rw] ! STP.Error => CONTINUE];
g.stpHost[rw] ← NIL;
};
RETURN[none];
};
Flush: PROC = {
g.out.Flush[];
};
PrintACode: IO.PFCodeProc = TRUSTED {
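-- the %a conversion used for the version map file: prints a CS.PTimeStamp as 12 upper-case hexadecimal digits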
WITH v: val SELECT FROM
refAny => {
pts: CS.PTimeStamp ← NARROW[LOOPHOLE[v.value, REF ANY]];
hex: PACKED ARRAY [0 .. 12) OF [0 .. 16) ← LOOPHOLE[pts^];
FOR i: CARDINAL IN [0 .. 12) DO
IF hex[i] IN [0 .. 9]
  THEN stream.PutChar['0 + hex[i]]
  ELSE stream.PutChar['A + (hex[i] - 10)];
ENDLOOP;
};
ENDCASE => ERROR;
};
}.