TrickleChargeServerImpl.mesa
Copyright © 1985 by Xerox Corporation. All rights reserved.
Russ Atkinson (RRA) June 20, 1985 12:54:23 pm PDT
Rick Beach, June 21, 1985 9:35:53 am PDT
Dave Rumph, June 27, 1985 9:36:30 am PDT
Bob Hagmann July 18, 1985 4:38:42 pm PDT
Carl Hauser, March 11, 1986 2:07:25 pm PST
Rick Beach, January 23, 1986 1:37:04 pm PST
Eric Nickell February 25, 1986 12:10:10 pm PST
TrickleChargeServer (switches) srcDir dstDir
moves files from one remote directory to another. The behavior is governed by switches and options variables (see below).
Implementation points:
1. Version numbers should be retained in order to keep the validity of the hints in DF files. This requires using STP (unfortunately).
2. Don't clutter the local disk with cached copies of transferred files.
3. Don't move files that are already there.
4. Support running this program in background to allow periodic updating of one directory from another. This is particularly useful for CedarChest.
DIRECTORY
BasicTime USING [GMT, Now, nullGMT],
CedarProcess USING [DoWithPriority, Priority],
Commander USING [CommandProc, Handle, Register],
CommandTool USING [ArgumentVector, Failed, Parse],
DFUtilities USING [DirectoryItem, FileItem, IncludeItem, ParseFromStream, ProcessItemProc],
FS USING [Copy, defaultStreamOptions, EnumerateForInfo, Error, ExpandName, FileInfo, InfoProc, StreamOpen, StreamOptions],
FSBackdoor USING [EnumerateCacheForInfo, InfoProc],
FSPseudoServers USING [TranslateForWrite],
GVBasics USING [Connect],
GVNames USING [ConnectInfo, GetConnect],
IO USING [Close, PutChar, PutF, PutF1, PutFR1, PutRope, STREAM],
List USING [CompareProc],
PrincOpsUtils USING [],
Process USING [CheckForAbort, GetPriority, Pause, Priority, SecondsToTicks],
RedBlackTree USING [Compare, Create, Delete, DestroyTable, EachNode, EnumerateIncreasing, GetKey, Insert, Lookup, Table],
Rope USING [Cat, Compare, Concat, Equal, Fetch, Find, Flatten, Length, Match, Replace, ROPE, Run, SkipTo, Substr],
STP USING [Close, Connect, Create, Error, Handle, IsOpen, Login, Open, Store],
Tempus USING [Now, PackedSeconds, PackedToSeconds, Parse],
UserCredentials USING [Get];
TrickleChargeServerImpl: CEDAR PROGRAM
IMPORTS BasicTime, CedarProcess, Commander, CommandTool, DFUtilities, FS, FSBackdoor, FSPseudoServers, GVNames, IO, Process, RedBlackTree, Rope, STP, Tempus, UserCredentials
= BEGIN
Types
GMT: TYPE = BasicTime.GMT;
ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;
Switches: TYPE = PACKED ARRAY CHAR['a..'z] OF BOOL;
PairList: TYPE = LIST OF Pair;
Pair: TYPE = RECORD [
src: ROPE,
Source directory OR DF name (angle bracket syntax)
dst: ROPE,
Destination directory name (angle bracket syntax)
switches: Switches,
switches in effect at argument capture time
timeRestriction: ROPE
If timeRestriction # NIL, it should contain a time-range pair of the form "( time1 .. time2 )" (see ParseTimeReference)
];
FileEntry: TYPE = REF FileEntryRep;
FileEntryRep: TYPE = RECORD [
name: ROPE,
Includes the srcPrefix, also includes version #
short: ROPE,
Does not include the srcPrefix, does include version #
date: GMT,
create date of the file
len: INT,
byte count of the file (useful redundancy)
state: FileEntryState
indicates the state of the file (obvious)
];
FileEntryState: TYPE = {init, fetching, storing, moved};
Documentation
doc: ROPE = "{srcDir dstDir}*
moves files from srcDir (or DF) to dstDir
-c: connect (to destination host)
-d: debug (inhibits file transfer)
-r: repeat (keep doing the transfer forever)
-v: verify (no transfers, messages only for missing files)
-q: quick (enumerate only the df files)
See documentation for specification of time restrictions.
";
Option variables
maxRetries: NAT ← 10;
# of times to retry connectionRejected from STP
retrySeconds: NAT ← 20;
# of seconds between retry attempts
repeatSeconds: NAT ← 1800;
# of seconds between repeats (when using the R switch)
secondsBetweenMoves: NAT ← 0;
# of seconds to wait after a file transfer (to keep IFS load down)
bytesPerIFSPage: NAT = 2048;
maxPauseTime: NAT = 1800;
Command Procedures
DoIt: PROC [table: RedBlackTree.Table, out: STREAM, srcPrefix: ROPE, dstPrefix: ROPE, switches: Switches] = {
-- Moves files from srcPrefix (a remote directory, or a DF file when srcPrefix does not end in ">") to dstPrefix.
-- Phase 1 enumerates the source and (re)builds the red-black table of FileEntry records;
-- Phase 2 moves every entry not already present on the destination; DF files are moved last so their hints stay valid.
-- NOTE(review): restored assignment arrows (e.g. "ROPE ← NIL") lost to character-set damage in this block.
debug: BOOL ← switches['d];
verify: BOOL ← switches['v];
enumerateForDfFiles: BOOL ← switches['q];
dfList: LIST OF FileEntry ← NIL;
-- EachInfo: called for each file enumerated from the source directory; inserts a FileEntry unless the table already has one with the same create date.
EachInfo: FS.InfoProc = {
[fullFName: ROPE, attachedTo: ROPE, created: GMT, bytes: INT, keep: CARDINAL] RETURNS [continue: BOOL]
continue ← TRUE;
Process.CheckForAbort[];
IF Rope.Run[fullFName, 0, srcPrefix, 0, FALSE] = srcPrefixLen THEN {
new: FileEntry ← NIL;
short: ROPE ← NIL;
WITH RedBlackTree.Lookup[table, fullFName] SELECT FROM
entry: FileEntry => {
IF entry.date = created THEN RETURN;
[] ← RedBlackTree.Delete[table, fullFName];
};
ENDCASE;
short ← Rope.Substr[fullFName, srcPrefixLen];
IF Rope.Match["!*", short] THEN RETURN;
This is likely to be the controlling file entry for an IFS
(or it could just be a bogus file to be ignored)
IF debug THEN IO.PutChar[out, '.];
new ← NEW[FileEntryRep ← [
name: fullFName, short: short, date: created, len: bytes, state: init]];
RedBlackTree.Insert[table, new, fullFName];
filesSeenDuringEnumeration ← filesSeenDuringEnumeration + 1;
bytesSeenDuringEnumeration ← bytesSeenDuringEnumeration + bytes;
ifsPagesDuringEnumeration ← ifsPagesDuringEnumeration + 1 + (bytes + bytesPerIFSPage-1) / bytesPerIFSPage;
};
};
-- EachEntry: Phase-2 visitor; queues DF files on dfList (moved last) and moves everything else immediately.
EachEntry: RedBlackTree.EachNode = {
[data: RedBlackTree.UserData] RETURNS [stop: BOOL ← FALSE]
WITH data SELECT FROM
entry: FileEntry => {
IF entry.state # moved THEN {
IF Rope.Match["*.df!*", entry.short, FALSE] THEN dfList ← CONS[entry, dfList]
ELSE MoveFile[entry];
};
RETURN;
};
ENDCASE => ERROR;
};
-- MoveFile: transfers one file to the destination unless it is already there (same date and length).
-- In debug mode only reports; in verify mode reports files missing from the destination.
MoveFile: PROC [entry: FileEntry] = {
fullDstName: ROPE ← Rope.Concat[dstPrefix, entry.short];
fullSrcName: ROPE ← entry.name;
isInCache: BOOL ← IsInFileCache[fullSrcName, entry];
isOnDst: BOOL ← FALSE;
dstBytes: INT ← 0;
dstDate: GMT ← BasicTime.nullGMT;
Process.CheckForAbort[];
[created: dstDate, bytes: dstBytes] ← FS.FileInfo[fullDstName
! FS.Error => CONTINUE
];
IF dstDate = entry.date AND dstBytes = entry.len THEN {
isOnDst ← TRUE;
filesAlreadyThere ← filesAlreadyThere + 1;
};
SELECT TRUE FROM
debug => {
ShowEntry[out, entry];
IF isInCache THEN {
filesInCache ← filesInCache + 1;
IO.PutRope[out, " (in local file cache)\n"];
};
IF isOnDst THEN
IO.PutRope[out, " (already on destination)\n"];
};
isOnDst => {
The remote file is already present, so we don't need to move it
entry.state ← moved;
};
verify => {
We are verifying stuff and the entry is NOT on the destination
ShowEntry[out, entry];
IF isInCache THEN {
filesInCache ← filesInCache + 1;
IO.PutRope[out, " (in local file cache)"];
};
IO.PutRope[out, " (NOT on destination)\n"];
};
ENDCASE => {
Sigh, we actually have to ship the bits
srcStream: STREAM ← NIL;
retriedCount: NAT ← 0;
openName: ROPE ← NIL;
streamOptions: FS.StreamOptions ← FS.defaultStreamOptions;
streamOptions[tiogaRead] ← FALSE;
Force the transfers to happen on raw files
entry.state ← fetching;
IF isInCache
THEN {
openName ← fullSrcName;
filesInCache ← filesInCache + 1;
}
ELSE {
-- Don't clutter the cache: fetch through a single reused temporary with a small keep.
openName ← FS.Copy[from: fullSrcName, to: "///Temp/TrickleCharge.Temp$", wantedCreatedTime: entry.date, setKeep: TRUE, keep: 4];
};
srcStream ← FS.StreamOpen[fileName: openName, streamOptions: streamOptions, wantedCreatedTime: entry.date, remoteCheck: FALSE];
entry.state ← storing;
CopyStreamToRemote[srcStream, fullDstName, entry.date
!
STP.Error => {
IO.PutF[out, "STP.Error when storing %g\n %g\n",
[rope[fullDstName]], [rope[error]] ];
SELECT code FROM
connectionRejected =>
It may be worth retrying later
IF retriedCount < maxRetries THEN {
retriedCount ← retriedCount + 1;
Process.Pause[Process.SecondsToTicks[retrySeconds]];
RETRY;
};
connectionClosed =>
Retry, this time establishing the connection first
IF retriedCount < maxRetries THEN {
retriedCount ← retriedCount + 1;
RETRY;
};
ENDCASE;
IO.Close[srcStream];
GO TO failed;
};
UNWIND => IO.Close[srcStream]];
IO.Close[srcStream];
entry.state ← moved;
IO.PutF[out, "Moved %g\n to %g\n", [rope[fullSrcName]], [rope[fullDstName]] ];
IF secondsBetweenMoves # 0 THEN
Process.Pause[Process.SecondsToTicks[secondsBetweenMoves]];
EXITS failed => {};
};
};
-- CopyStreamToRemote: stores the stream on the remote host via STP (which preserves version numbers), opening and logging in the connection on first use.
CopyStreamToRemote: PROC [stream: STREAM, remoteName: ROPE, date: GMT] = {
IF Rope.Match["[*]<*>*", remoteName] THEN {
hostStop: INT ← Rope.SkipTo[remoteName, 1, "]"];
dirStart: INT ← Rope.SkipTo[remoteName, hostStop, "<"]+1;
dirStop: INT ← Rope.SkipTo[remoteName, dirStart, ">"];
host: ROPE ← Rope.Flatten[remoteName, 1, hostStop-1];
dir: ROPE ← Rope.Flatten[remoteName, dirStart, dirStop-dirStart];
nameSansHost: ROPE ← Rope.Substr[remoteName, dirStart-1];
IF NOT STP.IsOpen[stp] THEN DO
getServerPupName: PROC [server: ROPE] RETURNS [pupServer: ROPE] = {
IF server.Find[".", 0, FALSE] > 0 THEN {
Names with "." are GVNames (Grapevine names), so ask Grapevine to look them up
info: GVNames.ConnectInfo;
connect: GVBasics.Connect;
[info: info, connect: connect ] ← GVNames.GetConnect[server];
If successful, use the connect as the server name for STP.Open
IF info = group OR info = individual THEN RETURN[connect];
};
RETURN[server];
};
userName: ROPE ← NIL;
userPassword: ROPE ← NIL;
[userName, userPassword] ← UserCredentials.Get[];
[] ← STP.Open[stp, getServerPupName[host]];
STP.Login[stp, userName, userPassword];
IF switches['c] THEN STP.Connect[stp, dir, ""];
EXIT;
ENDLOOP;
{
ENABLE UNWIND => STP.Close[stp ! STP.Error => CONTINUE; ];
STP.Store[stp: stp, file: nameSansHost, stream: stream, fileType: binary, creation: date];
};
filesMoved ← filesMoved + 1;
};
};
-- VisitEntry: called for each file named by the DF closure; verifies the file exists, strips the directory prefix, and enters it in the table.
VisitEntry: PROC [name: ROPE, date: GMT] = {
This procedure is used to visit each file in a simple DF closure, where the imports are NOT followed, but the inclusions ARE followed.
RemoveAngles: PROC [name: ROPE] RETURNS [short: ROPE ← NIL] = {
angleCount: INT ← 0;
lastAngle: INT ← -1;
IF enumerateForDfFiles AND Rope.Run[ s1: name, s2: srcPrefix, case: FALSE ] # Rope.Length[srcPrefix] THEN RETURN[NIL];
FOR i: INT IN [0..Rope.Length[name]) DO
SELECT Rope.Fetch[name, i] FROM
'>, '] => {
lastAngle ← i;
IF (angleCount ← angleCount+1) = dstAngleCount THEN
RETURN [Rope.Substr[name, i+1]];
};
'! => EXIT;
ENDCASE;
ENDLOOP;
IF lastAngle >= 0 THEN RETURN [Rope.Substr[name, lastAngle+1]];
};
new: FileEntry ← NIL;
bytes: INT ← 0;
short: ROPE ← NIL;
Process.CheckForAbort[];
WITH RedBlackTree.Lookup[table, name] SELECT FROM
entry: FileEntry => IF entry.date = date THEN RETURN;
ENDCASE;
[fullFName: name, bytes: bytes, created: date] ← FS.FileInfo[name: name, wantedCreatedTime: date !
FS.Error => IF error.code = $unknownFile OR error.code = $unknownCreatedTime THEN {
IO.PutF1[out, "FS.Error[%g]\n", [rope[error.explanation]]];
name ← NIL;
CONTINUE;
}
ELSE REJECT];
short ← RemoveAngles[name];
IF short = NIL THEN RETURN;
new ← NEW[FileEntryRep ← [
name: name,
short: short,
date: date,
len: bytes,
state: init]];
WITH RedBlackTree.Lookup[table, name] SELECT FROM
entry: FileEntry => {
IF entry.date = date THEN RETURN;
[] ← RedBlackTree.Delete[table, name];
};
ENDCASE;
RedBlackTree.Insert[table, new, name];
filesSeenDuringEnumeration ← filesSeenDuringEnumeration + 1;
bytesSeenDuringEnumeration ← bytesSeenDuringEnumeration + bytes;
ifsPagesDuringEnumeration ← ifsPagesDuringEnumeration + 1 + (bytes + bytesPerIFSPage-1) / bytesPerIFSPage;
};
stp: STP.Handle ← STP.Create[];
srcPrefixLen: INT ← Rope.Length[srcPrefix ← TranslateHost[srcPrefix]];
dstPrefixLen: INT ← Rope.Length[dstPrefix];
dstAngleCount: INT ← 0;
filesMoved: INT ← 0;
filesInCache: INT ← 0;
filesAlreadyThere: INT ← 0;
filesSeenDuringEnumeration: INT ← 0;
bytesSeenDuringEnumeration: INT ← 0;
ifsPagesDuringEnumeration: INT ← 0;
RedBlackTree.DestroyTable[table]; -- clear the table from the last run
FOR i: INT IN [0..dstPrefixLen) DO
SELECT Rope.Fetch[dstPrefix, i] FROM
'>, '] => dstAngleCount ← dstAngleCount + 1;
ENDCASE;
ENDLOOP;
IO.PutF[out, "Moving files from %g to %g\n", [rope[srcPrefix]], [rope[dstPrefix]] ];
Phase1, build up data base. Don't move any files.
IO.PutF1[out, "{Building file table at %g}\n", [time[BasicTime.Now[]]] ];
SELECT TRUE FROM
~Rope.Match["*>", srcPrefix] => VisitClosure[srcPrefix, BasicTime.nullGMT, VisitEntry]; --A df file
enumerateForDfFiles => { --Enumerate source directory for df files
EachFile: FS.InfoProc = {
[fullFName: ROPE, attachedTo: ROPE, created: GMT, bytes: INT, keep: CARDINAL] RETURNS [continue: BOOL]
doTheEnumerate: BOOL ← FALSE;
Process.CheckForAbort[];
IF Rope.Run[s1: fullFName, s2: srcPrefix, case: FALSE]#srcPrefixLen THEN ERROR;
[] ← FS.FileInfo[name: Rope.Substr[base: fullFName, start: srcPrefixLen], wantedCreatedTime: created, wDir: dstPrefix ! FS.Error => IF error.group=user THEN {doTheEnumerate ← TRUE; CONTINUE}];
IF doTheEnumerate THEN VisitClosure[dfName: fullFName, date: BasicTime.nullGMT, visitor: VisitEntry];
continue ← TRUE;
};
FS.EnumerateForInfo[Rope.Concat[srcPrefix, "*.df!H"], EachFile];
};
ENDCASE => FS.EnumerateForInfo[Rope.Concat[srcPrefix, "*!h"], EachInfo];
IF debug THEN IO.PutChar[out, '\n];
IO.PutF[out, "Enumerated new files: %g, bytes: %g, IFS pages: %g\n",
[integer[filesSeenDuringEnumeration]],
[integer[bytesSeenDuringEnumeration]],
[integer[ifsPagesDuringEnumeration]] ];
Phase2, move files. Don't change the entries (except for the 'moved' field).
IO.PutF1[out, "{Moving files at %g}\n", [time[BasicTime.Now[]]] ];
RedBlackTree.EnumerateIncreasing[table, EachEntry];
Phase2 1/2: move df files last.
FOR entryList: LIST OF FileEntry ← dfList, entryList.rest WHILE entryList # NIL DO
MoveFile[entryList.first];
ENDLOOP;
IF STP.IsOpen[stp] THEN STP.Close[stp];
IO.PutF1[out, "{Done at %g}\n", [time[BasicTime.Now[]]] ];
IO.PutF[out, "Files moved: %g, inCache: %g, alreadyRemote: %g\n\n",
[integer[filesMoved]], [integer[filesInCache]], [integer[filesAlreadyThere]] ];
};
-- ShowTable: debugging aid; prints every FileEntry in the table in increasing name order via ShowEntry.
ShowTable: PROC [out: STREAM, table: RedBlackTree.Table] = {
EachEntry: RedBlackTree.EachNode = {
[data: RedBlackTree.UserData] RETURNS [stop: BOOL ← FALSE]
WITH data SELECT FROM
entry: FileEntry => ShowEntry[out, entry];
ENDCASE => ERROR; -- the table holds only FileEntry data
};
RedBlackTree.EnumerateIncreasing[table, EachEntry];
};
-- ShowEntry: prints one FileEntry (name, create date, length, state) on out.
ShowEntry: PROC [out: STREAM, entry: FileEntry] = {
IO.PutF[out, "[name: %g, date: %g, len: %g, state: ",
[rope[entry.name]], [time[entry.date]], [integer[entry.len]] ];
SELECT entry.state FROM
init => IO.PutRope[out, "init]\n"];
fetching => IO.PutRope[out, "fetching]\n"];
storing => IO.PutRope[out, "storing]\n"];
moved => IO.PutRope[out, "moved]\n"];
ENDCASE;
};
-- IsInFileCache: returns TRUE when the local FS cache already holds a copy of fullName whose length and
-- create date match entry, or (when entry.date is nullGMT) any non-empty cached copy.
-- Restored "BOOL ← FALSE" in the RETURNS clause (assignment arrow lost to character-set damage).
IsInFileCache: PUBLIC PROC [fullName: ROPE, entry: FileEntry] RETURNS [inCache: BOOL ← FALSE] = {
cacheChecker: FSBackdoor.InfoProc = {
[fullGName: ROPE, created: BasicTime.GMT, bytes: INT, keep: CARDINAL]
RETURNS [continue: BOOL]
IF bytes = entry.len AND created = entry.date THEN GO TO found;
IF bytes > 0 AND entry.date = BasicTime.nullGMT THEN GO TO found;
RETURN [TRUE];
EXITS found => {inCache ← TRUE; RETURN [FALSE]}; -- stop the enumeration once a match is found
};
FSBackdoor.EnumerateCacheForInfo[cacheChecker, NIL, fullName];
};
-- TranslateHost: for a name of the form "[host]...", replaces the host with the result of
-- FSPseudoServers.TranslateForWrite so that stores go to the real server behind a pseudo-server.
-- Returns the name unchanged when there is no host part or no translation applies.
TranslateHost: PROC [name: ROPE] RETURNS [ROPE] = {
IF Rope.Match["[*]*", name] THEN {
rPos: INT ← Rope.SkipTo[name, 1, "]"];
host: ROPE ← Rope.Substr[name, 1, rPos-1];
IF Rope.Length[host] # 0 THEN {
nHost: ROPE ← FSPseudoServers.TranslateForWrite[host];
IF Rope.Length[nHost] # 0 THEN
IF NOT Rope.Equal[nHost, host, FALSE] THEN
name ← Rope.Flatten[Rope.Replace[name, 1, rPos-1, nHost]];
};
};
RETURN [name];
};
-- GetKey: the table's key is the stored datum itself; Compare accepts both FileEntry and ROPE keys.
GetKey: RedBlackTree.GetKey = {
[data: RedBlackTree.UserData] RETURNS [RedBlackTree.Key]
RETURN [data];
};
-- Compare: orders table entries by file name, ignoring case. The key may be either a FileEntry
-- (compare its name) or a bare ROPE (as passed to Lookup); anything else is a programming error.
-- Restored "ROPE ← NIL" in the key declaration (assignment arrow lost to character-set damage).
Compare: RedBlackTree.Compare = {
[k: RedBlackTree.Key, data: RedBlackTree.UserData] RETURNS [Basics.Comparison]
key: ROPE ← NIL;
WITH k SELECT FROM
ent: FileEntry => key ← ent.name;
rope: ROPE => key ← rope;
ENDCASE => ERROR;
WITH data SELECT FROM
ent: FileEntry => RETURN [Rope.Compare[key, ent.name, FALSE]];
ENDCASE;
ERROR; -- table data must always be a FileEntry
};
-- CompareEntries: List.CompareProc ordering two FileEntry refs by name (case ignored);
-- any other ref types are a programming error.
CompareEntries: List.CompareProc = {
[ref1: REF ANY, ref2: REF ANY] RETURNS [Basics.Comparison]
WITH ref1 SELECT FROM
ent1: FileEntry =>
WITH ref2 SELECT FROM
ent2: FileEntry =>
RETURN [Rope.Compare[ent1.name, ent2.name, FALSE]];
ENDCASE;
ENDCASE;
ERROR;
};
-- VisitClosure: parses the DF file dfName and applies visitor to every file it names.
-- Inclusions are followed recursively (the included DF is both visited and expanded); imports are NOT followed.
-- Host names are run through TranslateHost so pseudo-server names are resolved.
-- Restored "ROPE ← NIL" and "STREAM ← FS.StreamOpen" (assignment arrows lost to character-set damage).
VisitClosure: PROC [dfName: ROPE, date: GMT, visitor: PROC [name: ROPE, date: GMT]] = {
eachItem: DFUtilities.ProcessItemProc = {
WITH item SELECT FROM
dir: REF DFUtilities.DirectoryItem => prefix ← TranslateHost[dir.path1];
file: REF DFUtilities.FileItem => {
name: ROPE ← Rope.Concat[prefix, file.name];
IF prefix = NIL THEN name ← TranslateHost[name];
visitor[name, file.date.gmt];
};
incl: REF DFUtilities.IncludeItem => {
file: ROPE ← TranslateHost[incl.path1];
visitor[file, incl.date.gmt];
VisitClosure[file, incl.date.gmt, visitor];
};
ENDCASE;
};
prefix: ROPE ← NIL;
in: STREAM ← FS.StreamOpen[fileName: dfName ← TranslateHost[dfName], wantedCreatedTime: date];
DFUtilities.ParseFromStream[in, eachItem, [FALSE, all, all, defining]
! UNWIND => IO.Close[in]]; -- don't leak the stream if the parse unwinds
IO.Close[in];
};
-- ParseTimeReference: checks a time restriction of the form "( time1 .. time2 )".
-- valid reports that ref parses; inRange reports whether the current time lies in the restriction
-- (computed by comparing the Tempus-parsed endpoints; both are TRUE when ref is NIL, i.e. no restriction).
-- Restored "BOOL ← TRUE" in the RETURNS clause (assignment arrow lost to character-set damage) and
-- added an explicit check for a missing ")" (previously that case fell through to a Rope error caught by ENABLE ANY).
ParseTimeReference: PROC [ref: ROPE] RETURNS [valid, inRange: BOOL ← TRUE] ~ {
IF ref=NIL THEN RETURN [TRUE, TRUE];
{
ENABLE ANY => GOTO Fail; -- any parse error means an invalid restriction
pos1, pos2, pos3: INT;
pos1 ← Rope.Find[s1: ref, s2: "("];
IF pos1=-1 THEN GOTO Fail;
pos2 ← Rope.Find[s1: ref, s2: "..", pos1: pos1+1];
IF pos2=-1 THEN GOTO Fail;
pos3 ← Rope.Find[s1: ref, s2: ")", pos1: pos2+2];
IF pos3=-1 THEN GOTO Fail;
inRange ←
Tempus.PackedToSeconds[Tempus.Parse[rope: Rope.Substr[base: ref, start: pos1+1, len: pos2-pos1-1], search: FALSE].time]
> Tempus.PackedToSeconds[Tempus.Parse[rope: Rope.Substr[base: ref, start: pos2+2, len: pos3-pos2-2], search: FALSE].time];
EXITS Fail => RETURN [FALSE, FALSE]
};
};
-- TrickleCommandProc: the Commander entry point. Parses switches, time restrictions, and
-- src/dst argument pairs from the command line, then runs DoIt over every pair at background
-- priority, repeating every repeatSeconds when the 'r switch is set.
-- Restored assignment arrows ("REF ← NIL", "ROPE ← NIL", "BOOL ← TRUE") lost to character-set damage.
TrickleCommandProc: Commander.CommandProc = {
[cmd: Handle] RETURNS [result: REF ← NIL, msg: ROPE ← NIL]
CommandObject = [in, out, err: STREAM, commandLine, command: ROPE, ...]
out: STREAM = cmd.out;
switches: Switches ← ALL[FALSE];
timeRestriction: ROPE ← NIL;
ProcessSwitches: PROC [arg: ROPE] = {
sense: BOOL ← TRUE;
FOR index: INT IN [0..Rope.Length[arg]) DO
char: CHAR ← Rope.Fetch[arg, index];
SELECT char FROM
'- => LOOP;
'~ => {sense ← NOT sense; LOOP};
IN ['a..'z] => switches[char] ← sense;
IN ['A..'Z] => switches[char + ('a-'A)] ← sense;
ENDCASE;
sense ← TRUE;
ENDLOOP;
};
oldPriority: Process.Priority ← Process.GetPriority[];
table: RedBlackTree.Table ← RedBlackTree.Create[getKey: GetKey, compare: Compare];
argv: CommandTool.ArgumentVector ← CommandTool.Parse[cmd: cmd, starExpand: FALSE
! CommandTool.Failed => {msg ← errorMsg; GO TO failed}];
When parsing the command line, be prepared for failure. The error is reported to the user
pairList: PairList ← NIL;
pairListTail: PairList ← NIL;
FOR i: NAT IN [1..argv.argc) DO
Each argument can either be a switch specification or a genuine argument to be processed. The first argument (argv[0]) is not examined, because by convention it is the name of the command as given by the user.
arg: ROPE ← argv[i];
Process.CheckForAbort[];
It is a good idea to periodically check for a process abort request.
IF Rope.Length[arg] = 0 THEN LOOP;
Ignore null arguments (it is not easy to generate them, even).
IF Rope.Fetch[arg, 0] = '- THEN {
This argument sets switches for the remaining patterns. By convention, switches are normally "sticky", in that they stay set until explicitly changed.
ProcessSwitches[arg];
LOOP;
};
IF Rope.Fetch[arg, 0] = '( THEN {
This argument is a pair of time references, of the form: "( time1 .. time2 )" where time1 and time2 are parsable by Tempus, and therefore somewhat vague.
IF ~ParseTimeReference[arg].valid THEN {
msg ← Rope.Cat["Invalid time restriction pair: \"", arg, "\""];
GO TO failed;
};
timeRestriction ← arg;
LOOP;
};
{
Parse the argument. It must be either a directory OR a DF file name
arg ← FS.ExpandName[arg, NIL
! FS.Error => {
The argument is NOT a DF file name, so perhaps it is a directory
arg ← FS.ExpandName["$", arg
! FS.Error => {
The argument is not a valid name at all, so abort this nonsense
msg ← error.explanation;
GO TO failed;
}].fullFName;
arg ← Rope.Flatten[arg, 0, Rope.Length[arg]-1];
GO TO ok;
};
].fullFName;
EXITS ok => {};
};
IF pairListTail = NIL OR pairListTail.first.dst # NIL THEN {
-- This argument starts a new pair: it is the source of the next src/dst pair.
new: PairList ← LIST[[arg, NIL, switches, timeRestriction]];
IF pairListTail = NIL THEN pairList ← new ELSE pairListTail.rest ← new;
pairListTail ← new;
LOOP;
};
IF NOT Rope.Match["*>", arg] AND NOT Rope.Match["*]", arg] THEN {
msg ← IO.PutFR1["Destination not a directory (%g)", [rope[arg]] ];
GO TO failed;
};
pairListTail.first.dst ← arg;
pairListTail.first.switches ← switches;
ENDLOOP;
IF pairList = NIL THEN {msg ← "No arguments given.\n"; RETURN};
IF pairList.first.dst = NIL THEN {msg ← "Missing destination.\n"; RETURN};
{
action: PROC = {
DO
FOR pair: PairList ← pairList, pair.rest WHILE pair # NIL DO
IF ~ParseTimeReference[pair.first.timeRestriction].inRange THEN {
IO.PutF[stream: out, format: "Not copying %g to %g this pass because %g is not in range %g.\n\n", v1: [rope[pair.first.src]], v2: [rope[pair.first.dst]], v3: [time[Tempus.Now[]]], v4: [rope[pair.first.timeRestriction]]];
LOOP;
};
DoIt[table, out, pair.first.src, pair.first.dst, pair.first.switches
! FS.Error => {
IO.PutF1[out, "FS.Error[%g], stopping this round.\n\n", [rope[error.explanation]]];
CONTINUE;
};
];
ENDLOOP;
IF NOT switches['r] THEN EXIT;
-- Sleep repeatSeconds in chunks of at most maxPauseTime so aborts are noticed promptly.
FOR timeLeft: INT ← repeatSeconds, timeLeft - maxPauseTime WHILE timeLeft > 0 DO
Process.Pause[Process.SecondsToTicks[MIN[maxPauseTime, CARDINAL[timeLeft]]]];
ENDLOOP;
ENDLOOP;
};
CedarProcess.DoWithPriority[background, action];
};
EXITS
failed => {result ← $Failure};
};
Initialization
-- Register the TrickleChargeServer command with the Commander when this module is started.
Commander.Register[
key: "///Commands/TrickleChargeServer",
proc: TrickleCommandProc,
doc: doc,
clientData: NIL,
interpreted: TRUE
];
END.


Rick Beach, January 23, 1986 1:34:16 pm PST
changes to: action (local of TrickleCommandProc) changed timeLeft: NAT to INT because timeLeft could go negative!