TCAttachmentsImpl.mesa
Copyright © 1985, 1988, 1990, 1991, 1992 by Xerox Corporation. All rights reserved.
Russ Atkinson (RRA) June 20, 1985 12:54:23 pm PDT
Dave Rumph, September 13, 1988 6:28:12 pm PDT
Bob Hagmann July 18, 1985 4:38:42 pm PDT
Rick Beach, January 23, 1986 1:37:04 pm PST
Eric Nickell February 25, 1986 12:10:10 pm PST
Carl Hauser, October 20, 1987 2:20:57 pm PDT
Bill Jackson (bj) April 14, 1988 8:41:57 pm PDT
Willie-Sue, October 19, 1988 4:20:11 pm PDT
Last edited by: Mik Lamming - January 26, 1989 4:09:53 pm GMT
Mik Lamming, January 27, 1989 10:17:50 am GMT
Willie-Sue Orr, March 28, 1990 4:29:38 pm PST
Willie-s, August 10, 1992 4:44 pm PDT
Implementation points:
1. Version numbers should be retained in order to keep the version hints in DF files valid.
2. Don't move files that are already there.
DIRECTORY
Basics USING [Comparison, PartialComparison, UnsafeBlock],
BasicTime USING [GMT, Now, nullGMT],
CedarProcess USING [DoWithPriority, Priority],
Commander USING [CommandProc, Handle, Register],
CommanderOps USING [Failed, NextArgument],
DFUtilities USING [DirectoryItem, FileItem, Filter, ImportsItem, IncludeItem, ParseFromStream, ProcessItemProc, UsingList],
FileNames,
IO,
PFS,
PFSNames,
PFSPrefixMap,
Process USING [CheckForAbort, GetPriority, Pause, Priority, SecondsToTicks],
RedBlackTree USING [Compare, Create, Delete, DestroyTable, EachNode, EnumerateIncreasing, GetKey, Insert, Lookup, Table],
Rope;
TCAttachmentsImpl: CEDAR MONITOR
IMPORTS BasicTime, CedarProcess, Commander, CommanderOps, DFUtilities, FileNames, IO, PFS, PFSNames, PFSPrefixMap, Process, RedBlackTree, Rope
= BEGIN
Types
GMT: TYPE = BasicTime.GMT;
ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;
PATH: TYPE = PFS.PATH;
Switches: TYPE = PACKED ARRAY CHAR['a..'z] OF BOOL;
TCType: TYPE = { fullDirectory, oneDF, allDFs, individualDFs };
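one value per command: AttachDirectory (fullDirectory), AttachOneDF (oneDF), AttachAllDFs (allDFs); individualDFs is used by IndDfCmdProc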
TCInfoList: TYPE = LIST OF TCInfo;
TCInfo: TYPE = REF TCInfoRec;
TCInfoRec: TYPE = RECORD[
prefix, fromTranslation, toTranslation: ROPE,
fromTranslationPath: PATH,
otherArg: ROPE, -- for oneDF and allDFs
prefixPath: PATH,
fromPTable, toPTable: PFSPrefixMap.PrefixTableList,
switches: Switches,  -- switches in effect at argument capture time
useCopy: BOOL ¬ TRUE, -- rather than retrieve
tcType: TCType ¬ fullDirectory
];
FileEntry: TYPE = REF FileEntryRep;
FileEntryRep: TYPE = RECORD [
fullSrcName: ROPE, -- includes version number
fullSrcNamePath: PATH,
uid: PFS.UniqueID, -- really just the create date of the file for now
len: INT, -- byte count of the file (useful redundancy)
state: FileEntryState -- indicates the state of the file (obvious)
];
FileEntryState: TYPE = {init, fetching, storing, moved};
Documentation
dirDoc: ROPE = " prefixMapEntry fromTranslation toTranslation
moves the files in the directory tree at fromTranslation to toTranslation
-a fileName: append output messages to fileName
-b: bestEfforts (ignore unknown prefixes)
-d: debug (inhibits file transfer)
-o fileName: output messages to new fileName
-r: retrieve rather than copy
-v: verify (no transfers, messages only for missing files)
-x: debugging output
";
allDfsDoc: ROPE = "prefixMapEntry fromTranslation toTranslation {subdirForDfs}
moves the files controlled by the df files found under fromTranslation to toTranslation
-a fileName: append output messages to fileName
-b: bestEfforts (ignore unknown prefixes)
-d: debug (inhibits file transfer)
-m: move dfs even if errors
-o fileName: output messages to new fileName
-r: retrieve rather than copy
-v: verify (no transfers, messages only for missing files)
-x: debugging output
";
oneDfDoc: ROPE = "prefixMapEntry fromTranslation toTranslation subDirAndDfName
moves the files controlled by subDirAndDfName from fromTranslation to toTranslation
-a fileName: append output messages to fileName
-b: bestEfforts (ignore unknown prefixes)
-d: debug (inhibits file transfer)
-l: localDF (assumes the df name is local or full, not prefixMapEntry relative)
-m: move dfs even if errors
-o fileName: output messages to new fileName
-r: retrieve rather than copy
-v: verify (no transfers, messages only for missing files)
-x: debugging output
";
Option variables
courierMaxRetries: NAT ¬ 150; -- # of times to retry after a CrRPC error
courierRetrySeconds: NAT ¬ 5*60; -- wait a while before retrying after a CrRPC error
maxRetries: NAT ¬ 10; -- # of times to retry connectionRejected from STP
retrySeconds: NAT ¬ 20; -- # of seconds between retry attempts
repeatSeconds: NAT ¬ 1800; -- # of seconds between repeats (when using the R switch)
secondsBetweenMoves: NAT ¬ 0; -- # of seconds to wait after a file transfer (to keep IFS load down)
maxPauseTime: NAT = 1800;
tempDFPath: PATH ~ PFS.PathFromRope["/tmp/TrickleChargeTemp.df"];
bangH: ROPE ~ "!H";
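In PFS names, "!H" selects the highest extant version of a file; bangH is appended to names before lookup and enumeration below.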
Command Procedures
DoIt: PROC [tcInfo: TCInfo, table: RedBlackTree.Table, out: STREAM] = {
bestEfforts: BOOL ¬ tcInfo.switches['b];
doFlush: BOOL ¬ tcInfo.switches['a] OR tcInfo.switches['o];
debug: BOOL ¬ tcInfo.switches['d];
verify: BOOL ¬ tcInfo.switches['v];
localDF: BOOL ¬ tcInfo.switches['l];
outputForDebugging: BOOL ¬ tcInfo.switches['x];
useRetrieve: BOOL ¬ tcInfo.switches['r];
moveDfsEvenIfErrors: BOOL ¬ tcInfo.switches['m];
enumerateForDfFiles: BOOL ¬ tcInfo.tcType = allDFs;
dfList: LIST OF FileEntry ¬ NIL;
errorsEncountered: BOOL ¬ FALSE;
A Programmer's quick hack
processingImports: BOOL ¬ tcInfo.switches['e];
filter: DFUtilities.Filter ¬ [FALSE, all, all, IF ( processingImports ) THEN all ELSE defining];
if you want to experiment, you can try other values here! - (bj)
EachEntry: RedBlackTree.EachNode = {
[data: RedBlackTree.UserData] RETURNS [stop: BOOL ← FALSE]
called for each item in the table to do the moving.
WITH data SELECT FROM
entry: FileEntry => {
IF entry.state # moved THEN {
df files are collected and moved last as a "commit" of the successful completion of the tricklecharge pass.
ext: ROPE ¬ FindExt[entry.fullSrcName];
IF Rope.Equal[ext, "df", FALSE] THEN dfList ¬ CONS[entry, dfList]
ELSE MoveFile[entry];
};
RETURN;
};
ENDCASE => ERROR;
};
MoveFile: PROC [entry: FileEntry] = {
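Moves a single file: if the destination already holds a file with the same create date and length it is left alone; otherwise the destination name is attached to the source file.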
isOnDst: BOOL ¬ FALSE;
dstBytes: INT ¬ 0;
dstUID: PFS.UniqueID ¬ PFS.nullUniqueID;
fullDstPath: PATH ¬ PFSPrefixMap.Translate[entry.fullSrcNamePath, tcInfo.toPTable];
Process.CheckForAbort[];
[uniqueID: dstUID, bytes: dstBytes] ¬ PFS.FileInfo[fullDstPath
! PFS.Error => CONTINUE
];
IF EqualUIDs[dstUID, entry.uid] AND dstBytes = entry.len THEN {
isOnDst ¬ TRUE;
filesAlreadyThere ¬ CheckCount[filesAlreadyThere, out, doFlush];
};
SELECT TRUE FROM
debug => {
ShowEntry[out, entry];
IF isOnDst THEN out.PutRope[" (already on destination)\n"];
out.PutF1["~~fullDstPath: %g\n", [rope[PFS.RopeFromPath[fullDstPath]]] ];
};
isOnDst => {
The remote file is already present, so we don't need to move it
entry.state ¬ moved;
};
verify => {
We are verifying stuff and the entry is NOT on the destination
ShowEntry[out, entry];
out.PutRope[" (NOT on destination)\n"];
};
ENDCASE => {
Sigh, we actually have to ship the bits
dstName: ROPE = PFS.RopeFromPath[fullDstPath];
retriedCount: INT ¬ 0;
Yes: PFS.NameConfirmProc ~ { RETURN[TRUE] };
DO
ENABLE PFS.Error => {
sleepSec: NAT ¬ 0;
out.PutF["PFS.Error when storing %g\n %g\n",
[rope[dstName]], [rope[error.explanation]] ];
IF doFlush THEN out.Flush[];
SELECT error.code FROM
$connectionRejected => sleepSec ¬ retrySeconds;
$CrRPC => sleepSec ¬ courierRetrySeconds;
$quotaExceeded, $volumeFull => REJECT;
ENDCASE;
SELECT error.code FROM
$connectionRejected =>
It may be worth retrying later
IF retriedCount < maxRetries THEN {
retriedCount ¬ retriedCount + 1;
Process.Pause[Process.SecondsToTicks[retrySeconds]];
LOOP;
};
$CrRPC =>
It may be worth retrying later
IF retriedCount < courierMaxRetries THEN {
retriedCount ¬ retriedCount + 1;
Process.Pause[Process.SecondsToTicks[courierRetrySeconds]];
LOOP;
};
$connectionTimedOut =>
Retry, this time establishing the connection first
IF retriedCount < maxRetries THEN {
retriedCount ¬ retriedCount + 1;
LOOP;
};
ENDCASE;
GO TO failed;
};
IF NOT useRetrieve THEN {
[] ¬ PFS.Attach[attachedFile: entry.fullSrcNamePath, attachment: fullDstPath, wantedUniqueID: entry.uid, remoteCheck: TRUE];
EXIT
};
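Note that only the attach path exits this loop; as written, the retrieve case (-r) performs no transfer here.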
ENDLOOP;
entry.state ¬ moved;
filesMoved ¬ filesMoved + 1;
out.PutF["Attached %g\n to %g\n", [rope[PFS.RopeFromPath[fullDstPath]]], [rope[PFS.RopeFromPath[entry.fullSrcNamePath]]] ];
IF doFlush THEN out.Flush[];
IF secondsBetweenMoves # 0 THEN
Process.Pause[Process.SecondsToTicks[secondsBetweenMoves]];
EXITS failed => { errorsEncountered ¬ TRUE };
};
};
VisitEntry: PROC [name: PATH, uid: PFS.UniqueID] = {
This procedure is used to visit each file in a simple DF closure, where the imports are NOT followed, but the inclusions ARE followed.
new: FileEntry ¬ NIL;
bytes: INT ¬ 0;
isa: BOOL;
suffix: PATH ¬ NIL;
ropeName: ROPE ¬ PFS.RopeFromPath[name];
xFromName, fileInfoName: PATH;
Process.CheckForAbort[];
WITH RedBlackTree.Lookup[table, ropeName] SELECT FROM
entry: FileEntry => IF EqualUIDs[entry.uid, uid] THEN RETURN;
ENDCASE;
[isa, suffix] ¬ PFSNames.IsAPrefix[tcInfo.prefixPath, name];
IF NOT isa OR Rope.Match["fs.13.*", PFSNames.ComponentRope[PFSNames.ShortName[name]] ] THEN {
This is to deal with generated files that have been "abandoned" while being written - the name starts with fs.13.
IF reportIgnoring THEN out.PutF1["\t**Ignoring the file %g\n", [rope[ropeName]] ];
RETURN;
};
xFromName ¬ PFSPrefixMap.Translate[name, tcInfo.fromPTable];
[fullFName: fileInfoName, uniqueID: uid, bytes: bytes] ¬ PFS.FileInfo[name: xFromName, wantedUniqueID: uid !
PFS.Error => IF ( error.code = $unknownFile ) OR ( error.code = $unknownUniqueID ) OR ( error.code = $unknownCreatedTime ) OR ( error.code = $unknownServer AND bestEfforts ) THEN {
out.PutF["\n*PFS.Error[%g]\n\tasking for %g(%g)\n", [rope[error.explanation]], [rope[PFS.RopeFromPath[xFromName]]], IF uid.egmt.gmt = BasicTime.nullGMT THEN [rope["nullGMT"]] ELSE [time[uid.egmt.gmt]] ];
IF doFlush THEN out.Flush[];
fileInfoName ¬ NIL;
CONTINUE;
}
ELSE REJECT];
IF fileInfoName = NIL THEN RETURN;
WITH RedBlackTree.Lookup[table, ropeName] SELECT FROM
entry: FileEntry => {
IF EqualUIDs[entry.uid, uid] THEN RETURN;
[] ¬ RedBlackTree.Delete[table, ropeName];
};
ENDCASE;
new ¬ NEW[FileEntryRep ¬ [
fullSrcName: ropeName,
fullSrcNamePath: fileInfoName,
uid: uid,
len: bytes,
state: init]];
RedBlackTree.Insert[table, new, ropeName];
filesSeenDuringEnumeration ¬ CheckCount[filesSeenDuringEnumeration, out, doFlush];
bytesSeenDuringEnumeration ¬ bytesSeenDuringEnumeration + bytes;
};
VisitClosure: PROC [tcInfo: TCInfo, dfName: PATH, uid: PFS.UniqueID,
visitor: PROC [name: PATH, uid: PFS.UniqueID],
usingList: REF DFUtilities.UsingList ¬ NIL] = {
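Parses dfName and applies visitor to each file item; inclusions are followed recursively, and imports are followed only when the experimental 'e switch (processingImports) is set.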
ENABLE PFS.Error => IF ( error.code = $unknownServer ) OR
( error.code = $unknownFile ) THEN {
out.PutF["\n****PFS.Error[%g], in dfFile: %g\n", [rope[error.explanation]], [rope[PFS.RopeFromPath[dfName]]] ];
IF doFlush THEN out.Flush[];
};
EachItem: DFUtilities.ProcessItemProc = {
WITH item SELECT FROM
dir: REF DFUtilities.DirectoryItem => prefix ¬ PFS.PathFromRope[dir.path1];
file: REF DFUtilities.FileItem => {
name: PATH = PFSNames.ExpandName[PFS.PathFromRope[file.name], prefix];
visitor[name, UIDFromGMT[file.date.gmt]];
};
incl: REF DFUtilities.IncludeItem => {
thisUID: PFS.UniqueID = UIDFromGMT[incl.date.gmt];
path1: PATH ¬ PFS.PathFromRope[incl.path1];
visitor[path1, thisUID];
VisitClosure[tcInfo, path1, thisUID, visitor];
};
imports: REF DFUtilities.ImportsItem => { -- this stuff is for me - (bj)
IF ( processingImports ) THEN {
thisUID: PFS.UniqueID = UIDFromGMT[imports.date.gmt];
file: PATH ¬ PFS.PathFromRope[TranslateHost[tcInfo, imports.path1]]; -- assumes TranslateHost yields a ROPE
IF ( NOT imports.exported ) THEN { ERROR };
IF imports.exported THEN -- fix to make export chasing work
SELECT imports.form FROM
exports => {
the Exports Imports Using ALL case
visitor[file, thisUID];
VisitClosure[tcInfo, file, thisUID, visitor];
};
list => {
the Exports Imports Using [list] case
IF ( imports.list = NIL ) THEN ERROR; -- interface claims this can't happen
visitor[file, thisUID];
VisitClosure[tcInfo, file, thisUID, visitor, imports.list];
};
all => { ERROR };
ENDCASE => { ERROR };
};
};
ENDCASE => { i: INT ¬ 0; }; -- handy for setting breakpoints - (bj)
};
prefix: PATH ¬ NIL;
in: STREAM;
fileNamePath: PATH ¬ PFSPrefixMap.Translate[dfName, tcInfo.fromPTable];
in ¬ PFS.StreamOpen[
fileName: fileNamePath,
wantedUniqueID: uid
! PFS.Error => IF error.code = $unknownFile THEN {
out.PutF["PFS.Error[%g] - from dfFile %g\n", [rope[error.explanation]],
[rope[PFS.RopeFromPath[dfName]]] ];
IF doFlush THEN out.Flush[];
in ¬ NIL;
CONTINUE;
}
ELSE REJECT
];
IF in#NIL THEN {
filter.list ¬ usingList;
DFUtilities.ParseFromStream[in, EachItem, filter -- global variable, hack for now! - (bj)
! UNWIND => in.Close[]];
in.Close[]
};
};
The mainline of DoIt
filesMoved: INT ¬ 0;
filesInCache: INT ¬ 0;
filesAlreadyThere: INT ¬ 0;
filesSeenDuringEnumeration: INT ¬ 0;
bytesSeenDuringEnumeration: INT ¬ 0;
RedBlackTree.DestroyTable[table]; -- clear the table from the last run
IF tcInfo.otherArg # NIL THEN
out.PutF["\n\tMoving files (%g) from %g to %g\n", [rope[tcInfo.otherArg]], [rope[tcInfo.fromTranslation]], [rope[tcInfo.toTranslation]] ]
ELSE out.PutF["\n\tMoving files from %g to %g\n", [rope[tcInfo.fromTranslation]], [rope[tcInfo.toTranslation]] ];
Phase1, build up data base. Don't move any files.
out.PutF1["\n***** Building file table at %g\n", [time[BasicTime.Now[]]] ];
IF doFlush THEN out.Flush[];
SELECT tcInfo.tcType FROM
oneDF => {
Trickling a df file together with the files it controls
out.PutF["oneDF\n"];
IF localDF THEN VisitClosure[tcInfo, PFS.PathFromRope[tcInfo.otherArg.Concat[bangH]], PFS.nullUniqueID, VisitEntry]
ELSE VisitClosure[tcInfo, PFS.PathFromRope[tcInfo.prefix.Cat[tcInfo.otherArg, bangH]], PFS.nullUniqueID, VisitEntry];
};
allDFs => { 
Trickling df files in a directory together with the files controlled by those df files.
EachDfFile: PFS.InfoProc = {
[fullFName, attachedTo: PATH, uniqueID: PFS.UniqueID, bytes: INT, mutability: PFS.Mutability, fileType: PFS.FileType] RETURNS [continue: BOOL]
dstUID: PFS.UniqueID ¬ PFS.nullUniqueID;
doTheEnumerate: BOOL ¬ FALSE;
isa: BOOL;
suffix: PATH;
Process.CheckForAbort[];
IF outputForDebugging THEN
out.PutF1["~~%g\n", [rope[PFS.RopeFromPath[fullFName]]] ];
[isa, suffix] ¬ PFSNames.IsAPrefix[tcInfo.fromTranslationPath, fullFName];
IF NOT isa THEN {
out.PutF["\t*** %g is not a prefix of %g => ignoring\n",
[rope[tcInfo.fromTranslation]], [rope[PFS.RopeFromPath[fullFName]]] ];
IF doFlush THEN out.Flush[];
RETURN;
};
[uniqueID: dstUID] ¬ PFS.FileInfo[PFSPrefixMap.Translate[fullFName, tcInfo.toPTable]
! PFS.Error => IF error.group=user THEN
{ doTheEnumerate ¬ TRUE; CONTINUE }];
IF NOT EqualUIDs[dstUID, uniqueID] THEN doTheEnumerate ¬ TRUE;
IF doTheEnumerate THEN VisitClosure[tcInfo: tcInfo, dfName: fullFName, uid: PFS.nullUniqueID, visitor: VisitEntry];
};
who: ROPE ~ tcInfo.prefix.Concat["/Top/*.df!H"];
IF outputForDebugging THEN out.PutF1["$$doing %g\n", [rope[who]] ];
make sure we get who from the correct place
PFS.EnumerateForInfo[PFSPrefixMap.Translate[PFS.PathFromRope[who], tcInfo.fromPTable], EachDfFile];
};
fullDirectory => {  -- Trickling a whole directory.
bangH: PATH ~ PFS.PathFromRope["*!h"];
depth: INT ¬ 1;
EachFile: PFS.InfoProc = {
[fullFName, attachedTo: PATH, uniqueID: PFS.UniqueID, bytes: INT, mutability: PFS.Mutability, fileType: PFS.FileType] RETURNS [continue: BOOL]
isa: BOOL;
suffix: PATH;
ropeName: ROPE;
Process.CheckForAbort[];
IF fileType=PFS.tDirectory THEN {
newPath: PATH;
short: ROPE ¬ PFSNames.ComponentRope[PFSNames.ShortName[fullFName]];
IF short.Equal["."] OR short.Equal[".."] THEN RETURN;
newPath ¬ PFSNames.Cat[fullFName, bangH];
IF ( depth ← depth + 1 ) > 10 THEN {
out.PutF["depth: %g, path: %g\n", [integer[depth]], [rope[PFS.RopeFromPath[newPath]]] ];
RETURN;
};
PFS.EnumerateForInfo[newPath, EachFile];
RETURN;
};
[isa, suffix] ¬ PFSNames.IsAPrefix[tcInfo.prefixPath, fullFName];
IF isa THEN {
new: FileEntry ¬ NIL;
ropeName ¬ PFS.RopeFromPath[fullFName];
WITH RedBlackTree.Lookup[table, ropeName] SELECT FROM
entry: FileEntry => {
IF EqualUIDs[entry.uid, uniqueID] THEN RETURN;
[] ¬ RedBlackTree.Delete[table, ropeName];
};
ENDCASE;
IF Rope.Match["!*", PFS.RopeFromPath[suffix]] THEN RETURN;
This is likely to be the controlling file entry for an IFS
(or it could just be a bogus file to be ignored)
new ¬ NEW[FileEntryRep ¬ [
fullSrcName: ropeName,
fullSrcNamePath: PFSPrefixMap.Translate[fullFName, tcInfo.fromPTable],
uid: uniqueID,
len: bytes,
state: init]];
RedBlackTree.Insert[table, new, ropeName];
filesSeenDuringEnumeration ¬ CheckCount[filesSeenDuringEnumeration, out, doFlush];
bytesSeenDuringEnumeration ¬ bytesSeenDuringEnumeration + bytes;
};
};
out.PutF["fulldirectory\n"];
PFS.EnumerateForInfo[PFS.PathFromRope[tcInfo.fromTranslation.Concat["*!h"]], EachFile];
};
ENDCASE => {
out.PutRope["***unknown case - exiting\n"];
RETURN;
};
out.PutF["\nEnumerated new files: %g, bytes: %g\n",
[integer[filesSeenDuringEnumeration]], [integer[bytesSeenDuringEnumeration]] ];
Phase2, move files. Don't change the entries (except for the 'moved' field).
out.PutF1["\n***** Moving files at %g\n", [time[BasicTime.Now[]]] ];
IF doFlush THEN out.Flush[];
RedBlackTree.EnumerateIncreasing[table, EachEntry];
Phase2 1/2: move df files last.
IF errorsEncountered AND NOT moveDfsEvenIfErrors THEN
out.PutRope["There were errors so no df's will be moved\n"]
ELSE {
IF dfList # NIL THEN {
IF errorsEncountered AND moveDfsEvenIfErrors THEN
out.PutRope["\n***There were errors; df's will be moved anyway\n"];
out.PutRope["\n Starting to move df's\n"];
FOR entryList: LIST OF FileEntry ¬ dfList, entryList.rest WHILE entryList # NIL DO
MoveFile[entryList.first];
ENDLOOP;
};
};
out.PutF1["\n{Done at %g}\n", [time[BasicTime.Now[]]] ];
out.PutF["\tFiles moved: %g, alreadyRemote: %g\n\n",
[integer[filesMoved]], [integer[filesAlreadyThere]] ];
IF doFlush THEN out.Flush[];
};
EqualUIDs: PROC[uid1, uid2: PFS.UniqueID] RETURNS[BOOL] = {
RETURN[ ( uid1.egmt.gmt = uid2.egmt.gmt ) ]; -- all for now
-- Full comparison, not yet enabled:
-- RETURN[ ( uid1.egmt.gmt = uid2.egmt.gmt ) AND ( uid1.egmt.usecs = uid2.egmt.usecs )
-- AND ( uid1.host.a = uid2.host.a ) AND ( uid1.host.b = uid2.host.b ) ];
};
UIDFromGMT: PROC [gmt: BasicTime.GMT] RETURNS [PFS.UniqueID] ~ INLINE {
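Builds a UniqueID carrying only the create date; usecs is zero, matching the precision recorded in DF files.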
RETURN [[egmt: [gmt: gmt, usecs: 0]]]
};
CheckCount: PROC[num: INT, out: STREAM, doFlush: BOOL] RETURNS[res: INT] = {
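Bumps and returns the count, printing a progress dot every 10th file and the running count every 100th.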
IF ( res ¬ num + 1 ) MOD 10 = 0 THEN IF res MOD 100 = 0 THEN {
out.PutF1["(%g) ", [integer[res]] ];
IF doFlush THEN out.Flush[];
}
ELSE out.PutChar['.];
};
FindExt: PROC[name: ROPE] RETURNS[ext: ROPE] = {
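Returns the extension of the short name of name; if there is no ".", the whole short name is returned.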
short: ROPE ¬ FileNames.GetShortName[name, TRUE];
pos: INT ¬ Rope.FindBackward[short, "."];
IF pos = -1 THEN RETURN[short];
RETURN[Rope.Substr[short, pos+1]];
};
ShowTable: PROC [out: STREAM, table: RedBlackTree.Table] = {
EachEntry: RedBlackTree.EachNode = {
[data: RedBlackTree.UserData] RETURNS [stop: BOOL ← FALSE]
WITH data SELECT FROM
entry: FileEntry => ShowEntry[out, entry];
ENDCASE => ERROR;
};
RedBlackTree.EnumerateIncreasing[table, EachEntry];
};
ShowEntry: PROC [out: STREAM, entry: FileEntry] = {
IO.PutF[out, "[name: %g, date: %g, len: %g, state: ",
[rope[PFS.RopeFromPath[entry.fullSrcNamePath]]], [time[entry.uid.egmt.gmt]], [integer[entry.len]] ];
SELECT entry.state FROM
init => out.PutRope["init]\n"];
fetching => out.PutRope["fetching]\n"];
storing => out.PutRope["storing]\n"];
moved => out.PutRope["moved]\n"];
ENDCASE;
};
GetKey: RedBlackTree.GetKey = {
[data: RedBlackTree.UserData] RETURNS [RedBlackTree.Key]
RETURN [data];
};
Compare: RedBlackTree.Compare = {
[k: RedBlackTree.Key, data: RedBlackTree.UserData] RETURNS [Basics.Comparison]
key: ROPE ¬ NIL;
WITH k SELECT FROM
ent: FileEntry => key ¬ ent.fullSrcName;
rope: ROPE => key ¬ rope;
ENDCASE => ERROR;
WITH data SELECT FROM
ent: FileEntry => RETURN [Rope.Compare[key, ent.fullSrcName, FALSE]];
-- Alternative path-based comparison, unreachable as written (kept for reference):
-- ent: FileEntry => {
-- pCom: Basics.PartialComparison ← PFSNames.Compare[key, ent.fullSrcNamePath];
-- RETURN [ConvertPCom[pCom]];
-- };
ENDCASE;
ERROR;
};
ConvertPCom: PROC[pCom: Basics.PartialComparison] RETURNS[Basics.Comparison] = {
SELECT pCom FROM
incomparable, less => RETURN[less];
equal => RETURN[equal];
greater => RETURN[greater];
ENDCASE => RETURN[less];
};
AllDfsCmdProc: Commander.CommandProc = { [result, msg] ¬ Common[allDFs, cmd] };
OneDfCmdProc: Commander.CommandProc = { [result, msg] ¬ Common[oneDF, cmd] };
IndDfCmdProc: Commander.CommandProc = { [result, msg] ¬ Common[individualDFs, cmd] };
DirCmdProc: Commander.CommandProc = { [result, msg] ¬ Common[fullDirectory, cmd] };
Common: PROC[tcType: TCType, cmd: Commander.Handle, useRetrieve: BOOL ¬ FALSE]
RETURNS[result: REF, msg: ROPE] = {
[cmd: Handle] RETURNS [result: REF ← NIL, msg: ROPE ← NIL]
CommandObject = [in, out, err: STREAM, commandLine, command: ROPE, ...]
ProcessSwitches: PROC [arg: ROPE] RETURNS[Switches]= {
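Parses one switch argument: '~' negates the switch that follows it; 'a' and 'o' also consume the next command-line argument as the output file name.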
sense: BOOL ¬ TRUE;
switches: Switches ¬ ALL[FALSE];
FOR index: INT IN [0..Rope.Length[arg]) DO
char: CHAR ¬ Rope.Fetch[arg, index];
SELECT char FROM
'- => LOOP;
'~ => {sense ¬ NOT sense; LOOP};
'a, 'A => { outFileName ¬ CommanderOps.NextArgument[cmd]; switches['a] ¬ sense };
'o, 'O => { outFileName ¬ CommanderOps.NextArgument[cmd]; switches['o] ¬ sense };
IN ['a..'z] => switches[char] ¬ sense;
IN ['A..'Z] => switches[char + ('a-'A)] ¬ sense;
ENDCASE;
sense ¬ TRUE;
ENDLOOP;
RETURN[switches];
};
out: STREAM ¬ cmd.out;
outFileName: ROPE;
oldPriority: Process.Priority ¬ Process.GetPriority[];
table: RedBlackTree.Table ¬ RedBlackTree.Create[getKey: GetKey, compare: Compare];
this: TCInfo ¬ NEW[TCInfoRec];
this.tcType ¬ tcType;
DO
arg: ROPE ¬ CommanderOps.NextArgument[cmd ! CommanderOps.Failed => { msg ¬ errorMsg; GO TO failed } ];
When parsing the command line, be prepared for failure. The error is reported to the user
ch: CHAR;
Process.CheckForAbort[];
IF arg = NIL THEN EXIT;
ch ¬ Rope.Fetch[arg, 0];
SELECT TRUE FROM
( ch = '- ) AND ( arg.Length[] = 2 ) => -- switch
this.switches ¬ ProcessSwitches[arg];
( ch = '{ ) => LOOP; -- ignore
( ch = '} ) => LOOP; -- ignore
( ch = '$ ) => LOOP; -- ignore
ENDCASE => { -- translations or other things
IF this.prefix = NIL THEN {
this.prefix ¬ arg;
this.prefixPath ¬ PFS.PathFromRope[this.prefix];
LOOP;
};
IF this.fromTranslation = NIL THEN {
this.fromTranslation ¬ arg;
this.fromTranslationPath ¬ PFS.PathFromRope[arg];
LOOP;
};
IF this.toTranslation = NIL THEN {
this.toTranslation ¬ arg;
LOOP;
};
SELECT TRUE FROM
( this.otherArg # NIL) => {
msg ¬ IO.PutFR1["Extra argument (%g)", [rope[arg]] ];
GO TO failed;
};
ENDCASE => this.otherArg ¬ arg;
};
ENDLOOP;
IF ( this.prefix = NIL ) OR ( this.fromTranslation = NIL ) OR ( this.toTranslation = NIL ) THEN
{ msg ¬ "Not enough arguments given.\n"; GOTO failed };
IF useRetrieve THEN this.switches['r] ¬ TRUE;
IF outFileName # NIL THEN {
outPath: PFS.PATH ~ PFS.PathFromRope[outFileName];
outStream: STREAM;
appending: BOOL ~ this.switches['a];
outStream ¬ PFS.StreamOpen[outPath, IF appending THEN $append ELSE $create ! PFS.Error => {
out.PutRope[error.explanation]; CONTINUE} ];
IF outStream # NIL THEN {
out.PutF["Messages will be %g %g\n",
[rope[IF appending THEN "appended to" ELSE "written on"]],
[rope[outFileName]] ];
out ¬ outStream;
};
};
{
Action: PROC = {
this.fromPTable ¬ PFSPrefixMap.InsertIntoNewPTable[
PFS.PathFromRope[this.prefix],
PFS.PathFromRope[this.fromTranslation] ];
this.toPTable ¬ PFSPrefixMap.InsertIntoNewPTable[
PFS.PathFromRope[this.prefix],
PFS.PathFromRope[this.toTranslation] ];
this.toPTable ¬ PFSPrefixMap.Insert[
PFS.PathFromRope[this.fromTranslation],
PFS.PathFromRope[this.toTranslation],
this.toPTable ];
IF this.tcType = individualDFs THEN IndividualDFs[this, table, out]
ELSE DoIt[this, table, out
! PFS.Error => {
out.PutF["PFS.Error[%g], stopping this round.\n\t\t(at %g)\n\n", [rope[error.explanation]], [time[BasicTime.Now[]]] ];
IF outFileName # NIL THEN out.Flush[];
CONTINUE;
};
];
IF outFileName # NIL THEN out.Close[];
};
CedarProcess.DoWithPriority[background, Action];
};
EXITS
failed => {result ¬ $Failure};
};
IndividualDFs: PROC[tcInfo: TCInfo, table: RedBlackTree.Table, out: STREAM] = {
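Reads a driver df whose Include items name other df files, then trickles each of those dfs in turn as a oneDF (local) run.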
doFlush: BOOL ¬ tcInfo.switches['a] OR tcInfo.switches['o];
dfList: LIST OF ROPE ¬ NIL;
filter: DFUtilities.Filter ¬ [FALSE, all, all, defining];
VisitClosure1: PROC [tcInfo: TCInfo, dfName: PATH, uid: PFS.UniqueID,
visitor: PROC [name: PATH, uid: PFS.UniqueID],
usingList: REF DFUtilities.UsingList ¬ NIL] = {
ENABLE PFS.Error => IF ( error.code = $unknownServer ) OR
( error.code = $unknownFile ) THEN {
out.PutF["\n****PFS.Error[%g], in dfFile: %g\n", [rope[error.explanation]], [rope[PFS.RopeFromPath[dfName]]] ];
IF doFlush THEN out.Flush[];
};
EachItem1: DFUtilities.ProcessItemProc = {
WITH item SELECT FROM
dir: REF DFUtilities.DirectoryItem => prefix ¬ PFS.PathFromRope[dir.path1];
file: REF DFUtilities.FileItem => {
out.PutF1["\n*** fileName found in IndividualDF driver file (%g)", [rope[file.name]]];
out.PutRope["\n\tit will be ignored\n"];
};
incl: REF DFUtilities.IncludeItem =>
dfList ¬ CONS[incl.path1, dfList];
ENDCASE => { i: INT ¬ 0; }; -- handy for setting breakpoints - (bj)
};
prefix: PATH ¬ NIL;
in: STREAM;
fileNamePath: PATH ¬ PFSPrefixMap.Translate[dfName, tcInfo.fromPTable];
in ¬ PFS.StreamOpen[
fileName: fileNamePath,
wantedUniqueID: uid
! PFS.Error => IF error.code = $unknownFile THEN {
out.PutF["PFS.Error[%g] - from dfFile %g\n", [rope[error.explanation]],
[rope[PFS.RopeFromPath[dfName]]] ];
IF doFlush THEN out.Flush[];
in ¬ NIL;
CONTINUE;
}
ELSE REJECT
];
IF in#NIL THEN {
filter.list ¬ usingList;
DFUtilities.ParseFromStream[in, EachItem1, filter -- global variable, hack for now! - (bj)
! UNWIND => in.Close[]];
in.Close[]
};
};
IF tcInfo.switches['l] THEN VisitClosure1[tcInfo, PFS.PathFromRope[tcInfo.otherArg.Concat[bangH]], PFS.nullUniqueID, NIL]
ELSE VisitClosure1[tcInfo, PFS.PathFromRope[tcInfo.prefix.Cat[tcInfo.otherArg, bangH]], PFS.nullUniqueID, NIL];
tcInfo.tcType ¬ oneDF; -- doing one df
tcInfo.switches['l] ¬ TRUE; -- make it look like a local df (this is a full name)
FOR rL: LIST OF ROPE ¬ dfList, rL.rest UNTIL rL = NIL DO
tcInfo.otherArg ¬ rL.first;
DoIt[tcInfo, table, out
! PFS.Error => {
out.PutF["PFS.Error[%g]; problems with %g.\n\t\t(at %g)\n\n", [rope[error.explanation]], [rope[rL.first]], [time[BasicTime.Now[]]] ];
IF doFlush THEN out.Flush[];
CONTINUE;
};
];
ENDLOOP;
};
Path: PROC[pt: PFS.PATH] RETURNS[rp: ROPE] ~ { RETURN[PFS.RopeFromPath[pt] ] };
Translate: PROC[name: PATH, pTable: PFSPrefixMap.PrefixTableList] RETURNS [rp: ROPE] =
{ RETURN[PFS.RopeFromPath[PFSPrefixMap.Translate[name, pTable]]] };
reportIgnoring: BOOL ¬ FALSE;
ATReportIgnores: ENTRY Commander.CommandProc ~ { reportIgnoring ¬ TRUE };
ATDontReportIgnores: ENTRY Commander.CommandProc ~ { reportIgnoring ¬ FALSE };
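These two commands toggle the "**Ignoring the file" messages produced by VisitEntry.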
Initialization
Commander.Register[ key: "AttachOneDF", proc: OneDfCmdProc,
doc: oneDfDoc, interpreted: TRUE];
Commander.Register[ key: "AttachAllDFs", proc: AllDfsCmdProc,
doc: allDfsDoc, interpreted: TRUE];
Commander.Register[ key: "AttachDirectory", proc: DirCmdProc,
doc: dirDoc, interpreted: TRUE];
Commander.Register[ key: "ATReportIgnores", proc: ATReportIgnores];
Commander.Register[ key: "ATDontReportIgnores", proc: ATDontReportIgnores];
END.


Rick Beach, January 23, 1986 1:34:16 pm PST
changes to: action (local of TrickleCommandProc) changed timeLeft: NAT to INT because timeLeft could go negative!
Carl Hauser, February 3, 1987 10:44:11 am PST
Register the command in the local directory, not ///Commands/
Last edited by: Mik Lamming - January 25, 1989 7:36:42 pm GMT
Removed ERROR clause that makes export chasing fail.
changes to: imports (local of EachItem, local of VisitClosure, local of DoIt)