ReleaseToolDriver.mesa
Copyright © 1985 by Xerox Corporation. All rights reserved.
Russ Atkinson, March 12, 1985 3:27:09 pm PST
DIRECTORY
BasicTime,
Commander,
DFOperations,
DFUtilities,
FS,
IO,
MessageWindow,
Process,
ReleaseToolVerify,
Rope,
SymTab,
VersionMap,
ViewerIO;
ReleaseToolDriver: CEDAR PROGRAM
IMPORTS BasicTime, Commander, DFUtilities, FS, IO, MessageWindow, Process, ReleaseToolVerify, Rope, SymTab, VersionMap, ViewerIO
SHARES VersionMap
= BEGIN
CommentItem: TYPE = DFUtilities.CommentItem;
Date: TYPE = DFUtilities.Date;
DirectoryItem: TYPE = DFUtilities.DirectoryItem;
FileItem: TYPE = DFUtilities.FileItem;
Filter: TYPE = DFUtilities.Filter;
ImportsItem: TYPE = DFUtilities.ImportsItem;
IncludeItem: TYPE = DFUtilities.IncludeItem;
Map: TYPE = VersionMap.Map;
MapList: TYPE = VersionMap.MapList;
ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;
WhiteSpaceItem: TYPE = DFUtilities.WhiteSpaceItem;
ReleaseToolCommand: Commander.CommandProc = {
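-- Shared command procedure for TestMovingPhase, VerifyRelease, and MoveRelease. The atoms in cmd.procData.clientData select the phases to run ($debug, $verify, $move); the command line supplies the root DF file, the version map prefix, and the release host and directory; all output goes to the ReleaseTool.log viewer.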
EachItem: DFUtilities.ProcessItemProc = {
-- [item: REF ANY] RETURNS [stop: BOOL ← FALSE]
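-- Runs ReleaseToolVerify.Verify on each DF included by the root DF, skipping any include whose short name matches the root DF itself.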
WITH item SELECT FROM
incl: REF DFUtilities.IncludeItem => {
name: ROPE = incl.path1;
IO.PutRope[outV, "\n\nVerifyDF of "];
IO.PutRope[outV, name];
IF Rope.Equal[ShortName[name], shortDfFile, FALSE] THEN {
IO.PutRope[outV, "\n not done, short name equals root DF name.\n"];
RETURN;
};
IO.PutF[outV, "\n starting at %g\n", [time[BasicTime.Now[]]]];
[] ← ReleaseToolVerify.Verify[
name, bcdCache, sourceMaps, symbolsMaps, Interact, outV, outV];
};
ENDCASE;
};
debugging, verifying, moving: BOOL ← FALSE;
inV, outV: STREAM ← NIL;
inStream: STREAM ← NIL;
dfTable: SymTab.Ref ← NIL;
dfFile: ROPE ← "CurrentCedar.df";
shortDfFile: ROPE ← dfFile;
mapPrefix: ROPE ← "Cedar";
releaseDirectory: ROPE ← NIL;
releaseHost: ROPE ← NIL;
ris: STREAM = IO.RIS[cmd.commandLine];
sourceMapName, symbolsMapName: ROPE ← NIL;
sourceMaps,symbolsMaps: MapList ← NIL;
bcdCache: ReleaseToolVerify.BcdCache ← NIL;
Cleanup: PROC = {
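-- Flushes the BCD cache and closes any streams that are still open.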
IF bcdCache # NIL THEN {ReleaseToolVerify.FlushBcdCache[bcdCache]; bcdCache ← NIL};
IF inStream # NIL THEN {IO.Close[inStream]; inStream ← NIL};
IF inV # NIL THEN {IO.Close[inV]; inV ← NIL};
IF outV # NIL THEN {IO.Close[outV]; outV ← NIL};
};
TimedMessage: PROC [msg: ROPE] = {
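-- Writes msg to the log, substituting the current time for its %g.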
IO.PutF[outV, msg, [time[BasicTime.Now[]]]];
};
WITH cmd.procData.clientData SELECT FROM
list: LIST OF REF ANY => {
WHILE list # NIL DO
SELECT list.first FROM
$debug => debugging ← TRUE;
$verify => verifying ← TRUE;
$move => moving ← TRUE;
ENDCASE;
list ← list.rest;
ENDLOOP;
};
ENDCASE;
-- The command line has the following items (all optional):
--   dfFile mapPrefix releaseHost releaseDirectory
dfFile ← DefaultExtension[
IO.GetTokenRope[ris, IO.IDProc ! IO.EndOfStream => CONTINUE].token, ".df"];
shortDfFile ← ShortName[dfFile];
mapPrefix ← IO.GetTokenRope[ris, IO.IDProc ! IO.EndOfStream => CONTINUE].token;
releaseHost ← IO.GetTokenRope[ris, IO.IDProc ! IO.EndOfStream => CONTINUE].token;
releaseDirectory ← IO.GetTokenRope[ris, IO.IDProc ! IO.EndOfStream => CONTINUE].token;
sourceMapName ← DefaultExtension[mapPrefix, "Source.VersionMap"];
sourceMaps ← LIST[VersionMap.RestoreMapFromFile[sourceMapName]];
symbolsMapName ← DefaultExtension[mapPrefix, "Symbols.VersionMap"];
symbolsMaps ← LIST[VersionMap.RestoreMapFromFile[symbolsMapName]];
inStream ← FS.StreamOpen[dfFile];
[inV,outV] ← ViewerIO.CreateViewerStreams[
"ReleaseTool.log", NIL, "ReleaseTool.log", FALSE];
{ENABLE
UNWIND => {
TimedMessage["\n\n**** Aborting at %g ****\n"];
Cleanup[];
};
IF verifying THEN {
TimedMessage["\n\nVerification Phase of ReleaseTool starting at %g\n"];
bcdCache ← ReleaseToolVerify.CreateBcdCache[80];
DFUtilities.ParseFromStream[inStream, EachItem];
TimedMessage["\n\nVerification Phase of ReleaseTool ending at %g\n"];
IO.Close[inStream];
inStream ← NIL;
ReleaseToolVerify.FlushBcdCache[bcdCache];
bcdCache ← NIL;
};
IF moving THEN {
TimedMessage["\n\nFile Moving Phase of ReleaseTool starting at %g\n"];
dfTable ← BuildDFTable[dfFile, outV, releaseHost, releaseDirectory];
TimedMessage["\nDF table built at %g\n"];
MoveFiles[dfTable, outV, releaseHost, releaseDirectory, debugging];
TimedMessage["\n\nFile Moving Phase of ReleaseTool ending at %g\n"];
};
};
Cleanup[];
};
showInfo: BOOL ← FALSE;
Interact: DFOperations.InteractionProc = {
-- [interaction: REF, clientData: REF]
-- RETURNS [abort: BOOL ← FALSE, abortMessageForLog: ROPE ← NIL, response: REF ← NIL]
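-- Interaction handler passed to ReleaseToolVerify.Verify. When showInfo is TRUE it copies interaction messages to the log stream (clientData) and honors abort requests via Process.CheckForAbort; with showInfo FALSE (the default) interactions are ignored.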
out: STREAM = NARROW[clientData];
IF showInfo THEN
WITH interaction SELECT FROM
info: REF DFOperations.InfoInteraction => {
IO.PutRope[out, info.message];
};
dfInfo: REF DFOperations.DFInfoInteraction => {
IO.PutRope[out, dfInfo.message];
};
abort: REF DFOperations.AbortInteraction => {
Process.CheckForAbort[];
};
ENDCASE;
};
DefaultExtension: PROC [name: ROPE, ext: ROPE] RETURNS [ROPE] = {
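-- Appends ext to name unless name already has the extension ext (case-insensitive) or carries an explicit !version.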
len: INT = Rope.Length[name];
eLen: INT = Rope.Length[ext];
pos, bang, dot: INT ← len;
WHILE pos > 0 DO
posM: INT = pos-1;
SELECT Rope.Fetch[name, posM] FROM
'! => bang ← dot ← posM;
'. => dot ← posM;
'>, '/, '] => EXIT;
ENDCASE;
pos ← posM;
ENDLOOP;
IF bang = len
AND (bang-dot # eLen OR Rope.Run[name, dot, ext, 0, FALSE] # eLen)
THEN name ← Rope.Concat[name, ext];
RETURN [name];
};
DFTableEntry: TYPE = REF DFTableEntryRep;
DFTableEntryRep: TYPE = RECORD [
tempName: ROPE,
destName: ROPE,
time: BasicTime.GMT
];
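-- tempName is the local scratch DF written during the moving phase, destName is the full released path of the DF, and time is the distinct create time assigned to the released DF.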
BuildDFTable: PROC [topDF: ROPE, log: STREAM, host,dir: ROPE] RETURNS [tab: SymTab.Ref] = {
-- Build a table of source DF names and entries. Each entry gets a distinct desired create time, which is bounded by the start time of the procedure and the start time plus N seconds, where N is the number of DF files.
now: BasicTime.GMT = BasicTime.Now[];
index: INT ← 0;
EachDF: PROC [dfName: ROPE, dfDate: Date ← []] = {
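-- Each DF gets two passes: the first records its own release destination from its self-reference (its ReleaseAs directory), the second follows its Includes.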
EachDFPass[dfName, dfDate, TRUE];
EachDFPass[dfName, dfDate, FALSE];
};
EachDFPass: PROC [dfName: ROPE, dfDate: Date ← [], selfReferenceOnly: BOOL] = {
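-- One parse of dfName. When selfReferenceOnly, a Using list restricts the parse to the DF's own entry so that its ReleaseAs directory can be captured; otherwise only the Includes items are acted upon, recursing via EachDF.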
inStream: STREAM ← NIL;
currentDir: REF DirectoryItem ← NIL;
EachItem: DFUtilities.ProcessItemProc = {
WITH item SELECT FROM
dirItem: REF DirectoryItem => {
currentDir ← dirItem;
};
fileItem: REF FileItem => {
SELECT TRUE FROM
NOT selfReferenceOnly => RETURN;
currentDir = NIL =>
IO.PutF[log, "\n**** NIL directory: %g", [rope[dfName]]];
currentDir.path2 = NIL =>
IO.PutF[log, "\n**** no ReleaseAs clause for: %g", [rope[dfName]]];
currentDir.path2IsCameFrom => {
IO.PutF[log, "\n**** CameFrom clause for: %g", [rope[dfName]]];
};
ENDCASE => {
destName: ROPE ← Rope.Concat[
ReplaceHostAndDir[currentDir.path2, host, dir],
RemoveVersion[fileItem.name]];
entry.destName ← destName;
};
stop ← selfReferenceOnly;
};
inclItem: REF IncludeItem => {
IF selfReferenceOnly THEN RETURN;
EachDF[inclItem.path1, inclItem.date];
};
ENDCASE;
};
Cleanup: PROC = {
IF inStream # NIL THEN {IO.Close[inStream]; inStream ← NIL};
};
-- First check to see if we have seen this dfName before.
entry: DFTableEntry ← NIL;
stripped: ROPE = RemoveVersion[dfName];
realName: ROPE ← NIL;
key: ROPE ← NIL;
[realName, dfDate] ← MakeExplicit[dfName, dfDate];
IF realName = NIL THEN {
IO.PutF[log, "\n**** DF file not found: %g", [rope[dfName]]];
GO TO skipIt
};
inStream ← FS.StreamOpen[dfName ← realName];
key ← RemoveVersion[dfName];
WITH SymTab.Fetch[tab, key].val SELECT FROM
e: DFTableEntry => {
-- This file has a previous entry (put there on the previous pass)
IF selfReferenceOnly THEN RETURN;
entry ← e;
};
ENDCASE => {
-- Only need to do this stuff if this is the first entry for this file
entry ← NEW[DFTableEntryRep ← [
tempName: IO.PutFR["Temp.%g.df", [integer[index]]],
destName: ReplaceHostAndDir[dfName, host, dir],
time: BasicTime.Update[now, index]
]];
IO.PutF[log, "\nDF table entry for: %g", [rope[dfName]]];
index ← index + 1;
[] ← SymTab.Store[tab, key, entry];
};
{ENABLE UNWIND => Cleanup[];
filter: Filter ← [filterA: source, filterB: public, filterC: defining];
-- This is the most restrictive filter we can use to just get the Includes clauses
IF selfReferenceOnly THEN {
-- We are ONLY looking for the self-reference, so make the list say that.
using: REF DFUtilities.UsingList ← NEW[DFUtilities.UsingList[1]];
using[0] ← [name: ShortName[dfName]];
using.nEntries ← 1;
filter ← [filterA: source, filterB: all, filterC: defining, list: using];
};
DFUtilities.ParseFromStream[
inStream, EachItem, filter
! DFUtilities.SyntaxError => {
IO.PutF[log, "\n**** Syntax Error: %g", [rope[reason]]];
CONTINUE;
}];
};
Cleanup[];
EXITS skipIt => {};
};
tab ← SymTab.Create[151, FALSE];
EachDF[topDF];
};
MakeExplicit: PROC [name: ROPE, date: Date] RETURNS [realName: ROPE ← NIL, realDate: Date] = {
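-- Resolves name to its full FS name and explicit create date, trying first with the given date and then with the version stripped; realName is NIL if neither lookup succeeds.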
realDate.format ← explicit;
[fullFName: realName, created: realDate.gmt] ← FS.FileInfo[name, date.gmt
! FS.Error => IF error.group # bug THEN CONTINUE];
IF realName = NIL THEN
[fullFName: realName, created: realDate.gmt] ← FS.FileInfo[RemoveVersion[name], date.gmt
! FS.Error => IF error.group # bug THEN CONTINUE];
};
ReplaceHostAndDir: PROC [name,host,dir: ROPE] RETURNS [dest: ROPE] = {
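-- Rewrites the [host] and the top-level <directory> of a full path name, each only when the corresponding argument is non-NIL, and strips any trailing !version.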
rbPos: INT = Rope.SkipTo[dest ← name, 1, "]"];
laPos: INT = Rope.SkipTo[dest, rbPos+1, "<"];
raPos: INT = Rope.SkipTo[dest, laPos+1, ">"];
IF dir # NIL THEN dest ← Rope.Replace[name, laPos+1, raPos-laPos-1, dir];
IF host # NIL THEN dest ← Rope.Replace[dest, 1, rbPos-1, host];
dest ← RemoveVersion[dest];
};
RemoveVersion: PROC [name: ROPE] RETURNS [ROPE] = {
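-- Strips a trailing !version from name, if present.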
pos: INT ← Rope.Length[name];
WHILE (pos ← pos - 1) > 0 DO
SELECT Rope.Fetch[name, pos] FROM
'! => RETURN [Rope.Flatten[name, 0, pos]];
'>, '], '. => EXIT;
ENDCASE;
ENDLOOP;
RETURN [name];
};
ShortName: PROC [name: ROPE] RETURNS [ROPE] = {
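-- Returns the simple file name: everything after the last '>' or ']', with any !version removed.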
pos: INT ← Rope.Length[name];
bang: INT ← Rope.Length[name];
WHILE (pos ← pos - 1) > 0 DO
SELECT Rope.Fetch[name, pos] FROM
'! => bang ← pos;
'>, '] => RETURN [Rope.Flatten[name, pos+1, bang-pos-1]];
ENDCASE;
ENDLOOP;
RETURN [Rope.Flatten[name, 0, bang]];
};
FindDFName: PROC [table: SymTab.Ref, name: ROPE, date: Date] RETURNS [destName: ROPE, destDate: Date, tempName: ROPE ← NIL] = {
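-- Looks name up in the DF table (after stripping its version). For a DF being released, returns the release destination name, the assigned create time, and the temp file name; otherwise returns the stripped name and original date with tempName = NIL.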
destName ← RemoveVersion[name];
destDate ← date;
WITH SymTab.Fetch[table, destName].val SELECT FROM
entry: DFTableEntry => {
destName ← entry.destName;
destDate.format ← explicit;
destDate.gmt ← entry.time;
tempName ← entry.tempName;
};
ENDCASE;
};
FindNonDF: PROC [table: SymTab.Ref, name: ROPE, date: Date] RETURNS [realName: ROPE ← NIL, realDate: Date ← []] = {
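-- For a file that is not a DF in the table, returns its full name and explicit create date; for a DF in the table, returns realName = NIL, since released DF files are moved separately.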
stripped: ROPE ← RemoveVersion[name];
WITH SymTab.Fetch[table, stripped].val SELECT FROM
entry: DFTableEntry => {
-- In this case, the file is a DF file in our table, so we return NIL to indicate that it should NOT be moved!
};
ENDCASE => {
[realName, realDate] ← MakeExplicit[name, date];
};
};
MoveFiles: PROC [table: SymTab.Ref, log: STREAM, host, dir: ROPE, debugging: BOOL] = {
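-- Two passes over the DF table: the first copies the non-DF contents of each DF and writes a rewritten temp DF; the second copies each temp DF to its release destination.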
nonDfAction: SymTab.EachPairAction = {
-- [key: Key, val: Val] RETURNS [quit: BOOL]
quit ← FALSE;
totalCount ← MoveDFContents[key, table, log, host, dir, totalCount, debugging];
};
dfAction: SymTab.EachPairAction = {
-- [key: Key, val: Val] RETURNS [quit: BOOL]
quit ← FALSE;
totalCount ← MoveDFTemp[key, val, log, totalCount, debugging];
};
totalCount: INT ← 0;
[] ← SymTab.Pairs[table, nonDfAction];
[] ← SymTab.Pairs[table, dfAction];
};
MoveDFContents: PROC [dfName: ROPE, table: SymTab.Ref, log: STREAM, host,dir: ROPE, count: INT, debugging: BOOL] RETURNS [INT ← 0] = {
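-- Copies the non-DF files listed in dfName to their release directories and writes a rewritten DF (CameFrom directories, explicit dates) to entry.tempName, whose create time is set to entry.time. Returns the updated count of files moved, or 0 if the DF is not in the table or cannot be opened.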
entry: DFTableEntry ← NIL;
inStream, outStream: STREAM ← NIL;
currentDirItem: REF DirectoryItem;
currentSourcePrefix: ROPE ← NIL;
currentDestPrefix: ROPE ← NIL;
EachItem: DFUtilities.ProcessItemProc = {
WITH item SELECT FROM
dirItem: REF DirectoryItem => {
IF NOT dirItem.path2IsCameFrom AND dirItem.path2 # NIL THEN {
currentSourcePrefix ← dirItem.path1;
currentDestPrefix ← ReplaceHostAndDir[dirItem.path2, host, dir];
dirItem.path1 ← currentDestPrefix;
dirItem.path2 ← currentSourcePrefix;
dirItem.path2IsCameFrom ← TRUE;
};
currentDirItem ← dirItem;
};
file: REF FileItem => {
date: Date ← file.date;
sourceName: ROPE ← Rope.Concat[currentSourcePrefix, file.name];
destName: ROPE ← Rope.Concat[currentDestPrefix, RemoveVersion[file.name]];
destExists: BOOL ← FALSE;
realName, tempName: ROPE ← NIL;
[realName, date] ← FindNonDF[table, sourceName, file.date];
IF realName = NIL THEN {
-- The file requested is either a DF file (which does NOT get moved yet) or it does not exist, in which case we put a message to the log.
[destName, date, tempName] ← FindDFName[table, sourceName, date];
IF NOT Rope.Match["*.df", destName, FALSE] THEN {
IO.PutF[log, "\n**** File not found: %g", [rope[sourceName]]];
GO TO skip1
};
IO.PutF[log, "\nFuture copy %g (%g)\n to %g",
[rope[sourceName]], [rope[tempName]], [rope[destName]]];
file.date ← date;
GO TO skip1;
};
-- At this point, realName is the real long name of the source file, and date is the real explicit date of the source file.
file.date ← date;
IO.PutF[log, "\nCopy %g\n to ", [rope[sourceName ← realName]]];
IF debugging
THEN IO.PutF[log, "%g\n (not copied, debugging)", [rope[destName]]]
ELSE {
ENABLE FS.Error =>
IF error.group # bug THEN {
IO.PutF[log, "\n**** Copy failed: %g", [rope[error.explanation]]];
GO TO skip1
};
gmt: BasicTime.GMT;
realName ← NIL;
-- First, check to see if the file needs moving at all (based on the create date).
[fullFName: realName, created: gmt] ← FS.FileInfo[destName
! FS.Error => IF error.group # bug THEN CONTINUE];
IF realName # NIL AND gmt = date.gmt
THEN {
-- No move needed, since it is already there!
destName ← realName;
IO.PutRope[log, destName];
IO.PutRope[log, "\n (already there)"];
}
ELSE {
-- Move the file from the old place to the new place.
tempName: ROPE ← "ReleaseTool.Temp";
IF date.gmt # BasicTime.nullGMT
AND ReleaseToolVerify.IsInFileCache[sourceName, date]
THEN {
-- No need to copy to a temp file, since we already have this file in our cache. Therefore, we just make the tempName the real source name.
tempName ← sourceName;
}
ELSE {
-- It pays to copy this file to a temp file first, since that means we don't have to mess up our current file cache with entries that are better flushed. This would not have to happen if FS did the right thing with remote to remote copies!
tempName ← FS.Copy[from: sourceName, to: tempName];
};
destName ← FS.Copy[from: tempName, to: destName];
IO.PutRope[log, destName];
date.format ← explicit;
date.gmt ← gmt;
};
file.name ← Rope.Substr[destName, Rope.Length[currentDestPrefix]];
};
MessageWindow.Append[
IO.PutFR[" %g files moved.", [integer[count ← count + 1]]], TRUE];
EXITS skip1 => {};
};
imports: REF ImportsItem => {
destName, tempName: ROPE;
destDate: Date;
[destName, destDate, tempName] ← FindDFName[table, imports.path1, imports.date];
IF tempName = NIL
THEN {
-- This is an imported file NOT being released.
[destName, destDate] ← MakeExplicit[imports.path1, imports.date];
IF destName = NIL
THEN IO.PutF[log, "\n**** File not found: %g", [rope[imports.path1]]]
ELSE {imports.path1 ← destName; imports.date ← destDate};
}
ELSE {
-- This is an imported file that is being released.
imports.path2 ← imports.path1;
imports.path1 ← destName;
imports.date ← destDate;
};
};
include: REF IncludeItem => {
IF NOT include.path2IsCameFrom OR include.path2 = NIL THEN {
destName, tempName: ROPE;
destDate: Date;
[destName, destDate, tempName] ← FindDFName[table, include.path1, include.date];
IF tempName = NIL
THEN {
-- This is an included file NOT being released.
[destName, destDate] ← MakeExplicit[include.path1, include.date];
IF destName = NIL
THEN IO.PutF[log, "\n**** File not found: %g", [rope[include.path1]]]
ELSE {include.path1 ← destName; include.date ← destDate};
}
ELSE {
-- This is an included file that is being released.
include.path2IsCameFrom ← TRUE;
include.path2 ← include.path1;
include.path1 ← destName;
include.date ← destDate;
};
};
};
ENDCASE;
DFUtilities.WriteItemToStream[outStream, item];
};
WITH SymTab.Fetch[table, dfName].val SELECT FROM
e: DFTableEntry => entry ← e;
ENDCASE => {
IO.PutF[log, "\n**** DF file not in table: %g", [rope[dfName]]];
GO TO quit
};
inStream ← FS.StreamOpen[dfName
! FS.Error =>
IF error.group # bug THEN {
IO.PutF[log, "\n**** DF file not found: %g", [rope[error.explanation]]];
GO TO quit
};
];
outStream ← FS.StreamOpen[entry.tempName, create
! FS.Error =>
IF error.group # bug THEN {
IO.PutF[log, "\n**** temp DF file not opened: %g", [rope[error.explanation]]];
IO.Close[inStream];
GO TO quit
};
];
IO.PutF[log, "\n\nMoving contents of %g (%g)", [rope[dfName]], [rope[entry.tempName]]];
DFUtilities.ParseFromStream[inStream, EachItem, [comments: TRUE]
! DFUtilities.SyntaxError => {
IO.PutF[log, "\n**** Syntax Error: %g", [rope[reason]]];
CONTINUE;
}];
IO.Close[inStream];
FS.SetByteCountAndCreatedTime[FS.OpenFileFromStream[outStream], -1, entry.time];
IO.Close[outStream];
RETURN [count];
EXITS quit => {};
};
MoveDFTemp: PROC [name: ROPE, val: REF, log: STREAM, count: INT, debugging: BOOL] RETURNS [INT] = {
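-- Copies a rewritten temp DF to its release destination (skipped when debugging) and returns the updated count of files moved.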
WITH val SELECT FROM
entry: DFTableEntry => {
IO.PutF[log, "\nCopy %g (%g)\n to %g",
[rope[name]], [rope[entry.tempName]], [rope[entry.destName]]];
IF debugging
THEN
IO.PutRope[log, "\n (not copied, debugging)"]
ELSE
[] ← FS.Copy[from: entry.tempName, to: entry.destName
! FS.Error =>
IF error.group # bug THEN {
IO.PutF[log, "\n**** Copy failed: %g", [rope[error.explanation]]];
CONTINUE;
};
];
MessageWindow.Append[
IO.PutFR[" %g files moved.", [integer[count ← count + 1]]], TRUE];
};
ENDCASE;
RETURN [count];
};
Commander.Register[
"TestMovingPhase",
ReleaseToolCommand,
"tests the file moving phase of the ReleaseTool without moving files. The command line has, in order (all optional): <dfFile> <mapPrefix> <releaseHost> <releaseDirectory>",
LIST[$debug, $move]];
Commander.Register[
"VerifyRelease",
ReleaseToolCommand,
"verifies the files in a release. We assume that the version maps are valid. The command line has, in order (all optional): <dfFile> <mapPrefix> <releaseHost> <releaseDirectory>",
LIST[$verify]];
Commander.Register[
"MoveRelease",
ReleaseToolCommand,
"moves the files in a release. We assume that the version maps are valid, and that the files have been verified (although no state is retained between verification and moving). The command line has, in order (all optional): <dfFile> <mapPrefix> <releaseHost> <releaseDirectory>",
LIST[$move]];
END.