ReleaseToolVerifyImpl.mesa
Copyright © 1985 by Xerox Corporation. All rights reserved.
Levin on November 23, 1983 1:24 am
Russ Atkinson (RRA) June 25, 1985 2:11:51 pm PDT
DIRECTORY
Ascii USING [Lower],
Basics USING [bytesPerWord],
BasicTime USING [FromPupTime, GMT, nullGMT, ToPupTime],
BcdDefs USING [Base, BCD, BcdBase, CTHandle, CTIndex, CTNull, FTHandle, FTIndex, FTNull, FTSelf, MTHandle, MTIndex, NameRecord, NameString, NullName, NullVersion, VersionID, VersionStamp],
BcdOps USING [ProcessConfigs, ProcessFiles, ProcessModules],
DFInternal USING [AbortDF, CheckAbort, Client, ClientDescriptor, DefaultInteractionProc, GetFileInfo, LocalFile, LocalFileInfo, RemoteFileInfo, ReportFSError, RetryFSOperation, SimpleInteraction],
DFOperations USING [AbortInteraction, DFInfoInteraction, InfoInteraction, InteractionProc],
DFUtilities USING [Date, DateToRope, DirectoryItem, FileItem, Filter, ImportsItem, IncludeItem, ParseFromStream, ProcessItemProc, RemoveVersionNumber, SyntaxError],
IFSFile USING [Close, Completer, Error, GetTimes, FileHandle, Finalize, FSInstance, Initialize, Login, Logout, Open, Problem, StartRead, UnableToLogin],
FS,
FSBackdoor USING [EnumerateCacheForInfo, InfoProc],
List USING [CompareProc, UniqueSort],
IO USING [card, Close, GetIndex, PutFR, rope, STREAM],
PrincOps USING [bytesPerPage, PageCount],
Process USING [CheckForAbort, Pause, SecondsToTicks],
ReleaseToolVerify USING [],
Rope USING [Cat, Compare, Concat, Equal, Fetch, Find, Flatten, FromProc, Index, Length, ROPE, Run, SkipTo, Substr],
RuntimeError USING [UNCAUGHT],
UserCredentials USING [Get],
VersionMap USING [MapAndNameList, MapList, VersionToAllNames],
VM USING [AddressForPageNumber, Allocate, Free, Interval, nullInterval, PageCount];
ReleaseToolVerifyImpl: CEDAR MONITOR
IMPORTS Ascii, BasicTime, BcdOps, DFInternal, DFUtilities, IFSFile, FS, FSBackdoor, List, IO, Process, Rope, RuntimeError, UserCredentials, VersionMap, VM
EXPORTS ReleaseToolVerify
= BEGIN OPEN Int: DFInternal, Ops: DFOperations, Utils: DFUtilities;
bytesPerPage: NAT = PrincOps.bytesPerPage;
bytesPerWord: NAT = Basics.bytesPerWord;
pagesPerBCD: NAT = (SIZE[BCD]*bytesPerWord+bytesPerPage-1)/bytesPerPage;
BCD: TYPE = BcdDefs.BCD;
Date: TYPE = Utils.Date;
MapList: TYPE = VersionMap.MapList;
ROPE: TYPE = Rope.ROPE;
STREAM: TYPE = IO.STREAM;
FileDesc: TYPE = RECORD [
shortName: ROPE ← NIL,
path: ROPE ← NIL,
importedFrom: REF FileDesc ← NIL, -- highest level Imports with Using containing 'shortName'
parent: ROPE ← NIL, -- DF file containing defining occurrence of 'shortName'
date: Date ← [],
version: BcdDefs.VersionStamp ← BcdDefs.NullVersion,
bcd: BOOL ← FALSE,
needed: Needed ← $no
];
Needed: TYPE = {no, yes, wrongVersion};
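-- 'needed' records whether a file in the DF input has been reached from a root: $no until it is referenced, $yes once a reference with a matching version has been seen, $wrongVersion if a reference required a different version.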
htSize: NAT = 97;
HashIndex: TYPE = [0..htSize);
HashTable: TYPE = REF HashTableArray;
HashTableArray: TYPE = ARRAY HashIndex OF LIST OF REF FileDesc;
allFilter: Utils.Filter = [];
cacheSize: NAT = 10;
maxTries: NAT = 8;
Omission: TYPE = RECORD [
missing: ROPE,
neededBy: ROPE
];
Verify: PUBLIC PROC [dfFile: ROPE, bcdCache: BcdCache, sourceMaps,symbolsMaps: MapList ← NIL, interact: Ops.InteractionProc ← NIL, clientData: REF ANY ← NIL, log: STREAM ← NIL] RETURNS [errors, warnings, filesActedUpon: INT ← 0] = {
client: Int.Client = NEW[Int.ClientDescriptor ← [interact, clientData, log]];
rootList: LIST OF REF FileDesc ← NIL;
hashTable: HashTable = NEW[HashTableArray ← ALL[NIL]];
omissions: LIST OF --REF Omission-- REF ANY ← NIL;
Hash: PROC [name: ROPE] RETURNS [HashIndex] = {
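-- Case-insensitive hash: sums the lowercased characters of 'name' (seeded with its length), reduced mod htSize.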
x: NAT ← name.Length[];
FOR i: INT IN [0..x) DO x ← x + (Ascii.Lower[name.Fetch[i]] - 0C); ENDLOOP;
RETURN[x MOD htSize]
};
EnterInHashTable: PROC [desc: REF FileDesc] RETURNS [BOOL ← TRUE] = {
hi: HashIndex = Hash[desc.shortName];
FOR l: LIST OF REF FileDesc ← hashTable[hi], l.rest UNTIL l = NIL DO
IF desc.shortName.Equal[l.first.shortName, FALSE] THEN {
IF desc.date ~= l.first.date THEN {
-- duplicate short file name with different file creation dates
warnings ← warnings.SUCC;
WarnClient[
client,
IO.PutFR["'%g' appears more than once (via '%g' and '%g').",
IO.rope[desc.shortName],
IO.rope[
IF l.first.importedFrom = NIL THEN l.first.parent
ELSE FullName[l.first.importedFrom]
],
IO.rope[
IF desc.importedFrom = NIL THEN desc.parent
ELSE FullName[desc.importedFrom]
]
]
];
};
RETURN[FALSE]
};
ENDLOOP;
hashTable[hi] ← CONS[desc, hashTable[hi]];
filesActedUpon ← filesActedUpon.SUCC;
};
LookupInHashTable: PROC [shortName: ROPE] RETURNS [desc: REF FileDesc ← NIL] = {
FOR l: LIST OF REF FileDesc ← hashTable[Hash[shortName]], l.rest UNTIL l = NIL DO
IF shortName.Equal[l.first.shortName, FALSE] THEN RETURN[l.first];
ENDLOOP;
};
EnumerateHashTable: PROC [proc: PROC [REF FileDesc]] = {
FOR hi: HashIndex IN HashIndex DO
FOR l: LIST OF REF FileDesc ← hashTable[hi], l.rest UNTIL l = NIL DO
proc[l.first];
ENDLOOP;
ENDLOOP;
};
WorthRemembering: PROC [desc: REF FileDesc] RETURNS [BOOL ← FALSE] = {
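-- Only .bcd, .mesa, and .config files are tracked; 'bcd' is set for .bcd files so that their dependencies get chased.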
ext: ROPE = desc.shortName.Substr[start: desc.shortName.Index[s2: "."]];
exts: ARRAY [0..3) OF ROPE = [".bcd", ".mesa", ".config"];
FOR i: NAT IN [0..exts.LENGTH) DO
IF ext.Equal[exts[i], FALSE] THEN {desc.bcd ← i = 0; RETURN[TRUE]};
ENDLOOP;
};
Extant: PROC [desc: REF FileDesc] RETURNS [BOOL] = INLINE {
RETURN[desc.date.gmt ~= BasicTime.nullGMT]
};
importedFrom: REF FileDesc ← NIL;
VerifyInner: PROC [dfFile: ROPE, date: Date, filter: Utils.Filter] RETURNS [finished: BOOL ← FALSE] = {
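-- Parses the DF file 'dfFile' (expected at 'date'), entering each file item that passes 'filter' into the hash table, collecting verify roots, and recursing on Imports and Includes. Returns FALSE if the DF file itself cannot be found or opened.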
directoryPath: ROPE ← NIL;
DoOneItem: Utils.ProcessItemProc = {
Int.CheckAbort[client];
WITH item SELECT FROM
directory: REF Utils.DirectoryItem => {
directoryPath ← directory.path1;
IF filter = allFilter AND NOT directory.path2IsCameFrom THEN
CheckReleaseAs[bcdCache, dfFile, directory.path2, client];
};
file: REF Utils.FileItem => {
desc: REF FileDesc = NEW[FileDesc ← [
shortName: Utils.RemoveVersionNumber[file.name],
path: directoryPath,
parent: dfFile,
importedFrom: importedFrom
]];
remoteInfo: REF Int.RemoteFileInfo = NEW[Int.RemoteFileInfo ← [
name: FullName[desc],
date: file.date
]];
Int.GetFileInfo[info: remoteInfo, client: client, errorLevel: $warning
! FS.Error => CONTINUE
];
desc.date ← remoteInfo.date;
IF (filter.list ~= NIL OR WorthRemembering[desc]) AND EnterInHashTable[desc] AND
file.verifyRoot AND importedFrom = NIL THEN
rootList ← CONS[desc, rootList];
};
imports: REF Utils.ImportsItem =>
IF imports.form = $list OR filter.list # NIL THEN {
outerMostImports: BOOL = (filter.list = NIL);
newFilter: Utils.Filter = [
comments: filter.comments, -- comments processing is unaffected by imports
filterA: filter.filterA, -- source/derived distinction is unaffected by imports
filterB: IF imports.form = $exports THEN $public ELSE filter.filterB,
filterC: $all, -- if the top level passes imports, they can come from anywhere
list: IF imports.form = $list THEN imports.list ELSE filter.list
];
IF outerMostImports THEN
importedFrom ← NEW[FileDesc ← [
path: imports.path1, -- hack: shortName is NIL, but only CheckIfNeeded cares.
date: imports.date,
parent: dfFile
]];
IF VerifyInner[imports.path1, imports.date, newFilter] AND outerMostImports
THEN {
FOR i: NAT IN [0..imports.list.nEntries) DO
desc: REF FileDesc = LookupInHashTable[imports.list.u[i].name];
SELECT TRUE FROM
desc = NIL => {
warnings ← warnings.SUCC;
WarnClient[
client,
"'", imports.list.u[i].name, "' could not be found inside '",
imports.path1, "' (or any nested DF file)."];
};
imports.list.u[i].verifyRoot => rootList ← CONS[desc, rootList];
ENDCASE;
ENDLOOP;
importedFrom ← NIL;
};
};
include: REF Utils.IncludeItem => {
IF filter = allFilter AND include.path2 # NIL AND NOT include.path2IsCameFrom
THEN CheckReleaseAs[bcdCache, dfFile, include.path2, client];
[] ← VerifyInner[include.path1, include.date, filter];
};
ENDCASE;
};
dfInfo: REF Int.RemoteFileInfo = NEW[Int.RemoteFileInfo ← [name: dfFile, date: date]];
dfStream: STREAM;
Int.GetFileInfo[info: dfInfo, client: client ! FS.Error => {errors ← errors.SUCC; GO TO skip}];
IF dfInfo.date.gmt # BasicTime.nullGMT AND sourceMaps # NIL THEN {
-- We have a valid date, so check whether this DF file appears in the source version maps for the release; warn if it does not.
stamp: BcdDefs.VersionStamp = [0, 0, BasicTime.ToPupTime[dfInfo.date.gmt]];
foundList: VersionMap.MapAndNameList =
VersionMap.VersionToAllNames[sourceMaps, stamp];
FOR each: VersionMap.MapAndNameList ← foundList, each.rest WHILE each # NIL DO
eachName: ROPE = each.first.name;
pos: INT = Rope.Run[eachName, 0, dfInfo.name, 0, FALSE];
IF pos = Rope.Length[eachName] OR Rope.Fetch[eachName, pos] = '! THEN GO TO found;
ENDLOOP;
WarnClient[client, "'", dfFile, "' not in the source version map list."];
EXITS found => {};
};
dfStream ← FS.StreamOpen[fileName: dfInfo.name
! FS.Error => {errors ← errors.SUCC; Int.ReportFSError[error, dfInfo, client]; GO TO skip}
];
dfInfo.name ← Utils.RemoveVersionNumber[dfInfo.name];
Int.SimpleInteraction[
client,
NEW[Ops.DFInfoInteraction ← [action: $start, dfFile: dfInfo.name]]
];
Utils.ParseFromStream[dfStream, DoOneItem, filter !
Utils.SyntaxError -- [reason: ROPE]-- => {
errors ← errors.SUCC;
ErrorClient[client,
IO.PutFR[
"Syntax error in '%g'[%d]: %g\NProcessing of this DF file aborted.",
IO.rope[dfInfo.name], IO.card[dfStream.GetIndex[]], IO.rope[reason]
]];
CONTINUE
};
Int.AbortDF => dfStream.Close[];
];
dfStream.Close[];
Int.SimpleInteraction[
client,
NEW[Ops.DFInfoInteraction ← [action: $end, dfFile: dfInfo.name]]
];
RETURN[TRUE];
EXITS
skip => NULL;
};
VerifyDependencies: PROC = {
VerifyDependenciesInner: PROC [parent: REF FileDesc] = {
-- This procedure checks for consistency and completeness of the DF input with respect to the file identified by its parameter. There are essentially two cases.
-- (1) If the parameter file is not a BCD, or if it is a BCD that was imported, the file is a leaf of the dependency tree. It suffices, therefore, to check that the file exists, which was already done in the course of building the hash table; VerifyDependenciesInner thus does nothing for non-BCDs.
-- (2) If the parameter is a BCD, the file table is enumerated and, for each file, the DF input is checked to see that (a) there exists a file with the same short name, (b) the file is extant, and (c) the BCD version stamp matches the stamp in the file table entry. If (a) fails, a necessary file has been omitted from the input and its (short) name is placed on a list for later printing. If (b) fails, the file appeared in the DF input but doesn't exist on the server; this error was reported during hash table construction, so no further action is required here. If (c) fails, the file specified in the DF input exists but is the wrong version, and an error is reported (unless it was already reported). The source file for 'parent' is also checked to be sure it exists.
parentBcd: BcdDefs.BcdBase ← NIL;
parentFileTable: BcdDefs.Base;
parentSource: REF FileDesc ← NIL;
parentFileName: ROPE ← NIL;
RopeForNameRecord: PROC [bcd: BcdDefs.BcdBase, name: BcdDefs.NameRecord] RETURNS [r: ROPE] = TRUSTED {
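-- Extracts the string for 'name' from the BCD's string table, dropping a trailing '.' if present.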
ssb: BcdDefs.NameString = LOOPHOLE[bcd + bcd.ssOffset];
len: NAT;
i: INT ← name;
GetFromNameString: SAFE PROC RETURNS [char: CHAR] = TRUSTED {
char ← ssb.string[i]; i ← i + 1};
r ← Rope.FromProc[ssb.size[name], GetFromNameString];
len ← r.Length[];
IF len > 0 AND r.Fetch[len-1] = '. THEN r ← r.Substr[len: len-1];
};
CheckDependentFile: PROC [fth: BcdDefs.FTHandle, recur: BOOL ← TRUE] = TRUSTED {
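-- Looks up the file named by 'fth' in the hash table, reading its BCD if necessary to obtain a version stamp; records an omission, marks the file needed, or reports a version mismatch. When 'recur' is TRUE, newly needed BCDs are verified in turn.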
file: ROPE ← RopeForNameRecord[parentBcd, fth.name];
child: REF FileDesc;
Int.CheckAbort[client];
IF file.Find["."] < 0 THEN file ← file.Concat[".bcd"];
IF (child ← LookupInHashTable[file]) = NIL THEN
omissions ← CONS[
NEW[Omission ← [missing: file, neededBy: parentFileName]],
omissions]
ELSE IF Extant[child] THEN {
IF child.needed = $wrongVersion THEN RETURN;
IF child.version = BcdDefs.NullVersion THEN {
childBcd: BcdDefs.BcdBase;
childBcd ← GetBcd[client, bcdCache, child
! FS.Error =>
IF Int.RetryFSOperation[error, client] THEN RETRY
ELSE {
errors ← errors.SUCC;
Int.ReportFSError[
error,
NEW[Int.RemoteFileInfo ← [name: parentFileName, date: parent.date]],
client
];
GO TO proceedWithoutBCD
}
];
IF childBcd = NIL THEN GO TO proceedWithoutBCD;
IF childBcd.versionIdent = BcdDefs.VersionID THEN
child.version ← childBcd.version;
ReleaseBcd[bcdCache, childBcd];
};
SELECT child.needed FROM
$no => {
IF fth.version = child.version THEN {
child.needed ← $yes;
IF child.importedFrom ~= NIL AND child.importedFrom.needed = $no THEN
child.importedFrom.needed ← $yes;
IF recur THEN VerifyDependenciesInner[child];
RETURN
};
};
$yes => IF fth.version = child.version THEN RETURN;
ENDCASE;
child.needed ← $wrongVersion;
errors ← errors.SUCC;
ErrorClient[client, IO.PutFR[
"'%g' {%g} depends on '%g'; '%g' {%g} is the wrong version.",
IO.rope[parentFileName],
IO.rope[Utils.DateToRope[parent.date]],
IO.rope[file],
IO.rope[FullName[child]],
IO.rope[Utils.DateToRope[child.date]]
]];
EXITS
proceedWithoutBCD => NULL;
};
};
parentSourceName: BcdDefs.NameRecord ← BcdDefs.NullName;
parentSourceVersion: BcdDefs.VersionStamp;
IF ~parent.bcd OR parent.importedFrom ~= NIL THEN RETURN;
parentFileName ← FullName[parent];
parentBcd ← GetBcd[client, bcdCache, parent
! FS.Error =>
IF Int.RetryFSOperation[error, client] THEN RETRY
ELSE {
errors ← errors.SUCC;
Int.ReportFSError[
error,
NEW[Int.RemoteFileInfo ← [name: parentFileName, date: parent.date]],
client
];
GO TO skipThisBCD
}
];
IF parentBcd = NIL THEN GO TO skipThisBCD;
TRUSTED{
parentSourceName ← parentBcd.source;
parentSourceVersion ← parentBcd.sourceVersion;
parentFileTable ← LOOPHOLE[parentBcd + parentBcd.ftOffset];
};
IF parentSourceName = BcdDefs.NullName THEN {
warnings ← warnings.SUCC;
WarnClient[client, IO.PutFR[
"'%g' {%g} does not specify a source file.",
IO.rope[parentFileName],
IO.rope[Utils.DateToRope[parent.date]]
]];
}
ELSE {
sourceFileName: ROPE = RopeForNameRecord[parentBcd, parentSourceName];
IF (parentSource ← LookupInHashTable[sourceFileName]) = NIL THEN
omissions ← CONS[
NEW[Omission ← [
missing: sourceFileName,
neededBy: parentFileName
]],
omissions]
ELSE IF Extant[parentSource] AND parentSource.needed ~= $wrongVersion THEN {
sourceDate: Date;
gmt: BasicTime.GMT;
gmt ← BasicTime.FromPupTime[LOOPHOLE[parentSourceVersion.time]
! RuntimeError.UNCAUGHT => {
ErrorClient[client, "'", parentFileName, "' has a totally bogus date."];
}];
sourceDate ←
[$explicit, BasicTime.FromPupTime[LOOPHOLE[parentSourceVersion.time]]];
IF sourceDate = parentSource.date THEN parentSource.needed ← $yes
ELSE {
parentSource.needed ← $wrongVersion;
errors ← errors.SUCC;
ErrorClient[client,
IO.PutFR[
"'%g' {%g} expects source of {%g}, but DF file specifies '%g' {%g}.",
IO.rope[parentFileName],
IO.rope[Utils.DateToRope[parent.date]],
IO.rope[Utils.DateToRope[sourceDate]],
IO.rope[FullName[parentSource]],
IO.rope[Utils.DateToRope[parentSource.date]]
]];
};
};
};
TRUSTED {
IF parentBcd.nConfigs > 0 THEN {
remoteSubConfig: BOOL ← FALSE;
ctb: BcdDefs.Base = LOOPHOLE[parentBcd + parentBcd.ctOffset];
sgb: BcdDefs.Base = LOOPHOLE[parentBcd + parentBcd.sgOffset];
DoOneModule: PROC [mth: BcdDefs.MTHandle, mti: BcdDefs.MTIndex]
RETURNS [BOOL ← FALSE] = TRUSTED {
cti: BcdDefs.CTIndex ← mth.config;
UNTIL cti = BcdDefs.CTNull DO
cth: BcdDefs.CTHandle = @ctb[cti];
IF cth.file ~= BcdDefs.FTSelf THEN {remoteSubConfig ← TRUE; RETURN};
cti ← cth.config;
ENDLOOP;
-- This module belongs to the configuration directly defined by parentBcd's source.
IF mth.file ~= BcdDefs.FTSelf AND mth.file ~= BcdDefs.FTNull THEN
CheckDependentFile[@parentFileTable[mth.file]];
};
DoOneConfig: PROC [cth: BcdDefs.CTHandle, cti: BcdDefs.CTIndex]
RETURNS [BOOL ← FALSE] = TRUSTED {
IF cth.file ~= BcdDefs.FTSelf THEN {
-- This config is a remote one. If all of its enclosing configs are local, we treat it as a dependency.
outerCti: BcdDefs.CTIndex ← cth.config;
UNTIL outerCti = BcdDefs.CTNull DO
outerCth: BcdDefs.CTHandle = @ctb[outerCti];
IF outerCth.file ~= BcdDefs.FTSelf THEN RETURN;
outerCti ← outerCth.config;
ENDLOOP;
CheckDependentFile[@parentFileTable[cth.file]];
};
};
[] ← BcdOps.ProcessModules[parentBcd, DoOneModule];
IF remoteSubConfig THEN [] ← BcdOps.ProcessConfigs[parentBcd, DoOneConfig];
}
ELSE {
DoOneFile: PROC [fth: BcdDefs.FTHandle, fti: BcdDefs.FTIndex]
RETURNS [BOOL ← FALSE] = TRUSTED {
CheckDependentFile[fth];
};
[] ← BcdOps.ProcessFiles[parentBcd, DoOneFile];
};
};
ReleaseBcd[bcdCache, parentBcd];
EXITS
skipThisBCD => NULL;
};
IF rootList = NIL THEN {
warnings ← warnings.SUCC;
WarnClient[client, IO.PutFR["No files in '%g' are marked with '+'.", IO.rope[dfFile]]];
}
ELSE
FOR l: LIST OF REF FileDesc ← rootList, l.rest UNTIL l = NIL DO
rootDesc: REF FileDesc = l.first;
IF rootDesc.needed = $no THEN {
rootDesc.needed ← $yes;
IF rootDesc.importedFrom ~= NIL AND rootDesc.importedFrom.needed = $no THEN
rootDesc.importedFrom.needed ← $yes;
IF Extant[rootDesc] THEN
VerifyDependenciesInner[rootDesc];
};
ENDLOOP;
};
ReportOmissions: PROC = {
Compare: List.CompareProc = {
RETURN[Rope.Compare[
NARROW[ref1, REF Omission].missing,
NARROW[ref2, REF Omission].missing]
]
};
nFiles: NAT ← 0;
msg: ROPE ← "The following should appear in the DF input:";
IF omissions = NIL THEN RETURN;
FOR omissions ← List.UniqueSort[omissions, Compare], omissions.rest UNTIL omissions = NIL DO
omission: REF Omission = NARROW[omissions.first];
msg ← msg.Concat[
IO.PutFR["\n '%g', needed by '%g'",
IO.rope[omission.missing],
IO.rope[omission.neededBy]
]
];
nFiles ← nFiles.SUCC;
ENDLOOP;
errors ← errors + nFiles;
ErrorClient[client, msg];
};
CheckIfNeeded: PROC [desc: REF FileDesc] = {
IF desc.needed ~= $no OR ~WorthRemembering[desc] THEN RETURN;
IF rootList = NIL THEN RETURN;
-- If there are no root files, then this is probably an umbrella DF, so we don't worry about any superfluous entries.
IF desc.importedFrom ~= NIL THEN
IF desc.importedFrom.needed = $no THEN
-- The entire imports item is superfluous. We print the warning here, then flag the 'importedFrom' descriptor as 'needed' to prevent repeated messages, either for the imports item or anything in its Using list.
(desc ← desc.importedFrom).needed ← $yes
ELSE RETURN;
warnings ← warnings.SUCC;
WarnClient[client,
IO.PutFR[
"'%g' (%g) is superfluous.",
IO.rope[IF desc.shortName = NIL THEN desc.path ELSE desc.shortName],
IO.rope[
IF desc.importedFrom = NIL
THEN IO.PutFR["in '%g'", IO.rope[desc.parent]]
ELSE IO.PutFR["via '%g'", IO.rope[FullName[desc.importedFrom]]]
]]];
};
IF interact = NIL THEN interact ← Int.DefaultInteractionProc;
IF Int.LocalFile[dfFile] THEN {
dfInfo: REF Int.LocalFileInfo = NEW[Int.LocalFileInfo ← [name: dfFile]];
Int.GetFileInfo[info: dfInfo, client: client, errorLevel: $abort];
IF (dfFile ← dfInfo.attachedTo) = NIL THEN
ErrorClient[client, "'", dfInfo.name, "' isn't a remote file and therefore can't be verified."];
};
BEGIN
ENABLE
BEGIN
UNWIND =>
IFSCleanup[bcdCache];
ABORTED => {
Int.SimpleInteraction[client, NEW[Ops.AbortInteraction ← [TRUE]]];
-- We would like to RESUME at this point, but until ABORTED is redeclared as a SIGNAL, it won't work.
};
Int.AbortDF => {
errors ← errors.SUCC;
Int.SimpleInteraction[client, NEW[Ops.DFInfoInteraction ← [action: $abort, dfFile: dfFile]]];
CONTINUE
};
END;
-- The following is a terrible cheat, since the date is technically illegal. However, the (desired!) effect is to treat a version number on 'dfFile' as truth (i.e., it won't be stripped off by Int.GetFileInfo).
IF VerifyInner[dfFile, [format: $explicit], allFilter] THEN {
-- The hash table has now been constructed. Process the roots list.
VerifyDependencies[];
-- Report any omitted names.
ReportOmissions[];
-- All inconsistencies have been reported. Pick up extra entries and report them.
EnumerateHashTable[CheckIfNeeded];
};
END;
};
-- Internal procedures
BcdCache: TYPE = REF BcdCacheObject;
BcdCacheObject: PUBLIC TYPE = RECORD [
releaseDest: ROPE ← NIL,
ifsInstance: IFSFile.FSInstance ← NIL,
currentServer: ROPE ← NIL,
ifsInitialized: BOOL ← FALSE,
locked: CachedBcdList ← NIL, -- linear list
available: CachedBcdList ← NIL, -- circularly chained
size: NAT ← 0,
replacementSize: NAT
];
CachedBcdList: TYPE = LIST OF CachedBcd;
CachedBcd: TYPE = REF CachedBcdEntry;
CachedBcdEntry: TYPE = RECORD [
buffer: VM.Interval ← VM.nullInterval,
desc: REF FileDesc ← NIL
];
initialVM: VM.PageCount = 10;
-- A bcd cache is managed as two singly-linked lists of cache entries. The 'available' list consists of cached bcd's that are eligible for replacement. The 'locked' list consists of cached bcd's that are actually being touched; they cannot be replaced until they are moved to the 'available' list. The maximum size of the two lists is (almost) bounded by a creation-time parameter; however, if the 'available' list is empty when a new entry is needed, the entry will be created regardless of the size bound.
-- Cache replacement occurs in the 'available' list in LRU order. New cache entries appear at the tail of the list; victims are taken from the head. BcdCache.available points at the logical tail of the list, that is, the MRU entry, and BcdCache.available.rest points at the logical head, the LRU entry.
CreateBcdCache: PUBLIC PROC [replacementSize: NAT] RETURNS [bcdCache: BcdCache] = {
RETURN[NEW[BcdCacheObject ← [replacementSize: replacementSize]]]
};
IFSSetup: PROC [cache: BcdCache, server: ROPE] RETURNS [problem: ROPE ← NIL] = TRUSTED {
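-- Ensures the IFS (Leaf) machinery is initialized and that we are logged in to 'server' with the current user's credentials; returns a non-NIL 'problem' rope if the login fails.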
userName,password: ROPE;
IF NOT cache.ifsInitialized THEN {IFSFile.Initialize[]; cache.ifsInitialized ← TRUE};
IF cache.ifsInstance # NIL AND Rope.Equal[server, cache.currentServer, FALSE] THEN RETURN;
[userName,password] ← UserCredentials.Get[];
cache.ifsInstance ← IFSFile.Login[server, userName, password
! IFSFile.UnableToLogin => {
why: ROPE ← "Remote access failed for %g";
SELECT reason FROM
credentials => why ← "Invalid credentials for %g";
io => why ← "Server not found: %g";
ENDCASE;
problem ← IO.PutFR[why, IO.rope[server]];
GO TO notGood;
}];
cache.currentServer ← server;
EXITS notGood => {};
};
IFSCleanup: PROC [cache: BcdCache] = TRUSTED {
IF cache.ifsInstance # NIL THEN {IFSFile.Logout[cache.ifsInstance]; cache.ifsInstance ← NIL};
IF cache.ifsInitialized THEN {IFSFile.Finalize[]; cache.ifsInitialized ← FALSE};
};
WarnClient: PROC [client: Int.Client, r1,r2,r3,r4,r5: ROPE ← NIL] = {
Int.SimpleInteraction[
client,
NEW[Ops.InfoInteraction ← [
class: $warning,
message: Rope.Cat[r1,r2,r3,r4,r5]
]]
];
};
ErrorClient: PROC [client: Int.Client, r1,r2,r3,r4,r5: ROPE ← NIL] = {
Int.SimpleInteraction[
client,
NEW[Ops.InfoInteraction ← [
class: $error,
message: Rope.Cat[r1,r2,r3,r4,r5]
]]
];
};
IsInFileCache: PUBLIC PROC [name: ROPE, date: Date] RETURNS [inCache: BOOL ← FALSE] = {
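-- TRUE iff the local file cache holds a non-empty version of 'name' whose create time matches 'date' (any non-empty version will do when date.gmt is nullGMT).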
cacheChecker: FSBackdoor.InfoProc = {
-- [fullGName: ROPE, created: BasicTime.GMT, bytes: INT, keep: CARDINAL] RETURNS [continue: BOOL]
IF bytes > 0 THEN {
IF date.gmt # BasicTime.nullGMT THEN {
-- We will only accept a specific date.
IF created # date.gmt THEN RETURN [TRUE];
};
-- At this point we will either accept anything, or we have a match on the time.
inCache ← TRUE;
RETURN [FALSE];
};
RETURN [TRUE];
};
FSBackdoor.EnumerateCacheForInfo[cacheChecker, NIL, name];
};
GetBcd: PROC [client: Int.Client, bcdCache: BcdCache, desc: REF FileDesc] RETURNS [bcd: BcdDefs.BcdBase ← NIL] = {
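-- Returns a pointer to the BCD for 'desc', reusing a cached copy when possible and otherwise reading the file into a VM buffer. The chosen cache entry is moved to the 'locked' list and must be released via ReleaseBcd.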
prev: CachedBcdList ← bcdCache.available;
new: CachedBcd ← NIL;
list: CachedBcdList ← NIL;
date: Date = desc.date;
NewEntry: PROC RETURNS [CachedBcdList] = {
bcdCache.size ← bcdCache.size.SUCC;
RETURN[CONS[NEW[CachedBcdEntry ← []], NIL]]
};
SELECT TRUE FROM
prev = NIL =>
-- 'available' list is empty. Create a new cache entry regardless of present cache size.
list ← NewEntry[];
prev = prev.rest => {
-- 'available' list has precisely one entry, which may or may not be the file of interest.
list ← bcdCache.available;
bcdCache.available ← NIL;
IF list.first.desc ~= desc THEN list.first.desc ← NIL;
};
ENDCASE => {
-- 'available' list has at least two entries.
list ← prev.rest;
DO
-- assert: list = prev.rest
IF list.first.desc = desc THEN GO TO dequeue; -- 'list.first' is a cache hit
prev ← list;
IF (list ← list.rest) = bcdCache.available.rest THEN {
-- cache miss.
IF bcdCache.size < bcdCache.replacementSize THEN {list ← NewEntry[]; EXIT}
ELSE {list.first.desc ← NIL; GO TO dequeue};
};
REPEAT
dequeue => {
prev.rest ← list.rest;
IF bcdCache.available = list THEN bcdCache.available ← list.rest;
};
ENDLOOP;
};
-- 'list' is a single-element list (although list.rest may be garbage) containing the CachedBcd to be (re)used. We link it on the 'locked' list.
list.rest ← bcdCache.locked;
bcdCache.locked ← list;
-- We now have a cache entry, which either is empty or has the desired file.
IF (new ← list.first).desc = NIL THEN {
ENABLE UNWIND => {
bcdCache.locked ← bcdCache.locked.rest;
bcdCache.size ← bcdCache.size.PRED;
};
name: ROPE = FullName[desc];
nPages: INT;
MaybeGrowBuffer: PROC = TRUSTED {
nPages ← bcd.nPages;
IF nPages > new.buffer.count THEN {
-- buffer too small; grow it.
VM.Free[new.buffer];
new.buffer ← VM.Allocate[nPages];
bcd ← LOOPHOLE[VM.AddressForPageNumber[new.buffer.page]];
};
};
inCache: BOOL ← FALSE;
ReadFile: PROC = TRUSTED {
-- The principle behind ReadFile is to use the local file cache when the file is already on the local disk, and to use Leaf access to an IFS when the file is not on the local disk. Our use of the file will be quite transient, so there is no reason to have it cluttering the local disk.
inCache ← IsInFileCache[name, date];
IF new.buffer.count = 0 THEN new.buffer ← VM.Allocate[initialVM];
bcd ← VM.AddressForPageNumber[new.buffer.page];
IF NOT inCache THEN {
-- Use Leaf access to get this file into our VM.
ifsFile: IFSFile.FileHandle ← NIL;
serverPos: INT = Rope.SkipTo[name, 1, "]"];
server: ROPE = Rope.Flatten[name, 1, serverPos-1];
fileName: ROPE ← Rope.Flatten[name, serverPos+1];
msg: ROPE ← IFSSetup[bcdCache, server];
completed: CONDITION;
problem: IFSFile.Problem ← ok;
OnCompletion: ENTRY IFSFile.Completer = TRUSTED {
problem ← outcome;
BROADCAST completed
};
WaitForCompleted: ENTRY PROC = TRUSTED {WAIT completed};
LeafRead: PROC [pages: INT] = TRUSTED {
FOR pause: NAT IN [1..maxTries] DO
Process.CheckForAbort[];
IFSFile.StartRead[
ifsFile, 0, pages*bytesPerPage, LOOPHOLE[bcd], OnCompletion, NIL];
WaitForCompleted[];
IF problem = ok THEN EXIT;
WarnClient[client, "File access glitch: ", name];
Process.Pause[Process.SecondsToTicks[1+pause/2]];
ENDLOOP;
};
IF bcdCache.ifsInstance = NIL THEN GO TO useFS;
-- This is the case if there is no Leaf server on the IFS.
IF msg # NIL THEN ErrorClient[client, msg];
FOR pause: NAT IN [1..maxTries] DO
Process.CheckForAbort[];
ifsFile ← IFSFile.Open[bcdCache.ifsInstance, fileName
! IFSFile.Error => {problem ← reason; CONTINUE}];
IF problem = ok THEN EXIT;
WarnClient[client, "File open glitch: ", name];
Process.Pause[Process.SecondsToTicks[1+pause/2]];
ENDLOOP;
-- Check the date for the file we just opened.
SELECT TRUE FROM
date.gmt = BasicTime.nullGMT => {};
date.gmt = BasicTime.FromPupTime[IFSFile.GetTimes[ifsFile].create] => {};
ENDCASE => {
-- Sigh, the date we wanted was not the one we got.
IFSFile.Close[ifsFile];
GO TO useFS;
};
IF problem = ok THEN {
LeafRead[pagesPerBCD];
IF problem = ok THEN {
MaybeGrowBuffer[];
LeafRead[nPages];
};
IFSFile.Close[ifsFile];
IF problem = ok THEN RETURN;
};
};
GO TO useFS;
EXITS useFS => {
-- It will be significantly faster to use the local cache for this file. We also use this route if the Leaf access failed for any reason.
tName: ROPE ← IF inCache THEN name ELSE FS.Copy[from: name, to: "///Temp/VerifyRelease.log$", setKeep: TRUE, keep: 4, wantedCreatedTime: date.gmt, remoteCheck: TRUE, attach: TRUE];
openFile: FS.OpenFile ← FS.Open[name: tName, wantedCreatedTime: date.gmt];
FS.Read[openFile, 0, pagesPerBCD, bcd];
MaybeGrowBuffer[];
FS.Read[openFile, 0, nPages, bcd];
FS.Close[openFile];
};
};
ReadFile[];
new.desc ← desc;
}
ELSE TRUSTED {bcd ← VM.AddressForPageNumber[new.buffer.page]};
};
CheckReleaseAs: PROC [bcdCache: BcdCache, dfFile,path: ROPE, client: Int.Client] = {
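-- Remembers the first release destination seen (the prefix of 'path' up through '>') and warns about ReleaseAs paths that are malformed or that name a different destination.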
pLen: INT = Rope.Length[path];
rLen: INT = Rope.Length[bcdCache.releaseDest];
aPos: INT = Rope.SkipTo[path, 0, ">"];
SELECT TRUE FROM
pLen = 0 => {};
aPos = pLen =>
WarnClient[client, "Bad release path: '", path, "'\n in: ", dfFile];
rLen = 0 =>
bcdCache.releaseDest ← Rope.Flatten[path, 0, aPos+1];
Rope.Run[bcdCache.releaseDest, 0, path, 0, FALSE] < rLen =>
WarnClient[client, "Unusual release path: '", path, "'\n in: ", dfFile];
ENDCASE;
};
ReleaseBcd: PROC [bcdCache: BcdCache, bcd: BcdDefs.BcdBase] = {
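-- Moves the cache entry whose buffer holds 'bcd' from the 'locked' list to the MRU position of the circular 'available' list; it is an error if 'bcd' is not currently locked.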
list: CachedBcdList ← bcdCache.locked;
prev: CachedBcdList ← NIL;
UNTIL list = NIL DO
TRUSTED {IF VM.AddressForPageNumber[list.first.buffer.page] = bcd THEN EXIT};
prev ← list;
list ← list.rest;
REPEAT
FINISHED => ERROR;
ENDLOOP;
-- dequeue from 'locked' list.
IF prev = NIL THEN bcdCache.locked ← list.rest ELSE prev.rest ← list.rest;
-- enqueue on 'available' list.
IF bcdCache.available = NIL THEN list.rest ← list
ELSE {list.rest ← bcdCache.available.rest; bcdCache.available.rest ← list};
bcdCache.available ← list;
};
FlushBcdCache: PUBLIC PROC [bcdCache: BcdCache] = {
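-- Frees the VM buffers of every cached BCD, on both the 'locked' and 'available' lists, and shuts down the IFS connection.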
list: CachedBcdList;
-- flush 'locked' list.
FOR list ← bcdCache.locked, list.rest UNTIL list = NIL DO
TRUSTED {VM.Free[list.first.buffer]};
ENDLOOP;
bcdCache.locked ← NIL;
IF bcdCache.available = NIL THEN RETURN;
list ← bcdCache.available.rest; -- head of 'available' list
bcdCache.available.rest ← NIL; -- break circular chain
bcdCache.available ← NIL;
UNTIL list = NIL DO
TRUSTED {VM.Free[list.first.buffer]};
list ← list.rest;
ENDLOOP;
IFSCleanup[bcdCache];
};
FullName: PROC [desc: REF FileDesc] RETURNS [ROPE] = INLINE {
RETURN[desc.path.Concat[desc.shortName]]
};
END.