<> <> <> <> DIRECTORY Ascii USING [Lower], Basics USING [bytesPerWord], BasicTime USING [FromPupTime, GMT, nullGMT, ToPupTime], BcdDefs USING [Base, BCD, BcdBase, CTHandle, CTIndex, CTNull, FTHandle, FTIndex, FTNull, FTSelf, MTHandle, MTIndex, NameRecord, NameString, NullName, NullVersion, VersionID, VersionStamp], BcdOps USING [ProcessConfigs, ProcessFiles, ProcessModules], DFInternal USING [AbortDF, CheckAbort, Client, ClientDescriptor, DefaultInteractionProc, GetFileInfo, LocalFile, LocalFileInfo, RemoteFileInfo, ReportFSError, RetryFSOperation, SimpleInteraction], DFOperations USING [AbortInteraction, DFInfoInteraction, InfoInteraction, InteractionProc], DFUtilities USING [Date, DateToRope, DirectoryItem, FileItem, Filter, ImportsItem, IncludeItem, ParseFromStream, ProcessItemProc, RemoveVersionNumber, SyntaxError], IFSFile USING [Close, Completer, Error, GetTimes, FileHandle, Finalize, FSInstance, Initialize, Login, Logout, Open, Problem, StartRead, UnableToLogin], FS, FSBackdoor USING [EnumerateCacheForInfo, InfoProc], List USING [CompareProc, UniqueSort], IO USING [card, Close, GetIndex, PutFR, rope, STREAM], PrincOps USING [bytesPerPage, PageCount], Process USING [CheckForAbort, Pause, SecondsToTicks], ReleaseToolVerify USING [], Rope USING [Cat, Compare, Concat, Equal, Fetch, Find, Flatten, FromProc, Index, Length, ROPE, Run, SkipTo, Substr], RuntimeError USING [UNCAUGHT], UserCredentials USING [Get], VersionMap USING [MapAndNameList, MapList, VersionToAllNames], VM USING [AddressForPageNumber, Allocate, Free, Interval, nullInterval, PageCount]; ReleaseToolVerifyImpl: CEDAR MONITOR IMPORTS Ascii, BasicTime, BcdOps, DFInternal, DFUtilities, IFSFile, FS, FSBackdoor, List, IO, Process, Rope, RuntimeError, UserCredentials, VersionMap, VM EXPORTS ReleaseToolVerify = BEGIN OPEN Int: DFInternal, Ops: DFOperations, Utils: DFUtilities; bytesPerPage: NAT = PrincOps.bytesPerPage; bytesPerWord: NAT = Basics.bytesPerWord; pagesPerBCD: NAT = (SIZE[BCD]*bytesPerWord+bytesPerPage-1)/bytesPerPage; BCD: TYPE = BcdDefs.BCD; Date: TYPE = Utils.Date; MapList: TYPE = VersionMap.MapList; ROPE: TYPE = Rope.ROPE; STREAM: TYPE = IO.STREAM; FileDesc: TYPE = RECORD [ shortName: ROPE _ NIL, path: ROPE _ NIL, importedFrom: REF FileDesc _ NIL, <> parent: ROPE _ NIL, -- DF file containing defining occurrence of 'shortName' date: Date _ [], version: BcdDefs.VersionStamp _ BcdDefs.NullVersion, bcd: BOOL _ FALSE, needed: Needed _ $no ]; Needed: TYPE = {no, yes, wrongVersion}; htSize: NAT = 97; HashIndex: TYPE = [0..htSize); HashTable: TYPE = REF HashTableArray; HashTableArray: TYPE = ARRAY HashIndex OF LIST OF REF FileDesc; allFilter: Utils.Filter = []; cacheSize: NAT = 10; maxTries: NAT = 8; Omission: TYPE = RECORD [ missing: ROPE, neededBy: ROPE ]; Verify: PUBLIC PROC [dfFile: ROPE, bcdCache: BcdCache, sourceMaps,symbolsMaps: MapList _ NIL, interact: Ops.InteractionProc _ NIL, clientData: REF ANY _ NIL, log: STREAM _ NIL] RETURNS [errors, warnings, filesActedUpon: INT _ 0] = { client: Int.Client = NEW[Int.ClientDescriptor _ [interact, clientData, log]]; rootList: LIST OF REF FileDesc _ NIL; hashTable: HashTable = NEW[HashTableArray _ ALL[NIL]]; omissions: LIST OF --REF Omission--REF ANY _ NIL; Hash: PROC [name: ROPE] RETURNS [HashIndex] = { x: NAT _ name.Length[]; FOR i: INT IN [0..x) DO x _ x + (Ascii.Lower[name.Fetch[i]] - 0C); ENDLOOP; RETURN[x MOD htSize] }; EnterInHashTable: PROC [desc: REF FileDesc] RETURNS [BOOL _ TRUE] = { hi: HashIndex = Hash[desc.shortName]; FOR l: LIST OF REF FileDesc _ hashTable[hi], 
l.rest UNTIL l = NIL DO IF desc.shortName.Equal[l.first.shortName, FALSE] THEN { IF desc.date ~= l.first.date THEN { <> warnings _ warnings.SUCC; WarnClient[ client, IO.PutFR["'%g' appears more than once (via '%g' and '%g').", IO.rope[desc.shortName], IO.rope[ IF l.first.importedFrom = NIL THEN l.first.parent ELSE FullName[l.first.importedFrom] ], IO.rope[ IF desc.importedFrom = NIL THEN desc.parent ELSE FullName[desc.importedFrom] ] ] ]; }; RETURN[FALSE] }; ENDLOOP; hashTable[hi] _ CONS[desc, hashTable[hi]]; filesActedUpon _ filesActedUpon.SUCC; }; LookupInHashTable: PROC [shortName: ROPE] RETURNS [desc: REF FileDesc _ NIL] = { FOR l: LIST OF REF FileDesc _ hashTable[Hash[shortName]], l.rest UNTIL l = NIL DO IF shortName.Equal[l.first.shortName, FALSE] THEN RETURN[l.first]; ENDLOOP; }; EnumerateHashTable: PROC [proc: PROC [REF FileDesc]] = { FOR hi: HashIndex IN HashIndex DO FOR l: LIST OF REF FileDesc _ hashTable[hi], l.rest UNTIL l = NIL DO proc[l.first]; ENDLOOP; ENDLOOP; }; WorthRemembering: PROC [desc: REF FileDesc] RETURNS [BOOL _ FALSE] = { ext: ROPE = desc.shortName.Substr[start: desc.shortName.Index[s2: "."]]; exts: ARRAY [0..3) OF ROPE = [".bcd", ".mesa", ".config"]; FOR i: NAT IN [0..exts.LENGTH) DO IF ext.Equal[exts[i], FALSE] THEN {desc.bcd _ i = 0; RETURN[TRUE]}; ENDLOOP; }; Extant: PROC [desc: REF FileDesc] RETURNS [BOOL] = INLINE { RETURN[desc.date.gmt ~= BasicTime.nullGMT] }; importedFrom: REF FileDesc _ NIL; VerifyInner: PROC [dfFile: ROPE, date: Date, filter: Utils.Filter] RETURNS [finished: BOOL _ FALSE] = { directoryPath: ROPE _ NIL; DoOneItem: Utils.ProcessItemProc = { Int.CheckAbort[client]; WITH item SELECT FROM directory: REF Utils.DirectoryItem => { directoryPath _ directory.path1; IF filter = allFilter AND NOT directory.path2IsCameFrom THEN CheckReleaseAs[bcdCache, dfFile, directory.path2, client]; }; file: REF Utils.FileItem => { desc: REF FileDesc = NEW[FileDesc _ [ shortName: Utils.RemoveVersionNumber[file.name], path: directoryPath, parent: dfFile, importedFrom: importedFrom ]]; remoteInfo: REF Int.RemoteFileInfo = NEW[Int.RemoteFileInfo _ [ name: FullName[desc], date: file.date ]]; Int.GetFileInfo[info: remoteInfo, client: client, errorLevel: $warning ! FS.Error => CONTINUE ]; desc.date _ remoteInfo.date; IF (filter.list ~= NIL OR WorthRemembering[desc]) AND EnterInHashTable[desc] AND file.verifyRoot AND importedFrom = NIL THEN rootList _ CONS[desc, rootList]; }; imports: REF Utils.ImportsItem => IF imports.form = $list OR filter.list # NIL THEN { outerMostImports: BOOL = (filter.list = NIL); newFilter: Utils.Filter = [ comments: filter.comments, -- comments processing is unaffected by imports filterA: filter.filterA, -- source/derived distinction is unaffected by imports filterB: IF imports.form = $exports THEN $public ELSE filter.filterB, filterC: $all, -- if the top level passes imports, they can come from anywhere list: IF imports.form = $list THEN imports.list ELSE filter.list ]; IF outerMostImports THEN importedFrom _ NEW[FileDesc _ [ path: imports.path1, -- hack: shortName is NIL, but only CheckIfNeeded cares. 
date: imports.date, parent: dfFile ]]; IF VerifyInner[imports.path1, imports.date, newFilter] AND outerMostImports THEN { FOR i: NAT IN [0..imports.list.nEntries) DO desc: REF FileDesc = LookupInHashTable[imports.list.u[i].name]; SELECT TRUE FROM desc = NIL => { warnings _ warnings.SUCC; WarnClient[ client, "'", imports.list.u[i].name, "' could not be found inside '", imports.path1, "' (or any nested DF file)."]; }; imports.list.u[i].verifyRoot => rootList _ CONS[desc, rootList]; ENDCASE; ENDLOOP; importedFrom _ NIL; }; }; include: REF Utils.IncludeItem => { IF filter = allFilter AND include.path2 # NIL AND NOT include.path2IsCameFrom THEN CheckReleaseAs[bcdCache, dfFile, include.path2, client]; [] _ VerifyInner[include.path1, include.date, filter]; }; ENDCASE; }; dfInfo: REF Int.RemoteFileInfo = NEW[Int.RemoteFileInfo _ [name: dfFile, date: date]]; dfStream: STREAM; Int.GetFileInfo[info: dfInfo, client: client ! FS.Error => {errors _ errors.SUCC; GO TO skip}]; IF dfInfo.date.gmt # BasicTime.nullGMT AND sourceMaps # NIL THEN { <<Check that this DF file, at this create date, appears in the source version maps.>> stamp: BcdDefs.VersionStamp = [0, 0, BasicTime.ToPupTime[dfInfo.date.gmt]]; foundList: VersionMap.MapAndNameList = VersionMap.VersionToAllNames[sourceMaps, stamp]; FOR each: VersionMap.MapAndNameList _ foundList, each.rest WHILE each # NIL DO eachName: ROPE = each.first.name; pos: INT = Rope.Run[eachName, 0, dfInfo.name, 0, FALSE]; IF pos = Rope.Length[eachName] OR Rope.Fetch[eachName, pos] = '! THEN GO TO found; ENDLOOP; WarnClient[client, "'", dfFile, "' not in the source version map list."]; EXITS found => {}; }; dfStream _ FS.StreamOpen[fileName: dfInfo.name ! FS.Error => {errors _ errors.SUCC; Int.ReportFSError[error, dfInfo, client]; GO TO skip} ]; dfInfo.name _ Utils.RemoveVersionNumber[dfInfo.name]; Int.SimpleInteraction[ client, NEW[Ops.DFInfoInteraction _ [action: $start, dfFile: dfInfo.name]] ]; Utils.ParseFromStream[dfStream, DoOneItem, filter ! Utils.SyntaxError -- [reason: ROPE]-- => { errors _ errors.SUCC; ErrorClient[client, IO.PutFR[ "Syntax error in '%g'[%d]: %g\NProcessing of this DF file aborted.", IO.rope[dfInfo.name], IO.card[dfStream.GetIndex[]], IO.rope[reason] ]]; CONTINUE }; Int.AbortDF => dfStream.Close[]; ]; dfStream.Close[]; Int.SimpleInteraction[ client, NEW[Ops.DFInfoInteraction _ [action: $end, dfFile: dfInfo.name]] ]; RETURN[TRUE]; EXITS skip => NULL; };
VerifyDependencies: PROC = { VerifyDependenciesInner: PROC [parent: REF FileDesc] = {
<<Checks the files that 'parent' depends on, as recorded in its BCD. Two cases:>>
<<(1) If the parameter file is not a BCD or if it is a BCD that was imported, the file is a leaf of the dependency tree. It suffices to check that the file exists, which was already done in the course of building the hash table, so VerifyDependenciesInner does nothing further for non-BCDs.>>
<<(2) If the parameter is a BCD, the file table is enumerated and, for each file, the DF input is checked to see (a) that there exists a file with the same short name, (b) that the file is extant, and (c) that the BCD version stamp matches the stamp in the file table entry. If (a) fails, a necessary file has been omitted from the input and its (short) name is placed on a list for future printing. If (b) fails, the file appeared in the DF input but doesn't exist on the server. This error was reported during hash table construction, so no further action is required here. If (c) fails, the file specified in the DF input exists, but is the wrong version. An error is reported (unless it was already reported).
After the file table enumeration is complete, the source file for 'parent' is checked to be sure it exists.>> parentBcd: BcdDefs.BcdBase _ NIL; parentFileTable: BcdDefs.Base; parentSource: REF FileDesc _ NIL; parentFileName: ROPE _ NIL; RopeForNameRecord: PROC [bcd: BcdDefs.BcdBase, name: BcdDefs.NameRecord] RETURNS [r: ROPE] = TRUSTED { ssb: BcdDefs.NameString = LOOPHOLE[bcd + bcd.ssOffset]; len: NAT; i: INT _ name; GetFromNameString: SAFE PROC RETURNS [char: CHAR] = TRUSTED { char _ ssb.string[i]; i _ i + 1}; r _ Rope.FromProc[ssb.size[name], GetFromNameString]; len _ r.Length[]; IF len > 0 AND r.Fetch[len-1] = '. THEN r _ r.Substr[len: len-1]; }; CheckDependentFile: PROC [fth: BcdDefs.FTHandle, recur: BOOL _ TRUE] = TRUSTED { file: ROPE _ RopeForNameRecord[parentBcd, fth.name]; child: REF FileDesc; Int.CheckAbort[client]; IF file.Find["."] < 0 THEN file _ file.Concat[".bcd"]; IF (child _ LookupInHashTable[file]) = NIL THEN omissions _ CONS[ NEW[Omission _ [missing: file, neededBy: parentFileName]], omissions] ELSE IF Extant[child] THEN { IF child.needed = $wrongVersion THEN RETURN; IF child.version = BcdDefs.NullVersion THEN { childBcd: BcdDefs.BcdBase; childBcd _ GetBcd[client, bcdCache, child ! FS.Error => IF Int.RetryFSOperation[error, client] THEN RETRY ELSE { errors _ errors.SUCC; Int.ReportFSError[ error, NEW[Int.RemoteFileInfo _ [name: parentFileName, date: parent.date]], client ]; GO TO proceedWithoutBCD } ]; IF childBcd = NIL THEN GO TO proceedWithoutBCD; IF childBcd.versionIdent = BcdDefs.VersionID THEN child.version _ childBcd.version; ReleaseBcd[bcdCache, childBcd]; }; SELECT child.needed FROM $no => { IF fth.version = child.version THEN { child.needed _ $yes; IF child.importedFrom ~= NIL AND child.importedFrom.needed = $no THEN child.importedFrom.needed _ $yes; IF recur THEN VerifyDependenciesInner[child]; RETURN }; }; $yes => IF fth.version = child.version THEN RETURN; ENDCASE; child.needed _ $wrongVersion; errors _ errors.SUCC; ErrorClient[client, IO.PutFR[ "'%g' {%g} depends on '%g'; '%g' {%g} is the wrong version.", IO.rope[parentFileName], IO.rope[Utils.DateToRope[parent.date]], IO.rope[file], IO.rope[FullName[child]], IO.rope[Utils.DateToRope[child.date]] ]]; EXITS proceedWithoutBCD => NULL; }; }; parentSourceName: BcdDefs.NameRecord _ BcdDefs.NullName; parentSourceVersion: BcdDefs.VersionStamp; IF ~parent.bcd OR parent.importedFrom ~= NIL THEN RETURN; parentFileName _ FullName[parent]; parentBcd _ GetBcd[client, bcdCache, parent ! FS.Error => IF Int.RetryFSOperation[error, client] THEN RETRY ELSE { errors _ errors.SUCC; Int.ReportFSError[ error, NEW[Int.RemoteFileInfo _ [name: parentFileName, date: parent.date]], client ]; GO TO skipThisBCD } ]; IF parentBcd = NIL THEN GO TO skipThisBCD; TRUSTED{ parentSourceName _ parentBcd.source; parentSourceVersion _ parentBcd.sourceVersion; parentFileTable _ LOOPHOLE[parentBcd + parentBcd.ftOffset]; }; IF parentSourceName = BcdDefs.NullName THEN { warnings _ warnings.SUCC; WarnClient[client, IO.PutFR[ "'%g' {%g} does not specify a source file.", IO.rope[parentFileName], IO.rope[Utils.DateToRope[parent.date]] ]]; } ELSE { sourceFileName: ROPE = RopeForNameRecord[parentBcd, parentSourceName]; IF (parentSource _ LookupInHashTable[sourceFileName]) = NIL THEN omissions _ CONS[ NEW[Omission _ [ missing: sourceFileName, neededBy: parentFileName ]], omissions] ELSE IF Extant[parentSource] AND parentSource.needed ~= $wrongVersion THEN { sourceDate: Date; gmt: BasicTime.GMT; gmt _ BasicTime.FromPupTime[LOOPHOLE[parentSourceVersion.time] ! 
RuntimeError.UNCAUGHT => { ErrorClient[client, "'", parentFileName, "' has a totally bogus date."]; }]; sourceDate _ [$explicit, BasicTime.FromPupTime[LOOPHOLE[parentSourceVersion.time]]]; IF sourceDate = parentSource.date THEN parentSource.needed _ $yes ELSE { parentSource.needed _ $wrongVersion; errors _ errors.SUCC; ErrorClient[client, IO.PutFR[ "'%g' {%g} expects source of {%g}, but DF file specifies '%g' {%g}.", IO.rope[parentFileName], IO.rope[Utils.DateToRope[parent.date]], IO.rope[Utils.DateToRope[sourceDate]], IO.rope[FullName[parentSource]], IO.rope[Utils.DateToRope[parentSource.date]] ]]; }; }; }; TRUSTED { IF parentBcd.nConfigs > 0 THEN { remoteSubConfig: BOOL _ FALSE; ctb: BcdDefs.Base = LOOPHOLE[parentBcd + parentBcd.ctOffset]; sgb: BcdDefs.Base = LOOPHOLE[parentBcd + parentBcd.sgOffset]; DoOneModule: PROC [mth: BcdDefs.MTHandle, mti: BcdDefs.MTIndex] RETURNS [BOOL _ FALSE] = TRUSTED { cti: BcdDefs.CTIndex _ mth.config; UNTIL cti = BcdDefs.CTNull DO cth: BcdDefs.CTHandle = @ctb[cti]; IF cth.file ~= BcdDefs.FTSelf THEN {remoteSubConfig _ TRUE; RETURN}; cti _ cth.config; ENDLOOP; <> IF mth.file ~= BcdDefs.FTSelf AND mth.file ~= BcdDefs.FTNull THEN CheckDependentFile[@parentFileTable[mth.file]]; }; DoOneConfig: PROC [cth: BcdDefs.CTHandle, cti: BcdDefs.CTIndex] RETURNS [BOOL _ FALSE] = TRUSTED { IF cth.file ~= BcdDefs.FTSelf THEN { <> outerCti: BcdDefs.CTIndex _ cth.config; UNTIL outerCti = BcdDefs.CTNull DO outerCth: BcdDefs.CTHandle = @ctb[outerCti]; IF outerCth.file ~= BcdDefs.FTSelf THEN RETURN; outerCti _ outerCth.config; ENDLOOP; CheckDependentFile[@parentFileTable[cth.file]]; }; }; [] _ BcdOps.ProcessModules[parentBcd, DoOneModule]; IF remoteSubConfig THEN [] _ BcdOps.ProcessConfigs[parentBcd, DoOneConfig]; } ELSE { DoOneFile: PROC [fth: BcdDefs.FTHandle, fti: BcdDefs.FTIndex] RETURNS [BOOL _ FALSE] = TRUSTED { CheckDependentFile[fth]; }; [] _ BcdOps.ProcessFiles[parentBcd, DoOneFile]; }; }; ReleaseBcd[bcdCache, parentBcd]; EXITS skipThisBCD => NULL; }; IF rootList = NIL THEN { warnings _ warnings.SUCC; WarnClient[client, IO.PutFR["No files in '%g' are marked with '+'.", IO.rope[dfFile]]]; } ELSE FOR l: LIST OF REF FileDesc _ rootList, l.rest UNTIL l = NIL DO rootDesc: REF FileDesc = l.first; IF rootDesc.needed = $no THEN { rootDesc.needed _ $yes; IF rootDesc.importedFrom ~= NIL AND rootDesc.importedFrom.needed = $no THEN rootDesc.importedFrom.needed _ $yes; IF Extant[rootDesc] THEN VerifyDependenciesInner[rootDesc]; }; ENDLOOP; }; ReportOmissions: PROC = { Compare: List.CompareProc = { RETURN[Rope.Compare[ NARROW[ref1, REF Omission].missing, NARROW[ref2, REF Omission].missing] ] }; nFiles: NAT _ 0; msg: ROPE _ "The following should appear in the DF input:"; IF omissions = NIL THEN RETURN; FOR omissions _ List.UniqueSort[omissions, Compare], omissions.rest UNTIL omissions = NIL DO omission: REF Omission = NARROW[omissions.first]; msg _ msg.Concat[ IO.PutFR["\n '%g', needed by '%g'", IO.rope[omission.missing], IO.rope[omission.neededBy] ] ]; nFiles _ nFiles.SUCC; ENDLOOP; errors _ errors + nFiles; ErrorClient[client, msg]; }; CheckIfNeeded: PROC [desc: REF FileDesc] = { IF desc.needed ~= $no OR ~WorthRemembering[desc] THEN RETURN; IF rootList = NIL THEN RETURN; <> IF desc.importedFrom ~= NIL THEN IF desc.importedFrom.needed = $no THEN <> (desc _ desc.importedFrom).needed _ $yes ELSE RETURN; warnings _ warnings.SUCC; WarnClient[client, IO.PutFR[ "'%g' (%g) is superfluous.", IO.rope[IF desc.shortName = NIL THEN desc.path ELSE desc.shortName], IO.rope[ IF desc.importedFrom = 
NIL THEN IO.PutFR["in '%g'", IO.rope[desc.parent]] ELSE IO.PutFR["via '%g'", IO.rope[FullName[desc.importedFrom]]] ]]]; };
IF interact = NIL THEN interact _ Int.DefaultInteractionProc; IF Int.LocalFile[dfFile] THEN { dfInfo: REF Int.LocalFileInfo = NEW[Int.LocalFileInfo _ [name: dfFile]]; Int.GetFileInfo[info: dfInfo, client: client, errorLevel: $abort]; IF (dfFile _ dfInfo.attachedTo) = NIL THEN ErrorClient[client, "'", dfInfo.name, "' isn't a remote file and therefore can't be verified."]; };
BEGIN ENABLE BEGIN UNWIND => IFSCleanup[bcdCache]; ABORTED => { Int.SimpleInteraction[client, NEW[Ops.AbortInteraction _ [TRUE]]]; <> }; Int.AbortDF => { errors _ errors.SUCC; Int.SimpleInteraction[client, NEW[Ops.DFInfoInteraction _ [action: $abort, dfFile: dfFile]]]; CONTINUE }; END;
<<Parse the DF file (and any nested DF files), building the hash table and the root list.>>
IF VerifyInner[dfFile, [format: $explicit], allFilter] THEN {
<<Follow BCD dependencies from the roots, checking that each needed file is present in the right version.>>
VerifyDependencies[];
<<Report files that BCDs depend on but that the DF input omits.>>
ReportOmissions[];
<<Warn about files in the DF input that nothing turned out to need.>>
EnumerateHashTable[CheckIfNeeded]; }; END; };
<<BCD cache: holds recently read BCDs in VM buffers so that dependency checking need not reread them from the file server.>>
BcdCache: TYPE = REF BcdCacheObject; BcdCacheObject: PUBLIC TYPE = RECORD [ releaseDest: ROPE _ NIL, ifsInstance: IFSFile.FSInstance _ NIL, currentServer: ROPE _ NIL, ifsInitialized: BOOL _ FALSE,
locked: CachedBcdList _ NIL, -- linear list
available: CachedBcdList _ NIL, -- circularly chained
size: NAT _ 0, replacementSize: NAT ];
CachedBcdList: TYPE = LIST OF CachedBcd; CachedBcd: TYPE = REF CachedBcdEntry; CachedBcdEntry: TYPE = RECORD [ buffer: VM.Interval _ VM.nullInterval, desc: REF FileDesc _ NIL ]; initialVM: VM.PageCount = 10; <> <>
CreateBcdCache: PUBLIC PROC [replacementSize: NAT] RETURNS [bcdCache: BcdCache] = { RETURN[NEW[BcdCacheObject _ [replacementSize: replacementSize]]] };
IFSSetup: PROC [cache: BcdCache, server: ROPE] RETURNS [problem: ROPE _ NIL] = TRUSTED { userName,password: ROPE; IF NOT cache.ifsInitialized THEN {IFSFile.Initialize[]; cache.ifsInitialized _ TRUE}; IF cache.ifsInstance # NIL AND Rope.Equal[server, cache.currentServer, FALSE] THEN RETURN; [userName,password] _ UserCredentials.Get[]; cache.ifsInstance _ IFSFile.Login[server, userName, password !
IFSFile.UnableToLogin => { why: ROPE _ "Remote access failed for %g"; SELECT reason FROM credentials => why _ "Invalid credentials for %g"; io => why _ "Server not found: %g"; ENDCASE; problem _ IO.PutFR[why, IO.rope[server]]; GO TO notGood; }]; cache.currentServer _ server; EXITS notGood => {}; }; IFSCleanup: PROC [cache: BcdCache] = TRUSTED { IF cache.ifsInstance # NIL THEN {IFSFile.Logout[cache.ifsInstance]; cache.ifsInstance _ NIL}; IF cache.ifsInitialized THEN {IFSFile.Finalize[]; cache.ifsInitialized _ FALSE}; }; WarnClient: PROC [client: Int.Client, r1,r2,r3,r4,r5: ROPE _ NIL] = { Int.SimpleInteraction[ client, NEW[Ops.InfoInteraction _ [ class: $warning, message: Rope.Cat[r1,r2,r3,r4,r5] ]] ]; }; ErrorClient: PROC [client: Int.Client, r1,r2,r3,r4,r5: ROPE _ NIL] = { Int.SimpleInteraction[ client, NEW[Ops.InfoInteraction _ [ class: $error, message: Rope.Cat[r1,r2,r3,r4,r5] ]] ]; }; IsInFileCache: PUBLIC PROC [name: ROPE, date: Date] RETURNS [inCache: BOOL _ FALSE] = { cacheChecker: FSBackdoor.InfoProc = { <<[fullGName: ROPE, created: BasicTime.GMT, bytes: INT, keep: CARDINAL]>> <> IF bytes > 0 THEN { IF date.gmt # BasicTime.nullGMT THEN { <> IF created # date.gmt THEN RETURN [TRUE]; }; <> inCache _ TRUE; RETURN [FALSE]; }; RETURN [TRUE]; }; FSBackdoor.EnumerateCacheForInfo[cacheChecker, NIL, name]; }; GetBcd: PROC [client: Int.Client, bcdCache: BcdCache, desc: REF FileDesc] RETURNS [bcd: BcdDefs.BcdBase _ NIL] = { prev: CachedBcdList _ bcdCache.available; new: CachedBcd _ NIL; list: CachedBcdList _ NIL; date: Date = desc.date; NewEntry: PROC RETURNS [CachedBcdList] = { bcdCache.size _ bcdCache.size.SUCC; RETURN[CONS[NEW[CachedBcdEntry _ []], NIL]] }; SELECT TRUE FROM prev = NIL => <<'available' list is empty. Create a new cache entry regardless of present cache size.>> list _ NewEntry[]; prev = prev.rest => { <<'available' list has precisely one entry, which may or may not be the file of interest.>> list _ bcdCache.available; bcdCache.available _ NIL; IF list.first.desc ~= desc THEN list.first.desc _ NIL; }; ENDCASE => { <<'available' list has at least two entries.>> list _ prev.rest; DO <> IF list.first.desc = desc THEN GO TO dequeue; -- 'list.first' is a cache hit prev _ list; IF (list _ list.rest) = bcdCache.available.rest THEN { <> IF bcdCache.size < bcdCache.replacementSize THEN {list _ NewEntry[]; EXIT} ELSE {list.first.desc _ NIL; GO TO dequeue}; }; REPEAT dequeue => { prev.rest _ list.rest; IF bcdCache.available = list THEN bcdCache.available _ list.rest; }; ENDLOOP; }; <<'list' is a single element list (although list.rest may be garbage) containing the CachedBcd to be (re)used. 
We link it on the 'locked' list.>> list.rest _ bcdCache.locked; bcdCache.locked _ list; <> IF (new _ list.first).desc = NIL THEN { ENABLE UNWIND => { bcdCache.locked _ bcdCache.locked.rest; bcdCache.size _ bcdCache.size.PRED; }; name: ROPE = FullName[desc]; nPages: INT; MaybeGrowBuffer: PROC = TRUSTED { nPages _ bcd.nPages; IF nPages > new.buffer.count THEN { <> VM.Free[new.buffer]; new.buffer _ VM.Allocate[nPages]; bcd _ LOOPHOLE[VM.AddressForPageNumber[new.buffer.page]]; }; }; inCache: BOOL _ FALSE; ReadFile: PROC = TRUSTED { <> inCache _ IsInFileCache[name, date]; IF new.buffer.count = 0 THEN new.buffer _ VM.Allocate[initialVM]; bcd _ VM.AddressForPageNumber[new.buffer.page]; IF NOT inCache THEN { <> ifsFile: IFSFile.FileHandle _ NIL; serverPos: INT = Rope.SkipTo[name, 1, "]"]; server: ROPE = Rope.Flatten[name, 1, serverPos-1]; fileName: ROPE _ Rope.Flatten[name, serverPos+1]; msg: ROPE _ IFSSetup[bcdCache, server]; completed: CONDITION; problem: IFSFile.Problem _ ok; OnCompletion: ENTRY IFSFile.Completer = TRUSTED { problem _ outcome; BROADCAST completed }; WaitForCompleted: ENTRY PROC = TRUSTED {WAIT completed}; LeafRead: PROC [pages: INT] = TRUSTED { FOR pause: NAT IN [1..maxTries] DO Process.CheckForAbort[]; IFSFile.StartRead[ ifsFile, 0, pages*bytesPerPage, LOOPHOLE[bcd], OnCompletion, NIL]; WaitForCompleted[]; IF problem = ok THEN EXIT; WarnClient[client, "File access glitch: ", name]; Process.Pause[Process.SecondsToTicks[1+pause/2]]; ENDLOOP; }; IF bcdCache.ifsInstance = NIL THEN GO TO useFS; <> IF msg # NIL THEN ErrorClient[client, msg]; FOR pause: NAT IN [1..maxTries] DO Process.CheckForAbort[]; ifsFile _ IFSFile.Open[bcdCache.ifsInstance, fileName ! IFSFile.Error => {problem _ reason; CONTINUE}]; IF problem = ok THEN EXIT; WarnClient[client, "File open glitch: ", name]; Process.Pause[Process.SecondsToTicks[1+pause/2]]; ENDLOOP; <> SELECT TRUE FROM date.gmt = BasicTime.nullGMT => {}; date.gmt = BasicTime.FromPupTime[IFSFile.GetTimes[ifsFile].create] => {}; ENDCASE => { <> IFSFile.Close[ifsFile]; GO TO useFS; }; IF problem = ok THEN { LeafRead[pagesPerBCD]; IF problem = ok THEN { MaybeGrowBuffer[]; LeafRead[nPages]; }; IFSFile.Close[ifsFile]; IF problem = ok THEN RETURN; }; }; GO TO useFS; EXITS useFS => { <> tName: ROPE _ IF inCache THEN name ELSE FS.Copy[from: name, to: "///Temp/VerifyRelease.log$", setKeep: TRUE, keep: 4, wantedCreatedTime: date.gmt, remoteCheck: TRUE, attach: TRUE]; openFile: FS.OpenFile _ FS.Open[name: tName, wantedCreatedTime: date.gmt]; FS.Read[openFile, 0, pagesPerBCD, bcd]; MaybeGrowBuffer[]; FS.Read[openFile, 0, nPages, bcd]; FS.Close[openFile]; }; }; ReadFile[]; new.desc _ desc; } ELSE TRUSTED {bcd _ VM.AddressForPageNumber[new.buffer.page]}; }; CheckReleaseAs: PROC [bcdCache: BcdCache, dfFile,path: ROPE, client: Int.Client] = { pLen: INT = Rope.Length[path]; rLen: INT = Rope.Length[bcdCache.releaseDest]; aPos: INT = Rope.SkipTo[path, 0, ">"]; SELECT TRUE FROM pLen = 0 => {}; aPos = pLen => WarnClient[client, "Bad release path: '", path, "'\n in: ", dfFile]; rLen = 0 => bcdCache.releaseDest _ Rope.Flatten[path, 0, aPos+1]; Rope.Run[bcdCache.releaseDest, 0, path, 0, FALSE] < rLen => WarnClient[client, "Unusual release path: '", path, "'\n in: ", dfFile]; ENDCASE; }; ReleaseBcd: PROC [bcdCache: BcdCache, bcd: BcdDefs.BcdBase] = { list: CachedBcdList _ bcdCache.locked; prev: CachedBcdList _ NIL; UNTIL list = NIL DO TRUSTED {IF VM.AddressForPageNumber[list.first.buffer.page] = bcd THEN EXIT}; prev _ list; list _ list.rest; REPEAT FINISHED => ERROR; 
ENDLOOP;
<<Unlink the matching entry from the 'locked' list.>>
IF prev = NIL THEN bcdCache.locked _ list.rest ELSE prev.rest _ list.rest;
<<Return the entry to the circular 'available' list; 'bcdCache.available' points at the most recently released entry.>>
IF bcdCache.available = NIL THEN list.rest _ list ELSE {list.rest _ bcdCache.available.rest; bcdCache.available.rest _ list}; bcdCache.available _ list; };
FlushBcdCache: PUBLIC PROC [bcdCache: BcdCache] = { list: CachedBcdList;
<<Free the buffers of the locked entries, then break the circular 'available' chain and free those buffers too.>>
FOR list _ bcdCache.locked, list.rest UNTIL list = NIL DO TRUSTED {VM.Free[list.first.buffer]}; ENDLOOP; bcdCache.locked _ NIL; IF bcdCache.available = NIL THEN RETURN;
list _ bcdCache.available.rest; -- head of 'available' list
bcdCache.available.rest _ NIL; -- break circular chain
bcdCache.available _ NIL; UNTIL list = NIL DO TRUSTED {VM.Free[list.first.buffer]}; list _ list.rest; ENDLOOP; IFSCleanup[bcdCache]; };
FullName: PROC [desc: REF FileDesc] RETURNS [ROPE] = INLINE { RETURN[desc.path.Concat[desc.shortName]] };
END.
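<<A minimal usage sketch (hypothetical client code, not part of this module). It assumes that the ReleaseToolVerify interface exports the BcdCache type and the CreateBcdCache, Verify, and FlushBcdCache procedures with the signatures implemented above; the DF file name and the log stream are invented for illustration.

cache: ReleaseToolVerify.BcdCache _ ReleaseToolVerify.CreateBcdCache[replacementSize: 10];
[errors, warnings, files] _ ReleaseToolVerify.Verify[dfFile: "[Server]<Top>Example.df", bcdCache: cache, log: logStream];
ReleaseToolVerify.FlushBcdCache[cache];

The client creates the cache once and passes it to each Verify call so that BCD buffers and the IFS connection are reused; FlushBcdCache frees the VM buffers and logs out of the file server.>>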