<>
<>
DIRECTORY
  Ascii USING [Lower],
  BcdDefs USING [BcdBase, FTHandle, FTIndex, NameRecord, NameString],
  BcdOps USING [ProcessFiles],
  Commander USING [CommandProc, Register, Handle],
  CommandTool USING [ArgumentVector, Parse, Failed],
  DFInternal USING [
    AbortDF, GetFileInfo, LocalFile, LocalFileInfo, RemoteFileInfo, ReportFSError,
    RetryFSOperation],
  DFUtilities USING [
    Date, DirectoryItem, FileItem, Filter, ImportsItem, IncludeItem, ParseFromStream,
    ProcessItemProc, RemoveVersionNumber, SyntaxError],
  FS USING [Close, Error, GetInfo, Open, OpenFile, PagesForBytes, Read, StreamOpen],
  IO USING [card, int, Close, GetIndex, rope, STREAM, PutF, Put, PutRope, PutChar],
  IOClasses USING [CreateDribbleOutputStream],
  List USING [CompareProc, Sort],
  ProcessExtras USING [CheckForAbort],
  Rope USING [
    Compare, Concat, Equal, Fetch, Find, FromProc, Index, Length, ROPE, Substr, SkipTo],
  VM USING [AddressForPageNumber, Allocate, Free, Interval, nullInterval, PageCount];

Dependencies: CEDAR PROGRAM
  IMPORTS
    Ascii, BcdOps, Commander, CommandTool, DFInternal, DFUtilities, FS, IO, IOClasses,
    List, ProcessExtras, Rope, VM
  = BEGIN OPEN Int: DFInternal, Utils: DFUtilities;

  ROPE: TYPE = Rope.ROPE;

  FileDesc: TYPE = RECORD [
    shortName: ROPE _ NIL,
    path: ROPE _ NIL,
    importedFrom: REF FileDesc _ NIL, -- highest level Imports with Using containing 'shortName'
    parent: ROPE _ NIL, -- DF file containing defining occurrence of 'shortName'
    date: Utils.Date _ [],
    dependers: LIST OF REF ANY _ NIL,
    ofInterest: BOOL _ FALSE
    ];

  htSize: NAT = 97;
  HashIndex: TYPE = [0..htSize);
  HashTable: TYPE = REF HashTableArray;
  HashTableArray: TYPE = ARRAY HashIndex OF LIST OF REF FileDesc;

  cacheSize: NAT = 10;

  Construct: PROC [dfFile: ROPE, cmd: Commander.Handle]
    RETURNS [errors, warnings, filesActedUpon: INT _ 0] = {
    logFileStream: IO.STREAM _ FS.StreamOpen[fileName: "Dependencies.log", accessOptions: $create];
    log: IO.STREAM _ IOClasses.CreateDribbleOutputStream[cmd.out, logFileStream];
    rootList: LIST OF REF FileDesc _ NIL;
    hashTable: HashTable = NEW[HashTableArray _ ALL[NIL]];
    bcdCache: BcdCache = CreateBcdCache[cacheSize];
    importedFrom: REF FileDesc _ NIL;

    Hash: PROC [name: ROPE] RETURNS [HashIndex] = {
      x: NAT _ name.Length[];
      FOR i: INT IN [0..x) DO
        x _ x + (Ascii.Lower[name.Fetch[i]] - 0C);
        ENDLOOP;
      RETURN[x MOD htSize]
      };

    EnterInHashTable: PROC [desc: REF FileDesc] RETURNS [BOOL _ TRUE] = {
      hi: HashIndex = Hash[desc.shortName];
      FOR l: LIST OF REF FileDesc _ hashTable[hi], l.rest UNTIL l = NIL DO
        IF desc.shortName.Equal[l.first.shortName, FALSE] THEN {
          IF desc.date ~= l.first.date THEN {
            <>
            warnings _ warnings.SUCC;
            log.PutF[
              "Warning: '%g' appears more than once (via '%g' and '%g').",
              IO.rope[desc.shortName],
              IO.rope[
                IF l.first.importedFrom = NIL THEN l.first.parent
                ELSE FullName[l.first.importedFrom]
                ],
              IO.rope[
                IF desc.importedFrom = NIL THEN desc.parent
                ELSE FullName[desc.importedFrom]
                ]
              ];
            };
          RETURN[FALSE]
          };
        ENDLOOP;
      hashTable[hi] _ CONS[desc, hashTable[hi]];
      filesActedUpon _ filesActedUpon.SUCC;
      };

    LookupInHashTable: PROC [shortName: ROPE] RETURNS [desc: REF FileDesc _ NIL] = {
      FOR l: LIST OF REF FileDesc _ hashTable[Hash[shortName]], l.rest UNTIL l = NIL DO
        IF shortName.Equal[l.first.shortName, FALSE] THEN RETURN[l.first];
        ENDLOOP;
      };

    EnumerateHashTable: PROC [proc: PROC [REF FileDesc]] = {
      FOR hi: HashIndex IN HashIndex DO
        FOR l: LIST OF REF FileDesc _ hashTable[hi], l.rest UNTIL l = NIL DO
          proc[l.first];
          ENDLOOP;
        ENDLOOP;
      };

    WorthRemembering: PROC [desc: REF FileDesc] RETURNS [BOOL _ FALSE] = {
      ext: ROPE =
        desc.shortName.Substr[start: desc.shortName.Index[s2: "."]];
      IF ext.Equal[".bcd", FALSE] THEN RETURN[TRUE];
      };

    ConstructInner: PROC [dfFile: ROPE, date: Utils.Date, filter: Utils.Filter, nestingDepth: NAT]
      RETURNS [finished: BOOL _ FALSE] = {
      directoryPath: ROPE _ NIL;

      DoOneItem: Utils.ProcessItemProc = {
        ProcessExtras.CheckForAbort[];
        WITH item SELECT FROM
          directory: REF Utils.DirectoryItem => directoryPath _ directory.path1;
          file: REF Utils.FileItem => {
            desc: REF FileDesc = NEW[FileDesc _ [
              shortName: Utils.RemoveVersionNumber[file.name],
              path: directoryPath,
              parent: dfFile,
              importedFrom: importedFrom
              ]];
            remoteInfo: REF Int.RemoteFileInfo = NEW[Int.RemoteFileInfo _ [
              name: FullName[desc],
              date: file.date
              ]];
            Int.GetFileInfo[info: remoteInfo, client: NIL, errorLevel: $warning
              ! FS.Error => CONTINUE
              ];
            desc.date _ remoteInfo.date;
            IF filter.list ~= NIL OR WorthRemembering[desc] THEN [] _ EnterInHashTable[desc];
            <>
            <>
            <>
            };
          imports: REF Utils.ImportsItem =>
            IF imports.form ~= $list AND filter.list = NIL THEN {
              warnings _ warnings.SUCC;
              log.PutF[
                "Warning: '%g' appears in an Imports statement (in '%g') without a Using list.\n",
                IO.rope[imports.path1], IO.rope[dfFile]
                ];
              <>
              [] _ ConstructInner[imports.path1, imports.date, filter, nestingDepth + 1];
              }
            ELSE {
              outerMostImports: BOOL = (filter.list = NIL);
              newFilter: Utils.Filter = [
                comments: filter.comments, -- comments processing is unaffected by imports
                filterA: filter.filterA, -- source/derived distinction is unaffected by imports
                filterB: IF imports.form = $exports THEN $public ELSE filter.filterB,
                filterC: $all, -- if the top level passes imports, they can come from anywhere
                list: IF imports.form = $list THEN imports.list ELSE filter.list
                ];
              IF outerMostImports THEN
                importedFrom _ NEW[FileDesc _ [
                  path: imports.path1, -- hack: shortName is NIL, but only CheckIfNeeded cares.
                  date: imports.date,
                  parent: dfFile
                  ]];
              IF ConstructInner[imports.path1, imports.date, newFilter, nestingDepth + 1]
                AND outerMostImports THEN {
                FOR i: NAT IN [0..imports.list.nEntries) DO
                  desc: REF FileDesc = LookupInHashTable[imports.list.u[i].name];
                  IF desc = NIL THEN {
                    warnings _ warnings.SUCC;
                    log.PutF[
                      "Warning: '%g' could not be found inside '%g' (or any nested DF file).\n",
                      IO.rope[imports.list.u[i].name], IO.rope[imports.path1]
                      ];
                    };
                  ENDLOOP;
                importedFrom _ NIL;
                };
              };
          include: REF Utils.IncludeItem =>
            [] _ ConstructInner[include.path1, include.date, filter, nestingDepth + 1];
          ENDCASE;
        }; -- end DoOneItem

      dfInfo: REF Int.RemoteFileInfo = NEW[Int.RemoteFileInfo _ [name: dfFile, date: date]];
      dfStream: IO.STREAM;
      Int.GetFileInfo[info: dfInfo, client: NIL
        ! FS.Error => {errors _ errors.SUCC; GO TO skip}];
      dfStream _ FS.StreamOpen[fileName: dfInfo.name
        ! FS.Error => {errors _ errors.SUCC; Int.ReportFSError[error, dfInfo, NIL]; GO TO skip}
        ];
      dfInfo.name _ Utils.RemoveVersionNumber[dfInfo.name];
      FOR i: NAT IN [1..nestingDepth] DO log.PutRope[" "] ENDLOOP;
      log.Put[IO.rope["starting to look at "], IO.rope[dfInfo.name], IO.rope["\n"]];
      Utils.ParseFromStream[dfStream, DoOneItem, filter !
        Utils.SyntaxError -- [reason: ROPE]-- => {
          errors _ errors.SUCC;
          log.PutF[
            "Error: Syntax error in '%g'[%d]: %g\NProcessing of this DF file aborted.",
            IO.rope[dfInfo.name], IO.card[dfStream.GetIndex[]], IO.rope[reason]
            ];
          CONTINUE
          };
        ABORTED => dfStream.Close[];
        ];
      dfStream.Close[];
      FOR i: NAT IN [1..nestingDepth] DO log.PutRope[" "] ENDLOOP;
      log.Put[IO.rope["finished looking at "], IO.rope[dfInfo.name], IO.rope["\n"]];
      RETURN[TRUE];
      EXITS skip => NULL;
      }; -- end ConstructInner

    <>
    IF Int.LocalFile[dfFile] THEN {
      dfInfo: REF Int.LocalFileInfo = NEW[Int.LocalFileInfo _ [name: dfFile]];
      Int.GetFileInfo[info: dfInfo, client: NIL, errorLevel: $abort];
      IF (dfFile _ dfInfo.attachedTo) = NIL THEN {
        warnings _ warnings.SUCC;
        log.PutF[
          "Warning: '%g' isn't a remote file and therefore dependencies can't be analyzed.",
          IO.rope[dfInfo.name]
          ];
        };
      };
    BEGIN
    ENABLE BEGIN
      ABORTED => {
        errors _ errors.SUCC;
        log.Put[IO.rope["Error: ABORT looking at "], IO.rope[dfFile], IO.rope["\n"]];
        <>
        };
      Int.AbortDF => {
        errors _ errors.SUCC;
        log.Put[IO.rope["Error: ABORT looking at "], IO.rope[dfFile], IO.rope["\n"]];
        CONTINUE
        };
      END;
    <>
    IF ConstructInner[dfFile, [format: $explicit], [], 0] THEN {

      lookAtBCDFile: PROC [depender: REF FileDesc] = {
        parentBcd: BcdDefs.BcdBase;
        IF ~WorthRemembering[depender] THEN RETURN;
        parentBcd _ GetBcd[bcdCache, depender !
          FS.Error =>
            IF Int.RetryFSOperation[error, NIL] THEN RETRY
            ELSE {
              errors _ errors.SUCC;
              log.Put[IO.rope["Error: Can't get BCD file named "], IO.rope[depender.shortName], IO.rope["\n"]];
              GOTO skipThisBCD
              }
          ];
        TRUSTED {
          IF parentBcd.nConfigs = 0 THEN {

            DoOneFile: PROC [fth: BcdDefs.FTHandle, fti: BcdDefs.FTIndex]
              RETURNS [BOOL _ FALSE] = TRUSTED {

              RopeForNameRecord: PROC [bcd: BcdDefs.BcdBase, name: BcdDefs.NameRecord]
                RETURNS [r: ROPE] = TRUSTED {
                ssb: BcdDefs.NameString = LOOPHOLE[bcd + bcd.ssOffset];
                len: NAT;
                i: INT _ name;
                GetFromNameString: SAFE PROC RETURNS [char: CHAR] = TRUSTED {
                  char _ ssb.string[i]; i _ i + 1};
                r _ Rope.FromProc[ssb.size[name], GetFromNameString];
                len _ r.Length[];
                IF len > 0 AND r.Fetch[len-1] = '.
                  THEN r _ r.Substr[len: len-1];
                };

              file: ROPE _ Rope.Concat[RopeForNameRecord[parentBcd, fth.name], ".bcd"];
              dependee: REF FileDesc _ LookupInHashTable[file];
              IF dependee = NIL THEN {
                warnings _ warnings.SUCC;
                log.PutRope["Warning: "];
                log.Put[
                  IO.rope[depender.shortName], IO.rope[" depends on "], IO.rope[file]
                  ];
                log.Put[
                  IO.rope[", but "], IO.rope[file], IO.rope[" was not found in any DF file\n"]
                  ];
                [] _ EnterInHashTable[dependee _ NEW[FileDesc _ [shortName: file]]];
                };
              <>
              dependee.dependers _ CONS[depender, dependee.dependers];
              }; -- end DoOneFile

            depender.ofInterest _ TRUE;
            [] _ BcdOps.ProcessFiles[parentBcd, DoOneFile];
            }; -- end parentBcd.nConfigs = 0
          }; -- end TRUSTED
        ReleaseBcd[bcdCache, parentBcd];
        EXITS skipThisBCD => RETURN;
        }; -- end lookAtBCDFile

      buildList: PROC [dependee: REF FileDesc] = {
        listOfFileDescRefs _ CONS[dependee, listOfFileDescRefs];
        };

      compare: List.CompareProc = {
        <>
        f1: REF FileDesc = NARROW[ref1];
        f2: REF FileDesc = NARROW[ref2];
        RETURN[f1.shortName.Compare[f2.shortName, FALSE]];
        };

      printDependencies: PROC [dependee: REF FileDesc] = {
        lineLength: NAT _ 0;
        IF NOT dependee.ofInterest THEN RETURN;
        IF dependee.dependers = NIL THEN {
          log.Put[IO.rope["No interface or program module was found to depend on "], IO.rope[dependee.shortName]];
          log.PutChar['\n];
          RETURN;
          };
        out.PutRope[dependee.shortName.Substr[0, dependee.shortName.SkipTo[0, "."]]];
        out.PutChar['\n];
        out.PutChar['\t];
        dependee.dependers _ List.Sort[dependee.dependers, compare];
        FOR fdl: LIST OF REF ANY _ dependee.dependers, fdl.rest UNTIL fdl = NIL DO
          fd: REF FileDesc = NARROW[fdl.first];
          name: ROPE _ fd.shortName.Substr[0, fd.shortName.SkipTo[0, "."]];
          lineLength _ lineLength + name.Length[];
          IF lineLength > 72 AND lineLength # name.Length[] THEN
            {out.PutRope["\n\t"]; lineLength _ name.Length[]};
          out.PutRope[name];
          IF fdl.rest = NIL THEN out.PutRope["\n"] ELSE out.PutRope[" "]
          ENDLOOP;
        }; -- end printDependencies

      <>
      out: IO.STREAM = FS.StreamOpen[fileName: "Dependencies.txt", accessOptions: $create];
      listOfFileDescRefs: LIST OF REF ANY _ NIL;

      EnumerateHashTable[lookAtBCDFile];
      out.PutRope["List of BCD file dependencies from "];
      out.PutRope[dfFile];
      out.PutRope["\n\n"];
      EnumerateHashTable[buildList];
      listOfFileDescRefs _ List.Sort[listOfFileDescRefs, compare];
      FOR l: LIST OF REF ANY _ listOfFileDescRefs, l.rest UNTIL l = NIL DO
        printDependencies[NARROW[l.first]];
        ENDLOOP;
      out.PutRope["\nEND of list of BCD file dependencies from "];
      out.PutRope[dfFile];
      out.PutRope["\n"];
      out.Close[];
      log.Put[
        IO.rope["\n***BCD dependencies from "], IO.rope[dfFile],
        IO.rope[" written onto Dependencies.txt"]
        ];
      IF errors # 0 OR warnings # 0 THEN {
        log.PutRope["\n*** "];
        log.Put[
          IO.int[errors], IO.rope[" errors and "], IO.int[warnings]
          ];
        log.PutRope[" warnings written onto Dependencies.log\n"];
        }
      ELSE log.PutRope[".\n"];
      }; -- end IF ConstructInner[dfFile, [format: $explicit], [], 0] THEN
    END; -- ENABLE
    logFileStream.Close[];
    }; -- end Construct

  <>
  BcdCache: TYPE = REF BcdCacheObject;
  BcdCacheObject: TYPE = RECORD [
    locked: CachedBcdList _ NIL, -- linear list
    available: CachedBcdList _ NIL, -- circularly chained
    size: NAT _ 0,
    replacementSize: NAT
    ];
  CachedBcdList: TYPE = LIST OF CachedBcd;
  CachedBcd: TYPE = REF CachedBcdEntry;
  CachedBcdEntry: TYPE = RECORD [
    buffer: VM.Interval _ VM.nullInterval,
    desc: REF FileDesc _ NIL
    ];

  initialVM: VM.PageCount = 10;
  <>
  <>

  CreateBcdCache: PROC [replacementSize: NAT] RETURNS [bcdCache: BcdCache] = {
    RETURN[NEW[BcdCacheObject _ [replacementSize: replacementSize]]]
    };

  GetBcd: PROC
    [bcdCache: BcdCache, desc: REF FileDesc]
    RETURNS [bcd: BcdDefs.BcdBase _ NIL] = {
    prev: CachedBcdList _ bcdCache.available;
    new: CachedBcd _ NIL;
    list: CachedBcdList _ NIL;

    NewEntry: PROC RETURNS [CachedBcdList] = {
      bcdCache.size _ bcdCache.size.SUCC;
      RETURN[CONS[NEW[CachedBcdEntry _ []], NIL]]
      };

    SELECT TRUE FROM
      prev = NIL =>
        <<'available' list is empty. Create a new cache entry regardless of present cache size.>>
        list _ NewEntry[];
      prev = prev.rest => {
        <<'available' list has precisely one entry, which may or may not be the file of interest.>>
        list _ bcdCache.available;
        bcdCache.available _ NIL;
        IF list.first.desc ~= desc THEN list.first.desc _ NIL;
        };
      ENDCASE => {
        <<'available' list has at least two entries.>>
        list _ prev.rest;
        DO
          <>
          IF list.first.desc = desc THEN GO TO dequeue; -- 'list.first' is a cache hit
          prev _ list;
          IF (list _ list.rest) = bcdCache.available.rest THEN {
            <>
            IF bcdCache.size < bcdCache.replacementSize THEN {list _ NewEntry[]; EXIT}
            ELSE {list.first.desc _ NIL; GO TO dequeue};
            };
          REPEAT
            dequeue => {
              prev.rest _ list.rest;
              IF bcdCache.available = list THEN bcdCache.available _ list.rest;
              };
          ENDLOOP;
        };
    <<'list' is a single element list (although list.rest may be garbage) containing the CachedBcd to be (re)used. We link it on the 'locked' list.>>
    list.rest _ bcdCache.locked;
    bcdCache.locked _ list;
    <>
    IF (new _ list.first).desc = NIL THEN {
      ENABLE UNWIND => {
        bcdCache.locked _ bcdCache.locked.rest;
        bcdCache.size _ bcdCache.size.PRED;
        };
      name: ROPE = FullName[desc];
      file: FS.OpenFile;
      nPages: INT;
      file _ FS.Open[name: name, wantedCreatedTime: desc.date.gmt];
      IF new.buffer.count = 0 THEN new.buffer _ VM.Allocate[initialVM];
      nPages _ MIN[FS.PagesForBytes[FS.GetInfo[file].bytes], new.buffer.count];
      TRUSTED {
        bcd _ VM.AddressForPageNumber[new.buffer.page];
        FS.Read[file: file, from: 0, nPages: nPages, to: bcd];
        IF bcd.nPages > nPages THEN {
          <>
          nPages _ bcd.nPages;
          VM.Free[new.buffer];
          new.buffer _ VM.Allocate[nPages];
          bcd _ VM.AddressForPageNumber[new.buffer.page];
          FS.Read[file: file, from: 0, nPages: nPages, to: bcd];
          };
        };
      FS.Close[file];
      new.desc _ desc;
      }
    ELSE TRUSTED {bcd _ VM.AddressForPageNumber[new.buffer.page]};
    };

  ReleaseBcd: PROC [bcdCache: BcdCache, bcd: BcdDefs.BcdBase] = {
    list: CachedBcdList _ bcdCache.locked;
    prev: CachedBcdList _ NIL;
    UNTIL list = NIL DO
      TRUSTED {IF VM.AddressForPageNumber[list.first.buffer.page] = bcd THEN EXIT};
      prev _ list;
      list _ list.rest;
      REPEAT FINISHED => ERROR;
      ENDLOOP;
    <>
    IF prev = NIL THEN bcdCache.locked _ list.rest ELSE prev.rest _ list.rest;
    <>
    IF bcdCache.available = NIL THEN list.rest _ list
    ELSE {list.rest _ bcdCache.available.rest; bcdCache.available.rest _ list};
    bcdCache.available _ list;
    };

  FlushBcdCache: PROC [bcdCache: BcdCache] = {
    list: CachedBcdList;
    <>
    FOR list _ bcdCache.locked, list.rest UNTIL list = NIL DO
      TRUSTED {VM.Free[list.first.buffer]};
      ENDLOOP;
    bcdCache.locked _ NIL;
    IF bcdCache.available = NIL THEN RETURN;
    list _ bcdCache.available.rest; -- head of 'available' list
    bcdCache.available.rest _ NIL; -- break circular chain
    bcdCache.available _ NIL;
    UNTIL list = NIL DO
      TRUSTED {VM.Free[list.first.buffer]};
      list _ list.rest;
      ENDLOOP;
    };

  FullName: PROC [desc: REF FileDesc] RETURNS [ROPE] = INLINE {
    RETURN[desc.path.Concat[desc.shortName]]
    };

  DoDependencies: Commander.CommandProc = {
    <>
    dfFileName: ROPE _ NIL;
    length: NAT;
    argv: CommandTool.ArgumentVector _ NIL;
    argv _ CommandTool.Parse[cmd !
      CommandTool.Failed => {
        msg _ errorMsg;
        CONTINUE;
        }];
    IF argv = NIL THEN RETURN[$Failure, msg];
    IF argv.argc # 2 THEN RETURN[$Failure, "Usage: Dependencies dfFileName\n"];
    dfFileName _ argv[1];
    length _ dfFileName.Length[];
    IF length < 4 OR
      (Rope.Compare[Rope.Substr[dfFileName, length - 3, 3], ".df", FALSE] # equal AND
        Rope.Find[dfFileName, "!", MAX[0, length-5]] = -1) THEN
      dfFileName _ Rope.Concat[dfFileName, ".df"];
    [] _ Construct[dfFileName, cmd];
    }; -- end DoDependencies

  <<>>
  <>
  Commander.Register[
    "Dependencies", DoDependencies,
    "Build a list of bcd dependencies from a df file."
    ];

END.
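
<<Usage sketch (assumed from the registration above, not part of the compiled module; Foo.df is a hypothetical file name): typing "Dependencies Foo.df" in a CommandTool appends ".df" when no extension or version is given, analyzes the DF file and the BCD files it names, writes the sorted dependency listing to Dependencies.txt, and echoes errors and warnings both to the viewer and to Dependencies.log via the dribble stream.>>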