-- AssociativeCacheImpl.mesa
-- Created By: Sindhu, August 5, 1985 7:15:44 pm PDT
-- Copyright c 1984, 1985 by Xerox Corporation. All rights reserved.
-- Pradeep Sindhu October 17, 1985 6:22:51 pm PDT
--
-- Simulation model of a fully-associative cache. Lines are located through a
-- software hash table keyed on the line address; a separate page list records
-- which memory pages currently own lines in the cache so that map misses can
-- be modeled. Misses may be forwarded to an optional backing cache
-- (realCache) and an optional map-entry cache (mapCache).

DIRECTORY
  AssociativeCache,
  Basics USING [BITSHIFT],
  CacheModels,
  Convert,
  DragOpsCross USING [Half, Word, ZerosWord],
  DragOpsCrossUtils USING [AddDelta, HalfShift, HalfToCard, HalfXor, LowHalf, SingleWordShiftRight, WordToHalves],
  IO,
  Rope;

AssociativeCacheImpl: CEDAR PROGRAM
  IMPORTS Basics, Convert, DragOpsCrossUtils, IO, Rope
  EXPORTS AssociativeCache
  = BEGIN OPEN DragOpsCross, DragOpsCrossUtils;

Cache: TYPE = CacheModels.Cache;

-- Private types

maxQuadsPerLine: NAT = 64;
maxLines: NAT = 16*1024;
wordsPerPage: CARDINAL = CacheModels.wordsPerPage;

QuadIndex: TYPE = [0..maxQuadsPerLine);

PageEntry: TYPE = REF PageEntryRep;
PageEntryRep: TYPE = RECORD [
  next: PageEntry _ NIL,
  pageAddr: Word _ ZerosWord, -- address of the first word of the page
  useCount: INT _ 0 -- number of cache lines currently resident from this page
  ];

QuadBitVector: TYPE = PACKED ARRAY QuadIndex OF BOOL;
None: QuadBitVector = ALL[FALSE];

HashEntry: TYPE = REF HashEntryRep;
HashEntryRep: TYPE = RECORD [
  next: HashEntry _ NIL, -- chain link within one hash bucket
  lineAddr: Word _ ZerosWord, -- address of the first word of the line
  index: NAT _, -- this entry's fixed slot in lineTable
  chunkPresent: QuadBitVector _ None, -- which quads of the line are resident
  dirty: QuadBitVector _ None, -- which quads have been written since arrival
  referenced: QuadBitVector _ None -- reference bits; only set when lru is TRUE
  ];

CacheData: TYPE = REF CacheDataRep;
CacheDataRep: TYPE = RECORD [
  stats: CacheStats _ [],
  hashVector: HashVector _ NIL, -- hash table: folded line address -> bucket chain
  pageEntryCount: INT _ 0,
  pageList: PageEntry _ NIL, -- pages having at least one line in the cache
  freePageList: PageEntry _ NIL, -- recycled PageEntry records
  victimIndex: CARDINAL _ 0, -- round-robin index of the next line to replace
  lru: BOOL _ FALSE,
  rover: NAT _ 0, -- NOTE(review): apparently unused in this module
  quadsPerLine: NAT _ 2,
  wordsPerQuad: NAT _ 4,
  logWordsPerQuad: NAT,
  realCache: CacheModels.Cache _ NIL, -- optional backing cache; misses are forwarded to it
  mapCache: CacheModels.Cache _ NIL, -- optional cache of map entries
  lineTable: SEQUENCE linesInCache: NAT OF HashEntry
  ];

CacheStats: TYPE = RECORD [
  probes: INT _ 0, -- total accesses (reads + writes)
  readProbes: INT _ 0,
  writeProbes: INT _ 0,
  writeMisses: INT _ 0,
  writeClean: INT _ 0, -- writes that dirtied a previously clean quad
  chunkMisses: INT _ 0, -- quad absent although its line was present
  lineMisses: INT _ 0, -- whole line absent
  jumpMisses: INT _ 0, -- misses on accesses flagged fromJump
  mapMisses: INT _ 0,
  dirtyWrites: INT _ 0 -- dirty quads written back when a victim is replaced
  ];

HashLim: CARDINAL = 512;
HashVector: TYPE = REF HashVectorRep;
HashVectorRep: TYPE = ARRAY [0..HashLim) OF HashEntry;

LogBaseTwo: PROC [n: NAT] RETURNS [NAT] = {
  -- Returns floor(log2(n)) by repeated halving.
  -- NOTE(review): n = 0 would recurse forever (0/2 = 0); callers pass wordsPerQuad, presumably a positive power of two -- confirm.
  IF n = 1 THEN RETURN[0] ELSE RETURN[1+LogBaseTwo[n/2]];
  };

NewCache: PUBLIC PROC [lines: NAT _ 100, quadsPerLine: NAT _ 2, wordsPerQuad: NAT _ 4, lru: BOOL _ FALSE, realCache, mapCache: CacheModels.Cache _ NIL] RETURNS [cache: CacheModels.Cache] = {
  -- Creates a new cache on the specified shared memory.
  -- Oversized geometry requests are silently clamped to the implementation limits.
  IF quadsPerLine > maxQuadsPerLine THEN quadsPerLine _ maxQuadsPerLine;
  IF lines > maxLines THEN lines _ maxLines;
  cache _ NEW[CacheModels.CacheRec _ [
    private: NEW[CacheDataRep[lines] _ [quadsPerLine: quadsPerLine, wordsPerQuad: wordsPerQuad, logWordsPerQuad: LogBaseTwo[wordsPerQuad], lru: lru, realCache: realCache, mapCache: mapCache, lineTable: NULL]],
    fetch: Fetch, store: Store, reset: Reset, flush: Flush, print: Print, data: NIL]];
  Reset[cache]; -- bring the new cache to its empty initial state
  };

Reset: PUBLIC CacheModels.CacheResetProc -- [cache: CacheModels.Cache] -- = {
  -- Resets the given cache to its initial state (all empty).
  -- Allocates fresh hash-table and line-table storage; Flush (below) reuses it instead.
  -- NOTE(review): freePageList is not cleared here -- confirm that retaining recycled page entries across a reset is intended.
  private: CacheData = NARROW[cache.private];
  private.pageList _ NIL;
  private.hashVector _ NEW[HashVectorRep _ ALL[NIL]];
  private.victimIndex _ 0;
  private.pageEntryCount _ 0;
  FOR i: NAT IN [0..private.linesInCache) DO
    private[i] _ NEW[HashEntryRep _ [index: i]];
    ENDLOOP;
  private.stats _ []; -- zero out statistics
  IF private.realCache # NIL THEN private.realCache.reset[private.realCache];
  IF private.mapCache # NIL THEN private.mapCache.reset[private.mapCache];
  };

Flush: PUBLIC CacheModels.CacheFlushProc -- [cache: CacheModels.Cache] -- = {
  -- Resets the given cache to its initial state (all empty).
  -- Unlike Reset, overwrites the existing hash vector and line entries in place
  -- and leaves the accumulated statistics intact.
  private: CacheData = NARROW[cache.private];
  private.pageList _ NIL;
  private.hashVector^ _ ALL[NIL];
  private.victimIndex _ 0;
  private.pageEntryCount _ 0;
  FOR i: NAT IN [0..private.linesInCache) DO
    private[i]^ _ [index: i];
    ENDLOOP;
  IF private.realCache # NIL THEN private.realCache.flush[private.realCache];
  IF private.mapCache # NIL THEN private.mapCache.flush[private.mapCache];
  };

Print: PUBLIC CacheModels.CachePrintProc -- [cache: CacheModels.Cache, stream: STREAM, name: ROPE, resetStatistics: BOOL _ TRUE] -- = {
  -- Prints the accumulated statistics for this cache on the given stream.
  -- NOTE(review): resetStatistics is part of the proc type but the statistics are never cleared here -- confirm against CacheModels.
  Realn: PROC [r: REAL, n: NAT] RETURNS [IO.Value] = {
    -- Formats r to n digits as a rope Value for PutF.
    RETURN [[rope[Convert.RopeFromReal[r, n]]]]};
  private: CacheData = NARROW[cache.private];
  quadMisses: INT _ private.stats.chunkMisses;
  misses: INT _ quadMisses + private.stats.lineMisses;
  probes: INT _ private.stats.probes;
  rProbes: REAL _ probes;
  rMisses: REAL _ misses;
  stream.PutF[
    "\nStats for %g (associative, lines: %g, quads/line: %g, words/quad: %g)\n probes: %g\n",
    [rope[IF private.lru THEN Rope.Cat[name, " (pseudo-lru)"] ELSE name]],
    [integer[private.linesInCache]], [integer[private.quadsPerLine]], [integer[private.wordsPerQuad]], [integer[probes]]];
  IF probes = 0 THEN RETURN; -- nothing more to report for an unused cache
  stream.PutF[
    "read probes: %g, write probes: %g\n",
    [integer[private.stats.readProbes]], [integer[private.stats.writeProbes]]];
  stream.PutF[
    "write misses: %g, writes to clean quads: %g\n",
    [integer[private.stats.writeMisses]], [integer[private.stats.writeClean]]];
  stream.PutF[
    " misses: %g (%g (%g%%) existing line)\n",
    [integer[misses]], [integer[private.stats.chunkMisses]],
    IF misses = 0 THEN [rope["-"]] ELSE Realn[(quadMisses/rMisses)*100, 3]];
  IF private.stats.jumpMisses # 0 THEN stream.PutF[
    " misses caused by jumps: %g (%g%% of probes)\n",
    [integer[private.stats.jumpMisses]],
    Realn[(private.stats.jumpMisses/rProbes)*100, 3]];
  IF probes # 0 THEN stream.PutF[
    " miss rate: %g%%\n",
    Realn[(misses/rProbes)*100, 3]];
  stream.PutF[
    " map misses: %g (%g%%)\n dirty writes: %g\n",
    [integer[private.stats.mapMisses]],
    Realn[(private.stats.mapMisses/rProbes)*100, 3],
    [integer[private.stats.dirtyWrites]]];
  };

Fetch: PUBLIC CacheModels.CacheFetchProc -- [cache: CacheModels.Cache, addr: DragOpsCross.Word, fromJump: BOOL _ FALSE] -- = {
  -- A fetch is a non-writing access.
  Access[cache, addr, fromJump, FALSE];
  };

Store: PUBLIC CacheModels.CacheStoreProc -- [cache: CacheModels.Cache, addr: DragOpsCross.Word] -- = {
  -- A store is a writing access, never from a jump.
  Access[cache, addr, FALSE, TRUE];
  };

-- Victim-advance policy: advanceOnMiss bumps the round-robin victim pointer only
-- when a line is replaced; shiftOnHit additionally skips the pointer past a line
-- that is hit while it is the designated victim.
Method: TYPE = {advanceOnMiss, shiftOnHit};
method: Method _ advanceOnMiss;

Access: PROC [cache: Cache, addr: Word, fromJump, write: BOOL] = {
  -- Models one access to addr, updating statistics, the hash table, the page
  -- list, and (on a line miss) replacing the current victim line.
  data: CacheData = NARROW[cache.private];
  oldEntry: BOOL _ TRUE; -- NOTE(review): apparently unused; unrelated to the oldEntry EXITS label below
  victim: CARDINAL _ data.victimIndex;
  wordsPerLine: NAT = data.quadsPerLine * data.wordsPerQuad;
  indexInLine: NAT = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
  chunk: QuadIndex = Basics.BITSHIFT[indexInLine, -data.logWordsPerQuad]; -- quad within the line
  lineAddr: Word = AddDelta[-INT[indexInLine], addr]; -- addr rounded down to its line boundary
  hashIndex: CARDINAL;
  hashEntry: HashEntry;
  -- Hash the line address: xor the two halves, then fold in a byte swap, mod HashLim.
  halfHash: Half _ HalfXor[WordToHalves[lineAddr][0], WordToHalves[lineAddr][1]];
  data.stats.probes _ data.stats.probes + 1;
  IF write
    THEN data.stats.writeProbes _ data.stats.writeProbes + 1
    ELSE data.stats.readProbes _ data.stats.readProbes + 1;
  halfHash _ HalfXor[halfHash, HalfShift[halfHash, -8]];
  hashIndex _ HalfToCard[halfHash] MOD HashLim;
  hashEntry _ data.hashVector[hashIndex];
  -- Check if the entry for addr is in our hash table
  WHILE hashEntry # NIL DO
    IF hashEntry.lineAddr = lineAddr THEN {
      -- Line hit.
      IF data.lru THEN hashEntry.referenced[chunk] _ TRUE;
      IF write THEN {
        IF NOT hashEntry.dirty[chunk] THEN data.stats.writeClean _ data.stats.writeClean + 1;
        hashEntry.dirty[chunk] _ TRUE;
        };
      IF hashEntry.index = victim AND method = shiftOnHit THEN {
        -- We make the victim index point at the next cache entry
        victim _ victim + 1;
        data.victimIndex _ IF victim = data.linesInCache THEN 0 ELSE victim;
        };
      IF ~hashEntry.chunkPresent[chunk] THEN {
        -- The line is present but this quad is not: a chunk miss.
        IF data.realCache # NIL THEN data.realCache.fetch[data.realCache, addr];
        IF write THEN data.stats.writeMisses _ data.stats.writeMisses + 1;
        IF fromJump THEN data.stats.jumpMisses _ data.stats.jumpMisses + 1;
        data.stats.chunkMisses _ data.stats.chunkMisses + 1;
        hashEntry.chunkPresent[chunk] _ TRUE};
      RETURN;
      };
    hashEntry _ hashEntry.next;
    ENDLOOP;
  -- Entry for addr is not in the hash table; update miss statistics
  IF fromJump THEN data.stats.jumpMisses _ data.stats.jumpMisses + 1;
  data.stats.lineMisses _ data.stats.lineMisses + 1;
  IF write THEN data.stats.writeMisses _ data.stats.writeMisses + 1;
  {
  -- Check for map miss
  indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
  pageAddr: Word = AddDelta[-INT[indexInPage], lineAddr];
  pageId: Word _ SingleWordShiftRight[pageAddr, CacheModels.logWordsPerPage];
  FOR pageEntry: PageEntry _ data.pageList, pageEntry.next WHILE pageEntry # NIL DO
    IF pageEntry.pageAddr = pageAddr THEN GOTO Found;
    ENDLOOP;
  data.stats.mapMisses _ data.stats.mapMisses + 1;
  IF data.mapCache # NIL THEN data.mapCache.fetch[data.mapCache, pageId];
  EXITS Found => NULL;
  };
  hashEntry _ data.lineTable[victim];
  {
  -- The victim must be removed from the hash table and page table.
  lag: PageEntry _ NIL;
  oldLineAddr: Word = hashEntry.lineAddr;
  oldIndexInPage: CARDINAL = HalfToCard[LowHalf[oldLineAddr]] MOD wordsPerPage;
  oldPageAddr: Word = AddDelta[-INT[oldIndexInPage], oldLineAddr];
  headHashEntry: HashEntry;
  oldHashIndex: CARDINAL;
  oldHalfHash: Half _ HalfXor[ WordToHalves[oldLineAddr][0], WordToHalves[oldLineAddr][1]];
  oldHalfHash _ HalfXor[oldHalfHash, HalfShift[oldHalfHash, -8]];
  oldHashIndex _ HalfToCard[oldHalfHash] MOD HashLim;
  headHashEntry _ data.hashVector[oldHashIndex];
  -- Maintain the hash table by removing the victim from the table. We must be
  -- prepared for the entry to not be in the hash table at all if the entry is brand new.
  IF headHashEntry = hashEntry
    THEN data.hashVector[oldHashIndex] _ hashEntry.next
    ELSE WHILE headHashEntry # NIL DO
      IF hashEntry = headHashEntry.next THEN {
        headHashEntry.next _ hashEntry.next; EXIT};
      headHashEntry _ headHashEntry.next
      ENDLOOP;
  -- Now we need to maintain the page table.
  -- Decrement the use count for this page (if an entry already exists)
  FOR pageEntry: PageEntry _ data.pageList, pageEntry.next WHILE pageEntry # NIL DO
    IF pageEntry.pageAddr = oldPageAddr THEN {
      IF (pageEntry.useCount _ pageEntry.useCount - 1) <= 0 THEN {
        -- Remove this page entry from the list and put it on the free page list.
        IF lag = NIL THEN data.pageList _ pageEntry.next ELSE lag.next _ pageEntry.next;
        data.pageEntryCount _ data.pageEntryCount - 1;
        pageEntry.next _ data.freePageList;
        data.freePageList _ pageEntry;
        };
      EXIT
      };
    lag _ pageEntry;
    ENDLOOP;
  -- Write back any dirty quads of the victim line.
  IF hashEntry.dirty # None THEN {
    FOR i: NAT IN [0..data.quadsPerLine) DO
      IF hashEntry.dirty[i] THEN {
        IF data.realCache # NIL THEN data.realCache.store[data.realCache, AddDelta[i, oldLineAddr]];
        data.stats.dirtyWrites _ data.stats.dirtyWrites + 1;
        };
      ENDLOOP;
    hashEntry.dirty _ None;
    };
  };
  {
  -- At this point we need to read in the quad word from the memory.
  indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
  pageAddr: Word = AddDelta[-INT[indexInPage], lineAddr];
  pageEntry: PageEntry _ data.pageList;
  -- Maintain the hash table
  hashEntry.next _ data.hashVector[hashIndex];
  data.hashVector[hashIndex] _ hashEntry;
  hashEntry.lineAddr _ lineAddr;
  hashEntry.chunkPresent _ None;
  hashEntry.chunkPresent[chunk] _ TRUE;
  IF data.realCache # NIL THEN data.realCache.fetch[data.realCache, addr];
  -- Increment the use count for this page (if an entry already exists). Then return.
  WHILE pageEntry # NIL DO
    IF pageEntry.pageAddr = pageAddr THEN {
      pageEntry.useCount _ pageEntry.useCount + 1; GO TO oldEntry; };
    pageEntry _ pageEntry.next;
    ENDLOOP;
  -- This entry is brand new, so add it to the list. Note that at this point pageEntry = NIL.
  data.pageEntryCount _ data.pageEntryCount + 1;
  pageEntry _ data.freePageList;
  IF pageEntry = NIL THEN pageEntry _ NEW[PageEntryRep] ELSE data.freePageList _ pageEntry.next;
  pageEntry^ _ [next: data.pageList, pageAddr: pageAddr, useCount: 1];
  data.pageList _ pageEntry;
  EXITS oldEntry => NULL;
  };
  IF write THEN {
    IF NOT hashEntry.dirty[chunk] THEN data.stats.writeClean _ data.stats.writeClean + 1;
    hashEntry.dirty[chunk] _ TRUE;
    };
  -- At this point we have to advance the victim pointer, since in either method
  -- this newly retrieved entry clearly should not be the new victim.
  victim _ victim + 1;
  data.victimIndex _ IF victim = data.linesInCache THEN 0 ELSE victim;
  };

END.
At this point we have to advance the victim pointer, since in either method this newly retrieved entry clearly should not be the new victim. Κ˜Jšœ™J™1šœ Οmœ7™BIcode™.—J™J™šΟk ˜ Kšœ˜Kšœžœžœ˜Kšœ ˜ K˜Kšœ žœ˜+KšœžœY˜pKšžœ˜ K˜—šΟbœžœž˜#Kšžœ%žœ˜4Kšžœ˜Kšœžœžœ!˜-K˜šœ ™ K˜Kšœžœ˜ K˜Kšœžœ˜Kšœ žœ ˜Kšœžœ˜2Kšœ žœ˜(K˜Kšœ žœžœ˜#šœžœžœ˜Kšœžœ˜K˜Kšœ žœ˜K˜K˜—š œžœžœžœ žœžœ˜5K˜—šœžœžœ˜!K˜—Kšœ žœžœ˜#šœžœžœ˜Kšœžœ˜K˜Kšœžœ˜ Kšœ#˜#Kšœ˜Kšœ ˜ K˜K˜—Kšœ žœžœ˜#šœžœžœ˜Kšœ˜Kšœžœ˜Kšœžœ˜Kšœžœ˜Kšœžœ˜Kšœ žœ˜Kšœžœžœ˜Kšœžœ˜Kšœžœ˜Kšœžœ˜Kšœžœ˜Kšœžœ˜#Kšœžœ˜"Kšœ žœžœžœ ˜2K˜—K˜šœ žœžœ˜Kšœžœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜Kšœ žœ˜K˜—K˜Kšœ žœ˜Kšœ žœžœ˜%Kšœžœžœžœ ˜6—K˜š Οn œžœžœžœžœ˜+Jš žœžœžœžœžœ˜7J˜—K˜š œžœžœ žœžœžœ žœžœ+žœžœ˜ΎKšœ3™3Kšžœ žœ ˜FKšžœžœ˜*šœžœ˜$Kšœ žœΊžœ˜ΝKšœF˜FKšœžœ˜ —Kšœ ˜ K˜—K˜•StartOfExpansion" -- [cache: CacheModels.Cache] -- šŸœžœ žœΠck!œ˜NKšœ8™8Kšœžœ˜+Kšœžœ˜Kšœžœžœžœ˜3Kšœ˜K˜šžœžœžœž˜*Kšœ žœ˜,Kšžœ˜—KšœΟc˜*Kšžœžœžœ,˜KKšžœžœžœ*˜HK˜K˜K˜—–" -- [cache: CacheModels.Cache] -- šŸœžœ žœ‘!œ˜NKšœ8™8Kšœžœ˜+Kšœžœ˜Kšœžœžœ˜Kšœ˜K˜šžœžœžœž˜*Kšœ˜Kšžœ˜—Kšžœžœžœ,˜KKšžœžœžœ*˜HK˜K˜—K˜–\ -- [cache: CacheModels.Cache, stream: STREAM, name: ROPE, resetStatistics: BOOL _ TRUE] -- šŸœžœ‘[œ˜ˆš  œžœžœžœžœžœ ˜4Kšžœ'˜-—Kšœžœ˜+Kšœ žœ˜,Kšœžœ)˜4Kšœžœ˜#Kšœ žœ ˜Kšœ žœ ˜šœ ˜ K˜ZKšœžœ žœ!žœ ˜GKšœw˜w—Kšžœ žœžœ˜šœ ˜ K˜&KšœK˜K—šœ ˜ K˜0KšœK˜K—šœ ˜ K˜+Kšœ9˜9Kšžœ žœ žœ%˜H—šžœžœ ˜1Kšœ2˜2KšœW˜W—šžœ žœ ˜K˜Kšœ ˜ —šœ ˜ Kšœ0˜0Kšœ{˜{—K˜K˜—–S -- [cache: CacheModels.Cache, addr: DragOpsCross.Word, fromJump: BOOL _ FALSE] -- šŸœžœ‘Rœ˜Kšœžœ˜%K˜K˜—–; -- [cache: CacheModels.Cache, addr: DragOpsCross.Word] -- šŸœžœ‘:œ˜gKšœž œ˜!K˜K˜—Kšœžœ˜+šœ˜K˜—š œžœ-žœ˜BKšœžœ˜(Kšœ žœžœ˜Kšœžœ˜$Kšœžœ)˜:Kšœ žœžœ˜>Kšœžœ%˜GKšœžœ˜3Kšœ žœ˜Kšœ˜KšœO˜OKšœ*˜*Kšžœžœ5žœ3˜yKšœ6˜6Kšœ!žœ ˜-Kšœ'˜'K˜K™0šžœ žœžœ˜šžœžœ˜'Kšžœ žœžœ˜4šžœžœ˜Kšžœžœžœ3˜UKšœžœ˜K˜—šžœžœžœ˜:Kšœ6™6Kšœ˜Kšœžœžœžœ˜DK˜—šžœ žœ˜(šžœžœž˜K˜+—Kšžœžœ5˜BKšžœ žœ3˜CKšœ4˜4Kšœ!žœ˜'—Kšžœ˜K˜—Kšœ˜Kšžœ˜—K˜K™?Kšžœ žœ3˜CKšœ2˜2Kšžœžœ5˜B˜K™Kšœ žœ!žœ˜GKšœžœ˜7KšœK˜Kšžœ6žœ 
žœž˜QKšžœžœžœ˜1—Kšžœ˜Kšœ0˜0Kšžœžœžœ,˜GKšžœ žœ˜—K˜K˜Kšœ#˜#šœ˜K™>Kšœžœ˜Kšœ'˜'Kšœžœ$žœ˜MKšœžœ˜@Kšœ˜Kšœžœ˜šœ˜Kšœ<˜<—Kšœ?˜?Kšœ'žœ ˜3Kšœ.˜.K˜KšœŸ™Ÿšžœ˜Kšžœ/˜3šžœžœžœž˜!šžœ žœ˜(Kšœ$˜$Kšžœ˜—Kšœ"˜"Kšžœ˜——K˜Kšœˆ™ˆšžœ6žœ žœž˜Qšžœ"žœ˜*KšœB™Bšžœ4žœ˜