CacheModelImpl.mesa
Copyright © 1984 by Xerox Corporation. All rights reserved.
Russ Atkinson, September 19, 1984 4:15:08 pm PDT
Last Edited by: Sweet, March 19, 1985 10:14:53 am PST

DIRECTORY
  Basics USING [BITSHIFT],
  DragOpsCross USING [Half, Word, ZerosWord],
  DragOpsCrossUtils USING [AddDelta, HalfShift, HalfToCard, HalfXor, LowHalf, WordToHalves],
  CacheModel USING [CacheBase, CacheBaseRep, CacheFetchProc, CacheStoreProc, wordsPerPage];

CacheModelImpl: CEDAR PROGRAM
  IMPORTS Basics, DragOpsCrossUtils
  EXPORTS CacheModel
  = BEGIN OPEN DragOpsCross, DragOpsCrossUtils, CacheModel;

-- Private types

wordsPerQuad: NAT = 4;
logWordsPerQuad: NAT = 2;
maxQuadsPerLine: NAT = 8;
wordsPerPage: CARDINAL = CacheModel.wordsPerPage;

QuadIndex: TYPE = [0..maxQuadsPerLine);

PageEntry: TYPE = REF PageEntryRep;
PageEntryRep: TYPE = RECORD [
  next: PageEntry _ NIL,
  pageAddr: Word _ ZerosWord,
  useCount: INT _ 0
  ];

None: PACKED ARRAY QuadIndex OF BOOL = ALL[FALSE];

HashEntry: TYPE = REF HashEntryRep;
HashEntryRep: TYPE = RECORD [
  next: HashEntry _ NIL,
  lineAddr: Word _ ZerosWord,
  index: NAT _,
  chunkPresent: PACKED ARRAY QuadIndex OF BOOL _ None,
  dirty: PACKED ARRAY QuadIndex OF BOOL _ None,
  referenced: PACKED ARRAY QuadIndex OF BOOL _ None
  ];

CacheData: TYPE = REF CacheDataRep;
CacheDataRep: TYPE = RECORD [
  hashVector: HashVector _ NIL,
  pageEntryCount: INT _ 0,
  pageList: PageEntry _ NIL,
  freePageList: PageEntry _ NIL,
  victimIndex: CARDINAL _ 0,
  lru: BOOL _ FALSE,
  rover: NAT _ 0,
  quadsPerLine: NAT _ 2,
  lineTable: SEQUENCE linesInCache: NAT OF HashEntry
  ];

HashLim: CARDINAL = 512;
HashVector: TYPE = REF HashVectorRep;
HashVectorRep: TYPE = ARRAY [0..HashLim) OF HashEntry;

NewCache: PUBLIC PROC [lines: [0..4096), quadsPerLine: [0..8), lru: BOOL _ FALSE]
  RETURNS [CacheBase] = {
  -- Creates a new cache on the specified shared memory.
  base: CacheBase _ NEW[CacheBaseRep _ [NIL, LocalFetch, LocalStore, NIL, []]];
  private: CacheData _ NEW[CacheDataRep[lines] _
    [quadsPerLine: quadsPerLine, lru: lru, lineTable: NULL]];
  base.private _ private;
  ResetCache[base];
  RETURN [base];
  };

ResetCache: PUBLIC PROC [cache: CacheBase] = {
  -- Resets the given cache to its initial state (all empty).
  private: CacheData = NARROW[cache.private];
  private.pageList _ NIL;
  private.hashVector _ NEW[HashVectorRep _ ALL[NIL]];
  private.victimIndex _ 0;
  private.pageEntryCount _ 0;
  FOR i: NAT IN [0..private.linesInCache) DO
    private[i] _ NEW[HashEntryRep _ [index: i]];
    ENDLOOP;
  cache.stats _ [];  -- zero out statistics
  };

FlushCache: PUBLIC PROC [cache: CacheBase] = {
  -- Resets the given cache to its initial state (all empty).
  private: CacheData = NARROW[cache.private];
  private.pageList _ NIL;
  private.hashVector^ _ ALL[NIL];
  private.victimIndex _ 0;
  private.pageEntryCount _ 0;
  FOR i: NAT IN [0..private.linesInCache) DO
    private[i]^ _ [index: i];
    ENDLOOP;
  };

LocalFetch: CacheFetchProc = {
  -- [base: CacheBase, addr: Word];
  private: CacheData = NARROW[base.private];
  wordsPerLine: NAT = private.quadsPerLine * wordsPerQuad;
  indexInLine: NAT = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
  chunk: QuadIndex = Basics.BITSHIFT[indexInLine, -logWordsPerQuad];
  entry: HashEntry _ Access[base, AddDelta[addr, -INT[indexInLine]], chunk, fromJump];
  entry _ entry;  -- to have a place to set a breakpoint
  };

LocalStore: CacheStoreProc = {
  -- [base: CacheBase, addr: Word];
  private: CacheData = NARROW[base.private];
  wordsPerLine: NAT = private.quadsPerLine * wordsPerQuad;
  indexInLine: NAT = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
  chunk: QuadIndex = Basics.BITSHIFT[indexInLine, -logWordsPerQuad];
  hashEntry: HashEntry;
  hashEntry _ Access[base, AddDelta[addr, -INT[indexInLine]], chunk, FALSE];
  hashEntry.dirty[chunk] _ TRUE;
  };

Method: TYPE = {advanceOnMiss, shiftOnHit};
method: Method _ shiftOnHit;
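-- With method = shiftOnHit, a probe that hits the line currently designated as the
-- victim also advances the victim index (see Access below), so a just-referenced line
-- is not the next one to be replaced; with advanceOnMiss the victim index advances
-- only at the end of Access, after a line miss has forced a replacement.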
Access: PROC [cache: CacheBase, lineAddr: Word, chunk: QuadIndex, fromJump: BOOL]
  RETURNS [hashEntry: HashEntry] = {
  data: CacheData = NARROW[cache.private];
  oldEntry: BOOL _ TRUE;
  victim: CARDINAL _ data.victimIndex;
  hashIndex: CARDINAL;
  halfHash: Half _ HalfXor[WordToHalves[lineAddr][0], WordToHalves[lineAddr][1]];
  halfHash _ HalfXor[halfHash, HalfShift[halfHash, -8]];
  hashIndex _ HalfToCard[halfHash] MOD HashLim;
  hashEntry _ data.hashVector[hashIndex];
  cache.stats.probes _ cache.stats.probes + 1;
  WHILE hashEntry # NIL DO
    IF hashEntry.lineAddr = lineAddr THEN {
      IF data.lru THEN hashEntry.referenced[chunk] _ TRUE;
      IF hashEntry.index = victim AND method = shiftOnHit THEN {
        -- We make the victim index point at the next cache entry
        victim _ victim + 1;
        data.victimIndex _ IF victim = data.linesInCache THEN 0 ELSE victim;
        };
      IF ~hashEntry.chunkPresent[chunk] THEN {
        IF fromJump THEN cache.stats.jumpMisses _ cache.stats.jumpMisses + 1;
        cache.stats.chunkMisses _ cache.stats.chunkMisses + 1;
        hashEntry.chunkPresent[chunk] _ TRUE};
      RETURN;
      };
    hashEntry _ hashEntry.next;
    ENDLOOP;
  IF fromJump THEN cache.stats.jumpMisses _ cache.stats.jumpMisses + 1;
  cache.stats.lineMisses _ cache.stats.lineMisses + 1;
  hashEntry _ data.lineTable[victim];
  {
  -- The victim must be removed from the hash table and page table.
  lag: PageEntry _ NIL;
  oldLineAddr: Word = hashEntry.lineAddr;
  oldIndexInPage: CARDINAL = HalfToCard[LowHalf[oldLineAddr]] MOD wordsPerPage;
  oldPageAddr: Word = AddDelta[oldLineAddr, -INT[oldIndexInPage]];
  headHashEntry: HashEntry;
  oldHashIndex: CARDINAL;
  oldHalfHash: Half _ HalfXor[
    WordToHalves[oldLineAddr][0], WordToHalves[oldLineAddr][1]];
  oldHalfHash _ HalfXor[oldHalfHash, HalfShift[oldHalfHash, -8]];
  oldHashIndex _ HalfToCard[oldHalfHash] MOD HashLim;
  headHashEntry _ data.hashVector[oldHashIndex];
  -- Maintain the hash table by removing the victim from the table. We must be prepared
  -- for the entry to not be in the hash table at all if the entry is brand new.
  IF headHashEntry = hashEntry
    THEN data.hashVector[oldHashIndex] _ hashEntry.next
    ELSE WHILE headHashEntry # NIL DO
      IF hashEntry = headHashEntry.next THEN {
        headHashEntry.next _ hashEntry.next; EXIT};
      headHashEntry _ headHashEntry.next
      ENDLOOP;
  -- Now we need to maintain the page table. We must be prepared for the entry to not be
  -- in the page table at all if the entry is brand new.
  -- Decrement the use count for this page (if an entry already exists)
  FOR pageEntry: PageEntry _ data.pageList, pageEntry.next WHILE pageEntry # NIL DO
    IF pageEntry.pageAddr = oldPageAddr THEN {
      IF (pageEntry.useCount _ pageEntry.useCount - 1) <= 0 THEN {
        -- Remove this page entry from the list and put it on the free page list.
        IF lag = NIL THEN data.pageList _ pageEntry.next ELSE lag.next _ pageEntry.next;
        data.pageEntryCount _ data.pageEntryCount - 1;
        pageEntry.next _ data.freePageList;
        data.freePageList _ pageEntry;
        };
      EXIT
      };
    lag _ pageEntry;
    ENDLOOP;
  IF hashEntry.dirty # None THEN {
    FOR i: NAT IN [0..data.quadsPerLine) DO
      IF hashEntry.dirty[i] THEN cache.stats.dirtyWrites _ cache.stats.dirtyWrites + 1;
      ENDLOOP;
    hashEntry.dirty _ None;
    };
  };
  {
  -- At this point we need to read in the quad word from the memory.
  indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
  pageAddr: Word = AddDelta[lineAddr, -INT[indexInPage]];
  pageEntry: PageEntry _ data.pageList;
  -- Maintain the hash table
  hashEntry.next _ data.hashVector[hashIndex];
  data.hashVector[hashIndex] _ hashEntry;
  hashEntry.lineAddr _ lineAddr;
  hashEntry.chunkPresent _ None;
  hashEntry.chunkPresent[chunk] _ TRUE;
  -- Increment the use count for this page (if an entry already exists). Then return.
  WHILE pageEntry # NIL DO
    IF pageEntry.pageAddr = pageAddr THEN {
      pageEntry.useCount _ pageEntry.useCount + 1;
      GO TO oldEntry;
      };
    pageEntry _ pageEntry.next;
    ENDLOOP;
  -- This entry is brand new, so add it to the list and bump the reject cycles to show
  -- that we got a map miss. Note that at this point pageEntry = NIL.
  data.pageEntryCount _ data.pageEntryCount + 1;
  cache.stats.mapMisses _ cache.stats.mapMisses + 1;
  pageEntry _ data.freePageList;
  IF pageEntry = NIL
    THEN pageEntry _ NEW[PageEntryRep]
    ELSE data.freePageList _ pageEntry.next;
  pageEntry^ _ [next: data.pageList, pageAddr: pageAddr, useCount: 1];
  data.pageList _ pageEntry;
  EXITS oldEntry => NULL;
  };
  -- At this point we have to advance the victim pointer, since in either method this
  -- newly retrieved entry clearly should not be the new victim.
  victim _ victim + 1;
  data.victimIndex _ IF victim = data.linesInCache THEN 0 ELSE victim;
  };

END.
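For readers less familiar with Mesa, the following rough C sketch shows the line-address hash that Access computes before probing hashVector. It assumes 32-bit Words split into 16-bit halves (as DragOpsCross.Half suggests) and HashLim = 512; the function and type names are illustrative only, not part of the Dragon sources.

#include <stdint.h>

enum { HASH_LIM = 512 };   /* assumed value of HashLim */

/* XOR the two 16-bit halves of the line address, fold the high byte of the
   result into the low byte, then reduce modulo the hash table size. */
static unsigned LineHash(uint32_t lineAddr)
{
  uint16_t half = (uint16_t)(lineAddr >> 16) ^ (uint16_t)(lineAddr & 0xFFFF);
  half ^= half >> 8;
  return half % HASH_LIM;
}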
For reference, the declarations this module uses from the CacheModel interface (imported in the DIRECTORY clause above):

Word: TYPE = DragOpsCross.Word;

CacheBase: TYPE = REF CacheBaseRep;
CacheBaseRep: TYPE = RECORD [
  sharedBase: SharedBase _ NIL,  -- shared base data
  private: REF _ NIL,  -- private data to the cache implementation
  fetch: CacheFetchProc _ NIL,  -- this hook allows the user to intercept cache accesses
  store: CacheStoreProc _ NIL,  -- this hook allows the user to intercept cache accesses
  data: REF _ NIL,  -- private data for clients intercepting fetch & store
  stats: CacheStats _ []  -- maintained by the default fetch and store routines
  ];

SharedBase: TYPE = REF SharedBaseRep;
SharedBaseRep: TYPE = RECORD [
  mem: SparseMemory.Base _ NIL,
  busyUntil: INT _ 0
  ];

CacheFetchProc: TYPE = PROC [base: CacheBase, addr: Word];
  This is the type of routine that is used to fetch words from the cache.
CacheStoreProc: TYPE = PROC [base: CacheBase, addr: Word];
  This is the type of routine that is used to store words into the cache.

CacheStats: TYPE = RECORD [
  probes: INT _ 0,
  chunkMisses: INT _ 0,
  lineMisses: INT _ 0,
  mapMisses: INT _ 0,
  dirtyWrites: INT _ 0
  ];
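As a companion to the hash sketch above, here is a small C rendering of the address arithmetic in LocalFetch and LocalStore, again assuming 32-bit word addresses; the helper name and struct are hypothetical. Each access is reduced to the base address of its cache line plus the index of the quad-word chunk touched within that line.

#include <stdint.h>

enum { WORDS_PER_QUAD = 4, LOG_WORDS_PER_QUAD = 2 };

typedef struct {
  uint32_t lineAddr;  /* address of the first word of the line */
  unsigned chunk;     /* which quad word within the line */
} LineRef;

/* quadsPerLine is the per-cache parameter (2 by default in CacheDataRep). */
static LineRef DecomposeAddr(uint32_t addr, unsigned quadsPerLine)
{
  unsigned wordsPerLine = quadsPerLine * WORDS_PER_QUAD;
  unsigned indexInLine = addr % wordsPerLine;   /* word offset within the line */
  LineRef r;
  r.chunk = indexInLine >> LOG_WORDS_PER_QUAD;  /* chunk index passed to Access */
  r.lineAddr = addr - indexInLine;              /* lineAddr passed to Access */
  return r;
}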