CacheModelImpl.mesa
Copyright © 1984 by Xerox Corporation. All rights reserved.
Russ Atkinson, September 19, 1984 4:15:08 pm PDT
Last Edited by: Sweet, March 19, 1985 10:14:53 am PST
DIRECTORY
Basics USING [BITSHIFT],
DragOpsCross USING [Half, Word, ZerosWord],
DragOpsCrossUtils USING [AddDelta, HalfShift, HalfToCard, HalfXor, LowHalf, WordToHalves],
SparseMemory USING [Base],
CacheModel
USING
[CacheBase, CacheBaseRep, CacheFetchProc, CacheStoreProc, wordsPerPage];
CacheModelImpl:
CEDAR
PROGRAM
IMPORTS Basics, DragOpsCrossUtils
EXPORTS CacheModel
= BEGIN OPEN DragOpsCross, DragOpsCrossUtils, CacheModel;
Word: TYPE = DragOpsCross.Word;
-- NOTE(review): CacheBase/CacheBaseRep are also imported from CacheModel (opened above);
-- confirm these local declarations exactly match the interface's versions.
CacheBase: TYPE = REF CacheBaseRep;
CacheBaseRep: TYPE = RECORD [
sharedBase: SharedBase ← NIL, -- shared base data
private: REF ← NIL, -- private data to the cache implementation (a CacheData here)
fetch: CacheFetchProc ← NIL, -- this hook allows the user to intercept cache accesses
store: CacheStoreProc ← NIL, -- this hook allows the user to intercept cache accesses
data: REF ← NIL, -- private data for clients intercepting fetch & store
stats: CacheStats ← [] -- maintained by the default fetch and store routines
];
SharedBase: TYPE = REF SharedBaseRep;
SharedBaseRep: TYPE = RECORD [
mem: SparseMemory.Base ← NIL, -- backing sparse memory shared by caches on this base
busyUntil: INT ← 0 -- presumably a simulated-time stamp; not referenced in this file
];
-- NOTE(review): SparseMemory does not appear in the DIRECTORY above — confirm the import.
CacheFetchProc: TYPE = PROC [base: CacheBase, addr: Word];
This is the type of routine that is used to fetch words from the cache.
CacheStoreProc: TYPE = PROC [base: CacheBase, addr: Word];
This is the type of routine that is used to store words into the cache.
-- Statistics kept by the default fetch/store routines; zeroed by ResetCache.
-- Fix: Access (below) bumps cache.stats.jumpMisses, but that field was missing
-- from this record; added at the end (default 0) so keyword/empty constructors
-- such as stats ← [] remain valid.
CacheStats: TYPE = RECORD [
probes: INT ← 0, -- total Access calls (hits and misses)
chunkMisses: INT ← 0, -- line resident but quad-word chunk absent
lineMisses: INT ← 0, -- line not resident at all
mapMisses: INT ← 0, -- line's page not in the page table
dirtyWrites: INT ← 0, -- dirty chunks written back at victim eviction
jumpMisses: INT ← 0 -- misses attributed to jump-target fetches (fromJump)
];
Private types
wordsPerQuad: NAT = 4; -- words in one quad-word chunk
logWordsPerQuad: NAT = 2; -- log2[wordsPerQuad]; used with BITSHIFT to divide
maxQuadsPerLine: NAT = 8; -- upper bound for QuadIndex
wordsPerPage: CARDINAL = CacheModel.wordsPerPage; -- local copy of the interface constant
QuadIndex: TYPE = [0..maxQuadsPerLine); -- index of a quad-word chunk within a line
PageEntry: TYPE = REF PageEntryRep;
-- One page known to the cache's page table (data.pageList); recycled entries
-- live on data.freePageList.
PageEntryRep:
TYPE =
RECORD [
next: PageEntry ← NIL, -- singly-linked list link
pageAddr: Word ← ZerosWord, -- address of the first word of the page
useCount: INT ← 0 -- number of resident lines belonging to this page
];
-- None: the all-FALSE chunk mask, used to clear and test the per-chunk bit arrays.
None:
PACKED ARRAY QuadIndex
OF
BOOL =
ALL[
FALSE];
HashEntry: TYPE = REF HashEntryRep;
-- One cache line: lives permanently in data.lineTable and, while resident,
-- is also chained into one bucket of data.hashVector.
HashEntryRep:
TYPE =
RECORD [
next: HashEntry ← NIL, -- hash-bucket chain link
lineAddr: Word ← ZerosWord, -- address of the first word of this line
index: NAT ←, -- this entry's slot in data.lineTable (set once at creation)
chunkPresent: PACKED ARRAY QuadIndex OF BOOL ← None, -- which quads are resident
dirty: PACKED ARRAY QuadIndex OF BOOL ← None, -- which quads have been stored into
referenced: PACKED ARRAY QuadIndex OF BOOL ← None -- set on access when lru is TRUE
];
CacheData: TYPE = REF CacheDataRep;
-- Per-cache private state, hung off CacheBaseRep.private.
CacheDataRep:
TYPE =
RECORD [
hashVector: HashVector ← NIL, -- hash table over resident lines, keyed by lineAddr
pageEntryCount: INT ← 0, -- number of entries on pageList
pageList: PageEntry ← NIL, -- pages with at least one resident line
freePageList: PageEntry ← NIL, -- recycled page entries
victimIndex: CARDINAL ← 0, -- next line to evict (round-robin over lineTable)
lru: BOOL ← FALSE, -- when TRUE, Access marks referenced chunks
rover: NAT ← 0, -- NOTE(review): not referenced anywhere in this file
quadsPerLine: NAT ← 2, -- quads per cache line (at most maxQuadsPerLine)
lineTable: SEQUENCE linesInCache: NAT OF HashEntry -- one entry per cache line
];
HashLim: CARDINAL = 512; -- number of hash buckets
HashVector: TYPE = REF HashVectorRep;
HashVectorRep: TYPE = ARRAY [0..HashLim) OF HashEntry;
NewCache:
PUBLIC
PROC [lines: [0..4096), quadsPerLine: [0..8), lru:
BOOL ←
FALSE]
RETURNS [CacheBase] = {
Creates a new cache on the specified shared memory: allocates the base with the default fetch/store hooks installed, allocates the private data sized to lines entries, and resets everything to empty.
-- NOTE(review): the positional constructor below supplies five values, but the
-- CacheBaseRep echoed above has six fields — confirm against the interface's
-- CacheBaseRep field order.
base: CacheBase ← NEW[CacheBaseRep ← [NIL, LocalFetch, LocalStore, NIL, []]];
private: CacheData ←
NEW[CacheDataRep[lines] ← [quadsPerLine: quadsPerLine, lru: lru, lineTable: NULL]];
base.private ← private;
ResetCache[base];
RETURN [base];
};
ResetCache:
PUBLIC
PROC [cache: CacheBase] = {
Resets the given cache to its initial state (all empty): fresh hash table, brand-new line entries, empty page list, and zeroed statistics.
private: CacheData = NARROW[cache.private];
private.pageList ← NIL;
private.hashVector ← NEW[HashVectorRep ← ALL[NIL]]; -- allocate a fresh, empty hash table
private.victimIndex ← 0;
private.pageEntryCount ← 0;
-- Give every line a brand-new entry that remembers its own table slot.
FOR i:
NAT
IN [0..private.linesInCache)
DO
private[i] ← NEW[HashEntryRep ← [index: i]];
ENDLOOP;
cache.stats ← []; -- zero out statistics
};
FlushCache:
PUBLIC
PROC [cache: CacheBase] = {
Empties the given cache (all lines and pages invalidated). Unlike ResetCache it reuses the existing hash table and line entries in place and leaves the accumulated statistics untouched.
private: CacheData = NARROW[cache.private];
private.pageList ← NIL;
private.hashVector^ ← ALL[NIL]; -- clear the buckets in place rather than reallocating
private.victimIndex ← 0;
private.pageEntryCount ← 0;
FOR i:
NAT
IN [0..private.linesInCache)
DO
private[i]^ ← [index: i]; -- reinitialize each entry in place, keeping its slot index
ENDLOOP;
};
LocalFetch: CacheFetchProc = {
Default fetch hook: rounds addr down to its line boundary and touches that line and quad-word chunk via Access.
[base: CacheBase, addr: Word];
private: CacheData = NARROW[base.private];
wordsPerLine: NAT = private.quadsPerLine * wordsPerQuad;
indexInLine: NAT = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
chunk: QuadIndex = Basics.BITSHIFT[indexInLine, -logWordsPerQuad]; -- indexInLine / wordsPerQuad
-- NOTE(review): fromJump is not bound in this scope, and the CacheFetchProc type
-- echoed above has no such parameter — confirm against the CacheModel interface
-- (the proc type there presumably carries a fromJump: BOOL argument).
entry: HashEntry ← Access[base, AddDelta[addr, -INT[indexInLine]], chunk, fromJump];
entry ← entry; -- to have a place to set a breakpoint
};
LocalStore: CacheStoreProc = {
Default store hook: touches the line and quad-word chunk containing addr, then marks that chunk dirty so eviction will charge a write-back.
[base: CacheBase, addr: Word];
private: CacheData = NARROW[base.private];
wordsPerLine: NAT = private.quadsPerLine * wordsPerQuad;
indexInLine: NAT = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
chunk: QuadIndex = Basics.BITSHIFT[indexInLine, -logWordsPerQuad]; -- indexInLine / wordsPerQuad
hashEntry: HashEntry;
hashEntry ← Access[base, AddDelta[addr, -INT[indexInLine]], chunk, FALSE]; -- stores are never jump fetches
hashEntry.dirty[chunk] ← TRUE;
};
-- Victim-advance policy, consulted in Access: under shiftOnHit, a hit on the
-- current victim line pushes the victim pointer past it.
Method: TYPE = {advanceOnMiss, shiftOnHit};
method: Method ← shiftOnHit; -- module-wide policy variable
Access:
PROC
[cache: CacheBase, lineAddr: Word, chunk: QuadIndex, fromJump: BOOL]
RETURNS [hashEntry: HashEntry] = {
Looks up (creating if necessary) the cache line for lineAddr, marks the given chunk present, and maintains the hash table, page table, victim pointer, and statistics. lineAddr must already be rounded down to a line boundary; fromJump attributes any miss to a jump for statistics.
data: CacheData = NARROW[cache.private];
oldEntry: BOOL ← TRUE; -- NOTE(review): dead variable; the oldEntry below is an EXITS label
victim: CARDINAL ← data.victimIndex;
hashIndex: CARDINAL;
-- Hash the line address: XOR the two halves, then fold in a byte-shifted copy.
halfHash: Half ← HalfXor[WordToHalves[lineAddr][0], WordToHalves[lineAddr][1]];
halfHash ← HalfXor[halfHash, HalfShift[halfHash, -8]];
hashIndex ← HalfToCard[halfHash] MOD HashLim;
hashEntry ← data.hashVector[hashIndex];
cache.stats.probes ← cache.stats.probes + 1;
-- Search the hash bucket for a resident line with this address (the hit path).
WHILE hashEntry #
NIL
DO
IF hashEntry.lineAddr = lineAddr
THEN {
IF data.lru THEN hashEntry.referenced[chunk] ← TRUE;
IF hashEntry.index = victim
AND method = shiftOnHit
THEN {
We make the victim index point at the next cache entry
victim ← victim + 1;
data.victimIndex ← IF victim = data.linesInCache THEN 0 ELSE victim;
};
IF ~hashEntry.chunkPresent[chunk]
THEN {
-- Line hit but chunk miss: count it and bring the chunk in.
IF fromJump THEN cache.stats.jumpMisses ← cache.stats.jumpMisses + 1;
cache.stats.chunkMisses ← cache.stats.chunkMisses + 1;
hashEntry.chunkPresent[chunk] ← TRUE};
RETURN;
};
hashEntry ← hashEntry.next;
ENDLOOP;
-- Line miss: evict the current victim line and reuse its entry for the new line.
IF fromJump THEN cache.stats.jumpMisses ← cache.stats.jumpMisses + 1;
cache.stats.lineMisses ← cache.stats.lineMisses + 1;
hashEntry ← data.lineTable[victim];
{
The victim must be removed from the hash table and page table.
lag: PageEntry ← NIL;
oldLineAddr: Word = hashEntry.lineAddr;
oldIndexInPage: CARDINAL = HalfToCard[LowHalf[oldLineAddr]] MOD wordsPerPage;
oldPageAddr: Word = AddDelta[oldLineAddr, -INT[oldIndexInPage]];
headHashEntry: HashEntry;
oldHashIndex: CARDINAL;
-- Same hash function as above, applied to the victim's old line address.
oldHalfHash: Half ← HalfXor[
WordToHalves[oldLineAddr][0], WordToHalves[oldLineAddr][1]];
oldHalfHash ← HalfXor[oldHalfHash, HalfShift[oldHalfHash, -8]];
oldHashIndex ← HalfToCard[oldHalfHash] MOD HashLim;
headHashEntry ← data.hashVector[oldHashIndex];
Maintain the hash table by removing the victim from the table. We must be prepared for the entry to not be in the hash table at all if the entry is brand new.
IF headHashEntry = hashEntry
THEN data.hashVector[oldHashIndex] ← hashEntry.next
ELSE
WHILE headHashEntry #
NIL
DO
IF hashEntry = headHashEntry.next
THEN {
headHashEntry.next ← hashEntry.next;
EXIT};
headHashEntry ← headHashEntry.next
ENDLOOP;
Now we need to maintain the page table. We must be prepared for the entry to not be in the hash table at all if the entry is brand new.
FOR pageEntry: PageEntry ← data.pageList, pageEntry.next
WHILE pageEntry #
NIL
DO
IF pageEntry.pageAddr = oldPageAddr
THEN {
Decrement the use count for this page (if an entry already exists)
IF (pageEntry.useCount ← pageEntry.useCount - 1) <= 0
THEN {
Remove this page entry from the list and put it on the free page list.
IF lag = NIL THEN data.pageList ← pageEntry.next ELSE lag.next ← pageEntry.next;
data.pageEntryCount ← data.pageEntryCount - 1;
pageEntry.next ← data.freePageList;
data.freePageList ← pageEntry;
};
EXIT
};
lag ← pageEntry;
ENDLOOP;
-- Charge a write-back for every dirty chunk in the victim line, then clear dirty.
IF hashEntry.dirty # None
THEN {
FOR i:
NAT
IN [0..data.quadsPerLine)
DO
IF hashEntry.dirty[i] THEN cache.stats.dirtyWrites ← cache.stats.dirtyWrites + 1;
ENDLOOP;
hashEntry.dirty ← None;
};
};
At this point we need to read in the quad word from the memory.
{
indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
pageAddr: Word = AddDelta[lineAddr, -INT[indexInPage]];
pageEntry: PageEntry ← data.pageList;
Maintain the hash table
hashEntry.next ← data.hashVector[hashIndex];
data.hashVector[hashIndex] ← hashEntry;
hashEntry.lineAddr ← lineAddr;
hashEntry.chunkPresent ← None; hashEntry.chunkPresent[chunk] ← TRUE;
-- Look for an existing page-table entry covering the new line's page.
WHILE pageEntry #
NIL
DO
IF pageEntry.pageAddr = pageAddr
THEN {
Increment the use count for this page (if an entry already exists). Then return.
pageEntry.useCount ← pageEntry.useCount + 1;
GO TO oldEntry;
};
pageEntry ← pageEntry.next;
ENDLOOP;
This entry is brand new, so add it to the list and bump the reject cycles to show that we got a map miss. Note that at this point pageEntry = NIL.
data.pageEntryCount ← data.pageEntryCount + 1;
cache.stats.mapMisses ← cache.stats.mapMisses + 1;
pageEntry ← data.freePageList;
IF pageEntry =
NIL
THEN pageEntry ← NEW[PageEntryRep]
ELSE data.freePageList ← pageEntry.next;
pageEntry^ ← [next: data.pageList, pageAddr: pageAddr, useCount: 1];
data.pageList ← pageEntry;
EXITS oldEntry => NULL;
};
At this point we have to advance the victim pointer, since in either method this newly retrieved entry clearly should not be the new victim.
victim ← victim + 1;
data.victimIndex ← IF victim = data.linesInCache THEN 0 ELSE victim;
-- NOTE(review): the closing "};" of Access appears to be missing before the
-- module's END — confirm against the original file.
END.