AssociativeCacheImpl.mesa
Copyright © 1985 by Xerox Corporation. All rights reserved.
Bertrand Serlet, October 17, 1985 4:22:55 pm PDT, stolen from CacheModelImpl.mesa
Last Edited by: Serlet, April 12, 1985 2:39:09 pm PST
Last Edited by: Barth, April 16, 1985 3:58:10 pm PST
Last Edited by: Sindhu, April 29, 1985 0:16:09 am PDT
Pradeep Sindhu November 23, 1985 1:26:58 am PST
DIRECTORY
AssociativeCache,
Basics USING [BITSHIFT],
CacheModels,
Convert,
DragOpsCross USING [Half, Word, ZerosWord],
DragOpsCrossUtils USING [AddDelta, HalfShift, HalfToCard, HalfXor, LowHalf, SingleWordShiftRight, WordToHalves],
IO, Rope;
AssociativeCacheImpl: CEDAR PROGRAM
IMPORTS Basics, Convert, DragOpsCrossUtils, IO, Rope
EXPORTS AssociativeCache
= BEGIN OPEN DragOpsCrossUtils;
Word: TYPE = DragOpsCross.Word;
Half: TYPE = DragOpsCross.Half;
ZerosWord: Word = DragOpsCross.ZerosWord;
Private types
Cache: TYPE = CacheModels.Cache;
maxQuadsPerLine: NAT = 64;
maxLines: NAT = 16*1024;
wordsPerPage: CARDINAL = CacheModels.wordsPerPage;
QuadIndex: TYPE = [0..maxQuadsPerLine);
PageEntry: TYPE = REF PageEntryRep;
PageEntryRep: TYPE = RECORD [
next: PageEntry ← NIL,
pageAddr: Word ← ZerosWord,
useCount: INT ← 0
];
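A PageEntry records a page that currently has lines resident in the cache; useCount is the number of such lines. The page list is used below to detect map (translation) misses.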
QuadBitVector: TYPE = PACKED ARRAY QuadIndex OF BOOL;
None: QuadBitVector = ALL[FALSE];
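A QuadBitVector holds one bit per quad of a line; None is the all-clear value.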
HashEntry: TYPE = REF HashEntryRep;
HashEntryRep: TYPE = RECORD [
next: HashEntry ← NIL,
lineAddr: Word ← ZerosWord,
index: NAT ←,
chunkPresent: QuadBitVector ← None,
dirty: QuadBitVector ← None,
referenced: QuadBitVector ← None
];
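A HashEntry describes one cache line: its line address, its index in lineTable, and per-quad present, dirty, and referenced bits. Entries in the same hash bucket are chained through next.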
CacheData: TYPE = REF CacheDataRep;
CacheDataRep: TYPE = RECORD [
stats: CacheStats ← [],
hashVector: HashVector ← NIL,
pageEntryCount: INT ← 0,
pageList: PageEntry ← NIL,
freePageList: PageEntry ← NIL,
victimIndex: CARDINAL ← 0,
lru: BOOL ← FALSE,
rover: NAT ← 0,
quadsPerLine: NAT ← 2,
wordsPerQuad: NAT ← 4,
logWordsPerQuad: NAT,
realCache: CacheModels.Cache ← NIL,
mapCache: CacheModels.Cache ← NIL,
lineTable: SEQUENCE linesInCache: NAT OF HashEntry
];
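Each element of lineTable describes one cache line; the same HashEntry records are also threaded into hashVector chains so that lines can be found by hashed line address.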
CacheStats: TYPE = RECORD [
probes: INT ← 0,
readProbes: INT ← 0,
writeProbes: INT ← 0,
writeMisses: INT ← 0,
writeClean: INT ← 0,
chunkMisses: INT ← 0,
lineMisses: INT ← 0,
jumpMisses: INT ← 0,
mapMisses: INT ← 0,
dirtyWrites: INT ← 0,
missCount: ARRAY [1..maxQuadsPerLine] OF INT ← ALL[0]
];
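missCount[n] counts misses after which the missing line had n quads present; Print reports these counts as line occupancy percentages.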
HashLim: CARDINAL = 512;
HashVector: TYPE = REF HashVectorRep;
HashVectorRep: TYPE = ARRAY [0..HashLim) OF HashEntry;
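Each bucket of the hash vector heads a chain of HashEntry records linked through their next fields; the chains are searched and maintained in Access.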
LogBaseTwo: PROC [n: NAT] RETURNS [NAT] = {
IF n = 1 THEN RETURN[0] ELSE RETURN[1+LogBaseTwo[n/2]];
};
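For example, LogBaseTwo[4] = 2; NewCache uses it to derive logWordsPerQuad, the shift count that Access passes to Basics.BITSHIFT.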
NewCache: PUBLIC PROC [lines: NAT ← 100, quadsPerLine: NAT ← 2, wordsPerQuad: NAT ← 4, lru: BOOL ← FALSE, realCache, mapCache: CacheModels.Cache ← NIL] RETURNS [cache: CacheModels.Cache] = {
Creates a new associative cache with the given geometry and optional backing real and map caches.
IF quadsPerLine > maxQuadsPerLine THEN quadsPerLine ← maxQuadsPerLine;
IF lines > maxLines THEN lines ← maxLines;
cache ← NEW[CacheModels.CacheRec ← [
private: NEW[CacheDataRep[lines] ← [quadsPerLine: quadsPerLine, wordsPerQuad: wordsPerQuad, logWordsPerQuad: LogBaseTwo[wordsPerQuad], lru: lru, realCache: realCache, mapCache: mapCache, lineTable: NULL]],
fetch: Fetch, store: Store, reset: Reset, flush: Flush, print: Print,
data: NIL]];
Reset[cache];
};
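For example (illustrative parameters only), a 128-line pseudo-LRU cache with 4 quads per line and 4 words per quad could be created by:
cache ← NewCache[lines: 128, quadsPerLine: 4, wordsPerQuad: 4, lru: TRUE]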
Reset: PUBLIC CacheModels.CacheResetProc -- [cache: CacheModels.Cache] -- = {
Resets the given cache to its initial state (all empty).
private: CacheData = NARROW[cache.private];
private.pageList ← NIL;
private.hashVector ← NEW[HashVectorRep ← ALL[NIL]];
private.victimIndex ← 0;
private.pageEntryCount ← 0;
FOR i: NAT IN [0..private.linesInCache) DO
private[i] ← NEW[HashEntryRep ← [index: i]];
ENDLOOP;
private.stats ← []; -- zero out statistics
IF private.realCache # NIL THEN private.realCache.reset[private.realCache];
IF private.mapCache # NIL THEN private.mapCache.reset[private.mapCache];
};
Flush: PUBLIC CacheModels.CacheFlushProc -- [cache: CacheModels.Cache] -- = {
Empties the given cache (all lines invalid); unlike Reset, the accumulated statistics are preserved.
private: CacheData = NARROW[cache.private];
private.pageList ← NIL;
private.hashVector^ ← ALL[NIL];
private.victimIndex ← 0;
private.pageEntryCount ← 0;
FOR i: NAT IN [0..private.linesInCache) DO
private[i]^ ← [index: i];
ENDLOOP;
IF private.realCache # NIL THEN private.realCache.flush[private.realCache];
IF private.mapCache # NIL THEN private.mapCache.flush[private.mapCache];
};
Print: PUBLIC CacheModels.CachePrintProc -- [cache: CacheModels.Cache, stream: STREAM, name: ROPE, resetStatistics: BOOL ← TRUE] -- = {
Realn: PROC [r: REAL, n: NAT] RETURNS [IO.Value] = {
RETURN [[rope[Convert.RopeFromReal[r, n]]]]};
private: CacheData = NARROW[cache.private];
quadMisses: INT ← private.stats.chunkMisses;
misses: INT ← quadMisses + private.stats.lineMisses;
probes: INT ← private.stats.probes;
rProbes: REAL ← probes;
rMisses: REAL ← misses;
stream.PutF[
"\nStats for %g (associative, lines: %g, quads/line: %g, words/quad: %g)\n probes: %g",
[rope[IF private.lru THEN Rope.Cat[name, " (pseudo-lru)"] ELSE name]],
[integer[private.linesInCache]], [integer[private.quadsPerLine]], [integer[private.wordsPerQuad]], [integer[probes]]];
IF probes = 0 THEN RETURN;
stream.PutF[
" (read: %g, write: %g)\n",
[integer[private.stats.readProbes]], [integer[private.stats.writeProbes]]];
stream.PutF[
" write misses: %g, writes to clean quads: %g\n",
[integer[private.stats.writeMisses]], [integer[private.stats.writeClean]]];
stream.PutF[
" misses: %g (%g (%g%%) existing line)\n",
[integer[misses]], [integer[private.stats.chunkMisses]],
IF misses = 0 THEN [rope["-"]] ELSE Realn[(quadMisses/rMisses)*100, 3]];
IF private.stats.jumpMisses # 0 THEN stream.PutF[
" misses caused by jumps: %g (%g%% of probes)\n",
[integer[private.stats.jumpMisses]], Realn[(private.stats.jumpMisses/rProbes)*100, 3]];
IF probes # 0 THEN stream.PutF[
" miss rate: %g%%\n",
Realn[(misses/rProbes)*100, 3]];
stream.PutF[
" map misses: %g (%g%%)\n dirty writes: %g\n",
[integer[private.stats.mapMisses]], Realn[(private.stats.mapMisses/rProbes)*100, 3], [integer[private.stats.dirtyWrites]]];
IF misses # 0 THEN {
stream.PutF[" line occupancy:\n"];
FOR i: INT IN [1..private.quadsPerLine] DO
stream.PutF[" %g quad(s)/line: %g\n", [integer[i]], Realn[(private.stats.missCount[i]/rMisses)*100, 3]];
ENDLOOP;
};
};
Fetch: PUBLIC CacheModels.CacheFetchProc -- [cache: CacheModels.Cache, addr: DragOpsCross.Word, fromJump: BOOL ← FALSE] -- = {
Access[cache, addr, fromJump, FALSE];
};
Store: PUBLIC CacheModels.CacheStoreProc -- [cache: CacheModels.Cache, addr: DragOpsCross.Word] -- = {
Access[cache, addr, FALSE, TRUE];
};
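Both Fetch and Store funnel into Access, which does the lookup, victim selection, and bookkeeping.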
Access: PROC [cache: Cache, addr: Word, fromJump, write: BOOL] = {
data: CacheData = NARROW[cache.private];
oldEntry: BOOL ← TRUE;
victim: CARDINAL ← data.victimIndex;
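Break addr into its line address and quad index: indexInLine is the word offset within the line, chunk is the quad containing that word, and lineAddr is the address of word 0 of the line.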
wordsPerLine: NAT = data.quadsPerLine * data.wordsPerQuad;
indexInLine: NAT = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
chunk: QuadIndex = Basics.BITSHIFT[indexInLine, -data.logWordsPerQuad];
lineAddr: Word = AddDelta[-INT[indexInLine], addr];
hashIndex: CARDINAL;
hashEntry: HashEntry;
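The hash of a line address is the XOR of its two halves, folded with a byte shift; the result mod HashLim selects a bucket in hashVector.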
halfHash: Half ← HalfXor[WordToHalves[lineAddr][0], WordToHalves[lineAddr][1]];
data.stats.probes ← data.stats.probes + 1;
IF write THEN data.stats.writeProbes ← data.stats.writeProbes + 1 ELSE data.stats.readProbes ← data.stats.readProbes + 1;
halfHash ← HalfXor[halfHash, HalfShift[halfHash, -8]];
hashIndex ← HalfToCard[halfHash] MOD HashLim;
hashEntry ← data.hashVector[hashIndex];
Check if the entry for addr is in our hash table
WHILE hashEntry # NIL DO
IF hashEntry.lineAddr = lineAddr THEN {
IF data.lru THEN hashEntry.referenced[chunk] ← TRUE;
IF write THEN {
IF NOT hashEntry.dirty[chunk] THEN data.stats.writeClean ← data.stats.writeClean + 1;
hashEntry.dirty[chunk] ← TRUE;
};
IF ~hashEntry.chunkPresent[chunk] THEN {
nPresentBits: NAT ← 0;
IF data.realCache # NIL THEN
data.realCache.fetch[data.realCache, addr];
IF write THEN data.stats.writeMisses ← data.stats.writeMisses + 1;
IF fromJump THEN data.stats.jumpMisses ← data.stats.jumpMisses + 1;
data.stats.chunkMisses ← data.stats.chunkMisses + 1;
hashEntry.chunkPresent[chunk] ← TRUE;
Find out how many present bits are set, and update stats
FOR i: INT IN [0..data.quadsPerLine) DO
IF hashEntry.chunkPresent[i] THEN nPresentBits ← nPresentBits+1;
ENDLOOP;
data.stats.missCount[nPresentBits] ← data.stats.missCount[nPresentBits]+1;
};
RETURN;
};
hashEntry ← hashEntry.next;
ENDLOOP;
Entry for addr is not in the hash table; update miss statistics
IF fromJump THEN data.stats.jumpMisses ← data.stats.jumpMisses + 1;
data.stats.lineMisses ← data.stats.lineMisses + 1;
IF write THEN data.stats.writeMisses ← data.stats.writeMisses + 1;
data.stats.missCount[1] ← data.stats.missCount[1]+1;
{
Check for map miss
indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
pageAddr: Word = AddDelta[-INT[indexInPage], lineAddr];
pageId: Word ← SingleWordShiftRight[pageAddr, CacheModels.logWordsPerPage];
FOR pageEntry: PageEntry ← data.pageList, pageEntry.next WHILE pageEntry # NIL DO
IF pageEntry.pageAddr = pageAddr THEN GOTO Found;
ENDLOOP;
data.stats.mapMisses ← data.stats.mapMisses + 1;
IF data.mapCache # NIL THEN data.mapCache.fetch[data.mapCache, pageId];
EXITS Found => NULL;
};
{
Find victim; just use the current victim pointer if pseudo-LRU was not requested, otherwise find the first entry with all reference bits cleared
hashEntry ← data.lineTable[victim];
IF data.lru THEN {
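Clock-like scan: the referenced bits of each line passed over are cleared, so a line is evicted only if it has not been referenced since it was last passed over.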
WHILE hashEntry.referenced # None DO
hashEntry.referenced ← None;
victim ← victim+1;
IF victim = data.linesInCache THEN victim ← 0;
hashEntry ← data.lineTable[victim];
ENDLOOP;
data.victimIndex ← victim;
};
};
{
The victim must be removed from the hash table and page table.
lag: PageEntry ← NIL;
oldLineAddr: Word = hashEntry.lineAddr;
oldIndexInPage: CARDINAL = HalfToCard[LowHalf[oldLineAddr]] MOD wordsPerPage;
oldPageAddr: Word = AddDelta[-INT[oldIndexInPage], oldLineAddr];
headHashEntry: HashEntry;
oldHashIndex: CARDINAL;
oldHalfHash: Half ← HalfXor[
WordToHalves[oldLineAddr][0], WordToHalves[oldLineAddr][1]];
oldHalfHash ← HalfXor[oldHalfHash, HalfShift[oldHalfHash, -8]];
oldHashIndex ← HalfToCard[oldHalfHash] MOD HashLim;
headHashEntry ← data.hashVector[oldHashIndex];
Maintain the hash table by removing the victim from it. We must be prepared for the entry not to be in the hash table at all if the entry is brand new.
IF headHashEntry = hashEntry
THEN data.hashVector[oldHashIndex] ← hashEntry.next
ELSE WHILE headHashEntry # NIL DO
IF hashEntry = headHashEntry.next THEN {
headHashEntry.next ← hashEntry.next;
EXIT};
headHashEntry ← headHashEntry.next
ENDLOOP;
Now we need to maintain the page table. We must be prepared for the victim's page not to be in the page table at all if the entry is brand new.
FOR pageEntry: PageEntry ← data.pageList, pageEntry.next WHILE pageEntry # NIL DO
IF pageEntry.pageAddr = oldPageAddr THEN {
Decrement the use count for this page (if an entry already exists)
IF (pageEntry.useCount ← pageEntry.useCount - 1) <= 0 THEN {
Remove this page entry from the list and put it on the free page list.
IF lag = NIL THEN data.pageList ← pageEntry.next ELSE lag.next ← pageEntry.next;
data.pageEntryCount ← data.pageEntryCount - 1;
pageEntry.next ← data.freePageList;
data.freePageList ← pageEntry;
};
EXIT
};
lag ← pageEntry;
ENDLOOP;
IF hashEntry.dirty # None THEN {
FOR i: NAT IN [0..data.quadsPerLine) DO
IF hashEntry.dirty[i] THEN {
IF data.realCache # NIL THEN
data.realCache.store[data.realCache, AddDelta[i, oldLineAddr]];
data.stats.dirtyWrites ← data.stats.dirtyWrites + 1;
};
ENDLOOP;
hashEntry.dirty ← None;
};
};
At this point we need to read in the quad word from the memory.
{
indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
pageAddr: Word = AddDelta[-INT[indexInPage], lineAddr];
pageEntry: PageEntry ← data.pageList;
Maintain the hash table
hashEntry.next ← data.hashVector[hashIndex];
data.hashVector[hashIndex] ← hashEntry;
hashEntry.lineAddr ← lineAddr;
hashEntry.chunkPresent ← None; hashEntry.chunkPresent[chunk] ← TRUE;
IF data.realCache # NIL THEN
data.realCache.fetch[data.realCache, addr];
WHILE pageEntry # NIL DO
IF pageEntry.pageAddr = pageAddr THEN {
Increment the use count for this page (if an entry already exists). Then return.
pageEntry.useCount ← pageEntry.useCount + 1;
GO TO oldEntry;
};
pageEntry ← pageEntry.next;
ENDLOOP;
This entry is brand new, so add it to the page list (the map miss, if any, was counted above). Note that at this point pageEntry = NIL.
data.pageEntryCount ← data.pageEntryCount + 1;
pageEntry ← data.freePageList;
IF pageEntry = NIL
THEN pageEntry ← NEW[PageEntryRep]
ELSE data.freePageList ← pageEntry.next;
pageEntry^ ← [next: data.pageList, pageAddr: pageAddr, useCount: 1];
data.pageList ← pageEntry;
EXITS oldEntry => NULL;
};
IF write THEN {
IF NOT hashEntry.dirty[chunk] THEN data.stats.writeClean ← data.stats.writeClean + 1;
hashEntry.dirty[chunk] ← TRUE;
};
At this point we have to advance the victim pointer, since this newly retrieved entry clearly should not be the next victim.
victim ← victim + 1;
data.victimIndex ← IF victim = data.linesInCache THEN 0 ELSE victim;
};
END.
ChangeLog:
November 23, 1985 1:25:13 am PST: Introduced pseudo-LRU victim selection, and got rid of the advance-on-hit method.