LizardCacheImpl.mesa
Copyright © 1984, 1985, 1986 by Xerox Corporation. All rights reserved.
Russ Atkinson (RRA) April 21, 1987 3:25:32 pm PDT
DIRECTORY
DragOpsCross USING [Half, KernalLimit, TrapIndex, Word, wordsPerPage, ZerosWord],
DragOpsCrossUtils USING [AddDelta, HalfShift, HalfToCard, HalfXor, LowHalf, WordToCard, WordToHalves],
LizardCache USING [CacheBase, CacheBaseRep, CacheFetchProc, CacheStoreProc, SharedBase, SharedBaseRep],
SparseMemory USING [Base, FetchPage, Page];
LizardCacheImpl:
CEDAR
PROGRAM
IMPORTS DragOpsCrossUtils, SparseMemory
EXPORTS LizardCache
= BEGIN OPEN DragOpsCross, DragOpsCrossUtils, LizardCache;
Private types
-- A PageEntry records one memory page that currently has cache lines resident.
-- Access keeps these on a singly linked list (CacheDataRep.pageList) to model
-- the map; useCount counts how many cache lines fall within the page.
-- NOTE(review): readOnly is stored (always FALSE at creation) but never read
-- in this file; presumably reserved for protection modeling, confirm.
PageEntry: TYPE = REF PageEntryRep;
PageEntryRep:
TYPE =
RECORD [
next: PageEntry ← NIL,
pageAddr: Word ← ZerosWord,
useCount: INT ← 0,
readOnly: BOOL ← FALSE
];
-- A HashEntry is one cache line: a quad word of data plus its line address.
-- Every entry lives permanently in the lineTable at position index; a valid
-- entry is additionally linked onto one hash chain via next.
-- dirty means the line differs from memory and must be written back on
-- eviction or flush.
-- NOTE(review): shared is declared but never referenced in this file, and
-- readOnly is tested in LocalStore but never set here; presumably set by
-- other code holding these entries, confirm.
HashEntry: TYPE = REF HashEntryRep;
HashEntryRep:
TYPE =
RECORD [
next: HashEntry ← NIL,
lineAddr: Word ← ZerosWord,
words: ARRAY [0..wordsPerLine) OF Word ← ALL[ZerosWord],
index: NAT ← 0,
dirty, readOnly, shared: BOOL ← FALSE
];
-- Private per cache state, stored in CacheBase.private and recovered with
-- NARROW.  Holds the hash vector, the active page list that models the map,
-- a free list recycling PageEntry records, and the round robin victim
-- pointer into the lineTable sequence.
CacheData: TYPE = REF CacheDataRep;
CacheDataRep:
TYPE =
RECORD [
hashVector: HashVector ← NIL,
pageEntryCount: INT ← 0,
pageList: PageEntry ← NIL,
freePageList: PageEntry ← NIL,
victimIndex: CARDINAL ← 0,
lineTable: SEQUENCE linesInCache: NAT OF HashEntry
];
-- Heads of the hash chains, indexed by the folded line address hash
-- computed in Access (xor of halves, folded by an 8 bit shift, MOD HashLim).
HashVector: TYPE = REF HashVectorRep;
HashVectorRep: TYPE = ARRAY [0..HashLim) OF HashEntry;
HashLim: CARDINAL = 512;
-- ALUCondFalse doubles as the distinguished no trap status value.
NoTrap: DragOpsCross.TrapIndex = ALUCondFalse;
-- Cache lines are quad words.
wordsPerLine: NAT = 4;
wordsPerPage: CARDINAL = DragOpsCross.wordsPerPage;
Cost (in reject cycles) for various operations; these are all estimates
-- These are variables, not constants, so clients may tune the cost model.
cyclesToArbitrate: NAT ← 3;
cyclesToReadFirst:
NAT ← 4;
cyclesToReadRest: NAT ← 3;
cyclesToWriteQuad: NAT ← 6;
cyclesToReadMap: NAT ← 4;
NewBase:
PUBLIC
PROC [mem: SparseMemory.Base]
RETURNS [SharedBase] = {
Creates a new cache on the specified sparse memory.
-- SharedBaseRep is declared in the LizardCache interface; the second
-- positional component is presumably busyUntil, started at 0 so the
-- shared bus begins idle.  TODO confirm against the interface.
RETURN [NEW[SharedBaseRep ← [mem, 0]]];
};
NewCache:
PUBLIC
PROC [shared: SharedBase, lines: [0..4096) ← 0]
RETURNS [CacheBase] = {
Creates a new cache on the specified shared memory.
-- lines = 0 selects the default size of 64 cache lines.
-- The cache is wired to the local fetch and store procedures below, then
-- initialized through ResetCache so all lines start empty.
base: CacheBase ← NEW[CacheBaseRep ← [NIL, NIL, LocalFetch, LocalStore, NIL, []]];
private: CacheData ← NEW[CacheDataRep[IF lines = 0 THEN 64 ELSE lines]];
base.private ← private;
base.sharedBase ← shared;
ResetCache[base];
RETURN [base];
};
ResetCache:
PUBLIC
PROC [cache: CacheBase] = {
Resets the given cache to its initial state (all empty).
private: CacheData = NARROW[cache.private];
private.pageList ← NIL;
-- A fresh all NIL hash vector empties the cache; the old entries and any
-- old lines simply become garbage.
private.hashVector ← NEW[HashVectorRep ← ALL[NIL]];
private.victimIndex ← 0;
private.pageEntryCount ← 0;
cache.sharedBase.busyUntil ← 0;
-- Allocate a fresh zeroed line for every slot.  None of these entries are
-- on a hash chain, so every lookup misses until Access fills them in;
-- Access is written to tolerate evicting such brand new entries.
FOR i:
NAT
IN [0..private.linesInCache)
DO
private[i] ← NEW[HashEntryRep ← [index: i]];
ENDLOOP;
-- Clear the statistics record as well.
cache.stats ← [];
};
FlushCache:
PUBLIC
PROC [cache: CacheBase] = {
Makes the underlying memory agree with the current entry. No stats update.
-- Writes every dirty line back to its home page in the sparse memory.
-- NOTE(review): line.dirty is NOT cleared after write back, unlike the
-- eviction path in Access; a later eviction of the same line will rewrite
-- it and charge cyclesToWriteQuad again.  Harmless to memory contents but
-- affects cycle accounting; confirm this is intended.
data: CacheData = NARROW[cache.private];
FOR i:
NAT
IN [0..data.linesInCache)
DO
line: HashEntry = data[i];
IF line.dirty
THEN {
mem: SparseMemory.Base = cache.sharedBase.mem;
page: SparseMemory.Page = SparseMemory.FetchPage[mem, line.lineAddr];
indexInPage: CARDINAL = HalfToCard[LowHalf[line.lineAddr]] MOD wordsPerPage;
-- Copy the quad word into the page.  The inner i shadows the outer
-- loop index, which is legal but easy to misread.
FOR i: [0..wordsPerLine)
IN [0..wordsPerLine)
DO
page.words[indexInPage+i] ← line.words[i];
ENDLOOP;
};
ENDLOOP;
};
LocalFetch: CacheFetchProc = {
[base: CacheBase, addr: Word, cycle: INT, userMode: BOOL, noEffect: BOOL ← FALSE]
RETURNS [data: Word, status: TrapIndex, rejectCycles: INT];
-- Fetch one word through the cache.  The word offset within the quad line
-- is split off so Access always receives a line aligned address.
-- NOTE(review): userMode and noEffect are ignored here, although LocalStore
-- does enforce a userMode protection check; confirm fetches need none.
indexInLine: [0..wordsPerLine) = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
hashEntry: HashEntry;
[hashEntry, rejectCycles] ← Access[base, AddDelta[-INT[indexInLine], addr], cycle];
-- Access returns NIL only when the page is unmapped: report a map fault.
IF hashEntry =
NIL
THEN
RETURN [ZerosWord, MapFault, rejectCycles];
RETURN [hashEntry.words[indexInLine], NoTrap, rejectCycles];
};
LocalStore: CacheStoreProc = {
[base: CacheBase, addr: Word, data: Word, cycle: INT, userMode: BOOL]
RETURNS [old: Word, status: TrapIndex, rejectCycles: INT];
-- Store one word through the cache, returning the value it overwrote.
indexInLine: [0..wordsPerLine) = HalfToCard[LowHalf[addr]] MOD wordsPerLine;
hashEntry: HashEntry;
[hashEntry, rejectCycles] ← Access[base, AddDelta[-INT[indexInLine], addr], cycle];
IF hashEntry =
NIL
THEN
RETURN [ZerosWord, MapFault, rejectCycles];
-- User mode writes into the kernel region fault before touching the line.
IF userMode
AND WordToCard[addr] < KernalLimit
THEN
RETURN [ZerosWord, MemAccessFault, rejectCycles];
old ← hashEntry.words[indexInLine];
-- A read only line faults, but still reports the old contents.
IF hashEntry.readOnly THEN RETURN [old, MemAccessFault, rejectCycles];
-- Write through to the line only; memory is updated at eviction or flush.
hashEntry.words[indexInLine] ← data;
hashEntry.dirty ← TRUE;
status ← NoTrap;
};
-- Replacement policy knob.  Both methods advance the victim pointer after
-- filling a line on a miss; shiftOnHit additionally advances past a victim
-- entry that takes a hit, so a recently hit line is not the next victim.
Method: TYPE = {advanceOnMiss, shiftOnHit};
method: Method ← shiftOnHit;
Access:
PROC
[cache: CacheBase, lineAddr: Word, cycles:
INT]
RETURNS [hashEntry: HashEntry, rejectCycles:
INT ← 0] = {
-- Core lookup shared by LocalFetch and LocalStore.  lineAddr must be quad
-- word aligned.  On a hit the matching entry is returned with zero reject
-- cycles.  On a miss the current victim line is evicted, written back if
-- dirty, the quad word is read from memory, the page list that models the
-- map is maintained, and rejectCycles accumulates arbitration, memory and
-- map costs.  Returns hashEntry = NIL when the page is unmapped.
BumpReject:
PROC [n:
CARDINAL] =
INLINE {
-- Charge n reject cycles and keep the shared bus busy that much longer.
rejectCycles ← rejectCycles + n;
busyUntil ← busyUntil + n;
cache.sharedBase.busyUntil ← busyUntil;
};
data: CacheData = NARROW[cache.private];
oldEntry: BOOL ← TRUE;
victim: CARDINAL ← data.victimIndex;
-- busyUntil is only initialized on the miss path; BumpReject is only
-- called after that initialization, so the hit path never reads it.
busyUntil: INT;
hashIndex: CARDINAL;
-- Hash the line address: xor the two halves, then fold the top byte in.
halfHash: Half ← HalfXor[WordToHalves[lineAddr][0], WordToHalves[lineAddr][1]];
halfHash ← HalfXor[halfHash, HalfShift[halfHash, -8]];
hashIndex ← HalfToCard[halfHash] MOD HashLim;
hashEntry ← data.hashVector[hashIndex];
cache.stats.probes ← cache.stats.probes + 1;
-- Walk the hash chain looking for a hit.
WHILE hashEntry #
NIL
DO
IF hashEntry.lineAddr = lineAddr
THEN {
IF hashEntry.index = victim
AND method = shiftOnHit
THEN {
We make the victim index point at the next cache entry
victim ← victim + 1;
data.victimIndex ← IF victim = data.linesInCache THEN 0 ELSE victim;
};
RETURN;
};
hashEntry ← hashEntry.next;
ENDLOOP;
-- Miss: pick up the shared bus state and select the victim line.
busyUntil ← cache.sharedBase.busyUntil;
cache.stats.misses ← cache.stats.misses + 1;
hashEntry ← data.lineTable[victim];
IF busyUntil > cycles
THEN {
Allow for bus to become available. Note that we should NOT bump busyUntil, since we are really just trying to get the cycle counter up to busyUntil.
rejectCycles ← busyUntil - cycles;
IF rejectCycles > 10000
THEN rejectCycles ← 10000;
Just in case things get weird
};
BumpReject[cyclesToArbitrate]; -- allow for bus arbitration cost
{
The victim must be removed from the hash table and page table.
lag: PageEntry ← NIL;
oldLineAddr: Word = hashEntry.lineAddr;
oldIndexInPage: CARDINAL = HalfToCard[LowHalf[oldLineAddr]] MOD wordsPerPage;
oldPageAddr: Word = AddDelta[-INT[oldIndexInPage], oldLineAddr];
IF oldEntry
THEN {
headHashEntry: HashEntry;
oldHashIndex: CARDINAL;
-- Recompute the hash of the victim's old address to find its chain.
oldHalfHash: Half ← HalfXor[
WordToHalves[oldLineAddr][0], WordToHalves[oldLineAddr][1]];
oldHalfHash ← HalfXor[oldHalfHash, HalfShift[oldHalfHash, -8]];
oldHashIndex ← HalfToCard[oldHalfHash] MOD HashLim;
headHashEntry ← data.hashVector[oldHashIndex];
Maintain the hash table by removing the victim from the table. We must be prepared for the entry to not be in the hash table at all if the entry is brand new.
IF headHashEntry = hashEntry
THEN data.hashVector[oldHashIndex] ← hashEntry.next
ELSE
WHILE headHashEntry #
NIL
DO
IF hashEntry = headHashEntry.next
THEN {
headHashEntry.next ← hashEntry.next;
EXIT};
headHashEntry ← headHashEntry.next
ENDLOOP;
Now we need to maintain the page table. We must be prepared for the entry to not be in the hash table at all if the entry is brand new.
FOR pageEntry: PageEntry ← data.pageList, pageEntry.next
WHILE pageEntry #
NIL
DO
IF pageEntry.pageAddr = oldPageAddr
THEN {
Decrement the use count for this page (if an entry already exists)
IF (pageEntry.useCount ← pageEntry.useCount - 1) <= 0
THEN {
Remove this page entry from the list and put it on the free page list.
IF lag = NIL THEN data.pageList ← pageEntry.next ELSE lag.next ← pageEntry.next;
data.pageEntryCount ← data.pageEntryCount - 1;
pageEntry.next ← data.freePageList;
data.freePageList ← pageEntry;
};
EXIT
};
lag ← pageEntry;
ENDLOOP;
};
IF hashEntry.dirty
THEN {
The victim is dirty, so we need to write it back to its home page.
mem: SparseMemory.Base = cache.sharedBase.mem;
oldPage: SparseMemory.Page = SparseMemory.FetchPage[mem, oldPageAddr];
cache.stats.dirtyWrites ← cache.stats.dirtyWrites + 1;
FOR i: [0..wordsPerLine)
IN [0..wordsPerLine)
DO
oldPage.words[oldIndexInPage+i] ← hashEntry.words[i];
ENDLOOP;
BumpReject[cyclesToWriteQuad];
hashEntry.dirty ← FALSE;
};
};
At this point we need to read in the quad word from the memory.
{
indexInPage: CARDINAL = HalfToCard[LowHalf[lineAddr]] MOD wordsPerPage;
pageAddr: Word = AddDelta[-INT[indexInPage], lineAddr];
mem: SparseMemory.Base = cache.sharedBase.mem;
-- NOTE(review): FetchPage is given lineAddr here but oldPageAddr at the
-- write back above; presumably it accepts any address within the page,
-- as FlushCache also passes an unaligned line address.  Confirm.
page: SparseMemory.Page = SparseMemory.FetchPage[mem, lineAddr];
pageEntry: PageEntry ← data.pageList;
-- Unmapped page: map fault, charged a fixed 3 reject cycles.
IF page = NIL THEN RETURN [NIL, 3];
Maintain the hash table
hashEntry.next ← data.hashVector[hashIndex];
data.hashVector[hashIndex] ← hashEntry;
BumpReject[cyclesToReadFirst];
FOR i: [0..wordsPerLine)
IN [0..wordsPerLine)
DO
hashEntry.words[i] ← page.words[indexInPage+i];
ENDLOOP;
hashEntry.lineAddr ← lineAddr;
-- Search the page list; a hit there means the map entry was resident.
WHILE pageEntry #
NIL
DO
IF pageEntry.pageAddr = pageAddr
THEN {
Increment the use count for this page (if an entry already exists). Then return.
pageEntry.useCount ← pageEntry.useCount + 1;
GO TO oldEntry;
};
pageEntry ← pageEntry.next;
ENDLOOP;
This entry is brand new, so add it to the list and bump the reject cycles to show that we got a map miss. Note that at this point pageEntry = NIL.
data.pageEntryCount ← data.pageEntryCount + 1;
cache.stats.mapMisses ← cache.stats.mapMisses + 1;
-- Recycle a PageEntry from the free list when possible.
pageEntry ← data.freePageList;
IF pageEntry =
NIL
THEN pageEntry ← NEW[PageEntryRep]
ELSE data.freePageList ← pageEntry.next;
pageEntry^ ← [next: data.pageList, pageAddr: pageAddr, useCount: 1, readOnly: FALSE];
data.pageList ← pageEntry;
BumpReject[cyclesToReadMap];
EXITS oldEntry => {};
};
Finally, show that the bus is tied up until the read is finished.
cache.sharedBase.busyUntil ← busyUntil + cyclesToReadRest;
At this point we have to advance the victim pointer, since in either method this newly retrieved entry clearly should not be the new victim.
victim ← victim + 1;
data.victimIndex ← IF victim = data.linesInCache THEN 0 ELSE victim;
cache.stats.rejectCycles ← cache.stats.rejectCycles + rejectCycles;
END.