VMReplacementImpl.mesa
Copyright © 1985 by Xerox Corporation. All rights reserved.
Levin on January 20, 1984 4:03 pm
Bob Hagmann on May 4, 1984 12:03:43 pm PDT
Russ Atkinson, January 30, 1985 10:29:04 pm PST
DIRECTORY
PrincOps USING [flagsVacant, flagsClean, Port],
PrincOpsUtils USING [DisableInterrupts, EnableInterrupts, GetReturnLink, SetReturnLink],
VMEmergency USING [EmergencyList],
VMInternal USING [AgeInternal, Crash, DataState, freeList, freePages, GetVMMap, InOut, lastRealPage, PageNumber, PageStateFromFlags, RealPageNumber, rmMap, RMMapEntry, SetVMMap, Victim, VMMapEntry, vmStateLock],
VMStatistics USING [pinnedPages];
-- Real-memory replacement (victim selection) half of the VM implementation.
-- All entries are serialized by the module monitor, VMInternal.vmStateLock.
VMReplacementImpl:
MONITOR
LOCKS VMInternal.vmStateLock
IMPORTS PrincOpsUtils, VMInternal, VMStatistics
EXPORTS VMEmergency, VMInternal, VMStatistics
SHARES VMInternal = BEGIN OPEN VMInternal;
-- This module is logically part of VMStateImpl, but merging the two of them causes the compiler to break!
-- Exports to VMStatistics
-- Reclamation counters: total reclamations, and a breakdown of where each
-- victim came from (free list, clean page found on the first vs. a later
-- scan pass, dirty fallback), plus the number of complete scan passes.
rmReclamations: PUBLIC INT ← 0;
rmFreeList, rmOldClean, rmNewClean, rmDirty: PUBLIC INT ← 0;
rmAllocPasses: PUBLIC INT ← 0;
-- Exports to VMInternal
-- Real Memory Allocation
-- Position at which the replacement scan resumes; the scan proceeds
-- downward through real page numbers, wrapping from FIRST to lastRealPage.
allocationRover: RealPageNumber ← RealPageNumber.
FIRST;
-- rover for AllocateRealMemoryInternal
-- Allocates a real page and tentatively assigns it to vmPage, returning the
-- victim description (clean, or dirty with the page that must be written).
-- The "procedure" is actually one end of a coroutine: its code pointer is a
-- LOOPHOLE onto the PORT below, so calling it transfers control into the
-- loop set up by InitializeAllocateRealMemoryInternal.
AllocateRealMemoryInternal:
PUBLIC PROC [
vmPage: PageNumber, dirtyVictimOK: BOOL ← TRUE, pin: BOOL ← FALSE]
RETURNS [victim: Victim] ← LOOPHOLE[@AwaitAllocateRealMemoryInternal];
-- This procedure allocates real memory and tentatively assigns it to the
-- specified vmPage. Actually, this is one half of the coroutine linkage
-- whose other half is AwaitAllocateRealMemoryInternal. The actual algorithm
-- is in InitializeAllocateRealMemoryInternal.
-- INTERNAL procedures
-- The PORT on which the allocator coroutine parks between requests: it
-- yields the previous victim and receives the next request's arguments.
AwaitAllocateRealMemoryInternal:
PORT [victim: Victim]
RETURNS [vmPage: PageNumber, dirtyVictimOK, pin: BOOL];
-- Body of the real-memory allocator, written as a coroutine: after one-time
-- setup it loops forever, parking on AwaitAllocateRealMemoryInternal until
-- AllocateRealMemoryInternal transfers in with a request.  Runs inside the
-- module monitor (vmStateLock), so vmMap/rmMap state is consistent here.
-- Policy: take the free list if nonempty; otherwise scan rmMap downward
-- from allocationRover (a second-chance scheme).  The first pass ages
-- referenced clean pages instead of taking them; a dirty page is used only
-- as a fallback (when dirtyVictimOK), preferring an unreferenced one; the
-- emergency list is the last resort for frame faults.
InitializeAllocateRealMemoryInternal:
PROC RETURNS [victim: Victim] = {
-- Record our return link as the PORT's destination so that calls on the
-- LOOPHOLE'd AllocateRealMemoryInternal transfer control back into the loop.
LOOPHOLE[AwaitAllocateRealMemoryInternal, PrincOps.Port].dest ← PrincOpsUtils.GetReturnLink[];
DO
vmPage: PageNumber;
dirtyVictimOK, pin: BOOL;
-- Park until the next request, delivering the previous iteration's victim.
[vmPage, dirtyVictimOK, pin] ← AwaitAllocateRealMemoryInternal[victim];
-- The following is to permit the debugger to trace the stack.
PrincOpsUtils.SetReturnLink[LOOPHOLE[AwaitAllocateRealMemoryInternal, PrincOps.Port].dest];
BEGIN
-- This block is the body of the actual allocation algorithm.
targetVMEntry: VMMapEntry ← GetVMMap[vmPage];
WITH tVM: targetVMEntry
SELECT InOut[targetVMEntry]
FROM
out => {
IF freePages > 0
THEN {
-- There is something on the free list.
victim ← [realPage: freeList, body: clean[]];
WITH rmMap[freeList]
SELECT
FROM
rmE: free RMMapEntry => {freeList ← rmE.next; freePages ← freePages.PRED};
ENDCASE => Crash[]; -- free list trashed
--*stats*-- rmFreeList ← rmFreeList.SUCC;
}
ELSE {
-- The free list is empty; reclamation is necessary.
current: RealPageNumber ← allocationRover;
firstPass: BOOL ← TRUE;
secondPassWorthWhile: BOOL ← FALSE;
victimP: PageNumber;
-- Best dirty candidate seen so far (none until one is found); an
-- unreferenced dirty page is preferred over a referenced one.
dirtyVictimState: {none, unreferenced, referenced} ← none;
dirtyVictimRP: RealPageNumber;
dirtyVictimDataState: DataState;
DO
-- Step the rover down one real page, wrapping from FIRST to lastRealPage.
IF current = RealPageNumber.
FIRST
THEN {
current ← lastRealPage;
--*stats*-- rmAllocPasses ← rmAllocPasses.SUCC;
}
ELSE current ← current.PRED;
IF current = allocationRover
THEN
-- a pass has completed
SELECT
TRUE
FROM
firstPass AND secondPassWorthWhile => firstPass ← FALSE;
dirtyVictimOK =>
-- No clean victim anywhere; fall back to the remembered dirty page.
IF dirtyVictimState ~= none
THEN {
--*stats*-- rmDirty ← rmDirty.SUCC;
WITH rmMap[dirtyVictimRP]
SELECT
FROM
rmE: reclaimable RMMapEntry => {
victimP ← rmE.virtual;
victim ← [realPage: dirtyVictimRP, body: dirty[vmPage: victimP]];
-- Detach the victim's virtual page: mark it vacant/out so the caller
-- can write it to backing store before the real page is reused.
SetVMMap[victimP, [
state: VMInternal.PageStateFromFlags[PrincOps.flagsVacant],
body: out[
checkedOut: FALSE,
readOnly: GetVMMap[victimP].state.flags.readonly,
dataState: dirtyVictimDataState]]
];
};
ENDCASE => Crash[]; -- dirtyVictimRP improperly set
EXIT
}
ELSE Crash[]; -- all memory pinned or in transit
emergencyList =
NIL => Crash[];
-- Frame fault and nothing in the emergency list
ENDCASE => {
-- Frame fault and no unpinned clean memory, but there is something in the
-- emergency list. The idea is to force it to be a clean victim, which will
-- then get snarfed up by the allocator.
FOR i:
NAT
IN [0..emergencyList.max)
DO
vp: PageNumber = emergencyList.pages[i];
IF vp # 0
THEN {
entry: VMMapEntry ← GetVMMap[vp];
WITH tE: entry
SELECT InOut[entry]
FROM
in => {
-- Force the real page to be unpinned, and the virtual page to be clean.
-- Also decrement the available count.
current ← tE.real;
IF rmMap[current].rmState = pinned
THEN
VMStatistics.pinnedPages ← VMStatistics.pinnedPages - 1;
rmMap[current] ← [
dataState: undefined,
needsBackingStoreWrite: FALSE,
body: reclaimable[virtual: vp]];
tE.state.flags ← PrincOps.flagsClean;
SetVMMap[vp, entry];
GO TO foundOne;
};
ENDCASE;
};
ENDLOOP;
Crash[];
EXITS foundOne => {
emergencyList.change ← emergencyList.change - 1;
};
};
-- Examine the real page now under the rover.
WITH rmMap[current]
SELECT
FROM
rmE: free RMMapEntry => Crash[]; -- the free list is supposed to be empty
rmE: reclaimable RMMapEntry => {
victimE: VMMapEntry;
-- We must disable interrupts to make sure that the dirty bit doesn't get
-- lost during the following machinations.
PrincOpsUtils.DisableInterrupts[];
victimE ← GetVMMap[victimP ← rmE.virtual];
-- A reclaimable rmMap entry will have an "out VMMapEntry" if it has
-- previously been allocated by this procedure but I/O is still in progress
-- at a higher level. (It will also be checked out.) Such entries are
-- therefore not considered as victims, just like quiescent, swapped out
-- pages.
WITH vE: victimE
SELECT InOut[victimE]
FROM
out => NULL;
in => {
vRefed: BOOL = vE.state.flags.referenced;
vDirty: BOOL = vE.state.flags.dirty OR rmE.needsBackingStoreWrite;
SELECT
TRUE
FROM
vE.real ~= current => Crash[]; -- vmMap/rmMap cross-links disagree
vRefed
AND firstPass
AND ~vDirty => {
-- On the first pass, we convert referenced pages to unreferenced pages,
-- but don't reclaim them.
AgeInternal[victimP, vE];
secondPassWorthWhile ← TRUE;
};
vDirty => {
IF vRefed THEN AgeInternal[victimP, vE];
-- We remember the first unreferenced dirty page or, if none, the first
-- referenced dirty page. Of course, an unreferenced page may be
-- referenced by the time (and if) it actually becomes a victim.
SELECT dirtyVictimState
FROM
none => {
dirtyVictimState ←
IF vRefed THEN referenced ELSE unreferenced;
dirtyVictimRP ← current;
dirtyVictimDataState ←
IF vE.state.flags.dirty THEN changed ELSE rmE.dataState;
};
unreferenced => NULL;
referenced =>
IF ~vRefed
THEN {
dirtyVictimState ← unreferenced;
dirtyVictimRP ← current;
dirtyVictimDataState ←
IF vE.state.flags.dirty THEN changed ELSE rmE.dataState;
};
ENDCASE;
};
ENDCASE => {
-- This page is clean and, if this is the first pass, it is unreferenced.
-- We've found our victim.
victim ← [realPage: current, body: clean[]];
SetVMMap[victimP,
[state: VMInternal.PageStateFromFlags[PrincOps.flagsVacant], body: out[
checkedOut: FALSE,
readOnly: vE.state.flags.readonly,
dataState: rmE.dataState]]
];
PrincOpsUtils.EnableInterrupts[];
--*stats*--
IF firstPass THEN rmOldClean ← rmOldClean.SUCC
ELSE rmNewClean ← rmNewClean.SUCC;
EXIT
};
};
ENDCASE;
PrincOpsUtils.EnableInterrupts[];
};
rmE: pinned RMMapEntry => NULL;
ENDCASE;
ENDLOOP;
allocationRover ← current; -- advance round-robin pointer
};
-- Claim the chosen real page for vmPage: pinned (pinCount 1) if requested,
-- otherwise reclaimable so a later scan may take it back.
rmMap[victim.realPage] ←
IF pin
THEN
RMMapEntry[dataState: tVM.dataState, needsBackingStoreWrite:
FALSE,
body: pinned[pinCount: 1]]
ELSE
RMMapEntry[dataState: tVM.dataState, needsBackingStoreWrite:
FALSE,
body: reclaimable[virtual: vmPage]];
--*stats*-- rmReclamations ← rmReclamations.SUCC;
};
in => Crash[]; -- already has real memory
ENDCASE;
END;
ENDLOOP;
};
-- Emergency pages for frame allocation
-- Pages that the allocator may forcibly reclaim when a frame fault occurs
-- and no unpinned clean memory remains; NIL until installed by a client of
-- VMEmergency -- TODO confirm which module sets it.
emergencyList: PUBLIC VMEmergency.EmergencyList ← NIL;
-- Initialization
-- Prime the coroutine: this first call performs the PORT setup and parks at
-- AwaitAllocateRealMemoryInternal; the returned victim is discarded.
[] ← InitializeAllocateRealMemoryInternal[];
END.