VMBackingImpl.mesa
Copyright © 1985 by Xerox Corporation. All rights reserved.
Levin on September 20, 1983 12:37 pm
Birrell, July 27, 1983 5:43 pm
Russ Atkinson (RRA) January 30, 1985 10:02:56 pm PST
DIRECTORY
DebuggerFormat USING [LabelChecksum, Run, VMRunTable],
DebuggerSwap USING [NoteVMRunTable],
Disk USING [Add, Channel, DriveAttributes, DoIO, Label, labelCheck, ok, PageNumber, Request, SameDrive, Status],
VM USING [AddressForPageNumber, lowCore],
VMBacking USING [Run, RunTable, RunTableIndex, RunTableObject, RunTablePageNumber],
VMInternal USING [Crash, Interval, IODirection, IOResult, PageCount, PageNumber, UpdateVMLimit];
VMBackingImpl: MONITOR
IMPORTS DebuggerFormat, DebuggerSwap, Disk, VM, VMInternal
EXPORTS VMBacking, VMInternal = BEGIN OPEN VMInternal, VMBacking;
Global variables protected by the monitor
backingLabel: Disk.Label;
firstBackingDataPage: RunTablePageNumber;
backingRunTable: LONG POINTER TO RunTableObject ← NIL;
debuggerRunTable: LONG POINTER TO DebuggerFormat.VMRunTable ← NIL;
Exports to VMBacking
AttachBackingStorage: PUBLIC ENTRY UNSAFE PROC [
label: Disk.Label, firstDataPage: RunTablePageNumber, runTable: RunTable] = {
Eventually, a more general mechanism may be necessary to permit mapped files. For now, we support only a single run.
-- Records the backing file's label and run table in monitor-protected storage,
-- builds the debugger's parallel run table (one entry per run at or beyond
-- firstDataPage), registers it with DebuggerSwap, and raises the VM limit.
-- Crashes if backing storage is already attached, or if firstDataPage does not
-- fall inside any run of the supplied table (the FINISHED arm below).
IF backingRunTable ~= NIL THEN Crash[];
backingLabel ← label;
firstBackingDataPage ← firstDataPage;
runTable.nRuns is the number of extents, but (see comment in interface) the run table is assumed to contain runTable.nRuns+1 entries.
-- Copy the client's table (including the end-marker entry at index nRuns)
-- into permanently resident lowCore storage.
backingRunTable ← VM.lowCore.NEW[RunTableObject[runTable.nRuns+1]];
backingRunTable.nDataPages ← runTable.nDataPages;
backingRunTable.nRuns ← runTable.nRuns;
FOR i: RunTableIndex IN [RunTableIndex.FIRST..runTable.nRuns] DO
backingRunTable.runs[i] ← runTable.runs[i];
ENDLOOP;
debuggerRunTable ← VM.lowCore.NEW[DebuggerFormat.VMRunTable[runTable.nRuns]];
debuggerRunTable.nRuns ← runTable.nRuns;
-- Find the run j containing firstDataPage; runs before j hold no VM data and
-- are omitted from the debugger's table (hence the i-j index shift below).
FOR j: RunTableIndex IN [RunTableIndex.FIRST..runTable.nRuns) DO
IF firstDataPage IN [runTable.runs[j].filePage..runTable.runs[j+1].filePage) THEN {
FOR i: RunTableIndex IN [j..runTable.nRuns) DO
debuggerLabel: Disk.Label ← label;
-- The first emitted run may start partway through run j, at firstDataPage.
filePage: RunTablePageNumber = MAX[runTable.runs[i].filePage, firstDataPage];
debuggerLabel.filePage ← filePage;
debuggerRunTable.runs[i-j] ← [
page: filePage-firstDataPage,
count:
-- Last run: remaining data pages; otherwise: pages up to the next run's start.
IF i = runTable.nRuns-1 THEN runTable.nDataPages-(filePage-firstDataPage)
ELSE runTable.runs[i+1].filePage-filePage,
deviceType: Disk.DriveAttributes[runTable.runs[i].channel].type,
deviceOrdinal: Disk.DriveAttributes[runTable.runs[i].channel].ordinal,
diskPage: [runTable.runs[i].diskPage+filePage-runTable.runs[i].filePage],
labelCheck: DebuggerFormat.LabelChecksum[debuggerLabel,0]
];
ENDLOOP;
EXIT
};
REPEAT
FINISHED => Crash[]; -- firstDataPage lies in no run of the table
ENDLOOP;
DebuggerSwap.NoteVMRunTable[debuggerRunTable];
UpdateVMLimit[backingRunTable.nDataPages];
};
Exports to VMInternal
-- Transfers pages between the backing file and the memory described by
-- 'interval', in the given 'direction'. The transfer is clipped to the single
-- disk run containing 'backingPage'; 'done' reports how many pages completed.
-- 'subsequentSeek', when nonzero, names the backing page the caller expects to
-- transfer next; if it maps to the same drive, a seek is folded into this
-- request. When the clipped transfer ends before interval.count, the seek
-- target is overridden with the continuation page of this very transfer.
DoIO: PUBLIC UNSAFE PROC [
direction: IODirection, backingPage: PageNumber, interval: Interval, subsequentSeek: PageNumber]
RETURNS [result: IOResult, done: PageCount] = {
Note: The implementation assumes that the disk page size equals the map unit size. If this is not the case, code is required to read multiple disk pages per map unit, worry about discontinuities in the middle of map units, end-of-file in the middle of a map unit, and other funny cases. We'll write this when, and if, we need to.
request: Disk.Request;
channel: Disk.Channel;
diskPage: Disk.PageNumber;
label: Disk.Label;
runLength: PageCount;
status: Disk.Status;
[channel: channel, diskPage: diskPage, runLength: runLength] ←
MapToBackingStorage[backingPage, @label];
request ← [
diskPage: diskPage,
data: VM.AddressForPageNumber[interval.page],
command:
-- Labels are always verified; only the data operation differs by direction.
IF direction = read THEN [header: verify, label: verify, data: read]
ELSE [header: verify, label: verify, data: write],
count: MIN[runLength, interval.count] -- clip to the end of this disk run
];
-- If the interval spans a run boundary, the next transfer continues at
-- interval.page+request.count, so seek there rather than the caller's target.
IF INT[request.count] ~= interval.count THEN subsequentSeek ← interval.page + request.count;
IF subsequentSeek ~= 0 THEN {
seekChannel: Disk.Channel;
[channel: seekChannel, diskPage: diskPage] ← MapToBackingStorage[subsequentSeek];
-- A seek only helps (and is only legal in the request) on the same drive.
IF Disk.SameDrive[channel, seekChannel] THEN request.seek ← diskPage;
};
Initiate the transfer and wait for it to complete
[status, done] ← Disk.DoIO[channel, @label, @request];
-- Fold the detailed disk status into the coarser IOResult for VM clients.
result ← SELECT status FROM
Disk.ok => ok,
Disk.labelCheck => labelCheck,
ENDCASE => someOtherError;
};
HasBackingStorage: PUBLIC ENTRY SAFE PROC [page: PageNumber]
RETURNS [BOOL] = TRUSTED {
-- A page is backed iff AttachBackingStorage has run (the run table exists)
-- and the page falls below the count of backing data pages.
IF backingRunTable = NIL THEN RETURN[FALSE];
RETURN[page < backingRunTable.nDataPages];
};
Internal Procedures
-- Maps a VM backing page to its disk address: the channel and disk page where
-- it lives, plus 'runLength', the number of contiguous backing pages from
-- 'page' to the end of its run. If 'label' is non-NIL, it is filled in with
-- the backing file's label adjusted to the page's file position.
-- Crashes if the page has no backing storage.
-- NOTE(review): this PROC reads the run table outside the monitor (only the
-- HasBackingStorage call enters it); presumably safe because the table is
-- never modified after AttachBackingStorage, which crashes on a second call.
MapToBackingStorage: PROC [page: PageNumber, label: POINTER TO Disk.Label ← NIL]
RETURNS [channel: Disk.Channel, diskPage: Disk.PageNumber, runLength: CARDINAL] = {
IF HasBackingStorage[page] THEN {
filePage: RunTablePageNumber = firstBackingDataPage + page;
-- Binary search for the run with the largest filePage <= our filePage.
low: RunTableIndex ← 0;
high: RunTableIndex ← backingRunTable.nRuns; -- "end marker"
nearest: RunTableIndex;
nearestRun: Run;
UNTIL low > high DO
nearest ← (low + high) / 2;
SELECT backingRunTable.runs[nearest].filePage FROM
< filePage => low ← nearest + 1;
> filePage => high ← nearest - 1;
ENDCASE -- = filePage -- => EXIT;
REPEAT
-- No exact match: the wanted run is the last one starting below filePage,
-- i.e. index low-1 (the FINISHED arm runs only when the loop exhausts).
FINISHED => nearest ← low - 1;
ENDLOOP;
nearestRun ← backingRunTable[nearest];
IF label ~= NIL THEN {
label^ ← backingLabel;
-- Offset the label's file page by the page's position within the file.
label.filePage ← filePage - firstBackingDataPage + label.filePage;
};
RETURN[
channel: nearestRun.channel,
diskPage: Disk.Add[nearestRun.diskPage, filePage - nearestRun.filePage],
-- Pages remaining in this run: next run's start (end marker for the last
-- run) minus our position.
runLength: backingRunTable[nearest+1].filePage - filePage
]
}
ELSE Crash[]; -- no backing storage
};
END.