-- BackupControlImpl.mesa
-- Carl Hauser, April 22, 1986 3:25:09 pm PST
DIRECTORY
AlpineEnvironment,
AlpineInternal,
AlpineLog,
AlpTransaction,
AlpDirectory,
AlpInstance,
BackupControl,
BackupBTree,
BackupLog,
Basics,
BasicTime,
CedarProcess,
CountedVM,
File,
FileLog,
FS,
IO,
LogBasic,
LogInline,
LogRep,
Process,
RefText,
RestartFile,
Rope,
TransactionMap,
Worker;
BackupControlImpl: CEDAR MONITOR
IMPORTS
AlpDirectory,
AlpTransaction,
AlpInstance,
BackupBTree,
BackupLog,
BasicTime,
CountedVM,
CedarProcess,
File,
LogBasic,
LogInline,
Process,
RefText,
RestartFile,
TransactionMap
EXPORTS
BackupControl
SHARES
FileLog
=
BEGIN
BackupVolume: TYPE ~ AlpineInternal.BackupVolume;
backupCycle: BackupControl.BackupCycle ← NIL;
-- backupCycle lists the volumes used by backup, in order from oldest to newest. In the normal case it is used to prompt the operator and to confirm the operator's responses. It can be overridden by the operator in case of major disaster (such as when an online backupCycle is not available).
FileLogRecord: TYPE = FileLog.FileLogRecord;
RecoverFiles: PUBLIC PROC [files: LIST OF Rope.ROPE, in, out: IO.STREAM] ~ {
-- From the backup database, extract the volume and record containing the last increment for each file.
-- Mount the newest such volume and begin filling in the file page by page. Keep a bit map for each file showing which pages have been filled in. As older and older records are encountered, consult the bit map to see whether a page has already been written before copying the older data (a bit-map helper is sketched after this procedure).
-- Body to be written.
};
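-- A minimal sketch of the bit map bookkeeping described above. RecoveryBitMapRep,
-- NewRecoveryBitMap, and TestAndSetPage are hypothetical helpers, not part of the
-- original design: a page is marked the first time it is filled in, so older
-- increments of the same page can be skipped.
RecoveryBitMapRep: TYPE = RECORD [filled: PACKED SEQUENCE nPages: CARDINAL OF BOOL];
NewRecoveryBitMap: PROC [nPages: CARDINAL] RETURNS [REF RecoveryBitMapRep] ~ {
RETURN [NEW[RecoveryBitMapRep[nPages]]]; -- assumes the allocator clears the sequence to FALSE
};
TestAndSetPage: PROC [map: REF RecoveryBitMapRep, page: CARDINAL] RETURNS [alreadyFilled: BOOL] ~ {
alreadyFilled ← map.filled[page];
map.filled[page] ← TRUE;
};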
RecoverBackupDB: PUBLIC PROC [backupVolume: BackupVolume] ~ {
-- This is not recovered like an ordinary file because we can't write in the BackupDB while we are updating it. Instead, starting from the image on the current backup volume, apply the changes in the backup log (a recovery loop is sketched after this procedure).
-- Body to be written.
};
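-- A minimal sketch of the intended loop, assuming the BackupLog interface exactly as
-- it is used in ScanBackupLog below; applying each record's change to the BackupDB
-- image is left abstract, and the record-length arithmetic is elided.
RecoverBackupDBSketch: PROC [backupVolume: BackupVolume] ~ TRUSTED {
header: BUHeader;
status: BackupLog.ReadProcStatus;
currentRecord: RecordID ← BackupLog.OpenForRecovery[backupVolume];
DO
[status: status] ← BackupLog.Read[currentRecord, 0, [base: @header, length: BUHeader.SIZE]];
IF status # normal THEN EXIT;
-- Apply the change described by this record to the BackupDB image here (not shown).
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]]; -- plus the type-specific body length, as in ScanBackupLog
ENDLOOP;
BackupLog.Close[];
};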
RecoverServer: PUBLIC PROC [in, out: IO.STREAM] ~ {
-- Starting with the oldest volume in the backup cycle, create files and play the log against them. Note that we have to maintain a mapping from old fileIDs to new fileIDs, since most likely we won't be able to create files with the same fileIDs. This mapping must be preserved until we have taken a base-version backup of each new file (a mapping helper is sketched after this procedure).
-- Body to be written.
};
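-- A minimal sketch of the old-to-new fileID mapping mentioned above. FileIDPair,
-- FileIDMap, and MapFileID are hypothetical names, and a linear list is used only
-- for clarity; a production version would want a B-tree or hash table.
FileIDPair: TYPE = RECORD [old, new: UniversalFile];
FileIDMap: TYPE = LIST OF FileIDPair;
MapFileID: PROC [map: FileIDMap, old: UniversalFile] RETURNS [new: UniversalFile, found: BOOL] ~ {
FOR l: FileIDMap ← map, l.rest WHILE l # NIL DO
IF l.first.old = old THEN RETURN [l.first.new, TRUE];
ENDLOOP;
RETURN [old, FALSE]; -- no mapping recorded for this fileID
};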
continueBackup: BOOL ← FALSE;
backupRunning: BOOL ← FALSE;
firstRecord: LogRecordID;
StartBackup: PUBLIC ENTRY PROC [backupVolume: BackupVolume, newBTree: BOOLEAN ← FALSE] RETURNS [startedBackup: BOOL ← TRUE] ~ {
IF backupRunning THEN RETURN[ startedBackup ← FALSE ];
continueBackup ← TRUE;
backupRunning ← TRUE;
TRUSTED {Process.Detach[FORK BackupProcess[backupVolume, newBTree]] };
};
StopBackup: PUBLIC ENTRY PROC [] ~ {
continueBackup ← FALSE;
};
TransactionHandle: TYPE = AlpTransaction.Handle;
MapHandle: TYPE = TransactionMap.Handle;
LogRecordID: TYPE = AlpineLog.RecordID;
BULogRecordID: TYPE = BackupLog.RecordID;
UniversalFile: TYPE = AlpineEnvironment.UniversalFile;
BackupProcess: PROC [backupVolume: BackupVolume, newBTree: BOOLEAN] ~ {
currentRecord: LogRecordID;
backupRecord: BULogRecordID;
trans: TransactionHandle;
mapTrans: MapHandle;
btreeFile: UniversalFile;
completedTranses: LIST OF MapHandle ← NIL;
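-- State for the periodic database checkpoint in the loop below; the five-minute
-- interval is an assumed placeholder, not a value from the original design.
lastDBUpdate: BasicTime.GMT ← BasicTime.Now[];
dbUpdateIntervalSeconds: INT = 5*60;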
-- Initialize: find the starting point in the log and the backup log.
CedarProcess.SetPriority[background];
trans ← AlpTransaction.Create[AlpInstance.Create["sea-wolf.alpine", NIL, ALL[0]]];
mapTrans ← TransactionMap.GetHandle[trans.transID];
btreeFile ← AlpDirectory.Lookup["/sea-wolf.alpine/chauser.pa/backup.btree"].file;
BackupBTree.OpenForNormalOperation[trans, btreeFile, newBTree];
[firstRecord, backupRecord] ← BackupBTree.GetPositionInfo[]; -- what if it can't be read?
IF firstRecord = AlpineLog.nullRecordID THEN
TRUSTED {firstRecord ← RestartFile.ReadRestartRecord[].recordIDForCheckpointCompleteRecord};
currentRecord ← firstRecord;
IF newBTree THEN {
-- For convenience, make the first backup log recordID correspond to the current log recordID.
backupRecord ← BackupLog.Format[backupVolume, 200, currentRecord];
};
BackupLog.OpenForNormalOperation[backupVolume, backupRecord];
TRUSTED { [] ← LogBasic.OpenRecordStreamFromCheckpoint[checkpointWord: LogBasic.WordNumberFromRecordID[currentRecord], checkpointRecord: currentRecord, firstRecord: currentRecord];};
WHILE continueBackup DO
endOfLog, truncatedRecord: BOOLEAN;
nextRecord: LogRecordID;
TRUSTED {[endOfLog: endOfLog, truncatedRecord: truncatedRecord, currentRecord: nextRecord] ← LogBasic.AdvanceRecordStream[]};
IF NOT endOfLog THEN {
currentRecord ← nextRecord;
[backupRecord, completedTranses] ← ProcessLogRecord[currentRecord, backupRecord, btreeFile, mapTrans, completedTranses]; -- copy to backupLog, expanding if necessary; what about volume changes?
-- The original pseudocode here called for two policies: update the database's position info when too long since the last update, and obtain a transaction-consistent image when too long since the last one. The first is sketched below with an assumed interval; the second is not yet implemented (the caught-up case below records a consistent image).
IF BasicTime.Period[from: lastDBUpdate, to: BasicTime.Now[]] > dbUpdateIntervalSeconds THEN {
BackupBTree.SetPositionInfo[currentRecord, backupRecord];
lastDBUpdate ← BasicTime.Now[];
};
}
ELSE {
-- Fully caught up, nothing else to do, so remember this fine state.
TRUSTED {LogBasic.CloseRecordStream[]};
BackupBTree.SetPositionInfo[currentRecord, backupRecord];
BackupBTree.SetFullyConsistent[backupRecord, BasicTime.Now[]];
BackupLog.Force[];
BackupBTree.Commit[];
FOR doneTranses: LIST OF MapHandle ← completedTranses, doneTranses.rest WHILE doneTranses # NIL DO
-- There may be NIL transaction handles in the list because read-only transactions have already been unregistered.
IF doneTranses.first # NIL THEN TransactionMap.AssertBackupFinished[doneTranses.first]
ENDLOOP;
completedTranses ← NIL;
mapTrans ← TransactionMap.GetHandle[trans.transID];
Process.Pause[Process.SecondsToTicks[30--*60--]];
TRUSTED { [] ← LogBasic.OpenRecordStreamFromCheckpoint[checkpointWord: LogBasic.WordNumberFromRecordID[currentRecord], checkpointRecord: currentRecord, firstRecord: currentRecord];};
};
ENDLOOP;
BackupLog.Close[];
BackupBTree.Commit[];
[] ← AlpTransaction.Finish[trans, commit];
backupRunning ← FALSE;
};
nBufferPages: CARDINAL = 8;
WordsPerPage: CARDINAL = AlpineEnvironment.wordsPerPage;
nBufferWords: CARDINAL = nBufferPages*WordsPerPage;
bufferHandle: CountedVM.Handle ← CountedVM.Allocate[nBufferWords];
Block: TYPE = AlpineLog.Block;
PageNumber: TYPE = AlpineEnvironment.PageNumber;
RecordID: TYPE = LogRecordID;
bufferBlock: Block ← [base: bufferHandle.pointer, length: 0, rest: NIL];
CopyLogPages: PROC [recordID: LogRecordID, wordsToSkip: CARDINAL, nPages: CARDINAL] RETURNS [followingRecord: RecordID] ~ {
nIt: CARDINAL = nPages / nBufferPages;
nLeft: CARDINAL = nPages MOD nBufferPages;
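-- E.g., with nBufferPages = 8, nPages = 20 yields nIt = 2 full-buffer copies followed by nLeft = 4 trailing pages.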
bufferBlock.length ← nBufferWords;
FOR i: CARDINAL ← 0, i+1 WHILE i < nIt DO
TRUSTED {[] ← LogBasic.Get[ thisRecord: recordID, to: [base: NIL, length: wordsToSkip+i*nBufferWords, rest: @bufferBlock]];}; -- skip the words copied on earlier iterations (the original skipped only i*WordsPerPage, one page per full buffer)
TRUSTED{ [] ← BackupLog.Write[ recordData: bufferBlock, continuation: TRUE ]; };
ENDLOOP;
bufferBlock.length ← nLeft*WordsPerPage;
TRUSTED {[] ← LogBasic.Get[ thisRecord: recordID, to: [base: NIL, length: wordsToSkip+nIt*nBufferWords, rest: @bufferBlock]];}; -- skip everything copied by the full-buffer iterations
TRUSTED{ followingRecord ← BackupLog.Write[ recordData: bufferBlock, continuation: TRUE ]; };
};
CopyFilePages: PROC [universalFile: UniversalFile, firstPage: PageNumber, nPages: PageNumber] RETURNS [followingRecord: RecordID] ~ {
nIt: CARDINAL = nPages / nBufferPages;
nLeft: CARDINAL = nPages MOD nBufferPages;
fileHandle: File.Handle;
bufferBlock.length ← nBufferWords;
-- Open the file.
fileHandle ← File.Open[ volume: File.FindVolumeFromID[universalFile.volumeID], fp: universalFile.fileID];
FOR i: CARDINAL ← 0, i+1 WHILE i < nIt DO
TRUSTED {fileHandle.Read[
from: [firstPage+i*nBufferPages], nPages: nBufferPages,
to: bufferHandle.pointer];};
TRUSTED{ [] ← BackupLog.Write[ recordData: bufferBlock, continuation: TRUE ] };
ENDLOOP;
TRUSTED {fileHandle.Read[
from: [firstPage+nIt*nBufferPages], nPages: nLeft,
to: bufferHandle.pointer];};
bufferBlock.length ← nLeft*WordsPerPage;
TRUSTED{ followingRecord ← BackupLog.Write[ recordData: bufferBlock, continuation: TRUE ]; };
};
TransHeader: TYPE = LogRep.TransactionHeader;
BUHeader: TYPE = MACHINE DEPENDENT RECORD [
seal (0): BULogRecordID,
prevInFile (3): BULogRecordID,
transHeader (6): TransHeader ];
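-- The field offsets above imply that a BULogRecordID occupies three words, so the embedded TransactionHeader begins at word 6.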
ProcessLogRecord: PROC [currentRecord: LogRecordID, backupRecord: BULogRecordID, btreeFile: UniversalFile, backupTrans: MapHandle, completedTranses: LIST OF MapHandle] RETURNS[ nextBackupRecord: BULogRecordID, newCompletedTranses: LIST OF MapHandle] ~ {
currentRecordType: AlpineLog.RecordType ← PeekType[currentRecord];
baseRecord, lastIncrementRecord: BULogRecordID;
name: Rope.ROPE ← "";
RecordTypeHeader: TYPE = LogRep.RecordTypeHeader;
rest: AlpineLog.Block;
buHd: BUHeader;
trans: MapHandle;
UpdateDirectory: PROC [fileID: UniversalFile] ~ {
found: BOOLEAN;
[found: found, baseRecord: baseRecord, lastIncrementRecord: lastIncrementRecord, name: name] ← BackupBTree.GetFileInfo[fileID];
IF NOT found THEN {
IF currentRecordType # create THEN {
-- This file wasn't seen before: probably we are in the startup phase of the backup system. In the steady state this would be an error, but we've got to get started gracefully somehow.
-- Eventually: open the file for reading, copy it to the backup log, and make sure baseRecord and name are initialized.
-- For now, just write the incremental records.
baseRecord ← BackupLog.nullRecordID;
name ← "";
}
ELSE
baseRecord ← backupRecord;
};
IF fileID # btreeFile THEN BackupBTree.SetFileInfo[universalFile: fileID, baseRecord: baseRecord, lastIncrementRecord: backupRecord, name: name];
buHd.prevInFile ← lastIncrementRecord;
};
buHd.seal ← backupRecord;
nextBackupRecord ← backupRecord;
newCompletedTranses ← completedTranses;
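-- Each arm below follows the same pattern: read the type-specific FileLogRecord and
-- the transaction header from the main log; then, if the record belongs to the
-- backup's own transaction or to a committed one, update the file's entry in the
-- backup directory and copy the record (plus any page data) to the backup log.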
SELECT currentRecordType FROM
writePages => {
record: FileLogRecord[writePages];
-- Optimization: if this data is not superseded by data already in the log (not the backup log) THEN ...
-- The optimization is complicated by the fact that we have no way to know whether or not there is a later instance of the page. Deal with this later.
-- Read the record header part.
TRUSTED {
rest ← [base: @record, length: SIZE[FileLogRecord[writePages]]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]]
};
trans ← TransactionMap.GetHandle[buHd.transHeader.transID];
IF (trans = backupTrans) OR ((trans#NIL) AND TransactionMap.GetOutcome[trans] = commit) THEN {
UpdateDirectory[ [record.volumeID, record.fileID] ];
-- Write the log record.
TRUSTED{ [] ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]]; };
nextBackupRecord ← CopyLogPages[ recordID: currentRecord, wordsToSkip: SIZE[FileLogRecord[writePages]] + SIZE[TransHeader], nPages: record.pageRun.count ];
};
};
writeLeaderPage => {
record: FileLogRecord[writeLeaderPage];
TRUSTED {
rest ← [base: @record, length: SIZE[FileLogRecord[writeLeaderPage]]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]]
};
trans ← TransactionMap.GetHandle[buHd.transHeader.transID];
IF (trans = backupTrans) OR ((trans#NIL) AND TransactionMap.GetOutcome[trans] = commit) THEN {
UpdateDirectory[ [record.volumeID, record.fileID] ];
-- Write the log record.
TRUSTED{ [] ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]]; };
nextBackupRecord ← CopyLogPages[ recordID: currentRecord, wordsToSkip: SIZE[FileLogRecord[writeLeaderPage]] + SIZE[TransHeader], nPages: record.pageCount ];
};
};
setSize => {
record: FileLogRecord[setSize];
TRUSTED {
rest ← [base: @record, length: SIZE[FileLogRecord[setSize]]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]]
};
trans ← TransactionMap.GetHandle[buHd.transHeader.transID];
IF (trans = backupTrans) OR ((trans#NIL) AND TransactionMap.GetOutcome[trans] = commit) THEN {
UpdateDirectory[ [record.volumeID, record.fileID] ];
-- Write the log record.
TRUSTED{ nextBackupRecord ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]]; };
};
};
create => {
record: FileLogRecord[create];
textOwner: REF TEXT;
textBlock: AlpineLog.Block;
TRUSTED {
rest ← [base: @record, length: SIZE[FileLogRecord[create]]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]];
textOwner ← RefText.ObtainScratch[record.owner.length];
textBlock ← [ base: BASE[DESCRIPTOR[textOwner.text]], length: (record.owner.length+Basics.bytesPerWord-1)/Basics.bytesPerWord];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: NIL, length: SIZE[TransHeader]+SIZE[FileLogRecord[create]], rest: @textBlock]]
};
trans ← TransactionMap.GetHandle[buHd.transHeader.transID];
IF (trans = backupTrans) OR ((trans#NIL) AND TransactionMap.GetOutcome[trans] = commit) THEN {
UpdateDirectory[ [record.volumeID, record.fileID] ];
-- Write the log record.
TRUSTED{
[] ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]];
nextBackupRecord ← BackupLog.Write[ recordData: textBlock ];
};
};
RefText.ReleaseScratch[textOwner];
};
delete => {
record: FileLogRecord[delete];
TRUSTED {
rest ← [base: @record, length: SIZE[FileLogRecord[delete]]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]]
};
trans ← TransactionMap.GetHandle[buHd.transHeader.transID];
IF (trans = backupTrans) OR ((trans#NIL) AND TransactionMap.GetOutcome[trans] = commit) THEN {
UpdateDirectory[ [record.volumeID, record.fileID] ];
-- Write the log record.
TRUSTED{ nextBackupRecord ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]]; };
};
};
writePagesToBase => {
record: FileLogRecord[writePagesToBase];
TRUSTED {
rest ← [base: @record, length: SIZE[FileLogRecord[writePagesToBase]]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]]
};
trans ← TransactionMap.GetHandle[buHd.transHeader.transID];
IF (trans = backupTrans) OR ((trans#NIL) AND TransactionMap.GetOutcome[trans] = commit) THEN {
UpdateDirectory[ [record.volumeID, record.fileID] ];
-- Write the log record.
TRUSTED{ [] ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]]; };
nextBackupRecord ← CopyFilePages[ universalFile: [record.volumeID, record.fileID], firstPage: record.pageRun.firstPage, nPages: record.pageRun.count ];
};
};
workerCompleting => {
record: Worker.CompletingLogRep;
buHd.prevInFile ← BackupLog.nullRecordID;
TRUSTED {
rest ← [base: @record, length: SIZE[Worker.CompletingLogRep]];
[] ← LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader, length: SIZE[TransHeader], rest: @rest]]
};
-- Write the log record.
TRUSTED{ nextBackupRecord ← BackupLog.Write[ recordData: [base: @buHd, length: SIZE[BUHeader], rest: @rest]]; };
newCompletedTranses ← CONS[TransactionMap.GetHandle[buHd.transHeader.transID], newCompletedTranses];
};
ENDCASE => NULL;
};
PeekType: PROC [thisRecord: LogRecordID] RETURNS [recordType: AlpineLog.RecordType] = TRUSTED {
recordTypeHeader: LogRep.RecordTypeHeader;
status: AlpineLog.ReadProcStatus;
[status: status] ← LogBasic.GetCurrentRecord[currentRecord: thisRecord,
to: [base: @recordTypeHeader, length: LogRep.RecordTypeHeader.SIZE]]; -- the enclosing proc is already TRUSTED
IF status = sourceExhausted THEN ERROR;
RETURN [recordTypeHeader.type];
};
ScanBackupLog: PROC [backupVolume: BackupVolume] RETURNS [] ~ TRUSTED {
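-- Walks the backup log from its recovery start point, reading each record's header
-- and using the type-specific lengths to step from record to record.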
currentRecord: RecordID;
header: BUHeader;
headerBlock: Block ← [base: @header, length: BUHeader.SIZE];
status: BackupLog.ReadProcStatus;
currentRecord ← BackupLog.OpenForRecovery[backupVolume];
DO
[status: status] ← BackupLog.Read[currentRecord, 0, headerBlock];
IF status # normal THEN EXIT;
SELECT header.transHeader.type FROM
writePages => {
record: FileLogRecord[writePages];
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[FileLogRecord[writePages]]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[FileLogRecord[writePages]]+WordsPerPage*record.pageRun.count];
};
writeLeaderPage => {
record: FileLogRecord[writeLeaderPage];
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[FileLogRecord[writeLeaderPage]]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[FileLogRecord[writeLeaderPage]]+WordsPerPage*record.pageCount];
};
setSize => {
record: FileLogRecord[setSize];
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[FileLogRecord[setSize]]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[FileLogRecord[setSize]]];
};
create => {
record: FileLogRecord[create];
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[FileLogRecord[create]]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[FileLogRecord[create]]+(record.owner.length+Basics.bytesPerWord-1)/Basics.bytesPerWord];
};
delete => {
record: FileLogRecord[delete];
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[FileLogRecord[delete]]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[FileLogRecord[delete]]];
};
writePagesToBase => {
record: FileLogRecord[writePagesToBase];
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[FileLogRecord[writePagesToBase]]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[FileLogRecord[writePagesToBase]]+WordsPerPage*record.pageRun.count];
};
workerCompleting => {
record: Worker.CompletingLogRep;
[] ← BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record, length: SIZE[Worker.CompletingLogRep]]];
currentRecord ← LogInline.AddLC[currentRecord, SIZE[BUHeader]+SIZE[Worker.CompletingLogRep]];
};
ENDCASE => NULL;
ENDLOOP;
BackupLog.Close[];
};
GetLocalType: PROC [recordType: AlpineLog.RecordType] RETURNS [localRecordType: LocalRecordType] ~ {
SELECT recordType FROM
noop => localRecordType ← noop;
checkpointBegin => localRecordType ← checkpointBegin;
checkpointComplete => localRecordType ← checkpointComplete;
coordinatorBegin => localRecordType ← coordinatorBegin;
coordinatorRegisterWorker => localRecordType ← coordinatorRegisterWorker;
coordinatorCollecting => localRecordType ← coordinatorCollecting;
coordinatorCompleting => localRecordType ← coordinatorCompleting;
coordinatorComplete => localRecordType ← coordinatorComplete;
workerBegin => localRecordType ← workerBegin;
workerReady => localRecordType ← workerReady;
workerCompleting => localRecordType ← workerCompleting;
workerComplete => localRecordType ← workerComplete;
writePages => localRecordType ← writePages;
writeLeaderPage => localRecordType ← writeLeaderPage;
setSize => localRecordType ← setSize;
create => localRecordType ← create;
delete => localRecordType ← delete;
lock => localRecordType ← lock;
reserved => localRecordType ← reserved;
ENDCASE => localRecordType ← none;
};
LocalRecordType: TYPE = { noop, checkpointBegin, checkpointComplete, coordinatorBegin, coordinatorRegisterWorker, coordinatorCollecting, coordinatorCompleting, coordinatorComplete, workerBegin, workerReady, workerCompleting, workerComplete, writePages, writeLeaderPage, setSize, create, delete, lock, reserved, none };
CleanupProcess: PROC [] RETURNS [] ~ {
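-- Periodically releases transactions from backup's point of view: every transaction
-- when no backup is running, and otherwise any worker whose first log record precedes
-- the backup pass's starting point (reported via TransactionNotBackedUp).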
ExamineWorker: SAFE PROC [self: TransactionMap.Handle] RETURNS [stop: BOOL ← FALSE] ~ {
TransactionNotBackedUp: SIGNAL[self: TransactionMap.Handle] ~ CODE;
IF NOT backupRunning THEN TransactionMap.AssertBackupFinished[self]
ELSE TRUSTED {
workerHandle: Worker.Handle ← LOOPHOLE[self];
IF LogInline.Compare[workerHandle.beginRecord, firstRecord] = less THEN {
SIGNAL TransactionNotBackedUp[self];
TransactionMap.AssertBackupFinished[self];
};
};
};
DO
TransactionMap.UnlockedEnumerate[ExamineWorker];
Process.Pause[Process.SecondsToTicks[60]];
ENDLOOP;
};
StartCleanup: PROC [] RETURNS [] ~ {
TRUSTED { Process.Detach[FORK CleanupProcess[]] };
};
END.