-- BackupControlImpl.mesa
-- Implements BackupControl: an online backup facility that follows the Alpine
-- server log, copying committed file-mutating records to a backup volume's
-- log (BackupLog) and tracking per-file backup state in a B-tree (BackupBTree).

DIRECTORY
  AlpineEnvironment, AlpineInternal, AlpineLog, AlpTransaction, AlpDirectory,
  AlpInstance, BackupControl, BackupBTree, BackupLog, Basics, BasicTime,
  CedarProcess, CountedVM, File, FileLog, FS, IO, LogBasic, LogInline, LogRep,
  Process, RefText, RestartFile, Rope, TransactionMap, Worker;

BackupControlImpl: CEDAR MONITOR
  IMPORTS AlpDirectory, AlpTransaction, AlpInstance, BackupBTree, BackupLog,
    BasicTime, CountedVM, CedarProcess, File, LogBasic, LogInline, Process,
    RefText, RestartFile, TransactionMap
  EXPORTS BackupControl
  SHARES FileLog
  = BEGIN

BackupVolume: TYPE ~ AlpineInternal.BackupVolume;

-- Cycle of volumes used by backup, oldest to newest; not yet maintained here.
backupCycle: BackupControl.BackupCycle _ NIL;

FileLogRecord: TYPE = FileLog.FileLogRecord;

-- Recovery entry points: unimplemented stubs.
RecoverFiles: PUBLIC PROC [files: LIST OF Rope.ROPE, in, out: IO.STREAM] ~ { };

RecoverBackupDB: PUBLIC PROC [backupVolume: BackupVolume] ~ { };

RecoverServer: PUBLIC PROC [in, out: IO.STREAM] ~ { };

-- Monitor state: continueBackup asks BackupProcess to keep running;
-- backupRunning is TRUE while a BackupProcess instance is alive.
continueBackup: BOOL _ FALSE;
backupRunning: BOOL _ FALSE;
-- Oldest server log record covered by the current backup pass; also read by
-- CleanupProcess (unsynchronized) to decide which transactions still matter.
firstRecord: LogRecordID;

StartBackup: PUBLIC ENTRY PROC [backupVolume: BackupVolume, newBTree: BOOLEAN _ FALSE]
  RETURNS [startedBackup : BOOL _ TRUE] ~ {
  -- Forks a detached background BackupProcess unless one is already running,
  -- in which case returns startedBackup = FALSE and changes nothing.
  IF backupRunning THEN RETURN[ startedBackup _ FALSE ];
  continueBackup _ TRUE;
  backupRunning _ TRUE;
  TRUSTED {Process.Detach[FORK BackupProcess[backupVolume, newBTree]] };
  };

StopBackup: PUBLIC ENTRY PROC [] ~ {
  -- Asks the running BackupProcess to stop; it clears backupRunning on exit.
  continueBackup _ FALSE;
  };

TransactionHandle: TYPE = AlpTransaction.Handle;
MapHandle: TYPE = TransactionMap.Handle;
LogRecordID: TYPE = AlpineLog.RecordID;
BULogRecordID: TYPE = BackupLog.RecordID;
UniversalFile: TYPE = AlpineEnvironment.UniversalFile;

BackupProcess: PROC [backupVolume: BackupVolume, newBTree: BOOLEAN] ~ {
  -- Main backup loop.  Resumes from the position recorded in BackupBTree (or
  -- from the last checkpoint-complete record if none) and feeds each server
  -- log record to ProcessLogRecord.  On reaching end of log it commits its
  -- position, marks the backup fully consistent, reports backed-up
  -- transactions to TransactionMap, naps, and resumes.
  currentRecord: LogRecordID;
  backupRecord: BULogRecordID;
  trans: TransactionHandle;
  mapTrans: MapHandle;
  btreeFile: UniversalFile;
  completedTranses: LIST OF MapHandle _ NIL;
  CedarProcess.SetPriority[background];
  -- NOTE(review): server instance and B-tree file names are hard-wired here.
  trans _ AlpTransaction.Create[AlpInstance.Create["sea-wolf.alpine", NIL, ALL[0]]];
  mapTrans _ TransactionMap.GetHandle[trans.transID];
  btreeFile _ AlpDirectory.Lookup["/sea-wolf.alpine/chauser.pa/backup.btree"].file;
  BackupBTree.OpenForNormalOperation[trans, btreeFile, newBTree];
  [firstRecord, backupRecord] _ BackupBTree.GetPositionInfo[]; -- what if it can't be read?
  IF firstRecord = AlpineLog.nullRecordID THEN TRUSTED {firstRecord _
    RestartFile.ReadRestartRecord[].recordIDForCheckpointCompleteRecord };
  currentRecord _ firstRecord;
  IF newBTree THEN {
    -- Fresh backup B-tree: format the backup volume, starting its log at an
    -- id corresponding to the current server log record id.
    backupRecord _ BackupLog.Format[backupVolume, 200, currentRecord];
    };
  BackupLog.OpenForNormalOperation[backupVolume, backupRecord];
  TRUSTED { [] _ LogBasic.OpenRecordStreamFromCheckpoint[checkpointWord:
    LogBasic.WordNumberFromRecordID[currentRecord],
    checkpointRecord: currentRecord, firstRecord: currentRecord];};
  WHILE continueBackup DO
    endOfLog, truncatedRecord: BOOLEAN;
    nextRecord: LogRecordID;
    TRUSTED {[endOfLog: endOfLog, truncatedRecord: truncatedRecord,
      currentRecord: nextRecord] _ LogBasic.AdvanceRecordStream[]};
    IF NOT endOfLog THEN {
      currentRecord _ nextRecord;
      [backupRecord, completedTranses] _ ProcessLogRecord[currentRecord,
        backupRecord, btreeFile, mapTrans, completedTranses]; -- copy to backupLog, expanding if necessary; what about volume changes?
      }
    ELSE {
      -- Caught up with the server log: force the backup log, commit a
      -- consistent position, then release the record stream and sleep.
      TRUSTED {LogBasic.CloseRecordStream[]};
      BackupBTree.SetPositionInfo[currentRecord, backupRecord];
      BackupBTree.SetFullyConsistent[backupRecord, BasicTime.Now[]];
      BackupLog.Force[];
      BackupBTree.Commit[];
      -- Report transactions whose completion records have been copied; NIL
      -- entries can appear for transactions that were already unregistered.
      FOR doneTranses: LIST OF MapHandle _ completedTranses, doneTranses.rest
        WHILE doneTranses # NIL DO
        IF doneTranses.first # NIL THEN
          TransactionMap.AssertBackupFinished[doneTranses.first]
        ENDLOOP;
      completedTranses _ NIL;
      mapTrans _ TransactionMap.GetHandle[trans.transID];
      Process.Pause[Process.SecondsToTicks[30--*60--]];
      TRUSTED { [] _ LogBasic.OpenRecordStreamFromCheckpoint[checkpointWord:
        LogBasic.WordNumberFromRecordID[currentRecord],
        checkpointRecord: currentRecord, firstRecord: currentRecord];};
      };
    ENDLOOP;
  BackupLog.Close[];
  BackupBTree.Commit[];
  [] _ AlpTransaction.Finish[trans, commit];
  backupRunning _ FALSE;
  };

-- Shared copy buffer, nBufferPages pages long, used by both CopyLogPages and
-- CopyFilePages; safe only because a single BackupProcess calls them.
nBufferPages: CARDINAL = 8;
WordsPerPage: CARDINAL = AlpineEnvironment.wordsPerPage;
nBufferWords: CARDINAL = nBufferPages*WordsPerPage;
bufferHandle: CountedVM.Handle _ CountedVM.Allocate[nBufferWords];
Block: TYPE = AlpineLog.Block;
PageNumber: TYPE = AlpineEnvironment.PageNumber;
RecordID: TYPE = LogRecordID;
bufferBlock: Block _ [base: bufferHandle.pointer, length: 0, rest: NIL];

CopyLogPages: PROC [recordID: LogRecordID, wordsToSkip: CARDINAL, nPages: CARDINAL]
  RETURNS [followingRecord: RecordID] ~ {
  -- Copies the page data of server log record recordID (skipping its
  -- wordsToSkip-word header) to the backup log as continuation blocks, a
  -- bufferful at a time.  Returns the backup log record id following the
  -- data written.
  nIt: CARDINAL = nPages / nBufferPages;
  nLeft: CARDINAL = nPages MOD nBufferPages;
  bufferBlock.length _ nBufferWords;
  FOR i: CARDINAL _ 0, i+1 WHILE i < nIt DO
    -- NOTE(review): the skip here advances by i*WordsPerPage per iteration,
    -- yet each iteration consumes nBufferWords (= nBufferPages*WordsPerPage)
    -- words; CopyFilePages below advances by i*nBufferPages pages.  Confirm
    -- whether this should be wordsToSkip+i*nBufferWords.
    TRUSTED {[] _ LogBasic.Get[ thisRecord: recordID, to: [base: NIL, length:
      wordsToSkip+i*WordsPerPage, rest: @bufferBlock]];};
    TRUSTED{ [] _ BackupLog.Write[ recordData: bufferBlock, continuation: TRUE ]; };
    ENDLOOP;
  -- Final partial buffer; nLeft may be 0, producing a zero-length write.
  bufferBlock.length _ nLeft*WordsPerPage;
  TRUSTED {[] _ LogBasic.Get[ thisRecord: recordID, to: [base: NIL, length:
    wordsToSkip+nIt*WordsPerPage, rest: @bufferBlock]];};
  TRUSTED{ followingRecord _ BackupLog.Write[ recordData: bufferBlock,
    continuation: TRUE ]; };
  };
CopyFilePages: PROC [universalFile: UniversalFile, firstPage: PageNumber,
  nPages: PageNumber] RETURNS [followingRecord: RecordID] ~ {
  -- Copies nPages pages of the base file, starting at firstPage, to the
  -- backup log as continuation blocks, a bufferful (nBufferPages pages) at a
  -- time.  Used for writePagesToBase records, whose page data lives in the
  -- file rather than in the server log.
  nIt: CARDINAL = nPages / nBufferPages;
  nLeft: CARDINAL = nPages MOD nBufferPages;
  fileHandle: File.Handle;
  bufferBlock.length _ nBufferWords;
  -- open the file for reading
  fileHandle _ File.Open[ volume: File.FindVolumeFromID[universalFile.volumeID],
    fp: universalFile.fileID];
  -- copy it to the backup log
  FOR i: CARDINAL _ 0, i+1 WHILE i < nIt DO
    TRUSTED {fileHandle.Read[ from: [firstPage+i*nBufferPages], nPages:
      nBufferPages, to: bufferHandle.pointer];};
    TRUSTED{ [] _ BackupLog.Write[ recordData: bufferBlock, continuation: TRUE ] };
    ENDLOOP;
  -- Final partial buffer; nLeft may be 0, giving an empty read and write.
  TRUSTED {fileHandle.Read[ from: [firstPage+nIt*nBufferPages], nPages: nLeft,
    to: bufferHandle.pointer];};
  bufferBlock.length _ nLeft*WordsPerPage;
  TRUSTED{ followingRecord _ BackupLog.Write[ recordData: bufferBlock,
    continuation: TRUE ]; };
  };

TransHeader: TYPE = LogRep.TransactionHeader;

-- Header written at the front of every backup log record: seal is the backup
-- record's own id, prevInFile chains to the previous backup record for the
-- same file, and transHeader is a copy of the server log transaction header.
BUHeader: TYPE = MACHINE DEPENDENT RECORD [
  seal (0): BULogRecordID,
  prevInFile (3): BULogRecordID,
  transHeader (6): TransHeader ];

ProcessLogRecord: PROC [currentRecord: LogRecordID, backupRecord: BULogRecordID,
  btreeFile: UniversalFile, backupTrans: MapHandle,
  completedTranses: LIST OF MapHandle]
  RETURNS[ nextBackupRecord: BULogRecordID,
    newCompletedTranses: LIST OF MapHandle] ~ {
  -- Copies one server log record to the backup log, but only when it is a
  -- file-mutating record whose transaction is the backup transaction itself
  -- or is known to have committed; updates the per-file record chain in
  -- BackupBTree via UpdateDirectory.  workerCompleting records are always
  -- copied and their transactions accumulated in newCompletedTranses so the
  -- caller can report them to TransactionMap after the next consistent point.
  currentRecordType: AlpineLog.RecordType _ PeekType[currentRecord];
  baseRecord, lastIncrementRecord: BULogRecordID;
  name: Rope.ROPE _ "";
  RecordTypeHeader: TYPE = LogRep.RecordTypeHeader;
  rest: AlpineLog.Block;
  buHd: BUHeader;
  trans: MapHandle;
  UpdateDirectory: PROC [fileID: UniversalFile] ~ {
    -- Looks up fileID in BackupBTree, records this record as the file's
    -- latest increment, and sets buHd.prevInFile to the previous one.
    found: BOOLEAN;
    [found: found, baseRecord: baseRecord, lastIncrementRecord:
      lastIncrementRecord, name: name] _ BackupBTree.GetFileInfo[fileID];
    IF NOT found THEN {
      -- This file wasn't seen before: probably we are in the startup phase of
      -- the backup system.  In the steady state this would be an error, but
      -- we've got to get started gracefully somehow.
      IF currentRecordType # create THEN { baseRecord _ BackupLog.nullRecordID;
        name _ ""; }
      ELSE baseRecord _ backupRecord;
      };
    -- The backup B-tree's own file is deliberately not tracked.
    IF fileID # btreeFile THEN BackupBTree.SetFileInfo[universalFile: fileID,
      baseRecord: baseRecord, lastIncrementRecord: backupRecord, name: name];
    buHd.prevInFile _ lastIncrementRecord;
    };
  buHd.seal _ backupRecord;
  nextBackupRecord _ backupRecord;
  newCompletedTranses _ completedTranses;
  SELECT currentRecordType FROM
    writePages => {
      -- Header + writePages record, then the data pages copied from the log.
      record: FileLogRecord[writePages];
      TRUSTED { rest _ [base: @record, length: SIZE[FileLogRecord[writePages]]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]] };
      trans _ TransactionMap.GetHandle[buHd.transHeader.transID];
      IF (trans = backupTrans) OR ((trans#NIL) AND
        TransactionMap.GetOutcome[trans] = commit) THEN {
        UpdateDirectory[ [record.volumeID, record.fileID] ];
        TRUSTED{ [] _ BackupLog.Write[ recordData: [base: @buHd, length:
          SIZE[BUHeader], rest: @rest]]; };
        nextBackupRecord _ CopyLogPages[ recordID: currentRecord, wordsToSkip:
          SIZE[FileLogRecord[writePages]] + SIZE[TransHeader], nPages:
          record.pageRun.count ];
        };
      };
    writeLeaderPage => {
      -- Same shape as writePages; the page count comes from record.pageCount.
      record: FileLogRecord[writeLeaderPage];
      TRUSTED { rest _ [base: @record, length: SIZE[FileLogRecord[writeLeaderPage]]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]] };
      trans _ TransactionMap.GetHandle[buHd.transHeader.transID];
      IF (trans = backupTrans) OR ((trans#NIL) AND
        TransactionMap.GetOutcome[trans] = commit) THEN {
        UpdateDirectory[ [record.volumeID, record.fileID] ];
        TRUSTED{ [] _ BackupLog.Write[ recordData: [base: @buHd, length:
          SIZE[BUHeader], rest: @rest]]; };
        nextBackupRecord _ CopyLogPages[ recordID: currentRecord, wordsToSkip:
          SIZE[FileLogRecord[writeLeaderPage]] + SIZE[TransHeader], nPages:
          record.pageCount ];
        };
      };
    setSize => {
      -- No page data: just the header and the setSize record.
      record: FileLogRecord[setSize];
      TRUSTED { rest _ [base: @record, length: SIZE[FileLogRecord[setSize]]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]] };
      trans _ TransactionMap.GetHandle[buHd.transHeader.transID];
      IF (trans = backupTrans) OR ((trans#NIL) AND
        TransactionMap.GetOutcome[trans] = commit) THEN {
        UpdateDirectory[ [record.volumeID, record.fileID] ];
        TRUSTED{ nextBackupRecord _ BackupLog.Write[ recordData: [base: @buHd,
          length: SIZE[BUHeader], rest: @rest]]; };
        };
      };
    create => {
      -- The create record is followed by the owner name; copy that as well,
      -- staging it through a scratch REF TEXT.
      record: FileLogRecord[create];
      textOwner: REF TEXT;
      textBlock: AlpineLog.Block;
      TRUSTED { rest _ [base: @record, length: SIZE[FileLogRecord[create]]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]];
        textOwner _ RefText.ObtainScratch[record.owner.length];
        textBlock _ [ base: BASE[DESCRIPTOR[textOwner.text]], length:
          (record.owner.length+Basics.bytesPerWord-1)/Basics.bytesPerWord];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: NIL, length:
          SIZE[TransHeader]+SIZE[FileLogRecord[create]], rest: @textBlock]] };
      trans _ TransactionMap.GetHandle[buHd.transHeader.transID];
      IF (trans = backupTrans) OR ((trans#NIL) AND
        TransactionMap.GetOutcome[trans] = commit) THEN {
        UpdateDirectory[ [record.volumeID, record.fileID] ];
        TRUSTED{ [] _ BackupLog.Write[ recordData: [base: @buHd, length:
          SIZE[BUHeader], rest: @rest]];
          nextBackupRecord _ BackupLog.Write[ recordData: textBlock ]; };
        };
      RefText.ReleaseScratch[textOwner];
      };
    delete => {
      record: FileLogRecord[delete];
      TRUSTED { rest _ [base: @record, length: SIZE[FileLogRecord[delete]]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]] };
      trans _ TransactionMap.GetHandle[buHd.transHeader.transID];
      IF (trans = backupTrans) OR ((trans#NIL) AND
        TransactionMap.GetOutcome[trans] = commit) THEN {
        UpdateDirectory[ [record.volumeID, record.fileID] ];
        TRUSTED{ nextBackupRecord _ BackupLog.Write[ recordData: [base: @buHd,
          length: SIZE[BUHeader], rest: @rest]]; };
        };
      };
    writePagesToBase => {
      -- Like writePages, but the data is read from the base file itself.
      record: FileLogRecord[writePagesToBase];
      TRUSTED { rest _ [base: @record, length: SIZE[FileLogRecord[writePagesToBase]]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]] };
      trans _ TransactionMap.GetHandle[buHd.transHeader.transID];
      IF (trans = backupTrans) OR ((trans#NIL) AND
        TransactionMap.GetOutcome[trans] = commit) THEN {
        UpdateDirectory[ [record.volumeID, record.fileID] ];
        TRUSTED{ [] _ BackupLog.Write[ recordData: [base: @buHd, length:
          SIZE[BUHeader], rest: @rest]]; };
        nextBackupRecord _ CopyFilePages[ universalFile: [record.volumeID,
          record.fileID], firstPage: record.pageRun.firstPage, nPages:
          record.pageRun.count ];
        };
      };
    workerCompleting => {
      -- Always copied, regardless of transaction outcome; the transaction is
      -- queued so the caller can later AssertBackupFinished on it.
      record: Worker.CompletingLogRep;
      buHd.prevInFile _ BackupLog.nullRecordID;
      TRUSTED { rest _ [base: @record, length: SIZE[Worker.CompletingLogRep]];
        [] _ LogBasic.Get[thisRecord: currentRecord, to: [base: @buHd.transHeader,
          length: SIZE[TransHeader], rest: @rest]] };
      TRUSTED{ nextBackupRecord _ BackupLog.Write[ recordData: [base: @buHd,
        length: SIZE[BUHeader], rest: @rest]]; };
      newCompletedTranses _ CONS[TransactionMap.GetHandle[buHd.transHeader.transID],
        newCompletedTranses];
      };
    ENDCASE => NULL;
  };

PeekType: PROC [thisRecord: LogRecordID]
  RETURNS [recordType: AlpineLog.RecordType] = TRUSTED {
  -- Reads just the record-type header of a server log record; raises an
  -- uncaught ERROR if the log source is exhausted at that position.
  recordTypeHeader: LogRep.RecordTypeHeader;
  status: AlpineLog.ReadProcStatus;
  TRUSTED {[status: status] _ LogBasic.GetCurrentRecord[currentRecord: thisRecord,
    to: [base: @recordTypeHeader, length: LogRep.RecordTypeHeader.SIZE]]};
  IF status = sourceExhausted THEN ERROR ;
  RETURN [recordTypeHeader.type];
  };

ScanBackupLog: PROC [backupVolume: BackupVolume] RETURNS [] ~ TRUSTED {
  -- Walks the backup volume's log from its recovery position, reading each
  -- record's BUHeader and body only to compute where the next record starts;
  -- stops at the first abnormal read.  The record contents are currently
  -- discarded (the reads' results are ignored).
  currentRecord: RecordID;
  header: BUHeader;
  headerBlock: Block _ [base: @header, length: BUHeader.SIZE];
  status: BackupLog.ReadProcStatus;
  currentRecord _ BackupLog.OpenForRecovery[backupVolume];
  DO
    [status: status] _ BackupLog.Read[currentRecord, 0, headerBlock];
    IF status # normal THEN EXIT;
    SELECT header.transHeader.type FROM
      writePages => {
        record: FileLogRecord[writePages];
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[FileLogRecord[writePages]]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[FileLogRecord[writePages]]+WordsPerPage*record.pageRun.count];
        };
      writeLeaderPage => {
        record: FileLogRecord[writeLeaderPage];
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[FileLogRecord[writeLeaderPage]]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[FileLogRecord[writeLeaderPage]]+WordsPerPage*record.pageCount];
        };
      setSize => {
        record: FileLogRecord[setSize];
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[FileLogRecord[setSize]]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[FileLogRecord[setSize]]];
        };
      create => {
        -- Advance past the trailing owner-name words as well.
        record: FileLogRecord[create];
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[FileLogRecord[create]]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[FileLogRecord[create]]+(record.owner.length+Basics.bytesPerWord-1)/Basics.bytesPerWord];
        };
      delete => {
        record: FileLogRecord[delete];
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[FileLogRecord[delete]]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[FileLogRecord[delete]]];
        };
      writePagesToBase => {
        record: FileLogRecord[writePagesToBase];
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[FileLogRecord[writePagesToBase]]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[FileLogRecord[writePagesToBase]]+WordsPerPage*record.pageRun.count];
        };
      workerCompleting => {
        record: Worker.CompletingLogRep;
        [] _ BackupLog.Read[currentRecord, SIZE[BUHeader], [base: @record,
          length: SIZE[Worker.CompletingLogRep]]];
        currentRecord _ LogInline.AddLC[currentRecord,
          SIZE[BUHeader]+SIZE[Worker.CompletingLogRep]];
        };
      ENDCASE => NULL;
    ENDLOOP;
  BackupLog.Close[];
  };

CleanupProcess: PROC [] RETURNS [] ~ {
  -- Background loop: once a minute, sweeps TransactionMap and marks as
  -- backup-finished any transaction the backup no longer needs: all of them
  -- when no backup is running, otherwise those that began before firstRecord
  -- (signalling TransactionNotBackedUp about each of those).
  ExamineWorker: SAFE PROC [self: TransactionMap.Handle]
    RETURNS [stop: BOOL _ FALSE] ~ {
    TransactionNotBackedUp: SIGNAL[self: TransactionMap.Handle] ~ CODE;
    IF NOT backupRunning THEN TransactionMap.AssertBackupFinished[self]
    ELSE TRUSTED {
      workerHandle: Worker.Handle _ LOOPHOLE[self];
      IF LogInline.Compare[workerHandle.beginRecord, firstRecord] = less THEN {
        SIGNAL TransactionNotBackedUp[self];
        TransactionMap.AssertBackupFinished[self];
        };
      };
    };
  DO
    TransactionMap.UnlockedEnumerate[ExamineWorker];
    Process.Pause[Process.SecondsToTicks[60]];
    ENDLOOP;
  };

StartCleanup: PROC [] RETURNS [] ~ {
  -- Forks the detached CleanupProcess daemon.
  TRUSTED { Process.Detach[FORK CleanupProcess[]] };
  };

END.
ΜBackupControlImpl.mesa Carl Hauser, April 22, 1986 3:25:09 pm PST backupCycle lists the volumes used by backup in order from oldest to newest. It is used to prompt the operator and to confirm his responses, in the normal case. It can be overridden by the operator in case of major disaster (such as when an online backupCycle is not available. From the backup database extract the volume and record containing the last increment for each file. Mount the newest such, and begin filling in the file page by page. Keep a bit map for each file, showing which pages have been filled in. As older and older records are encountered, consult the bit map to see if a page has already been written before copying the older data. Body This is not recovered like an ordinary file because we can't write in the BackupDB when we are updating it. Instead, starting from the image on the current backup volume, apply the changes in the backup log. Body Starting with the oldest volume in the backup cycle, create files and play the log against them. Note that we have to maintain a mapping from old fileid's to new fileids since most likely we won't be able to create files with the same fileids. This mapping must be preserved until we have taken a base version backup of each new file. Body Initialize: find the starting point in the log and the backup log. 
Just for the heck of it, make the first backup log recordID correspond to the current log recordID. IF too long since last DB update THEN BackupBTree.SetPositionInfo[currentRecord, backupRecord]; IF too long since last xaction consistent image THEN take steps to get one Fully caught up, nothing else to do, so remember this fine state There may be NIL transactionHandles in the list because readOnly transactions have already been unregistered. open the file This file wasn't seen before: probably we are in startup phase of the backup system. In the steady state this would be an error, but we've got to get started gracefully somehow. open the file for reading copy it to the backup log. Make sure baseRecord and name are initialized. For now, just write the incremental records. optimization: if this data is not superseded by data already in the log (not the backup log) THEN ... The optimization is complicated by the fact that we have no way to know whether or not there is a later instance of the page. Deal with this later. 
read the record header part write the log record write the log record write the log record write the log record write the log record write the log record write the log record GetLocalType: PROC [recordType: AlpineLog.RecordType] RETURNS [localRecordType: LocalRecordType] ~ { SELECT recordType FROM noop => localRecordType _ noop; checkpointBegin => localRecordType _ checkpointBegin; checkpointComplete => localRecordType _ checkpointComplete; coordinatorBegin => localRecordType _ coordinatorBegin; coordinatorRegisterWorker => localRecordType _ coordinatorRegisterWorker; coordinatorCollecting => localRecordType _ coordinatorCollecting; coordinatorCompleting => localRecordType _ coordinatorCompleting; coordinatorComplete => localRecordType _ coordinatorComplete; workerBegin => localRecordType _ workerBegin; workerReady => localRecordType _ workerReady; workerCompleting => localRecordType _ workerCompleting; workerComplete => localRecordType _ workerComplete; writePages => localRecordType _ writePages; writeLeaderPage => localRecordType _ writeLeaderPage; setSize => localRecordType _ setSize; create => localRecordType _ create; delete => localRecordType _ delete; lock => localRecordType _ localRecordType _ lock; reserved => localRecordType _ reserved; ENDCASE => localRecordType _ none; }; LocalRecordType: TYPE = { noop, checkpointBegin, checkpointComplete, coordinatorBegin, coordinatorRegisterWorker, coordinatorCollecting, coordinatorCompleting, coordinatorComplete, workerBegin, workerReady, workerCompleting, workerComplete, writePages, writeLeaderPage, setSize, create, delete, lock, reserved, none }; Κ#˜™Icode™*šΟk ˜ J˜Jšœ˜J˜ J˜J˜ J˜ Jšœ˜J˜ J˜ J˜J˜ J˜ J˜ J˜J˜Jšœ˜Jšœ˜J˜ J˜ J˜J˜J˜J˜ J˜J˜J˜——J™šœœ˜ š˜J˜ J˜J˜ J˜ J˜ J˜ J˜ J˜ J˜J˜ J˜ J˜J˜J˜ J˜—šœ˜J˜ —š˜J˜—J˜Jš˜J˜Kšœœ˜1K˜šœ)œ˜-K™—K™—Kšœœ˜,J˜šΟn œœœ œœœ œœ˜LK™cK™”K™Kšœ˜—J˜šžœœœ!˜=K™ΠK™Kšœ˜—J˜š ž œœœ œœ˜3K™ΠK™Kšœ˜—J˜Jšœœœ˜Jšœœœ˜Jšœ˜J˜šž œœœœ)œœœœœ˜Kšœœœœ˜6Kšœœ˜Kšœœ˜Kšœœ*˜FKšœ˜—J˜šž 
œœœœ˜$Kšœœ˜Kšœ˜K˜—Kšœœ˜0Kšœ œ˜(Kšœ œ˜'Kšœœ˜)šœœ#˜6K˜—šž œœ(œ˜GKšœ˜Kšœ˜Kšœ˜Kšœ˜Kšœ˜Kšœœœ œ˜*K™CKšœ%˜%KšœDœœ˜RK˜3J˜QJ˜Jšœ?˜?Kšœ=Οc˜Yšœ&œ˜-KšœS˜ZK˜—Kšœ˜šœ œ˜J™nJšœB˜BJ˜—Kšœ=˜=Kšœ―˜Άšœ˜Kšœœ˜#Kšœ˜Jšœv˜}šœœ œ˜Kšœ˜KšœyŸI˜ΒšœΟbΠbk œ™%Kšœ9™9—šœ ,œ™4Kš ™—K˜—šœ˜K™@Kšœ ˜'Kšœ9˜9Kšœ>˜>K˜K˜š œœœ0œœ˜bK™mKšœœœ7˜VKšœ˜—Kšœ˜K˜3Kšœ'Ÿœ˜1Jšœ―˜ΆK˜—Kšœ˜—K˜K˜K˜*Kšœœ˜K˜K˜—Kšœœ˜Kšœœ"˜8Kšœœ˜3KšœB˜BKšœœ˜Kšœ œ ˜0Kšœ œ˜šœCœ˜HK˜—š Πbn œœ&œ œœ ˜{Kšœœ˜&Kšœœ œ˜*Kšœ"˜"šœœ œ ˜)Kšœ6œ=˜}Kšœ?œ˜PKšœ˜—Kšœ(˜(Kšœ6œ?˜KšœLœ˜]K˜K˜—šž œœKœ ˜…Kšœœ˜&Kšœœ œ˜*K˜Kšœ"˜"K™ Kšœi˜išœœ œ ˜)šœ˜Jšœ7˜7Jšœ˜—Kšœ?œ˜OKšœ˜—šœ˜Jšœ2˜2Jšœ˜—Kšœ(˜(KšœLœ˜]Kšœ˜K˜—Kšœ œ˜-š œ œœ œœ˜+Kšœ˜Kšœ˜K˜K˜—šžœœœœ œ8œœ˜ύKšœB˜BKšœ/˜/Kšœ œ˜Kšœœ˜1K˜K˜Kšœ˜K˜šžœœ˜1Kšœœ˜Kšœ˜šœœœ˜šœœ˜$J™³Jš ™Jš ™Jš  œ  œ ™/™,Jšœ$˜$J˜ —J˜—š˜Jšœ˜—J˜—Kšœœw˜‘Kšœ&˜&K˜—K˜K˜Kšœ˜Kšœ ˜ K˜'šœ˜šœ˜Kšœ"˜"™eK™•—Kšœ™K™šœ˜ Kšœœ˜@KšœSœ˜sKšœ˜—Kšœ;˜;š œœ œœ,œ˜^K˜4K™Kšœ:œ˜bKšœGœœ.˜›K˜—Kšœ˜—šœ˜Kšœ'˜'šœ˜ Kšœœ"˜EKšœSœ˜sKšœ˜—Kšœ;˜;š œœ œœ,œ˜^K˜4K™Kšœ:œ˜bKšœGœ#œ*˜œK˜—K˜—šœ ˜ Kšœ˜šœ˜ Kšœœ˜=KšœSœ˜sKšœ˜—Kšœ;˜;š œœ œœ,œ˜^K˜4K™KšœHœ˜pK˜—K˜—šœ ˜ Kšœ˜Kšœ œœ˜Kšœ˜šœ˜ Kšœœ˜KšœSœ˜sKšœ˜—K™KšœHœ˜pKšœœJ˜dKšœ˜—Kšœœ˜—K˜—J˜š’œœœ&œ˜_J˜*J˜!šœI˜PJšœ>œ˜F—Jšœœœ˜(Jšœ˜J˜J˜—šž œœœœ˜GK˜K˜Kšœ6œ˜