transaction.py
966 lines
| 32.8 KiB
| text/x-python
|
PythonLexer
/ mercurial / transaction.py
Mads Kiilerich
|
r17424 | # transaction.py - simple journaling scheme for mercurial | ||
mpm@selenic.com
|
r0 | # | ||
# This transaction scheme is intended to gracefully handle program | ||||
# errors and interruptions. More serious failures like system crashes | ||||
# can be recovered with an fsck-like tool. As the whole repository is | ||||
# effectively log-structured, this should amount to simply truncating | ||||
# anything that isn't referenced in the changelog. | ||||
# | ||||
Raphaël Gomès
|
r47575 | # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com> | ||
mpm@selenic.com
|
r0 | # | ||
Martin Geisler
|
r8225 | # This software may be used and distributed according to the terms of the | ||
Matt Mackall
|
r10263 | # GNU General Public License version 2 or any later version. | ||
mpm@selenic.com
|
r0 | |||
r51194 | import errno | |||
r51191 | import os | |||
Gregory Szorc
|
r25986 | |||
from .i18n import _ | ||||
from . import ( | ||||
Matt Harbison
|
r51761 | encoding, | ||
Gregory Szorc
|
r25986 | error, | ||
Augie Fackler
|
r36753 | pycompat, | ||
Gregory Szorc
|
r25986 | util, | ||
) | ||||
Augie Fackler
|
r43346 | from .utils import stringutil | ||
Henrik Stuart
|
r8289 | |||
Pierre-Yves David
|
# version number of the on-disk format for the transaction journal and the
# associated ".backupfiles" listing
version = 2

# Identifiers for the groups of file generators run around transaction
# finalization (see `addfilegenerator` and `_generatefiles`).
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
Augie Fackler
|
r43346 | |||
Durham Goode
|
r28830 | |||
Henrik Stuart
|
def active(func):
    """Decorator: refuse to run `func` once the transaction has ended."""

    def _active(self, *args, **kwds):
        # the count drops to zero after commit/abort; using the transaction
        # afterwards is a programming error
        if not self._count:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active
mpm@selenic.com
|
r0 | |||
Augie Fackler
|
r43346 | |||
# template for the file listing the backups made by a transaction
# (e.g. ``undo.backupfiles``)
UNDO_BACKUP = b'%s.backupfiles'

# (vfs-location, path-template) pairs of files that `cleanup_undo_files`
# must try to remove; templates are filled with the undo prefix.
UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is needed if
    # the process is interrupted.
    (b'store', b'%s'),
]
r51194 | ||||
def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense. For example after a strip.
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    store_vfs = vfsmap[b'store']
    entries = []
    try:
        with store_vfs(backup_listing) as listing_file:
            entries = read_backup_files(report, listing_file)
    except OSError as err:
        # a missing listing simply means there is nothing to clean up;
        # any other read error is reported but not fatal
        if err.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (store_vfs.join(backup_listing), stringutil.forcebytestr(err))
            report(msg)

    # gather (vfs, path) pairs for every file the rollback logic may have left
    to_remove = [
        (vfsmap[loc], bck)
        for loc, _path, bck, _cache in entries
        if loc in vfsmap and bck
    ]
    to_remove.append((store_vfs, backup_listing))
    to_remove.extend(
        (vfsmap[loc], template % undo_prefix)
        for loc, template in UNDO_FILES_MAY_NEED_CLEANUP
    )
    for target_vfs, target_path in to_remove:
        try:
            target_vfs.unlink(target_path)
        except OSError as err:
            # already-absent files are fine; report anything else
            if err.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (target_vfs.join(target_path), stringutil.forcebytestr(err))
                report(msg)
r51194 | ||||
Augie Fackler
|
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction :
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files

    * `journal`: name of the journal file (used to derive the backup listing
      name cleaned up at the end)
    * `report`: callable used to emit user-facing messages
    * `opener`: vfs for the store content
    * `vfsmap`: {location -> vfs} map used to resolve backup entry locations
    * `entries`: (file, offset) pairs of append-only files to truncate
    * `backupentries`: (location, path, backuppath, cache) tuples
    * `unlink`: when true, files recorded with offset 0 are deleted
    * `checkambigfiles`: set of (path, location) pairs for which file stat
      ambiguity must be avoided when restoring/truncating
    """
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        # copy the backup `b` back over `f`; remember the backup so it can
        # be removed once the whole playback succeeded
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        # compare vfs roots to decide whether this entry lives in the store
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate all file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    # the file is shorter than the recorded offset: truncating
                    # would not restore the old content, so bail out loudly
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            # already restored in the truncate loop above
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                # an entry with an empty path or backup path is a temporary
                # or never-existing file: remove whichever name is set
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both case, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            # cache entries (c truthy) are best-effort; anything else aborts
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
Henrik Stuart
|
r8294 | |||
Augie Fackler
|
r43346 | |||
Martin von Zweigbergk
|
r33790 | class transaction(util.transactional): | ||
Augie Fackler
|
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name=b'<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit user-facing messages
        * `opener`: vfs for the store content
        * `vfsmap`: {location -> vfs} map for files outside the store
        * `journalname`: name of the journal file recording truncate offsets
        * `undoname`: prefix used for the "undo" files written on close
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `validator`: registered as the first validation callback
        * `releasefn`: called after releasing (with transaction and result)
        * `name`: label used in `__repr__` for debugging

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # nesting/usage counters; the transaction really ends when both
        # reach zero (see `nest`, `release`, `close`)
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # {file -> pre-transaction offset} for appended files
        self._offsetmap = {}
        # files created during the transaction (offset 0)
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        # stack of pending (file, offset) groups (see start/endgroup)
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        # names of the nested scopes, joined for `__repr__`
        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)
        # the set of temporary files
        self._tmp_files = set()

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
Pierre-Yves David
|
r22078 | |||
Martin von Zweigbergk
|
r36837 | def __repr__(self): | ||
r51579 | name = b'/'.join(self._names) | |||
Augie Fackler
|
r43906 | return '<transaction name=%s, count=%d, usages=%d>' % ( | ||
Matt Harbison
|
r51761 | encoding.strfromlocal(name), | ||
Augie Fackler
|
r43346 | self._count, | ||
self._usages, | ||||
) | ||||
Martin von Zweigbergk
|
r36837 | |||
mpm@selenic.com
|
    def __del__(self):
        # `_journal` is cleared by `close`; if it is still set when the
        # object is collected, the transaction was never committed and must
        # be rolled back.
        if self._journal:
            self._abort()
mpm@selenic.com
|
r0 | |||
    @property
    def finalized(self):
        # `_finalizecallback` is set to None once all finalize callbacks have
        # run (see `close`), so None means the transaction was finalized.
        return self._finalizecallback is None
Henrik Stuart
|
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        # each started group gets its own list of delayed (file, offset) pairs
        self._queue.append([])
Henrik Stuart
|
r8363 | |||
@active | ||||
def endgroup(self): | ||||
Pierre-Yves David
|
r23250 | """apply delayed registration of file entry. | ||
This is used by strip to delay vision of strip offset. The transaction | ||||
sees either none or all of the strip actions to be done.""" | ||||
Henrik Stuart
|
r8363 | q = self._queue.pop() | ||
Joerg Sonnenberger
|
r46481 | for f, o in q: | ||
self._addentry(f, o) | ||||
Henrik Stuart
|
r8363 | |||
@active | ||||
Joerg Sonnenberger
|
r46481 | def add(self, file, offset): | ||
Pierre-Yves David
|
r23252 | """record the state of an append-only file before update""" | ||
Joerg Sonnenberger
|
r46484 | if ( | ||
file in self._newfiles | ||||
or file in self._offsetmap | ||||
or file in self._backupmap | ||||
Arseniy Alekseyev
|
r51535 | or file in self._tmp_files | ||
Joerg Sonnenberger
|
r46484 | ): | ||
Matt Mackall
|
r10282 | return | ||
Henrik Stuart
|
r8363 | if self._queue: | ||
Joerg Sonnenberger
|
r46481 | self._queue[-1].append((file, offset)) | ||
Henrik Stuart
|
r8363 | return | ||
Joerg Sonnenberger
|
r46481 | self._addentry(file, offset) | ||
Pierre-Yves David
|
r23253 | |||
Joerg Sonnenberger
|
    def _addentry(self, file, offset):
        """add a append-only entry to memory and on-disk state"""
        # ignore files that are already tracked in any capacity
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
            or file in self._tmp_files
        ):
            return
        # offset 0 means the file did not exist before this transaction
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
mpm@selenic.com
|
r0 | |||
Henrik Stuart
|
r8289 | @active | ||
r51236 | def addbackup(self, file, hardlink=True, location=b'', for_offset=False): | |||
Durham Goode
|
r20882 | """Adds a backup of the file to the transaction | ||
Calling addbackup() creates a hardlink backup of the specified file | ||||
that is used to recover the file in the event of the transaction | ||||
aborting. | ||||
* `file`: the file path, relative to .hg/store | ||||
* `hardlink`: use a hardlink to quickly create the backup | ||||
r51236 | ||||
If `for_offset` is set, we expect a offset for this file to have been previously recorded | ||||
Durham Goode
|
r20882 | """ | ||
Pierre-Yves David
|
r23251 | if self._queue: | ||
Augie Fackler
|
r43347 | msg = b'cannot use transaction.addbackup inside "group"' | ||
Jun Wu
|
r31648 | raise error.ProgrammingError(msg) | ||
Durham Goode
|
r20882 | |||
r51236 | if file in self._newfiles or file in self._backupmap: | |||
return | ||||
elif file in self._offsetmap and not for_offset: | ||||
Durham Goode
|
r20882 | return | ||
r51236 | elif for_offset and file not in self._offsetmap: | |||
msg = ( | ||||
'calling `addbackup` with `for_offmap=True`, ' | ||||
'but no offset recorded: [%r] %r' | ||||
) | ||||
msg %= (location, file) | ||||
raise error.ProgrammingError(msg) | ||||
Pierre-Yves David
|
r23582 | vfs = self._vfsmap[location] | ||
dirname, filename = vfs.split(file) | ||||
r51579 | backupfilename = b"%s.backup.%s.bck" % (self._journal, filename) | |||
Pierre-Yves David
|
r23581 | backupfile = vfs.reljoin(dirname, backupfilename) | ||
Pierre-Yves David
|
r22663 | if vfs.exists(file): | ||
filepath = vfs.join(file) | ||||
Pierre-Yves David
|
r23314 | backuppath = vfs.join(backupfile) | ||
r51348 | # store encoding may result in different directory here. | |||
# so we have to ensure the destination directory exist | ||||
final_dir_name = os.path.dirname(backuppath) | ||||
util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True) | ||||
# then we can copy the backup | ||||
Pierre-Yves David
|
r23900 | util.copyfile(filepath, backuppath, hardlink=hardlink) | ||
Durham Goode
|
r20882 | else: | ||
Augie Fackler
|
r43347 | backupfile = b'' | ||
Durham Goode
|
r20882 | |||
Pierre-Yves David
|
r23316 | self._addbackupentry((location, file, backupfile, False)) | ||
Pierre-Yves David
|
r23283 | |||
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        # index by path (entry[1]) for the duplicate checks in add/addbackup
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()
Durham Goode
|
r20882 | |||
    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._tmp_files.add(tmpfile)
        # an entry with an empty path marks a temporary file (see the
        # backup entry format documented in __init__)
        self._addbackupentry((location, b'', tmpfile, False))
Pierre-Yves David
|
r23291 | |||
    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the keys of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry
Pierre-Yves David
|
r22078 | |||
Jun Wu
|
r33056 | @active | ||
def removefilegenerator(self, genid): | ||||
"""reverse of addfilegenerator, remove a file generator function""" | ||||
if genid in self._filegenerators: | ||||
del self._filegenerators[genid] | ||||
    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        """run the registered file generators and return True if any ran

        * `suffix`: appended to each generated file name; a non-empty suffix
          (e.g. b'.pending') makes the files temporary instead of backed up
        * `group`: which generators to run relative to finalization
          (GEN_GROUP_ALL, GEN_GROUP_PRE_FINALIZE or GEN_GROUP_POST_FINALIZE)
        """
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # suffixed files are temporary: never restored, only
                        # deleted when the transaction ends
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                # on failure, discard any atomictemp file left open so its
                # partial content is not committed
                for f in files:
                    f.discard()
        return any
Pierre-Yves David
|
r23102 | |||
Pierre-Yves David
|
    @active
    def findoffset(self, file):
        """return the pre-transaction offset recorded for `file`

        Returns 0 for files created during the transaction, and None for
        files the transaction knows nothing about.
        """
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)
@active | ||||
def readjournal(self): | ||||
self._file.seek(0) | ||||
entries = [] | ||||
Joerg Sonnenberger
|
r46680 | for l in self._file.readlines(): | ||
Joerg Sonnenberger
|
r46483 | file, troffset = l.split(b'\0') | ||
entries.append((file, int(troffset))) | ||||
return entries | ||||
Chris Mason
|
r2084 | |||
Henrik Stuart
|
    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        # move the file between the "new file" and "offset recorded" states
        # depending on whether the replacement offset is zero
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        # append the updated record to the on-disk journal
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
Chris Mason
|
r2084 | |||
Henrik Stuart
|
    @active
    def nest(self, name=b'<unnamed>'):
        # nested scopes share this transaction object; `release`/`close`
        # decrement the counters until the outermost scope finishes
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self
Ronny Pfannschmidt
|
    def release(self):
        # drop one usage of the innermost scope
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()
mason@suse.com
|
    def running(self):
        # true until the final close/abort drops the count to zero
        return self._count > 0
mason@suse.com
|
r1806 | |||
Pierre-Yves David
|
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback
@active | ||||
def writepending(self): | ||||
Augie Fackler
|
r46554 | """write pending file to temporary version | ||
Pierre-Yves David
|
r23202 | |||
Augie Fackler
|
r46554 | This is used to allow hooks to view a transaction before commit""" | ||
Pierre-Yves David
|
r23202 | categories = sorted(self._pendingcallback) | ||
for cat in categories: | ||||
# remove callback since the data will have been flushed | ||||
Pierre-Yves David
|
r23280 | any = self._pendingcallback.pop(cat)(self) | ||
Pierre-Yves David
|
r23202 | self._anypending = self._anypending or any | ||
Augie Fackler
|
r43347 | self._anypending |= self._generatefiles(suffix=b'.pending') | ||
Pierre-Yves David
|
r23202 | return self._anypending | ||
Henrik Stuart
|
    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)
    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        callback should raise exception if to abort transaction"""
        self._validatecallback[category] = callback
@active | ||||
mpm@selenic.com
|
r0 | def close(self): | ||
Greg Ward
|
r9220 | '''commit the transaction''' | ||
Gregory Szorc
|
r39710 | if self._count == 1: | ||
Pulkit Goyal
|
r45031 | for category in sorted(self._validatecallback): | ||
self._validatecallback[category](self) | ||||
self._validatecallback = None # Help prevent cycles. | ||||
r44886 | self._generatefiles(group=GEN_GROUP_PRE_FINALIZE) | |||
r44556 | while self._finalizecallback: | |||
callbacks = self._finalizecallback | ||||
self._finalizecallback = {} | ||||
categories = sorted(callbacks) | ||||
for cat in categories: | ||||
callbacks[cat](self) | ||||
Gregory Szorc
|
r28960 | # Prevent double usage and help clear cycles. | ||
self._finalizecallback = None | ||||
r44886 | self._generatefiles(group=GEN_GROUP_POST_FINALIZE) | |||
Durham Goode
|
r20881 | |||
Gregory Szorc
|
r39710 | self._count -= 1 | ||
if self._count != 0: | ||||
mason@suse.com
|
r1806 | return | ||
Gregory Szorc
|
r39713 | self._file.close() | ||
Pierre-Yves David
|
r23249 | self._backupsfile.close() | ||
Pierre-Yves David
|
r23291 | # cleanup temporary files | ||
Pierre-Yves David
|
r23312 | for l, f, b, c in self._backupentries: | ||
if l not in self._vfsmap and c: | ||||
Augie Fackler
|
r43346 | self._report( | ||
Augie Fackler
|
r43347 | b"couldn't remove %s: unknown cache location %s\n" % (b, l) | ||
Augie Fackler
|
r43346 | ) | ||
Pierre-Yves David
|
r23312 | continue | ||
Pierre-Yves David
|
r23311 | vfs = self._vfsmap[l] | ||
if not f and b and vfs.exists(b): | ||||
Pierre-Yves David
|
r23312 | try: | ||
vfs.unlink(b) | ||||
Pierre-Yves David
|
r26587 | except (IOError, OSError, error.Abort) as inst: | ||
Pierre-Yves David
|
r23312 | if not c: | ||
raise | ||||
# Abort may be raise by read only opener | ||||
Augie Fackler
|
r43346 | self._report( | ||
Augie Fackler
|
r43347 | b"couldn't remove %s: %s\n" % (vfs.join(b), inst) | ||
Augie Fackler
|
r43346 | ) | ||
Joerg Sonnenberger
|
r46483 | self._offsetmap = {} | ||
Joerg Sonnenberger
|
r46484 | self._newfiles = set() | ||
Pierre-Yves David
|
r23904 | self._writeundo() | ||
Gregory Szorc
|
r39717 | if self._after: | ||
self._after() | ||||
Augie Fackler
|
r43346 | self._after = None # Help prevent cycles. | ||
Gregory Szorc
|
r39718 | if self._opener.isfile(self._backupjournal): | ||
self._opener.unlink(self._backupjournal) | ||||
if self._opener.isfile(self._journal): | ||||
self._opener.unlink(self._journal) | ||||
Martin von Zweigbergk
|
r27662 | for l, _f, b, c in self._backupentries: | ||
if l not in self._vfsmap and c: | ||||
Augie Fackler
|
r43346 | self._report( | ||
Augie Fackler
|
r43347 | b"couldn't remove %s: unknown cache location" | ||
b"%s\n" % (b, l) | ||||
Augie Fackler
|
r43346 | ) | ||
Martin von Zweigbergk
|
r27662 | continue | ||
vfs = self._vfsmap[l] | ||||
if b and vfs.exists(b): | ||||
try: | ||||
vfs.unlink(b) | ||||
except (IOError, OSError, error.Abort) as inst: | ||||
if not c: | ||||
raise | ||||
# Abort may be raise by read only opener | ||||
Augie Fackler
|
r43346 | self._report( | ||
Augie Fackler
|
r43347 | b"couldn't remove %s: %s\n" % (vfs.join(b), inst) | ||
Augie Fackler
|
r43346 | ) | ||
Pierre-Yves David
|
r23249 | self._backupentries = [] | ||
Gregory Szorc
|
r39712 | self._journal = None | ||
FUJIWARA Katsunori
|
r26576 | |||
Augie Fackler
|
r43346 | self._releasefn(self, True) # notify success of closing transaction | ||
self._releasefn = None # Help prevent cycles. | ||||
FUJIWARA Katsunori
|
r26576 | |||
Pierre-Yves David
|
r23220 | # run post close action | ||
categories = sorted(self._postclosecallback) | ||||
for cat in categories: | ||||
Pierre-Yves David
|
r23282 | self._postclosecallback[cat](self) | ||
Gregory Szorc
|
r28960 | # Prevent double usage and help clear cycles. | ||
self._postclosecallback = None | ||||
mpm@selenic.com
|
r0 | |||
Henrik Stuart
|
    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()
    @active
    def add_journal(self, vfs_id, path):
        """record a journal file to be renamed to its undo name on close

        (consumed by `_writeundo`)
        """
        self._journal_files.append((vfs_id, path))
Pierre-Yves David
|
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            # undo support is disabled for this transaction
            return
        # drop undo files left over from the previous transaction before
        # writing the new ones
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            # Map a journal-related file name to the matching undo file
            # name by substituting the journal prefix exactly once.
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        # same on-disk format as the backup journal: a version header,
        # then one "location\0file\0backup\0cache" line per entry
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup file for this entry; record an empty path
                u = b''
            else:
                if l not in self._vfsmap and c:
                    # NOTE(review): this message says "remove" but this
                    # branch is skipping a *copy*; text looks copy-pasted
                    # from the removal path — confirm wording upstream.
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                # preserve the backup under its undo name (hardlink when
                # possible, so this is cheap)
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        # finally, move every extra journal file to its undo name
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass
Pierre-Yves David
|
r23904 | |||
Henrik Stuart
|
    def _abort(self):
        """Roll back everything this transaction wrote and release it.

        Uses a cheap cleanup when nothing but temporary files reached the
        disk (see _can_quick_abort), a full journal playback otherwise.
        """
        entries = self.readjournal()
        # mark the transaction as dead so @active methods refuse to run
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            # release must happen even if the rollback itself blew up
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
r50888 | def _can_quick_abort(self, entries): | |||
"""False if any semantic content have been written on disk | ||||
True if nothing, except temporary files has been writen on disk.""" | ||||
if entries: | ||||
return False | ||||
r50890 | for e in self._backupentries: | |||
if e[1]: | ||||
return False | ||||
r50888 | return True | |||
def _do_quick_abort(self, entries): | ||||
"""(Silently) do a quick cleanup (see _can_quick_abort)""" | ||||
assert self._can_quick_abort(entries) | ||||
r50890 | tmp_files = [e for e in self._backupentries if not e[1]] | |||
for vfs_id, old_path, tmp_path, xxx in tmp_files: | ||||
vfs = self._vfsmap[vfs_id] | ||||
try: | ||||
vfs.unlink(tmp_path) | ||||
except FileNotFoundError: | ||||
pass | ||||
r50888 | if self._backupjournal: | |||
self._opener.unlink(self._backupjournal) | ||||
if self._journal: | ||||
self._opener.unlink(self._journal) | ||||
    def _do_full_abort(self, entries):
        """(Noisily) rollback all the change introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        # BaseException: even KeyboardInterrupt/SystemExit must not make
        # us pretend the rollback succeeded; the user is told to run
        # `hg recover`. The exception is deliberately swallowed so the
        # abort path does not mask the error that triggered the abort.
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )
Henrik Stuart
|
r8290 | |||
# Message reported when a journal/backup file records a format version
# other than the `version` this module writes.
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)
def read_backup_files(report, fp):
    """parse an (already open) backup file and return contained backup entries

    entries are in the form: (location, file, backupfile, xxx)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    backupentries = []
    if lines:
        # first line is the format version written by the transaction
        ver = lines[0][:-1]
        if ver != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for line in lines[1:]:
                if line:
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split(b'\0')
                    # NOTE(review): c is written as b'0'/b'1', and any
                    # non-empty bytes (including b'0') is truthy, so
                    # bool(c) is True for both — confirm this matches the
                    # docstring's claim that the flag is "always False".
                    backupentries.append((l, f, b, bool(c)))
    return backupentries
def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Roll back the transaction recorded in the given journal file.

    The journal named by `file` lists ``file\\0offset`` pairs (one per
    line) telling where each touched file must be truncated. The
    companion ``<file>.backupfiles`` journal, when present, lists
    ``file\\0backupfile`` pairs used to restore files from their backups.

    `checkambigfiles` is a set of (path, vfs-location) tuples which
    determine whether file stat ambiguity should be avoided while
    restoring the corresponding files.

    Entries whose backup path matches `skip_journal_pattern` (when
    given) are left untouched.
    """
    entries = []
    backupentries = []

    # parse the truncation journal; malformed lines are reported and
    # skipped rather than aborting the recovery
    with opener.open(file) as fp:
        journal_lines = fp.readlines()
    for line in journal_lines:
        try:
            f, o = line.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(line)
            )

    # parse the companion backup journal, if any
    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        backupentries = [
            e for e in backupentries if not skip_journal_pattern.match(e[1])
        ]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )