transaction: tr._names are actually bytes, use byte string to join them...
av6
r51343:392e2f31 default
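The change itself is a single line in transaction.__repr__: tr._names holds
byte strings, so joining them with a native-str separator '/' raises a
TypeError on Python 3; the separator must be a byte string as well. A minimal
sketch of the failure mode (the sample names below are illustrative, not taken
from the changeset):

>>> names = [b'<unnamed>', b'inner']   # tr._names holds byte strings
>>> '/'.join(names)
Traceback (most recent call last):
  ...
TypeError: sequence item 0: expected str instance, bytes found
>>> b'/'.join(names)
b'<unnamed>/inner'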
@@ -1,955 +1,955 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno
import os

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active


UNDO_BACKUP = b'%s.backupfiles'

UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is needed if
    # the process is interrupted.
    (b'store', b'%s'),
]


def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does not
    make sense. For example after a strip.
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
            report(msg)

    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                report(msg)

def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction :
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files
    """
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate each file `f` to its offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both cases, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass


class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for the corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise an exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
-        name = '/'.join(self._names)
+        name = b'/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        If `for_offset` is set, we expect an offset for this file to have been
        previously recorded
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._newfiles or file in self._backupmap:
            return
        elif file in self._offsetmap and not for_offset:
            return
        elif for_offset and file not in self._offsetmap:
            msg = (
                'calling `addbackup` with `for_offset=True`, '
                'but no offset recorded: [%r] %r'
            )
            msg %= (location, file)
            raise error.ProgrammingError(msg)

        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transactions. It should match
        one of the keys of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def findoffset(self, file):
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
624 @active
624 @active
625 def addpostclose(self, category, callback):
625 def addpostclose(self, category, callback):
626 """add or replace a callback to be called after the transaction closed
626 """add or replace a callback to be called after the transaction closed
627
627
628 The transaction will be given as callback's first argument.
628 The transaction will be given as callback's first argument.
629
629
630 Category is a unique identifier to allow overwriting an old callback
630 Category is a unique identifier to allow overwriting an old callback
631 with a newer callback.
631 with a newer callback.
632 """
632 """
633 self._postclosecallback[category] = callback
633 self._postclosecallback[category] = callback
634
634
635 @active
635 @active
636 def getpostclose(self, category):
636 def getpostclose(self, category):
637 """return a postclose callback added before, or None"""
637 """return a postclose callback added before, or None"""
638 return self._postclosecallback.get(category, None)
638 return self._postclosecallback.get(category, None)
639
639
640 @active
640 @active
641 def addabort(self, category, callback):
641 def addabort(self, category, callback):
642 """add a callback to be called when the transaction is aborted.
642 """add a callback to be called when the transaction is aborted.
643
643
644 The transaction will be given as the first argument to the callback.
644 The transaction will be given as the first argument to the callback.
645
645
646 Category is a unique identifier to allow overwriting an old callback
646 Category is a unique identifier to allow overwriting an old callback
647 with a newer callback.
647 with a newer callback.
648 """
648 """
649 self._abortcallback[category] = callback
649 self._abortcallback[category] = callback
650
650
651 @active
651 @active
652 def addvalidator(self, category, callback):
652 def addvalidator(self, category, callback):
653 """adds a callback to be called when validating the transaction.
653 """adds a callback to be called when validating the transaction.
654
654
655 The transaction will be given as the first argument to the callback.
655 The transaction will be given as the first argument to the callback.
656
656
657 callback should raise exception if to abort transaction"""
657 callback should raise exception if to abort transaction"""
658 self._validatecallback[category] = callback
658 self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location "
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    @active
    def add_journal(self, vfs_id, path):
        self._journal_files.append((vfs_id, path))

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location "
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    def _abort(self):
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.

    def _can_quick_abort(self, entries):
        """False if any semantic content has been written on disk

        True if nothing except temporary files has been written on disk."""
        if entries:
            return False
        for e in self._backupentries:
            if e[1]:
                return False
        return True

    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)

    def _do_full_abort(self, entries):
        """(Noisily) rollback all the changes introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )


BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def read_backup_files(report, fp):
    """parse an (already open) backup file and return the contained backup entries

    entries are in the form: (location, file, backupfile, cache)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    backupentries = []
    if lines:
        ver = lines[0][:-1]
        if ver != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for line in lines[1:]:
                if line:
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split(b'\0')
                    backupentries.append((l, f, b, bool(c)))
    return backupentries


def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )