transaction: fix __repr__() and make the default name bytes...
Matt Harbison
r51761:0a4efb65 stable
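The change below addresses two related issues in `__repr__()`: the default transaction name was the str literal `'<unnamed>'` while callers pass bytes, so `b'/'.join(self._names)` could raise `TypeError` on Python 3, and even with all-bytes names, %-formatting the joined bytes into the str returned by `__repr__()` yields `b'...'` noise. Below is a minimal standalone sketch of the before/after behavior; `strfromlocal()` is a simplified stand-in for `mercurial.encoding.strfromlocal`, which decodes local-encoding bytes to str:

# sketch of the failure modes this changeset fixes (stand-ins, not
# Mercurial's actual objects)
names = ['<unnamed>', b'changelog']  # old str default mixed with bytes
try:
    b'/'.join(names)
except TypeError as exc:
    print(exc)  # sequence item 0: expected a bytes-like object, str found

name = b'/'.join([b'<unnamed>', b'changelog'])  # new all-bytes default
print('<transaction name=%s>' % name)
# <transaction name=b'<unnamed>/changelog'>   <- bytes repr noise

def strfromlocal(b: bytes) -> str:
    # simplified stand-in for mercurial.encoding.strfromlocal
    return b.decode('utf-8', 'replace')

print('<transaction name=%s>' % strfromlocal(name))
# <transaction name=<unnamed>/changelog>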
@@ -1,965 +1,966 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno
import os

from .i18n import _
from . import (
+    encoding,
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active


UNDO_BACKUP = b'%s.backupfiles'

UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a cleanup is
    # needed if the process is interrupted.
    (b'store', b'%s'),
]


def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback from running in situations where it
    does not make sense, for example after a strip.
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
            report(msg)

    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                report(msg)


def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction:
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files
    """
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate each file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both cases, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass


class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
-        name='<unnamed>',
+        name=b'<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for the corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)
        # the set of temporary files
        self._tmp_files = set()

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = b'/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
-            name,
+            encoding.strfromlocal(name),
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
            or file in self._tmp_files
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
            or file in self._tmp_files
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        If `for_offset` is set, we expect an offset for this file to have
        been previously recorded
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._newfiles or file in self._backupmap:
            return
        elif file in self._offsetmap and not for_offset:
            return
        elif for_offset and file not in self._offsetmap:
            msg = (
                'calling `addbackup` with `for_offset=True`, '
                'but no offset recorded: [%r] %r'
            )
            msg %= (location, file)
            raise error.ProgrammingError(msg)

        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s.bck" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            # store encoding may result in a different directory here,
            # so we have to ensure the destination directory exists
            final_dir_name = os.path.dirname(backuppath)
            util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
            # then we can copy the backup
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._tmp_files.add(tmpfile)
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate that the files are
        located outside of the standard directory for the transaction. It
        should match one of the keys of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks, which are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def findoffset(self, file):
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
-    def nest(self, name='<unnamed>'):
+    def nest(self, name=b'<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
            if self._names:
                self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        The callback should raise an exception to abort the transaction."""
        self._validatecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    @active
    def add_journal(self, vfs_id, path):
        self._journal_files.append((vfs_id, path))

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename cannot be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    def _abort(self):
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.

    def _can_quick_abort(self, entries):
        """False if any semantic content has been written to disk

        True if nothing except temporary files has been written to disk."""
        if entries:
            return False
        for e in self._backupentries:
            if e[1]:
                return False
        return True

    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)

    def _do_full_abort(self, entries):
        """(Noisily) roll back all the changes introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )


BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def read_backup_files(report, fp):
    """parse an (already open) backup file and return the contained backup
    entries

    entries are in the form: (location, file, backupfile, cache)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    backupentries = []
    if lines:
        ver = lines[0][:-1]
        if ver != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for line in lines[1:]:
                if line:
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split(b'\0')
                    backupentries.append((l, f, b, bool(c)))
    return backupentries


def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
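For reference, the `<journal>.backupfiles` stream written by `_addbackupentry()` and parsed by `read_backup_files()` above is line oriented: a version line, then one `location\0file\0backupfile\0cache` record per line. A minimal standalone sketch of a parser for that layout, mirroring the logic above (simplified error handling; not Mercurial's actual API):

# parse a backup-files stream: b'2\n' header, then NUL-separated records
EXPECTED_VERSION = b'2'  # matches `version = 2` in this module

def parse_backup_entries(data: bytes):
    lines = data.split(b'\n')
    if not lines or lines[0] != EXPECTED_VERSION:
        return []  # the module reports a version mismatch instead
    entries = []
    for line in lines[1:]:
        if not line:
            continue
        location, file, backupfile, cache = line.split(b'\0')
        # note: bool(b'0') is True, since any non-empty bytes is truthy;
        # read_backup_files() above shares this behavior
        entries.append((location, file, backupfile, bool(cache)))
    return entries

print(parse_backup_entries(b'2\n\x00journal\x00journal.bck\x000\n'))
# [(b'', b'journal', b'journal.bck', True)]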