transaction: add clarifying comment about why ignoring some error is fine...
marmoute
r51233:70ca1f09 stable
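The comment added at lines 153-162 documents an idempotent-delete pattern: rollback wants the file gone, so a FileNotFoundError raised by unlink means the goal is already achieved and the error can be ignored. A minimal sketch of the pattern outside Mercurial's vfs layer (the helper name is illustrative, not part of this change):

import os

def unlink_if_exists(path):
    # The desired end state is "path does not exist". If unlink raises
    # FileNotFoundError, that state already holds, so the error is safe
    # to swallow -- the rationale the new comment in _playback spells out.
    try:
        os.unlink(path)
    except FileNotFoundError:
        pass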
@@ -1,899 +1,909 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno
import os

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active


UNDO_BACKUP = b'%s.backupfiles'

UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a cleanup is
    # needed if the process is interrupted.
    (b'store', b'%s'),
]


def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense. For example after a strip.
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
            report(msg)

    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                report(msg)


def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    for f, o in sorted(dict(entries).items()):
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append((vfs, b))
                except IOError as exc:
                    e_msg = stringutil.forcebytestr(exc)
                    report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because:
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted,
                    #
                    # or we are trying to delete a temporary file, and it is
                    # already deleted.
                    #
                    # In both cases, our target result (deleting the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass


class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        # use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self,
        genid,
        filenames,
        genfunc,
        order=0,
        location=b'',
        post_finalize=False,
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transactions. It should match
        one of the keys of the `transaction.vfsmap` dictionary.

        The `post_finalize` argument can be set to `True` for file generation
        that must be run after the transaction has been finalized.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks, which are handled outside this mechanism.
        entry = (order, filenames, genfunc, location, post_finalize)
        self._filegenerators[genid] = entry

    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
        # write files registered for generation
        any = False

        if group == GEN_GROUP_ALL:
            skip_post = skip_pre = False
        else:
            skip_pre = group == GEN_GROUP_POST_FINALIZE
            skip_post = group == GEN_GROUP_PRE_FINALIZE

        for id, entry in sorted(self._filegenerators.items()):
            any = True
            order, filenames, genfunc, location, post_finalize = entry

            # for generation at closing, check if it's before or after finalize
            if skip_post and post_finalize:
                continue
            elif skip_pre and not post_finalize:
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def findoffset(self, file):
        if file in self._newfiles:
            return 0
        return self._offsetmap.get(file)

    @active
    def readjournal(self):
        self._file.seek(0)
        entries = []
        for l in self._file.readlines():
            file, troffset = l.split(b'\0')
            entries.append((file, int(troffset)))
        return entries

    @active
    def replace(self, file, offset):
        """
        replace can only replace already committed entries
        that are not pending in the queue
        """
        if file in self._newfiles:
            if not offset:
                return
            self._newfiles.remove(file)
            self._offsetmap[file] = offset
        elif file in self._offsetmap:
            if not offset:
                del self._offsetmap[file]
                self._newfiles.add(file)
            else:
                self._offsetmap[file] = offset
        else:
            raise KeyError(file)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        """write pending file to temporary version

        This is used to allow hooks to view a transaction before commit"""
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending
    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks
        with newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        The callback should raise an exception to abort the transaction."""
        self._validatecallback[category] = callback
    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        """abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)"""
        self._abort()

    @active
    def add_journal(self, vfs_id, path):
        self._journal_files.append((vfs_id, path))

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location %s\n"
                        % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass

    def _abort(self):
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.

    def _can_quick_abort(self, entries):
        """False if any semantic content has been written on disk

        True if nothing except temporary files has been written on disk."""
        if entries:
            return False
        for e in self._backupentries:
            if e[1]:
                return False
        return True

    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)

    def _do_full_abort(self, entries):
        """(Noisily) rollback all the changes introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )


BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)


def read_backup_files(report, fp):
    """parse an (already open) backup file and return contained backup entries

    entries are in the form: (location, file, backupfile, cache)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    backupentries = []
    if lines:
        ver = lines[0][:-1]
        if ver != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for line in lines[1:]:
                if line:
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split(b'\0')
                    backupentries.append((l, f, b, bool(c)))
    return backupentries


def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
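
For context, the journal scheme above is record-then-truncate: before an append-only file is modified, add() records its pre-transaction size in the journal, and _playback() recovers by truncating each file back to its recorded offset (files recorded at offset zero are simply unlinked). A self-contained sketch of that idea using plain files instead of Mercurial's opener/vfs abstractions (function names here are illustrative, not Mercurial APIs):

import os

def journal_add(journal, path):
    # record the current size so an abort can truncate back to it
    size = os.path.getsize(path) if os.path.exists(path) else 0
    with open(journal, 'ab') as j:
        j.write(b'%s\0%d\n' % (path.encode(), size))

def journal_playback(journal):
    # restore each file to its recorded pre-transaction size
    # (the real _playback unlinks files recorded at offset zero)
    with open(journal, 'rb') as j:
        for line in j.read().splitlines():
            path, offset = line.split(b'\0')
            with open(path.decode(), 'ab') as f:
                f.truncate(int(offset))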