transaction: move the restoration of backup file in a small closure...

marmoute - r51235:86dc9e09 stable
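This changeset extracts the backup-restoration logic of `_playback` into a
small closure, `restore_one_backup`, defined once at the top of the function;
the per-entry loop below then calls it instead of repeating the
copy-and-report block inline. As a rough standalone sketch of the pattern
(illustrative names only, not Mercurial's actual vfs and report objects):

    import shutil

    def playback(backup_entries, report):
        restored = []  # shared state captured by the closure

        def restore_one_backup(path, backup_path):
            # one place for the copy, the bookkeeping, and the error report
            try:
                shutil.copyfile(backup_path, path)
                restored.append(backup_path)
            except IOError as exc:
                report("failed to recover %s (%s)" % (path, exc))
                raise

        for path, backup_path in backup_entries:
            restore_one_backup(path, backup_path)
        return restored

Keeping the copy, the bookkeeping, and the error report in one place means
the restore behaviour cannot drift between call sites.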
@@ -1,910 +1,914 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import errno
 import os

 from .i18n import _
 from . import (
     error,
     pycompat,
     util,
 )
 from .utils import stringutil

 version = 2

 GEN_GROUP_ALL = b'all'
 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
 GEN_GROUP_POST_FINALIZE = b'postfinalize'


 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
             raise error.ProgrammingError(
                 b'cannot use transaction when it is already committed/aborted'
             )
         return func(self, *args, **kwds)

     return _active


 UNDO_BACKUP = b'%s.backupfiles'

 UNDO_FILES_MAY_NEED_CLEANUP = [
     # legacy entries that might exist on disk from previous versions:
     (b'store', b'%s.narrowspec'),
     (b'plain', b'%s.narrowspec.dirstate'),
     (b'plain', b'%s.branch'),
     (b'plain', b'%s.bookmarks'),
     (b'store', b'%s.phaseroots'),
     (b'plain', b'%s.dirstate'),
     # files actually in use today:
     (b'plain', b'%s.desc'),
     # Always delete undo last to make sure we detect that a clean up is needed if
     # the process is interrupted.
     (b'store', b'%s'),
 ]


 def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
     """remove "undo" files used by the rollback logic

     This is useful to prevent rollback from running in situations where it
     does not make sense. For example after a strip.
     """
     backup_listing = UNDO_BACKUP % undo_prefix

     backup_entries = []
     undo_files = []
     svfs = vfsmap[b'store']
     try:
         with svfs(backup_listing) as f:
             backup_entries = read_backup_files(report, f)
     except OSError as e:
         if e.errno != errno.ENOENT:
             msg = _(b'could not read %s: %s\n')
             msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
             report(msg)

     for location, f, backup_path, c in backup_entries:
         if location in vfsmap and backup_path:
             undo_files.append((vfsmap[location], backup_path))

     undo_files.append((svfs, backup_listing))
     for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
         undo_files.append((vfsmap[location], undo_path % undo_prefix))
     for undovfs, undofile in undo_files:
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 msg = _(b'error removing %s: %s\n')
                 msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                 report(msg)


 def _playback(
     journal,
     report,
     opener,
     vfsmap,
     entries,
     backupentries,
     unlink=True,
     checkambigfiles=None,
 ):
+    backupfiles = []
+
+    def restore_one_backup(vfs, f, b, checkambig):
+        filepath = vfs.join(f)
+        backuppath = vfs.join(b)
+        try:
+            util.copyfile(backuppath, filepath, checkambig=checkambig)
+            backupfiles.append((vfs, b))
+        except IOError as exc:
+            e_msg = stringutil.forcebytestr(exc)
+            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
+            raise
+
     for f, o in sorted(dict(entries).items()):
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
                 fp = opener(f, b'a', checkambig=checkambig)
                 if fp.tell() < o:
                     raise error.Abort(
                         _(
                             b"attempted to truncate %s to %d bytes, but it was "
                             b"already %d bytes\n"
                         )
                         % (f, o, fp.tell())
                     )
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except FileNotFoundError:
                 pass

-    backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
+            checkambig = checkambigfiles and (f, l) in checkambigfiles
             if f and b:
-                filepath = vfs.join(f)
-                backuppath = vfs.join(b)
-                checkambig = checkambigfiles and (f, l) in checkambigfiles
-                try:
-                    util.copyfile(backuppath, filepath, checkambig=checkambig)
-                    backupfiles.append((vfs, b))
-                except IOError as exc:
-                    e_msg = stringutil.forcebytestr(exc)
-                    report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
-                    raise
+                restore_one_backup(vfs, f, b, checkambig)
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except FileNotFoundError:
                     # This is fine because
                     #
                     # either we are trying to delete the main file, and it is
                     # already deleted.
                     #
                     # or we are trying to delete a temporary file and it is
                     # already deleted.
                     #
                     # in both cases, our target result (delete the file) is
                     # already achieved.
                     pass
         except (IOError, OSError, error.Abort):
             if not c:
                 raise

     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
         for vfs, f in backupfiles:
             if vfs.exists(f):
                 vfs.unlink(f)
     except (IOError, OSError, error.Abort):
         # only pure backup files remain; it is safe to ignore any error
         pass


 class transaction(util.transactional):
     def __init__(
         self,
         report,
         opener,
         vfsmap,
         journalname,
         undoname=None,
         after=None,
         createmode=None,
         validator=None,
         releasefn=None,
         checkambigfiles=None,
         name='<unnamed>',
     ):
         """Begin a new transaction

         Begins a new transaction that allows rolling back writes in the event of
         an exception.

         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `releasefn`: called after releasing (with transaction and result)

         `checkambigfiles` is a set of (path, vfs-location) tuples,
         which determine whether file stat ambiguity should be avoided
         for corresponding files.
         """
         self._count = 1
         self._usages = 1
         self._report = report
         # a vfs to the store content
         self._opener = opener
         # a map to access files in various locations {location -> vfs}
         vfsmap = vfsmap.copy()
         vfsmap[b''] = opener  # set default value
         self._vfsmap = vfsmap
         self._after = after
         self._offsetmap = {}
         self._newfiles = set()
         self._journal = journalname
         self._journal_files = []
         self._undoname = undoname
         self._queue = []
         # A callback to do something just after releasing transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
         self._releasefn = releasefn

         self._checkambigfiles = set()
         if checkambigfiles:
             self._checkambigfiles.update(checkambigfiles)

         self._names = [name]

         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
         self.changes = {}

         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self._file = opener.open(self._journal, b"w+")

         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
         # - if 'path' is empty, this is a temporary transaction file
         # - if 'location' is not empty, the path is outside main opener reach.
         #   use 'location' value as a key in a vfsmap to find the right 'vfs'
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)

         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)

         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has ever been written
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # holds callback to call when validating the transaction
         # should raise exception if anything is wrong
         self._validatecallback = {}
         if validator is not None:
             self._validatecallback[b'001-userhooks'] = validator
         # hold callback for post transaction close
         self._postclosecallback = {}
         # holds callbacks to call during abort
         self._abortcallback = {}

     def __repr__(self):
         name = '/'.join(self._names)
         return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
         )

     def __del__(self):
         if self._journal:
             self._abort()

     @property
     def finalized(self):
         return self._finalizecallback is None

     @active
     def startgroup(self):
         """delay registration of file entry

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])

     @active
     def endgroup(self):
         """apply delayed registration of file entry.

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o in q:
             self._addentry(f, o)

     @active
     def add(self, file, offset):
         """record the state of an append-only file before update"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if self._queue:
             self._queue[-1].append((file, offset))
             return

         self._addentry(file, offset)

     def _addentry(self, file, offset):
         """add an append-only entry to memory and on-disk state"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if offset:
             self._offsetmap[file] = offset
         else:
             self._newfiles.add(file)
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction

         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.

         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)

         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''

         self._addbackupentry((location, file, backupfile, False))

     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()

     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file

         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))

     @active
     def addfilegenerator(
         self,
         genid,
         filenames,
         genfunc,
         order=0,
         location=b'',
         post_finalize=False,
     ):
         """add a function to generate some files at transaction commit

         The `genfunc` argument is a function capable of generating proper
         content for each entry in the `filenames` tuple.

         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.

         The transaction itself is responsible for the backup, creation and
         final write of such files.

         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.

         The `order` argument may be used to control the order in which
         multiple generators will be executed.

         The `location` argument may be used to indicate that the files are
         located outside of the standard directory for transactions. It should
         match one of the keys of the `transaction.vfsmap` dictionary.

         The `post_finalize` argument can be set to `True` for file generation
         that must be run after the transaction has been finalized.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
         entry = (order, filenames, genfunc, location, post_finalize)
         self._filegenerators[genid] = entry

     @active
     def removefilegenerator(self, genid):
         """reverse of addfilegenerator, remove a file generator function"""
         if genid in self._filegenerators:
             del self._filegenerators[genid]

     def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False

         if group == GEN_GROUP_ALL:
             skip_post = skip_pre = False
         else:
             skip_pre = group == GEN_GROUP_POST_FINALIZE
             skip_post = group == GEN_GROUP_PRE_FINALIZE

         for id, entry in sorted(self._filegenerators.items()):
             any = True
             order, filenames, genfunc, location, post_finalize = entry

             # for generation at closing, check if it's before or after finalize
             if skip_post and post_finalize:
                 continue
             elif skip_pre and not post_finalize:
                 continue

             vfs = self._vfsmap[location]
             files = []
             try:
                 for name in filenames:
                     name += suffix
                     if suffix:
                         self.registertmp(name, location=location)
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
                     files.append(
                         vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                     )
                 genfunc(*files)
                 for f in files:
                     f.close()
                 # skip discard() loop since we're sure no open file remains
                 del files[:]
             finally:
                 for f in files:
                     f.discard()
         return any

     @active
     def findoffset(self, file):
         if file in self._newfiles:
             return 0
         return self._offsetmap.get(file)

     @active
     def readjournal(self):
         self._file.seek(0)
         entries = []
         for l in self._file.readlines():
             file, troffset = l.split(b'\0')
             entries.append((file, int(troffset)))
         return entries

     @active
     def replace(self, file, offset):
         """
         replace can only replace already committed entries
         that are not pending in the queue
         """
         if file in self._newfiles:
             if not offset:
                 return
             self._newfiles.remove(file)
             self._offsetmap[file] = offset
         elif file in self._offsetmap:
             if not offset:
                 del self._offsetmap[file]
                 self._newfiles.add(file)
             else:
                 self._offsetmap[file] = offset
         else:
             raise KeyError(file)
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def nest(self, name='<unnamed>'):
         self._count += 1
         self._usages += 1
         self._names.append(name)
         return self

     def release(self):
         if self._count > 0:
             self._usages -= 1
         if self._names:
             self._names.pop()
         # if the transaction scopes are left without being closed, fail
         if self._count > 0 and self._usages == 0:
             self._abort()

     def running(self):
         return self._count > 0

     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending

         The transaction will be given as the callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback

     @active
     def writepending(self):
         """write pending file to temporary version

         This is used to allow hooks to view a transaction before commit"""
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         self._anypending |= self._generatefiles(suffix=b'.pending')
         return self._anypending

     @active
     def hasfinalize(self, category):
         """check whether a callback already exists for a category"""
         return category in self._finalizecallback

     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed

         The transaction will be given as the callback's first argument.

         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback

     @active
     def addpostclose(self, category, callback):
         """add or replace a callback to be called after the transaction closed

         The transaction will be given as the callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback

     @active
     def getpostclose(self, category):
         """return a postclose callback added before, or None"""
         return self._postclosecallback.get(category, None)

     @active
     def addabort(self, category, callback):
         """add a callback to be called when the transaction is aborted.

         The transaction will be given as the first argument to the callback.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._abortcallback[category] = callback

     @active
     def addvalidator(self, category, callback):
         """adds a callback to be called when validating the transaction.

         The transaction will be given as the first argument to the callback.

         The callback should raise an exception to abort the transaction."""
         self._validatecallback[category] = callback

     @active
     def close(self):
         '''commit the transaction'''
         if self._count == 1:
             for category in sorted(self._validatecallback):
                 self._validatecallback[category](self)
             self._validatecallback = None  # Help prevent cycles.
             self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
                 categories = sorted(callbacks)
                 for cat in categories:
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

         self._count -= 1
         if self._count != 0:
             return
         self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._offsetmap = {}
         self._newfiles = set()
         self._writeundo()
         if self._after:
             self._after()
             self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location "
                     b"%s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._backupentries = []
         self._journal = None

         self._releasefn(self, True)  # notify success of closing transaction
         self._releasefn = None  # Help prevent cycles.

         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
         # Prevent double usage and help clear cycles.
         self._postclosecallback = None

     @active
     def abort(self):
         """abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)"""
         self._abort()

     @active
     def add_journal(self, vfs_id, path):
         self._journal_files.append((vfs_id, path))

     def _writeundo(self):
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return
         cleanup_undo_files(
             self._report,
             self._vfsmap,
             undo_prefix=self._undoname,
         )

         def undoname(fn: bytes) -> bytes:
             base, name = os.path.split(fn)
             assert name.startswith(self._journal)
             new_name = name.replace(self._journal, self._undoname, 1)
             return os.path.join(base, new_name)

         undo_backup_path = b"%s.backupfiles" % self._undoname
         undobackupfile = self._opener.open(undo_backup_path, b'w')
         undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
                 u = b''
             else:
                 if l not in self._vfsmap and c:
                     self._report(
                         b"couldn't remove %s: unknown cache location "
                         b"%s\n" % (b, l)
                     )
                     continue
                 vfs = self._vfsmap[l]
                 u = undoname(b)
                 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()
         for vfs, src in self._journal_files:
             dest = undoname(src)
             # if src and dest refer to the same file, vfs.rename is a no-op,
             # leaving both src and dest on disk. delete dest to make sure
             # the rename couldn't be such a no-op.
             vfs.tryunlink(dest)
             try:
                 vfs.rename(src, dest)
             except FileNotFoundError:  # journal file does not yet exist
                 pass

     def _abort(self):
         entries = self.readjournal()
         self._count = 0
         self._usages = 0
         self._file.close()
         self._backupsfile.close()

         quick = self._can_quick_abort(entries)
         try:
             if not quick:
                 self._report(_(b"transaction abort!\n"))
             for cat in sorted(self._abortcallback):
                 self._abortcallback[cat](self)
             # Prevent double usage and help clear cycles.
             self._abortcallback = None
             if quick:
                 self._do_quick_abort(entries)
             else:
                 self._do_full_abort(entries)
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.

     def _can_quick_abort(self, entries):
         """False if any semantic content has been written on disk

         True if nothing except temporary files has been written on disk."""
         if entries:
             return False
         for e in self._backupentries:
             if e[1]:
                 return False
         return True

     def _do_quick_abort(self, entries):
         """(Silently) do a quick cleanup (see _can_quick_abort)"""
         assert self._can_quick_abort(entries)
         tmp_files = [e for e in self._backupentries if not e[1]]
         for vfs_id, old_path, tmp_path, xxx in tmp_files:
             vfs = self._vfsmap[vfs_id]
             try:
                 vfs.unlink(tmp_path)
             except FileNotFoundError:
                 pass
         if self._backupjournal:
             self._opener.unlink(self._backupjournal)
         if self._journal:
             self._opener.unlink(self._journal)

     def _do_full_abort(self, entries):
         """(Noisily) rollback all the changes introduced by the transaction"""
         try:
             _playback(
                 self._journal,
                 self._report,
                 self._opener,
                 self._vfsmap,
                 entries,
                 self._backupentries,
                 False,
                 checkambigfiles=self._checkambigfiles,
             )
             self._report(_(b"rollback completed\n"))
         except BaseException as exc:
             self._report(_(b"rollback failed - please run hg recover\n"))
             self._report(
                 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
             )


 BAD_VERSION_MSG = _(
     b"journal was created by a different version of Mercurial\n"
 )


 def read_backup_files(report, fp):
     """parse an (already open) backup file and return the contained backup entries

     entries are in the form: (location, file, backupfile, cache)

     :location: the vfs identifier (vfsmap's key)
     :file: original file path (in the vfs)
     :backupfile: path of the backup (in the vfs)
     :cache: a boolean currently always set to False
     """
     lines = fp.readlines()
     backupentries = []
     if lines:
         ver = lines[0][:-1]
         if ver != (b'%d' % version):
             report(BAD_VERSION_MSG)
         else:
             for line in lines[1:]:
                 if line:
                     # Shave off the trailing newline
                     line = line[:-1]
                     l, f, b, c = line.split(b'\0')
                     backupentries.append((l, f, b, bool(c)))
     return backupentries



 def rollback(
     opener,
     vfsmap,
     file,
     report,
     checkambigfiles=None,
     skip_journal_pattern=None,
 ):
     """Rolls back the transaction contained in the given file

     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.

     * `file`: a file containing a list of entries, specifying where
     to truncate each file. The file should contain a list of
     file\0offset pairs, delimited by newlines. The corresponding
     '*.backupfiles' file should contain a list of file\0backupfile
     pairs, delimited by \0.

     `checkambigfiles` is a set of (path, vfs-location) tuples,
     which determine whether file stat ambiguity should be avoided at
     restoring corresponding files.
     """
     entries = []
     backupentries = []

     with opener.open(file) as fp:
         lines = fp.readlines()
     for l in lines:
         try:
             f, o = l.split(b'\0')
             entries.append((f, int(o)))
         except ValueError:
             report(
                 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
             )

     backupjournal = b"%s.backupfiles" % file
     if opener.exists(backupjournal):
         with opener.open(backupjournal) as fp:
             backupentries = read_backup_files(report, fp)
     if skip_journal_pattern is not None:
         keep = lambda x: not skip_journal_pattern.match(x[1])
         backupentries = [x for x in backupentries if keep(x)]

     _playback(
         file,
         report,
         opener,
         vfsmap,
         entries,
         backupentries,
         checkambigfiles=checkambigfiles,
     )
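For reference, the journal consumed by rollback() above is the
newline-delimited file\0offset format written by _addentry() and replace().
A minimal sketch of a parser for that format (standalone; the helper name is
illustrative, not part of this module):

    def parse_journal(data: bytes):
        entries = []
        for line in data.splitlines():
            try:
                f, o = line.split(b'\0')
                entries.append((f, int(o)))
            except ValueError:
                # rollback() reports unreadable entries and keeps going
                continue
        return entries

    assert parse_journal(b'00changelog.i\x00512\n') == [(b'00changelog.i', 512)]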