undo-files: make the undo-prefix configurable in `cleanup_undo_files`...
marmoute
r51197:94a8c354 stable
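This changeset turns the hard-coded b'undo' prefix into an `undo_prefix` argument, so callers that keep their undo files under another prefix can reuse the same cleanup logic. A minimal usage sketch follows (not part of the changeset: the `drop_undo_files` helper is hypothetical, and it assumes `repo` is a localrepository whose `repo.vfs`/`repo.svfs` supply the b'plain' and b'store' locations `cleanup_undo_files` expects in its vfsmap):

    from mercurial import transaction

    def drop_undo_files(repo, prefix=b'undo'):
        # Hypothetical helper: build the {location -> vfs} map the cleanup
        # function walks, then remove the rollback files for `prefix`.
        vfsmap = {b'plain': repo.vfs, b'store': repo.svfs}
        transaction.cleanup_undo_files(repo.ui.warn, vfsmap, undo_prefix=prefix)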
@@ -1,884 +1,886 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 import errno
 import os

 from .i18n import _
 from . import (
     error,
     pycompat,
     util,
 )
 from .utils import stringutil

 version = 2

 GEN_GROUP_ALL = b'all'
 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
 GEN_GROUP_POST_FINALIZE = b'postfinalize'


 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
             raise error.ProgrammingError(
                 b'cannot use transaction when it is already committed/aborted'
             )
         return func(self, *args, **kwds)

     return _active


-UNDO_BACKUP = b'undo.backupfiles'
+UNDO_BACKUP = b'%s.backupfiles'

 UNDO_FILES_MAY_NEED_CLEANUP = [
-    (b'plain', b'undo.desc'),
+    (b'plain', b'%s.desc'),
     # Always delete undo last to make sure we detect that a clean up is needed if
     # the process is interrupted.
-    (b'store', b'undo'),
+    (b'store', b'%s'),
 ]


-def cleanup_undo_files(report, vfsmap):
+def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
     """remove "undo" files used by the rollback logic

     This is useful to prevent rollback running in situations where it does not
     make sense. For example after a strip.
     """
+    backup_listing = UNDO_BACKUP % undo_prefix
+
     backup_entries = []
     undo_files = []
     svfs = vfsmap[b'store']
     try:
-        with svfs(UNDO_BACKUP) as f:
+        with svfs(backup_listing) as f:
             backup_entries = read_backup_files(report, f)
     except OSError as e:
         if e.errno != errno.ENOENT:
             msg = _(b'could not read %s: %s\n')
-            msg %= (svfs.join(UNDO_BACKUP), stringutil.forcebytestr(e))
+            msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
             report(msg)

     for location, f, backup_path, c in backup_entries:
         if location in vfsmap and backup_path:
             undo_files.append((vfsmap[location], backup_path))

-    undo_files.append((svfs, UNDO_BACKUP))
+    undo_files.append((svfs, backup_listing))
     for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
-        undo_files.append((vfsmap[location], undo_path))
+        undo_files.append((vfsmap[location], undo_path % undo_prefix))
     for undovfs, undofile in undo_files:
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 msg = _(b'error removing %s: %s\n')
                 msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                 report(msg)


 def _playback(
     journal,
     report,
     opener,
     vfsmap,
     entries,
     backupentries,
     unlink=True,
     checkambigfiles=None,
 ):
     for f, o in sorted(dict(entries).items()):
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
                 fp = opener(f, b'a', checkambig=checkambig)
                 if fp.tell() < o:
                     raise error.Abort(
                         _(
                             b"attempted to truncate %s to %d bytes, but it was "
                             b"already %d bytes\n"
                         )
                         % (f, o, fp.tell())
                     )
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except FileNotFoundError:
                 pass

     backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
             if f and b:
                 filepath = vfs.join(f)
                 backuppath = vfs.join(b)
                 checkambig = checkambigfiles and (f, l) in checkambigfiles
                 try:
                     util.copyfile(backuppath, filepath, checkambig=checkambig)
                     backupfiles.append(b)
                 except IOError as exc:
                     e_msg = stringutil.forcebytestr(exc)
                     report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except FileNotFoundError:
                     pass
         except (IOError, OSError, error.Abort):
             if not c:
                 raise

     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
         for f in backupfiles:
             if opener.exists(f):
                 opener.unlink(f)
     except (IOError, OSError, error.Abort):
         # only pure backup file remains, it is safe to ignore any error
         pass


 class transaction(util.transactional):
     def __init__(
         self,
         report,
         opener,
         vfsmap,
         journalname,
         undoname=None,
         after=None,
         createmode=None,
         validator=None,
         releasefn=None,
         checkambigfiles=None,
         name='<unnamed>',
     ):
         """Begin a new transaction

         Begins a new transaction that allows rolling back writes in the event of
         an exception.

         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `releasefn`: called after releasing (with transaction and result)

         `checkambigfiles` is a set of (path, vfs-location) tuples,
         which determine whether file stat ambiguity should be avoided
         for the corresponding files.
         """
         self._count = 1
         self._usages = 1
         self._report = report
         # a vfs to the store content
         self._opener = opener
         # a map to access file in various {location -> vfs}
         vfsmap = vfsmap.copy()
         vfsmap[b''] = opener  # set default value
         self._vfsmap = vfsmap
         self._after = after
         self._offsetmap = {}
         self._newfiles = set()
         self._journal = journalname
         self._journal_files = []
         self._undoname = undoname
         self._queue = []
         # A callback to do something just after releasing transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
         self._releasefn = releasefn

         self._checkambigfiles = set()
         if checkambigfiles:
             self._checkambigfiles.update(checkambigfiles)

         self._names = [name]

         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
         self.changes = {}

         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self._file = opener.open(self._journal, b"w+")

         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
         # - if 'path' is empty, this is a temporary transaction file
         # - if 'location' is not empty, the path is outside main opener reach.
         #   use 'location' value as a key in a vfsmap to find the right 'vfs'
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)

         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)

         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has ever been written
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # holds callback to call when validating the transaction
         # should raise exception if anything is wrong
         self._validatecallback = {}
         if validator is not None:
             self._validatecallback[b'001-userhooks'] = validator
         # hold callback for post transaction close
         self._postclosecallback = {}
         # holds callbacks to call during abort
         self._abortcallback = {}

     def __repr__(self):
         name = '/'.join(self._names)
         return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
         )

     def __del__(self):
         if self._journal:
             self._abort()

     @property
     def finalized(self):
         return self._finalizecallback is None

     @active
     def startgroup(self):
         """delay registration of file entry

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])

     @active
     def endgroup(self):
         """apply delayed registration of file entry.

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o in q:
             self._addentry(f, o)

     @active
     def add(self, file, offset):
         """record the state of an append-only file before update"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if self._queue:
             self._queue[-1].append((file, offset))
             return

         self._addentry(file, offset)

     def _addentry(self, file, offset):
         """add an append-only entry to memory and on-disk state"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if offset:
             self._offsetmap[file] = offset
         else:
             self._newfiles.add(file)
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction

         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.

         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)

         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''

         self._addbackupentry((location, file, backupfile, False))

     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()

     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file

         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))

     @active
     def addfilegenerator(
         self,
         genid,
         filenames,
         genfunc,
         order=0,
         location=b'',
         post_finalize=False,
     ):
         """add a function to generate some files at transaction commit

         The `genfunc` argument is a function capable of generating proper
         content of each entry in the `filenames` tuple.

         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.

         The transaction itself is responsible for the backup, creation and
         final write of such file.

         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.

         The `order` argument may be used to control the order in which multiple
         generators will be executed.

         The `location` argument may be used to indicate the files are located
         outside of the standard directory for transactions. It should match
         one of the keys of the `transaction.vfsmap` dictionary.

         The `post_finalize` argument can be set to `True` for file generation
         that must be run after the transaction has been finalized.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
         entry = (order, filenames, genfunc, location, post_finalize)
         self._filegenerators[genid] = entry

     @active
     def removefilegenerator(self, genid):
         """reverse of addfilegenerator, remove a file generator function"""
         if genid in self._filegenerators:
             del self._filegenerators[genid]

     def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False

         if group == GEN_GROUP_ALL:
             skip_post = skip_pre = False
         else:
             skip_pre = group == GEN_GROUP_POST_FINALIZE
             skip_post = group == GEN_GROUP_PRE_FINALIZE

         for id, entry in sorted(self._filegenerators.items()):
             any = True
             order, filenames, genfunc, location, post_finalize = entry

             # for generation at closing, check if it's before or after finalize
             if skip_post and post_finalize:
                 continue
             elif skip_pre and not post_finalize:
                 continue

             vfs = self._vfsmap[location]
             files = []
             try:
                 for name in filenames:
                     name += suffix
                     if suffix:
                         self.registertmp(name, location=location)
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
                     files.append(
                         vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                     )
                 genfunc(*files)
                 for f in files:
                     f.close()
                 # skip discard() loop since we're sure no open file remains
                 del files[:]
             finally:
                 for f in files:
                     f.discard()
         return any

     @active
     def findoffset(self, file):
         if file in self._newfiles:
             return 0
         return self._offsetmap.get(file)

     @active
     def readjournal(self):
         self._file.seek(0)
         entries = []
         for l in self._file.readlines():
             file, troffset = l.split(b'\0')
             entries.append((file, int(troffset)))
         return entries

     @active
     def replace(self, file, offset):
         """
         replace can only replace already committed entries
         that are not pending in the queue
         """
         if file in self._newfiles:
             if not offset:
                 return
             self._newfiles.remove(file)
             self._offsetmap[file] = offset
         elif file in self._offsetmap:
             if not offset:
                 del self._offsetmap[file]
                 self._newfiles.add(file)
             else:
                 self._offsetmap[file] = offset
         else:
             raise KeyError(file)
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def nest(self, name='<unnamed>'):
         self._count += 1
         self._usages += 1
         self._names.append(name)
         return self

     def release(self):
         if self._count > 0:
             self._usages -= 1
             if self._names:
                 self._names.pop()
         # if the transaction scopes are left without being closed, fail
         if self._count > 0 and self._usages == 0:
             self._abort()

     def running(self):
         return self._count > 0

     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback

     @active
     def writepending(self):
         """write pending file to temporary version

         This is used to allow hooks to view a transaction before commit"""
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         self._anypending |= self._generatefiles(suffix=b'.pending')
         return self._anypending

     @active
     def hasfinalize(self, category):
         """check if a callback already exists for a category"""
         return category in self._finalizecallback

     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback

     @active
     def addpostclose(self, category, callback):
         """add or replace a callback to be called after the transaction closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback

     @active
     def getpostclose(self, category):
         """return a postclose callback added before, or None"""
         return self._postclosecallback.get(category, None)

     @active
     def addabort(self, category, callback):
         """add a callback to be called when the transaction is aborted.

         The transaction will be given as the first argument to the callback.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._abortcallback[category] = callback

     @active
     def addvalidator(self, category, callback):
         """adds a callback to be called when validating the transaction.

         The transaction will be given as the first argument to the callback.

         The callback should raise an exception to abort the transaction."""
         self._validatecallback[category] = callback

     @active
     def close(self):
         '''commit the transaction'''
         if self._count == 1:
             for category in sorted(self._validatecallback):
                 self._validatecallback[category](self)
             self._validatecallback = None  # Help prevent cycles.
             self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
                 categories = sorted(callbacks)
                 for cat in categories:
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

         self._count -= 1
         if self._count != 0:
             return
         self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._offsetmap = {}
         self._newfiles = set()
         self._writeundo()
         if self._after:
             self._after()
             self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location"
                     b"%s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._backupentries = []
         self._journal = None

         self._releasefn(self, True)  # notify success of closing transaction
         self._releasefn = None  # Help prevent cycles.

         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
         # Prevent double usage and help clear cycles.
         self._postclosecallback = None

     @active
     def abort(self):
         """abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)"""
         self._abort()

     @active
     def add_journal(self, vfs_id, path):
         self._journal_files.append((vfs_id, path))

     def _writeundo(self):
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return

         def undoname(fn: bytes) -> bytes:
             base, name = os.path.split(fn)
             assert name.startswith(self._journal)
             new_name = name.replace(self._journal, self._undoname, 1)
             return os.path.join(base, new_name)

         undo_backup_path = b"%s.backupfiles" % self._undoname
         undobackupfile = self._opener.open(undo_backup_path, b'w')
         undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
                 u = b''
             else:
                 if l not in self._vfsmap and c:
                     self._report(
                         b"couldn't remove %s: unknown cache location"
                         b"%s\n" % (b, l)
                     )
                     continue
                 vfs = self._vfsmap[l]
                 u = undoname(b)
                 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()
         for vfs, src in self._journal_files:
             dest = undoname(src)
             # if src and dest refer to the same file, vfs.rename is a no-op,
             # leaving both src and dest on disk. Delete dest to make sure
             # the rename couldn't be such a no-op.
             vfs.tryunlink(dest)
             try:
                 vfs.rename(src, dest)
             except FileNotFoundError:  # journal file does not yet exist
                 pass

     def _abort(self):
         entries = self.readjournal()
         self._count = 0
         self._usages = 0
         self._file.close()
         self._backupsfile.close()

         quick = self._can_quick_abort(entries)
         try:
             if not quick:
                 self._report(_(b"transaction abort!\n"))
             for cat in sorted(self._abortcallback):
                 self._abortcallback[cat](self)
             # Prevent double usage and help clear cycles.
             self._abortcallback = None
             if quick:
                 self._do_quick_abort(entries)
             else:
                 self._do_full_abort(entries)
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.

     def _can_quick_abort(self, entries):
         """False if any semantic content has been written on disk

         True if nothing except temporary files has been written on disk."""
         if entries:
             return False
         for e in self._backupentries:
             if e[1]:
                 return False
         return True

     def _do_quick_abort(self, entries):
         """(Silently) do a quick cleanup (see _can_quick_abort)"""
         assert self._can_quick_abort(entries)
         tmp_files = [e for e in self._backupentries if not e[1]]
         for vfs_id, old_path, tmp_path, xxx in tmp_files:
             vfs = self._vfsmap[vfs_id]
             try:
                 vfs.unlink(tmp_path)
             except FileNotFoundError:
                 pass
         if self._backupjournal:
             self._opener.unlink(self._backupjournal)
         if self._journal:
             self._opener.unlink(self._journal)

     def _do_full_abort(self, entries):
         """(Noisily) roll back all the changes introduced by the transaction"""
         try:
             _playback(
                 self._journal,
                 self._report,
                 self._opener,
                 self._vfsmap,
                 entries,
                 self._backupentries,
                 False,
                 checkambigfiles=self._checkambigfiles,
             )
             self._report(_(b"rollback completed\n"))
         except BaseException as exc:
             self._report(_(b"rollback failed - please run hg recover\n"))
             self._report(
                 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
             )


 BAD_VERSION_MSG = _(
     b"journal was created by a different version of Mercurial\n"
 )


 def read_backup_files(report, fp):
     """parse an (already open) backup file and return the contained backup entries

     entries are in the form: (location, file, backupfile, cache)

     :location: the vfs identifier (vfsmap's key)
     :file: original file path (in the vfs)
     :backupfile: path of the backup (in the vfs)
     :cache: a boolean currently always set to False
     """
     lines = fp.readlines()
     backupentries = []
     if lines:
         ver = lines[0][:-1]
         if ver != (b'%d' % version):
             report(BAD_VERSION_MSG)
         else:
             for line in lines[1:]:
                 if line:
                     # Shave off the trailing newline
                     line = line[:-1]
                     l, f, b, c = line.split(b'\0')
                     backupentries.append((l, f, b, bool(c)))
     return backupentries


 def rollback(
     opener,
     vfsmap,
     file,
     report,
     checkambigfiles=None,
     skip_journal_pattern=None,
 ):
     """Rolls back the transaction contained in the given file

     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.

     * `file`: a file containing a list of entries, specifying where
     to truncate each file. The file should contain a list of
     file\0offset pairs, delimited by newlines. The corresponding
     '*.backupfiles' file should contain a list of file\0backupfile
     pairs, delimited by \0.

     `checkambigfiles` is a set of (path, vfs-location) tuples,
     which determine whether file stat ambiguity should be avoided when
     restoring the corresponding files.
     """
     entries = []
     backupentries = []

     with opener.open(file) as fp:
         lines = fp.readlines()
     for l in lines:
         try:
             f, o = l.split(b'\0')
             entries.append((f, int(o)))
         except ValueError:
             report(
                 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
             )

     backupjournal = b"%s.backupfiles" % file
     if opener.exists(backupjournal):
         with opener.open(backupjournal) as fp:
             backupentries = read_backup_files(report, fp)
     if skip_journal_pattern is not None:
         keep = lambda x: not skip_journal_pattern.match(x[1])
         backupentries = [x for x in backupentries if keep(x)]

     _playback(
         file,
         report,
         opener,
         vfsmap,
         entries,
         backupentries,
         checkambigfiles=checkambigfiles,
     )
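As a reading aid (not part of the changeset), the sketch below shows how the templated constants introduced above expand. With the default prefix the cleanup still targets the historical file names; any other prefix yields a parallel set. The b'undo-custom' prefix is a made-up example:

    UNDO_BACKUP = b'%s.backupfiles'
    UNDO_FILES_MAY_NEED_CLEANUP = [(b'plain', b'%s.desc'), (b'store', b'%s')]

    for prefix in (b'undo', b'undo-custom'):
        names = [UNDO_BACKUP % prefix]
        names += [path % prefix for _loc, path in UNDO_FILES_MAY_NEED_CLEANUP]
        print(names)
    # b'undo'        -> [b'undo.backupfiles', b'undo.desc', b'undo']
    # b'undo-custom' -> [b'undo-custom.backupfiles', b'undo-custom.desc', b'undo-custom']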