backup: fix issue when the backup ends up in a different directory...
marmoute
r51348:a445194f stable
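The change is small but subtle: transaction backups are created next to the file they protect, yet store path encoding can map the backup name into a directory that does not exist yet, so the copy must be preceded by a directory creation. A minimal sketch of the idea using only the standard library (backup_file and its paths are illustrative; the real change below works through Mercurial's vfs layer with util.makedirs and util.copyfile):

import os
import shutil


def backup_file(filepath, backuppath):
    """Copy ``filepath`` to ``backuppath``, creating the target directory.

    Illustrative sketch only: when path encoding places the backup in a
    directory that does not exist yet, a bare copy would fail, so the
    destination directory is created first.
    """
    final_dir_name = os.path.dirname(backuppath)
    os.makedirs(final_dir_name, exist_ok=True)  # ensure the directory exists
    shutil.copy2(filepath, backuppath)  # then the backup can be copied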
@@ -1,955 +1,960 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno
import os

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active


UNDO_BACKUP = b'%s.backupfiles'

UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is
    # needed if the process is interrupted.
    (b'store', b'%s'),
]


def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does
    not make sense. For example after a strip.
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
            report(msg)

    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                report(msg)


def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction:
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files
    """
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate each file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and
        # truncate the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both cases, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain, it is safe to ignore any error
        pass


class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        # use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        If `for_offset` is set, we expect an offset for this file to have been
        previously recorded.
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._newfiles or file in self._backupmap:
            return
        elif file in self._offsetmap and not for_offset:
            return
        elif for_offset and file not in self._offsetmap:
            msg = (
                'calling `addbackup` with `for_offmap=True`, '
                'but no offset recorded: [%r] %r'
            )
            msg %= (location, file)
            raise error.ProgrammingError(msg)

        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
417 # store encoding may result in different directory here.
418 # so we have to ensure the destination directory exist
419 final_dir_name = os.path.dirname(backuppath)
420 util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
421 # then we can copy the backup
417 util.copyfile(filepath, backuppath, hardlink=hardlink)
422 util.copyfile(filepath, backuppath, hardlink=hardlink)
418 else:
423 else:
419 backupfile = b''
424 backupfile = b''
420
425
421 self._addbackupentry((location, file, backupfile, False))
426 self._addbackupentry((location, file, backupfile, False))
422
427
423 def _addbackupentry(self, entry):
428 def _addbackupentry(self, entry):
424 """register a new backup entry and write it to disk"""
429 """register a new backup entry and write it to disk"""
425 self._backupentries.append(entry)
430 self._backupentries.append(entry)
426 self._backupmap[entry[1]] = len(self._backupentries) - 1
431 self._backupmap[entry[1]] = len(self._backupentries) - 1
427 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
432 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
428 self._backupsfile.flush()
433 self._backupsfile.flush()
429
434
430 @active
435 @active
431 def registertmp(self, tmpfile, location=b''):
436 def registertmp(self, tmpfile, location=b''):
432 """register a temporary transaction file
437 """register a temporary transaction file
433
438
434 Such files will be deleted when the transaction exits (on both
439 Such files will be deleted when the transaction exits (on both
435 failure and success).
440 failure and success).
436 """
441 """
437 self._addbackupentry((location, b'', tmpfile, False))
442 self._addbackupentry((location, b'', tmpfile, False))
438
443
439 @active
444 @active
440 def addfilegenerator(
445 def addfilegenerator(
441 self,
446 self,
442 genid,
447 genid,
443 filenames,
448 filenames,
444 genfunc,
449 genfunc,
445 order=0,
450 order=0,
446 location=b'',
451 location=b'',
447 post_finalize=False,
452 post_finalize=False,
448 ):
453 ):
449 """add a function to generates some files at transaction commit
454 """add a function to generates some files at transaction commit
450
455
451 The `genfunc` argument is a function capable of generating proper
456 The `genfunc` argument is a function capable of generating proper
452 content of each entry in the `filename` tuple.
457 content of each entry in the `filename` tuple.
453
458
454 At transaction close time, `genfunc` will be called with one file
459 At transaction close time, `genfunc` will be called with one file
455 object argument per entries in `filenames`.
460 object argument per entries in `filenames`.
456
461
457 The transaction itself is responsible for the backup, creation and
462 The transaction itself is responsible for the backup, creation and
458 final write of such file.
463 final write of such file.
459
464
460 The `genid` argument is used to ensure the same set of file is only
465 The `genid` argument is used to ensure the same set of file is only
461 generated once. Call to `addfilegenerator` for a `genid` already
466 generated once. Call to `addfilegenerator` for a `genid` already
462 present will overwrite the old entry.
467 present will overwrite the old entry.
463
468
464 The `order` argument may be used to control the order in which multiple
469 The `order` argument may be used to control the order in which multiple
465 generator will be executed.
470 generator will be executed.
466
471
467 The `location` arguments may be used to indicate the files are located
472 The `location` arguments may be used to indicate the files are located
468 outside of the the standard directory for transaction. It should match
473 outside of the the standard directory for transaction. It should match
469 one of the key of the `transaction.vfsmap` dictionary.
474 one of the key of the `transaction.vfsmap` dictionary.
470
475
471 The `post_finalize` argument can be set to `True` for file generation
476 The `post_finalize` argument can be set to `True` for file generation
472 that must be run after the transaction has been finalized.
477 that must be run after the transaction has been finalized.
473 """
478 """
474 # For now, we are unable to do proper backup and restore of custom vfs
479 # For now, we are unable to do proper backup and restore of custom vfs
475 # but for bookmarks that are handled outside this mechanism.
480 # but for bookmarks that are handled outside this mechanism.
476 entry = (order, filenames, genfunc, location, post_finalize)
481 entry = (order, filenames, genfunc, location, post_finalize)
477 self._filegenerators[genid] = entry
482 self._filegenerators[genid] = entry
478
483
479 @active
484 @active
480 def removefilegenerator(self, genid):
485 def removefilegenerator(self, genid):
481 """reverse of addfilegenerator, remove a file generator function"""
486 """reverse of addfilegenerator, remove a file generator function"""
482 if genid in self._filegenerators:
487 if genid in self._filegenerators:
483 del self._filegenerators[genid]
488 del self._filegenerators[genid]
484
489
485 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
490 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
486 # write files registered for generation
491 # write files registered for generation
487 any = False
492 any = False
488
493
489 if group == GEN_GROUP_ALL:
494 if group == GEN_GROUP_ALL:
490 skip_post = skip_pre = False
495 skip_post = skip_pre = False
491 else:
496 else:
492 skip_pre = group == GEN_GROUP_POST_FINALIZE
497 skip_pre = group == GEN_GROUP_POST_FINALIZE
493 skip_post = group == GEN_GROUP_PRE_FINALIZE
498 skip_post = group == GEN_GROUP_PRE_FINALIZE
494
499
495 for id, entry in sorted(self._filegenerators.items()):
500 for id, entry in sorted(self._filegenerators.items()):
496 any = True
501 any = True
497 order, filenames, genfunc, location, post_finalize = entry
502 order, filenames, genfunc, location, post_finalize = entry
498
503
499 # for generation at closing, check if it's before or after finalize
504 # for generation at closing, check if it's before or after finalize
500 if skip_post and post_finalize:
505 if skip_post and post_finalize:
501 continue
506 continue
502 elif skip_pre and not post_finalize:
507 elif skip_pre and not post_finalize:
503 continue
508 continue
504
509
505 vfs = self._vfsmap[location]
510 vfs = self._vfsmap[location]
506 files = []
511 files = []
507 try:
512 try:
508 for name in filenames:
513 for name in filenames:
509 name += suffix
514 name += suffix
510 if suffix:
515 if suffix:
511 self.registertmp(name, location=location)
516 self.registertmp(name, location=location)
512 checkambig = False
517 checkambig = False
513 else:
518 else:
514 self.addbackup(name, location=location)
519 self.addbackup(name, location=location)
515 checkambig = (name, location) in self._checkambigfiles
520 checkambig = (name, location) in self._checkambigfiles
516 files.append(
521 files.append(
517 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
522 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
518 )
523 )
519 genfunc(*files)
524 genfunc(*files)
520 for f in files:
525 for f in files:
521 f.close()
526 f.close()
522 # skip discard() loop since we're sure no open file remains
527 # skip discard() loop since we're sure no open file remains
523 del files[:]
528 del files[:]
524 finally:
529 finally:
525 for f in files:
530 for f in files:
526 f.discard()
531 f.discard()
527 return any
532 return any
528
533
529 @active
534 @active
530 def findoffset(self, file):
535 def findoffset(self, file):
531 if file in self._newfiles:
536 if file in self._newfiles:
532 return 0
537 return 0
533 return self._offsetmap.get(file)
538 return self._offsetmap.get(file)
534
539
535 @active
540 @active
536 def readjournal(self):
541 def readjournal(self):
537 self._file.seek(0)
542 self._file.seek(0)
538 entries = []
543 entries = []
539 for l in self._file.readlines():
544 for l in self._file.readlines():
540 file, troffset = l.split(b'\0')
545 file, troffset = l.split(b'\0')
541 entries.append((file, int(troffset)))
546 entries.append((file, int(troffset)))
542 return entries
547 return entries
543
548
544 @active
549 @active
545 def replace(self, file, offset):
550 def replace(self, file, offset):
546 """
551 """
547 replace can only replace already committed entries
552 replace can only replace already committed entries
548 that are not pending in the queue
553 that are not pending in the queue
549 """
554 """
550 if file in self._newfiles:
555 if file in self._newfiles:
551 if not offset:
556 if not offset:
552 return
557 return
553 self._newfiles.remove(file)
558 self._newfiles.remove(file)
554 self._offsetmap[file] = offset
559 self._offsetmap[file] = offset
555 elif file in self._offsetmap:
560 elif file in self._offsetmap:
556 if not offset:
561 if not offset:
557 del self._offsetmap[file]
562 del self._offsetmap[file]
558 self._newfiles.add(file)
563 self._newfiles.add(file)
559 else:
564 else:
560 self._offsetmap[file] = offset
565 self._offsetmap[file] = offset
561 else:
566 else:
562 raise KeyError(file)
567 raise KeyError(file)
563 self._file.write(b"%s\0%d\n" % (file, offset))
568 self._file.write(b"%s\0%d\n" % (file, offset))
564 self._file.flush()
569 self._file.flush()
565
570
566 @active
571 @active
567 def nest(self, name='<unnamed>'):
572 def nest(self, name='<unnamed>'):
568 self._count += 1
573 self._count += 1
569 self._usages += 1
574 self._usages += 1
570 self._names.append(name)
575 self._names.append(name)
571 return self
576 return self
572
577
573 def release(self):
578 def release(self):
574 if self._count > 0:
579 if self._count > 0:
575 self._usages -= 1
580 self._usages -= 1
576 if self._names:
581 if self._names:
577 self._names.pop()
582 self._names.pop()
578 # if the transaction scopes are left without being closed, fail
583 # if the transaction scopes are left without being closed, fail
579 if self._count > 0 and self._usages == 0:
584 if self._count > 0 and self._usages == 0:
580 self._abort()
585 self._abort()
581
586
582 def running(self):
587 def running(self):
583 return self._count > 0
588 return self._count > 0
584
589
585 def addpending(self, category, callback):
590 def addpending(self, category, callback):
586 """add a callback to be called when the transaction is pending
591 """add a callback to be called when the transaction is pending
587
592
588 The transaction will be given as callback's first argument.
593 The transaction will be given as callback's first argument.
589
594
590 Category is a unique identifier to allow overwriting an old callback
595 Category is a unique identifier to allow overwriting an old callback
591 with a newer callback.
596 with a newer callback.
592 """
597 """
593 self._pendingcallback[category] = callback
598 self._pendingcallback[category] = callback
594
599
595 @active
600 @active
596 def writepending(self):
601 def writepending(self):
597 """write pending file to temporary version
602 """write pending file to temporary version
598
603
599 This is used to allow hooks to view a transaction before commit"""
604 This is used to allow hooks to view a transaction before commit"""
600 categories = sorted(self._pendingcallback)
605 categories = sorted(self._pendingcallback)
601 for cat in categories:
606 for cat in categories:
602 # remove callback since the data will have been flushed
607 # remove callback since the data will have been flushed
603 any = self._pendingcallback.pop(cat)(self)
608 any = self._pendingcallback.pop(cat)(self)
604 self._anypending = self._anypending or any
609 self._anypending = self._anypending or any
605 self._anypending |= self._generatefiles(suffix=b'.pending')
610 self._anypending |= self._generatefiles(suffix=b'.pending')
606 return self._anypending
611 return self._anypending
607
612
608 @active
613 @active
609 def hasfinalize(self, category):
614 def hasfinalize(self, category):
610 """check is a callback already exist for a category"""
615 """check is a callback already exist for a category"""
611 return category in self._finalizecallback
616 return category in self._finalizecallback
612
617
613 @active
618 @active
614 def addfinalize(self, category, callback):
619 def addfinalize(self, category, callback):
615 """add a callback to be called when the transaction is closed
620 """add a callback to be called when the transaction is closed
616
621
617 The transaction will be given as callback's first argument.
622 The transaction will be given as callback's first argument.
618
623
619 Category is a unique identifier to allow overwriting old callbacks with
624 Category is a unique identifier to allow overwriting old callbacks with
620 newer callbacks.
625 newer callbacks.
621 """
626 """
622 self._finalizecallback[category] = callback
627 self._finalizecallback[category] = callback
623
628
624 @active
629 @active
625 def addpostclose(self, category, callback):
630 def addpostclose(self, category, callback):
626 """add or replace a callback to be called after the transaction closed
631 """add or replace a callback to be called after the transaction closed
627
632
628 The transaction will be given as callback's first argument.
633 The transaction will be given as callback's first argument.
629
634
630 Category is a unique identifier to allow overwriting an old callback
635 Category is a unique identifier to allow overwriting an old callback
631 with a newer callback.
636 with a newer callback.
632 """
637 """
633 self._postclosecallback[category] = callback
638 self._postclosecallback[category] = callback
634
639
635 @active
640 @active
636 def getpostclose(self, category):
641 def getpostclose(self, category):
637 """return a postclose callback added before, or None"""
642 """return a postclose callback added before, or None"""
638 return self._postclosecallback.get(category, None)
643 return self._postclosecallback.get(category, None)
639
644
640 @active
645 @active
641 def addabort(self, category, callback):
646 def addabort(self, category, callback):
642 """add a callback to be called when the transaction is aborted.
647 """add a callback to be called when the transaction is aborted.
643
648
644 The transaction will be given as the first argument to the callback.
649 The transaction will be given as the first argument to the callback.
645
650
646 Category is a unique identifier to allow overwriting an old callback
651 Category is a unique identifier to allow overwriting an old callback
647 with a newer callback.
652 with a newer callback.
648 """
653 """
649 self._abortcallback[category] = callback
654 self._abortcallback[category] = callback
650
655
651 @active
656 @active
652 def addvalidator(self, category, callback):
657 def addvalidator(self, category, callback):
653 """adds a callback to be called when validating the transaction.
658 """adds a callback to be called when validating the transaction.
654
659
655 The transaction will be given as the first argument to the callback.
660 The transaction will be given as the first argument to the callback.
656
661
657 callback should raise exception if to abort transaction"""
662 callback should raise exception if to abort transaction"""
658 self._validatecallback[category] = callback
663 self._validatecallback[category] = callback
659
664
660 @active
665 @active
661 def close(self):
666 def close(self):
662 '''commit the transaction'''
667 '''commit the transaction'''
663 if self._count == 1:
668 if self._count == 1:
664 for category in sorted(self._validatecallback):
669 for category in sorted(self._validatecallback):
665 self._validatecallback[category](self)
670 self._validatecallback[category](self)
666 self._validatecallback = None # Help prevent cycles.
671 self._validatecallback = None # Help prevent cycles.
667 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
672 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
668 while self._finalizecallback:
673 while self._finalizecallback:
669 callbacks = self._finalizecallback
674 callbacks = self._finalizecallback
670 self._finalizecallback = {}
675 self._finalizecallback = {}
671 categories = sorted(callbacks)
676 categories = sorted(callbacks)
672 for cat in categories:
677 for cat in categories:
673 callbacks[cat](self)
678 callbacks[cat](self)
674 # Prevent double usage and help clear cycles.
679 # Prevent double usage and help clear cycles.
675 self._finalizecallback = None
680 self._finalizecallback = None
676 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
681 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
677
682
678 self._count -= 1
683 self._count -= 1
679 if self._count != 0:
684 if self._count != 0:
680 return
685 return
681 self._file.close()
686 self._file.close()
682 self._backupsfile.close()
687 self._backupsfile.close()
683 # cleanup temporary files
688 # cleanup temporary files
684 for l, f, b, c in self._backupentries:
689 for l, f, b, c in self._backupentries:
685 if l not in self._vfsmap and c:
690 if l not in self._vfsmap and c:
686 self._report(
691 self._report(
687 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
692 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
688 )
693 )
689 continue
694 continue
690 vfs = self._vfsmap[l]
695 vfs = self._vfsmap[l]
691 if not f and b and vfs.exists(b):
696 if not f and b and vfs.exists(b):
692 try:
697 try:
693 vfs.unlink(b)
698 vfs.unlink(b)
694 except (IOError, OSError, error.Abort) as inst:
699 except (IOError, OSError, error.Abort) as inst:
695 if not c:
700 if not c:
696 raise
701 raise
697 # Abort may be raise by read only opener
702 # Abort may be raise by read only opener
698 self._report(
703 self._report(
699 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
704 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
700 )
705 )
701 self._offsetmap = {}
706 self._offsetmap = {}
702 self._newfiles = set()
707 self._newfiles = set()
703 self._writeundo()
708 self._writeundo()
704 if self._after:
709 if self._after:
705 self._after()
710 self._after()
706 self._after = None # Help prevent cycles.
711 self._after = None # Help prevent cycles.
707 if self._opener.isfile(self._backupjournal):
712 if self._opener.isfile(self._backupjournal):
708 self._opener.unlink(self._backupjournal)
713 self._opener.unlink(self._backupjournal)
709 if self._opener.isfile(self._journal):
714 if self._opener.isfile(self._journal):
710 self._opener.unlink(self._journal)
715 self._opener.unlink(self._journal)
711 for l, _f, b, c in self._backupentries:
716 for l, _f, b, c in self._backupentries:
712 if l not in self._vfsmap and c:
717 if l not in self._vfsmap and c:
713 self._report(
718 self._report(
714 b"couldn't remove %s: unknown cache location"
719 b"couldn't remove %s: unknown cache location"
715 b"%s\n" % (b, l)
720 b"%s\n" % (b, l)
716 )
721 )
717 continue
722 continue
718 vfs = self._vfsmap[l]
723 vfs = self._vfsmap[l]
719 if b and vfs.exists(b):
724 if b and vfs.exists(b):
720 try:
725 try:
721 vfs.unlink(b)
726 vfs.unlink(b)
722 except (IOError, OSError, error.Abort) as inst:
727 except (IOError, OSError, error.Abort) as inst:
723 if not c:
728 if not c:
724 raise
729 raise
725 # Abort may be raise by read only opener
730 # Abort may be raise by read only opener
726 self._report(
731 self._report(
727 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
732 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
728 )
733 )
729 self._backupentries = []
734 self._backupentries = []
730 self._journal = None
735 self._journal = None
731
736
732 self._releasefn(self, True) # notify success of closing transaction
737 self._releasefn(self, True) # notify success of closing transaction
733 self._releasefn = None # Help prevent cycles.
738 self._releasefn = None # Help prevent cycles.
734
739
735 # run post close action
740 # run post close action
736 categories = sorted(self._postclosecallback)
741 categories = sorted(self._postclosecallback)
737 for cat in categories:
742 for cat in categories:
738 self._postclosecallback[cat](self)
743 self._postclosecallback[cat](self)
739 # Prevent double usage and help clear cycles.
744 # Prevent double usage and help clear cycles.
740 self._postclosecallback = None
745 self._postclosecallback = None
741
746
742 @active
747 @active
743 def abort(self):
748 def abort(self):
744 """abort the transaction (generally called on error, or when the
749 """abort the transaction (generally called on error, or when the
745 transaction is not explicitly committed before going out of
750 transaction is not explicitly committed before going out of
746 scope)"""
751 scope)"""
747 self._abort()
752 self._abort()
748
753
749 @active
754 @active
750 def add_journal(self, vfs_id, path):
755 def add_journal(self, vfs_id, path):
751 self._journal_files.append((vfs_id, path))
756 self._journal_files.append((vfs_id, path))
752
757
753 def _writeundo(self):
758 def _writeundo(self):
754 """write transaction data for possible future undo call"""
759 """write transaction data for possible future undo call"""
755 if self._undoname is None:
760 if self._undoname is None:
756 return
761 return
757 cleanup_undo_files(
762 cleanup_undo_files(
758 self._report,
763 self._report,
759 self._vfsmap,
764 self._vfsmap,
760 undo_prefix=self._undoname,
765 undo_prefix=self._undoname,
761 )
766 )
762
767
763 def undoname(fn: bytes) -> bytes:
768 def undoname(fn: bytes) -> bytes:
764 base, name = os.path.split(fn)
769 base, name = os.path.split(fn)
765 assert name.startswith(self._journal)
770 assert name.startswith(self._journal)
766 new_name = name.replace(self._journal, self._undoname, 1)
771 new_name = name.replace(self._journal, self._undoname, 1)
767 return os.path.join(base, new_name)
772 return os.path.join(base, new_name)
768
773
769 undo_backup_path = b"%s.backupfiles" % self._undoname
774 undo_backup_path = b"%s.backupfiles" % self._undoname
770 undobackupfile = self._opener.open(undo_backup_path, b'w')
775 undobackupfile = self._opener.open(undo_backup_path, b'w')
771 undobackupfile.write(b'%d\n' % version)
776 undobackupfile.write(b'%d\n' % version)
772 for l, f, b, c in self._backupentries:
777 for l, f, b, c in self._backupentries:
773 if not f: # temporary file
778 if not f: # temporary file
774 continue
779 continue
775 if not b:
780 if not b:
776 u = b''
781 u = b''
777 else:
782 else:
778 if l not in self._vfsmap and c:
783 if l not in self._vfsmap and c:
779 self._report(
784 self._report(
780 b"couldn't remove %s: unknown cache location"
785 b"couldn't remove %s: unknown cache location"
781 b"%s\n" % (b, l)
786 b"%s\n" % (b, l)
782 )
787 )
783 continue
788 continue
784 vfs = self._vfsmap[l]
789 vfs = self._vfsmap[l]
785 u = undoname(b)
790 u = undoname(b)
786 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
791 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
787 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
792 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
788 undobackupfile.close()
793 undobackupfile.close()
789 for vfs, src in self._journal_files:
794 for vfs, src in self._journal_files:
790 dest = undoname(src)
795 dest = undoname(src)
791 # if src and dest refer to a same file, vfs.rename is a no-op,
796 # if src and dest refer to a same file, vfs.rename is a no-op,
792 # leaving both src and dest on disk. delete dest to make sure
797 # leaving both src and dest on disk. delete dest to make sure
793 # the rename couldn't be such a no-op.
798 # the rename couldn't be such a no-op.
794 vfs.tryunlink(dest)
799 vfs.tryunlink(dest)
795 try:
800 try:
796 vfs.rename(src, dest)
801 vfs.rename(src, dest)
797 except FileNotFoundError: # journal file does not yet exist
802 except FileNotFoundError: # journal file does not yet exist
798 pass
803 pass
799
804
800 def _abort(self):
805 def _abort(self):
801 entries = self.readjournal()
806 entries = self.readjournal()
802 self._count = 0
807 self._count = 0
803 self._usages = 0
808 self._usages = 0
804 self._file.close()
809 self._file.close()
805 self._backupsfile.close()
810 self._backupsfile.close()
806
811
807 quick = self._can_quick_abort(entries)
812 quick = self._can_quick_abort(entries)
808 try:
813 try:
809 if not quick:
814 if not quick:
810 self._report(_(b"transaction abort!\n"))
815 self._report(_(b"transaction abort!\n"))
811 for cat in sorted(self._abortcallback):
816 for cat in sorted(self._abortcallback):
812 self._abortcallback[cat](self)
817 self._abortcallback[cat](self)
813 # Prevent double usage and help clear cycles.
818 # Prevent double usage and help clear cycles.
814 self._abortcallback = None
819 self._abortcallback = None
815 if quick:
820 if quick:
816 self._do_quick_abort(entries)
821 self._do_quick_abort(entries)
817 else:
822 else:
818 self._do_full_abort(entries)
823 self._do_full_abort(entries)
819 finally:
824 finally:
820 self._journal = None
825 self._journal = None
821 self._releasefn(self, False) # notify failure of transaction
826 self._releasefn(self, False) # notify failure of transaction
822 self._releasefn = None # Help prevent cycles.
827 self._releasefn = None # Help prevent cycles.
823
828
824 def _can_quick_abort(self, entries):
829 def _can_quick_abort(self, entries):
825 """False if any semantic content have been written on disk
830 """False if any semantic content have been written on disk
826
831
827 True if nothing, except temporary files has been writen on disk."""
832 True if nothing, except temporary files has been writen on disk."""
828 if entries:
833 if entries:
829 return False
834 return False
830 for e in self._backupentries:
835 for e in self._backupentries:
831 if e[1]:
836 if e[1]:
832 return False
837 return False
833 return True
838 return True
834
839
835 def _do_quick_abort(self, entries):
840 def _do_quick_abort(self, entries):
836 """(Silently) do a quick cleanup (see _can_quick_abort)"""
841 """(Silently) do a quick cleanup (see _can_quick_abort)"""
837 assert self._can_quick_abort(entries)
842 assert self._can_quick_abort(entries)
838 tmp_files = [e for e in self._backupentries if not e[1]]
843 tmp_files = [e for e in self._backupentries if not e[1]]
839 for vfs_id, old_path, tmp_path, xxx in tmp_files:
844 for vfs_id, old_path, tmp_path, xxx in tmp_files:
840 vfs = self._vfsmap[vfs_id]
845 vfs = self._vfsmap[vfs_id]
841 try:
846 try:
842 vfs.unlink(tmp_path)
847 vfs.unlink(tmp_path)
843 except FileNotFoundError:
848 except FileNotFoundError:
844 pass
849 pass
845 if self._backupjournal:
850 if self._backupjournal:
846 self._opener.unlink(self._backupjournal)
851 self._opener.unlink(self._backupjournal)
847 if self._journal:
852 if self._journal:
848 self._opener.unlink(self._journal)
853 self._opener.unlink(self._journal)
849
854
850 def _do_full_abort(self, entries):
855 def _do_full_abort(self, entries):
851 """(Noisily) rollback all the change introduced by the transaction"""
856 """(Noisily) rollback all the change introduced by the transaction"""
852 try:
857 try:
853 _playback(
858 _playback(
854 self._journal,
859 self._journal,
855 self._report,
860 self._report,
856 self._opener,
861 self._opener,
857 self._vfsmap,
862 self._vfsmap,
858 entries,
863 entries,
859 self._backupentries,
864 self._backupentries,
860 False,
865 False,
861 checkambigfiles=self._checkambigfiles,
866 checkambigfiles=self._checkambigfiles,
862 )
867 )
863 self._report(_(b"rollback completed\n"))
868 self._report(_(b"rollback completed\n"))
864 except BaseException as exc:
869 except BaseException as exc:
865 self._report(_(b"rollback failed - please run hg recover\n"))
870 self._report(_(b"rollback failed - please run hg recover\n"))
866 self._report(
871 self._report(
867 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
872 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
868 )
873 )
869
874
870
875
871 BAD_VERSION_MSG = _(
876 BAD_VERSION_MSG = _(
872 b"journal was created by a different version of Mercurial\n"
877 b"journal was created by a different version of Mercurial\n"
873 )
878 )
874
879
875
880
876 def read_backup_files(report, fp):
881 def read_backup_files(report, fp):
877 """parse an (already open) backup file an return contained backup entries
882 """parse an (already open) backup file an return contained backup entries
878
883
879 entries are in the form: (location, file, backupfile, xxx)
884 entries are in the form: (location, file, backupfile, xxx)
880
885
881 :location: the vfs identifier (vfsmap's key)
886 :location: the vfs identifier (vfsmap's key)
882 :file: original file path (in the vfs)
887 :file: original file path (in the vfs)
883 :backupfile: path of the backup (in the vfs)
888 :backupfile: path of the backup (in the vfs)
884 :cache: a boolean currently always set to False
889 :cache: a boolean currently always set to False
885 """
890 """
886 lines = fp.readlines()
891 lines = fp.readlines()
887 backupentries = []
892 backupentries = []
888 if lines:
893 if lines:
889 ver = lines[0][:-1]
894 ver = lines[0][:-1]
890 if ver != (b'%d' % version):
895 if ver != (b'%d' % version):
891 report(BAD_VERSION_MSG)
896 report(BAD_VERSION_MSG)
892 else:
897 else:
893 for line in lines[1:]:
898 for line in lines[1:]:
894 if line:
899 if line:
895 # Shave off the trailing newline
900 # Shave off the trailing newline
896 line = line[:-1]
901 line = line[:-1]
897 l, f, b, c = line.split(b'\0')
902 l, f, b, c = line.split(b'\0')
898 backupentries.append((l, f, b, bool(c)))
903 backupentries.append((l, f, b, bool(c)))
899 return backupentries
904 return backupentries
900
905
901
906
def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided when
    restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
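

# Illustrative sketch (editorial addition): the journal that `rollback` reads
# is a list of newline-delimited "file\0offset" records; each offset is the
# size the file must be truncated back to. The values below are hypothetical
# but mirror the journal contents shown in the test further down.
def _example_parse_journal():
    journal = b'data/file.i\x001174\ndata/file.d\x000\n'
    entries = []
    for l in journal.splitlines(True):
        f, o = l.split(b'\x00')
        entries.append((f, int(o)))
    # -> [(b'data/file.i', 1174), (b'data/file.d', 0)]
    return entries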
@@ -1,444 +1,448 @@
Test correctness of revlog inline -> non-inline transition
----------------------------------------------------------

We test various file lengths and naming patterns, as these have caused
issues in the past.

Helper extensions to intercept renames and kill the process

$ cat > $TESTTMP/intercept_before_rename.py << EOF
> import os
> import signal
> from mercurial import extensions, util
>
> def extsetup(ui):
>     def rename(orig, src, dest, *args, **kwargs):
>         path = util.normpath(dest)
>         if path.endswith(b'data/file.i'):
>             os.kill(os.getpid(), signal.SIGKILL)
>         return orig(src, dest, *args, **kwargs)
>     extensions.wrapfunction(util, 'rename', rename)
> EOF

$ cat > $TESTTMP/intercept_after_rename.py << EOF
> import os
> import signal
> from mercurial import extensions, util
>
> def extsetup(ui):
>     def close(orig, *args, **kwargs):
>         path = util.normpath(args[0]._atomictempfile__name)
>         r = orig(*args, **kwargs)
>         if path.endswith(b'/.hg/store/data/file.i'):
>             os.kill(os.getpid(), signal.SIGKILL)
>         return r
>     extensions.wrapfunction(util.atomictempfile, 'close', close)
> def extsetup(ui):
>     def rename(orig, src, dest, *args, **kwargs):
>         path = util.normpath(dest)
>         r = orig(src, dest, *args, **kwargs)
>         if path.endswith(b'data/file.i'):
>             os.kill(os.getpid(), signal.SIGKILL)
>         return r
>     extensions.wrapfunction(util, 'rename', rename)
> EOF

$ cat > $TESTTMP/killme.py << EOF
> import os
> import signal
>
> def killme(ui, repo, hooktype, **kwargs):
>     os.kill(os.getpid(), signal.SIGKILL)
> EOF

$ cat > $TESTTMP/reader_wait_split.py << EOF
> import os
> import signal
> from mercurial import extensions, revlog, testing
> def _wait_post_load(orig, self, *args, **kwargs):
>     wait = b'data/file' in self.radix
>     if wait:
>         testing.wait_file(b"$TESTTMP/writer-revlog-split")
>     r = orig(self, *args, **kwargs)
>     if wait:
>         testing.write_file(b"$TESTTMP/reader-index-read")
>         testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
>     return r
>
> def extsetup(ui):
>     extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
> EOF

setup a repository for tests
----------------------------

$ cat >> $HGRCPATH << EOF
> [format]
> revlog-compression=none
> EOF

$ hg init troffset-computation
$ cd troffset-computation
$ files="
> file
> Directory_With,Special%Char/Complex_File.babar
> foo/bar/babar_celeste/foo
> 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
> "
$ for f in $files; do
>     mkdir -p `dirname $f`
> done
$ for f in $files; do
>     printf '%20d' '1' > $f
> done
$ hg commit -Aqma
$ for f in $files; do
>     printf '%1024d' '1' > $f
> done
$ hg commit -Aqmb
$ for f in $files; do
>     printf '%20d' '1' > $f
> done
$ hg commit -Aqmc
$ for f in $files; do
>     dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
> done
$ hg commit -AqmD --traceback

Reference size:
$ f -s file
file: size=131072
$ f -s .hg/store/data/file*
.hg/store/data/file.d: size=132139
.hg/store/data/file.i: size=256

$ cd ..


Test a hard crash after the file was split but before the transaction was committed
===================================================================================

Test offset computation to correctly factor in the index entries themselves.
Also test that the new data file has the correct size if the transaction is
aborted after the index has been replaced.

The test repo has commits a, b, c, D, where D is large (it grows the revlog
enough that it transitions to non-inline storage). The clone initially has
changesets a, b and will transition to non-inline storage when adding c, D.

If the transaction adding c, D is rolled back, then we don't undo the revlog
split, but truncate the index and the data to remove both c and D.

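As a mental model, this rollback amounts to truncating the two revlog files
back to their pre-transaction sizes rather than re-inlining them. A minimal,
hypothetical Python sketch of the idea (the paths and offsets mirror the
journal entries shown further down; truncate_revlog is not a real Mercurial
helper):

    def truncate_revlog(index_path, data_path, index_offset, data_offset):
        # drop the index entries and the data added for c and D
        with open(index_path, 'r+b') as fp:
            fp.truncate(index_offset)
        with open(data_path, 'r+b') as fp:
            fp.truncate(data_offset)

    # e.g. truncate_revlog('.hg/store/data/file.i',
    #                      '.hg/store/data/file.d', 1174, 0)
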
$ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
$ cd troffset-computation-copy

Reference size:
$ f -s file
file: size=1024
$ f -s .hg/store/data/file*
.hg/store/data/file.i: size=1174

$ cat > .hg/hgrc <<EOF
> [hooks]
> pretxnchangegroup = python:$TESTTMP/killme.py:killme
> EOF
#if chg
$ hg pull ../troffset-computation
pulling from ../troffset-computation
[255]
#else
$ hg pull ../troffset-computation
pulling from ../troffset-computation
*Killed* (glob)
[137]
#endif


The inline revlog still exists, but a split version exists next to it

$ f -s .hg/store/data/file*
.hg/store/data/file.d: size=132139
.hg/store/data/file.i: size=132395
.hg/store/data/file.i.s: size=256


The first file.i entry should match the "Reference size" above.
The first file.d entry is the temporary record written during the split.

A "temporary file" entry exists for the split index.

$ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
data/file.i 1174
data/file.d 0
$ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
data/file.i data/journal.backup.file.i 0
data/file.i.s 0

recover rolls the split back; the fncache is still valid

$ hg recover
rolling back interrupted transaction
(verify step skipped, run `hg verify` to check your repository content)
$ f -s .hg/store/data/file*
.hg/store/data/file.i: size=1174
$ hg tip
changeset:   1:cc8dfb126534
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     b

$ hg verify -q
$ hg debugrebuildfncache --only-data
fncache already up to date
$ hg verify -q
$ cd ..

Test a hard crash right before the index is moved into place
============================================================

Now retry the procedure, but intercept the rename of the index and check that
the journal does not contain the new index size. This demonstrates the edge
case where the data file is left behind as garbage.

$ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
$ cd troffset-computation-copy2

Reference size:
$ f -s file
file: size=1024
$ f -s .hg/store/data/file*
.hg/store/data/file.i: size=1174

$ cat > .hg/hgrc <<EOF
> [extensions]
> intercept_rename = $TESTTMP/intercept_before_rename.py
> EOF
#if chg
$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
[255]
#else
$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
*Killed* (glob)
[137]
#endif

The inline revlog still exists, but a split version exists next to it

$ f -s .hg/store/data/file*
.hg/store/data/file.d: size=132139
.hg/store/data/file.i: size=132395
.hg/store/data/file.i.s: size=256

$ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
data/file.i 1174
data/file.d 0

recover rolls the split back; the fncache is still valid

$ hg recover
rolling back interrupted transaction
(verify step skipped, run `hg verify` to check your repository content)
$ f -s .hg/store/data/file*
.hg/store/data/file.i: size=1174
$ hg tip
changeset:   1:cc8dfb126534
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     b

$ hg verify -q
$ cd ..

Test a hard crash right after the index is moved into place
===========================================================

Now retry the procedure, but intercept the rename of the index so the crash
happens just after it.

$ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
$ cd troffset-computation-crash-after-rename

Reference size:
$ f -s file
file: size=1024
$ f -s .hg/store/data/file*
.hg/store/data/file.i: size=1174

$ cat > .hg/hgrc <<EOF
> [extensions]
> intercept_rename = $TESTTMP/intercept_after_rename.py
> EOF
#if chg
$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
[255]
#else
$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
*Killed* (glob)
[137]
#endif

The inline revlog was overwritten on disk

$ f -s .hg/store/data/file*
.hg/store/data/file.d: size=132139
.hg/store/data/file.i: size=256

$ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
data/file.i 1174
data/file.d 0

recover rolls the split back; the fncache is still valid

$ hg recover
rolling back interrupted transaction
(verify step skipped, run `hg verify` to check your repository content)
$ f -s .hg/store/data/file*
.hg/store/data/file.i: size=1174
$ hg tip
changeset:   1:cc8dfb126534
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     b

$ hg verify -q
$ cd ..

Have the transaction roll itself back without any hard crash
=============================================================

Repeat the original test, but let hg roll back the transaction.

$ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
$ cd troffset-computation-copy-rb
$ cat > .hg/hgrc <<EOF
> [hooks]
> pretxnchangegroup = false
> EOF
$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
transaction abort!
rollback completed
abort: pretxnchangegroup hook exited with status 1
[40]

The split was rolled back

$ f -s .hg/store/data/file*
.hg/store/data/file.d: size=0
.hg/store/data/file.i: size=1174


$ hg tip
changeset:   1:cc8dfb126534
tag:         tip
user:        test
date:        Thu Jan 01 00:00:00 1970 +0000
summary:     b

$ hg verify -q
$ cd ..

Read race
=========

We check that a client that starts reading a revlog (its index) after the
split and finishes reading (the data) after the rollback is fine.

$ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
$ cd troffset-computation-race
$ cat > .hg/hgrc <<EOF
> [hooks]
> pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
> pretxnclose = false
> EOF

start a reader

$ hg cat --rev 0 file \
>     --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
>     2> $TESTTMP/reader.stderr \
>     > $TESTTMP/reader.stdout &

Do a failed pull in parallel

$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
transaction abort!
rollback completed
abort: pretxnclose hook exited with status 1
[40]
$ touch $TESTTMP/writer-revlog-unsplit
$ wait

The reader should be fine
$ cat $TESTTMP/reader.stderr
$ cat $TESTTMP/reader.stdout
1 (no-eol)
$ cd ..

pending hooks
=============

We check that hooks properly see the inside of the transaction, while other
processes don't.

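The machinery behind this, in rough outline: while the transaction is open,
pending data lives in separate files and hook subprocesses run with the
HG_PENDING environment variable set to the repository root, so only hg
invocations aimed at that same repository read the pending view. A hedged,
illustrative Python sketch of that check (not the exact Mercurial
implementation):

    import os

    def sees_pending_data(repo_root):
        # hooks spawned by the open transaction export HG_PENDING=<repo root>
        return os.environ.get('HG_PENDING') == repo_root
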
$ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
$ cd troffset-computation-hooks
$ cat > .hg/hgrc <<EOF
> [hooks]
> pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
> pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
> pretxnclose.03-abort = false
> EOF

$ (
>     $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
>     hg cat -r 'max(all())' file | f --size;\
>     touch $TESTTMP/hook-done
> ) >stdout 2>stderr &

$ hg pull ../troffset-computation
pulling from ../troffset-computation
searching for changes
adding changesets
adding manifests
adding file changes
size=131072
transaction abort!
rollback completed
abort: pretxnclose.03-abort hook exited with status 1
[40]

$ cat stdout
size=1024
$ cat stderr


$ cd ..