revlog: fix a bug in revlog splitting...
Arseniy Alekseyev
r51535:05d429fe stable
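
The change below teaches the transaction object to track its temporary files explicitly: registertmp() now records each path in a new `_tmp_files` set, and add()/_addentry() refuse to journal a file found in that set. A file registered as temporary (such as old revlog data kept around while an inline revlog is split) can therefore no longer also be scheduled for truncation on rollback. The following is a minimal standalone sketch of that bookkeeping, not Mercurial's actual API; the class and literal values are hypothetical, and only the attribute and method names mirror transaction.py:

# Standalone model of the new bookkeeping; runnable as-is.
class TransactionModel:
    def __init__(self):
        self._offsetmap = {}     # append-only file -> pre-transaction size
        self._newfiles = set()   # files created by this transaction
        self._tmp_files = set()  # temporary files, deleted on success and abort

    def registertmp(self, path):
        # the fix: remember the path so add() can ignore it later
        self._tmp_files.add(path)

    def add(self, path, offset):
        # mirrors the new guard added to transaction.add()/_addentry()
        if (
            path in self._newfiles
            or path in self._offsetmap
            or path in self._tmp_files
        ):
            return
        if offset:
            self._offsetmap[path] = offset
        else:
            self._newfiles.add(path)

tx = TransactionModel()
tx.registertmp(b'data/file.i')  # e.g. an old inline revlog kept during a split
tx.add(b'data/file.i', 1234)    # now a no-op: no truncation is journaled
assert b'data/file.i' not in tx._offsetmap

With the guard in place, rolling back such a transaction simply deletes the temporary file instead of also trying to truncate it.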
@@ -1,960 +1,965 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import errno
import os

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active


UNDO_BACKUP = b'%s.backupfiles'

UNDO_FILES_MAY_NEED_CLEANUP = [
    # legacy entries that might exist on disk from previous versions:
    (b'store', b'%s.narrowspec'),
    (b'plain', b'%s.narrowspec.dirstate'),
    (b'plain', b'%s.branch'),
    (b'plain', b'%s.bookmarks'),
    (b'store', b'%s.phaseroots'),
    (b'plain', b'%s.dirstate'),
    # files actually in use today:
    (b'plain', b'%s.desc'),
    # Always delete undo last to make sure we detect that a clean up is needed if
    # the process is interrupted.
    (b'store', b'%s'),
]


def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situations where it does not
    make sense. For example after a strip.
    """
    backup_listing = UNDO_BACKUP % undo_prefix

    backup_entries = []
    undo_files = []
    svfs = vfsmap[b'store']
    try:
        with svfs(backup_listing) as f:
            backup_entries = read_backup_files(report, f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (svfs.join(backup_listing), stringutil.forcebytestr(e))
            report(msg)

    for location, f, backup_path, c in backup_entries:
        if location in vfsmap and backup_path:
            undo_files.append((vfsmap[location], backup_path))

    undo_files.append((svfs, backup_listing))
    for location, undo_path in UNDO_FILES_MAY_NEED_CLEANUP:
        undo_files.append((vfsmap[location], undo_path % undo_prefix))
    for undovfs, undofile in undo_files:
        try:
            undovfs.unlink(undofile)
        except OSError as e:
            if e.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (undovfs.join(undofile), stringutil.forcebytestr(e))
                report(msg)


def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction:
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files
    """
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate each file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both cases, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass


class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for the corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)
+        # the set of temporary files
+        self._tmp_files = set()

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

    @active
    def add(self, file, offset):
        """record the state of an append-only file before update"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
+            or file in self._tmp_files
        ):
            return
        if self._queue:
            self._queue[-1].append((file, offset))
            return

        self._addentry(file, offset)

    def _addentry(self, file, offset):
        """add an append-only entry to memory and on-disk state"""
        if (
            file in self._newfiles
            or file in self._offsetmap
            or file in self._backupmap
+            or file in self._tmp_files
        ):
            return
        if offset:
            self._offsetmap[file] = offset
        else:
            self._newfiles.add(file)
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup

        If `for_offset` is set, we expect an offset for this file to have been previously recorded
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._newfiles or file in self._backupmap:
            return
        elif file in self._offsetmap and not for_offset:
            return
        elif for_offset and file not in self._offsetmap:
            msg = (
                'calling `addbackup` with `for_offmap=True`, '
                'but no offset recorded: [%r] %r'
            )
            msg %= (location, file)
            raise error.ProgrammingError(msg)

        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            # store encoding may result in a different directory here,
            # so we have to ensure the destination directory exists
            final_dir_name = os.path.dirname(backuppath)
            util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
            # then we can copy the backup
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
+        self._tmp_files.add(tmpfile)
442 self._addbackupentry((location, b'', tmpfile, False))
447 self._addbackupentry((location, b'', tmpfile, False))
443
448
444 @active
449 @active
445 def addfilegenerator(
450 def addfilegenerator(
446 self,
451 self,
447 genid,
452 genid,
448 filenames,
453 filenames,
449 genfunc,
454 genfunc,
450 order=0,
455 order=0,
451 location=b'',
456 location=b'',
452 post_finalize=False,
457 post_finalize=False,
453 ):
458 ):
454 """add a function to generates some files at transaction commit
459 """add a function to generates some files at transaction commit
455
460
456 The `genfunc` argument is a function capable of generating proper
461 The `genfunc` argument is a function capable of generating proper
457 content of each entry in the `filename` tuple.
462 content of each entry in the `filename` tuple.
458
463
459 At transaction close time, `genfunc` will be called with one file
464 At transaction close time, `genfunc` will be called with one file
460 object argument per entries in `filenames`.
465 object argument per entries in `filenames`.
461
466
462 The transaction itself is responsible for the backup, creation and
467 The transaction itself is responsible for the backup, creation and
463 final write of such file.
468 final write of such file.
464
469
465 The `genid` argument is used to ensure the same set of file is only
470 The `genid` argument is used to ensure the same set of file is only
466 generated once. Call to `addfilegenerator` for a `genid` already
471 generated once. Call to `addfilegenerator` for a `genid` already
467 present will overwrite the old entry.
472 present will overwrite the old entry.
468
473
469 The `order` argument may be used to control the order in which multiple
474 The `order` argument may be used to control the order in which multiple
470 generator will be executed.
475 generator will be executed.
471
476
472 The `location` arguments may be used to indicate the files are located
477 The `location` arguments may be used to indicate the files are located
473 outside of the the standard directory for transaction. It should match
478 outside of the the standard directory for transaction. It should match
474 one of the key of the `transaction.vfsmap` dictionary.
479 one of the key of the `transaction.vfsmap` dictionary.
475
480
476 The `post_finalize` argument can be set to `True` for file generation
481 The `post_finalize` argument can be set to `True` for file generation
477 that must be run after the transaction has been finalized.
482 that must be run after the transaction has been finalized.
478 """
483 """
479 # For now, we are unable to do proper backup and restore of custom vfs
484 # For now, we are unable to do proper backup and restore of custom vfs
480 # but for bookmarks that are handled outside this mechanism.
485 # but for bookmarks that are handled outside this mechanism.
481 entry = (order, filenames, genfunc, location, post_finalize)
486 entry = (order, filenames, genfunc, location, post_finalize)
482 self._filegenerators[genid] = entry
487 self._filegenerators[genid] = entry
483
488
484 @active
489 @active
485 def removefilegenerator(self, genid):
490 def removefilegenerator(self, genid):
486 """reverse of addfilegenerator, remove a file generator function"""
491 """reverse of addfilegenerator, remove a file generator function"""
487 if genid in self._filegenerators:
492 if genid in self._filegenerators:
488 del self._filegenerators[genid]
493 del self._filegenerators[genid]
489
494
490 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
495 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
491 # write files registered for generation
496 # write files registered for generation
492 any = False
497 any = False
493
498
494 if group == GEN_GROUP_ALL:
499 if group == GEN_GROUP_ALL:
495 skip_post = skip_pre = False
500 skip_post = skip_pre = False
496 else:
501 else:
497 skip_pre = group == GEN_GROUP_POST_FINALIZE
502 skip_pre = group == GEN_GROUP_POST_FINALIZE
498 skip_post = group == GEN_GROUP_PRE_FINALIZE
503 skip_post = group == GEN_GROUP_PRE_FINALIZE
499
504
500 for id, entry in sorted(self._filegenerators.items()):
505 for id, entry in sorted(self._filegenerators.items()):
501 any = True
506 any = True
502 order, filenames, genfunc, location, post_finalize = entry
507 order, filenames, genfunc, location, post_finalize = entry
503
508
504 # for generation at closing, check if it's before or after finalize
509 # for generation at closing, check if it's before or after finalize
505 if skip_post and post_finalize:
510 if skip_post and post_finalize:
506 continue
511 continue
507 elif skip_pre and not post_finalize:
512 elif skip_pre and not post_finalize:
508 continue
513 continue
509
514
510 vfs = self._vfsmap[location]
515 vfs = self._vfsmap[location]
511 files = []
516 files = []
512 try:
517 try:
513 for name in filenames:
518 for name in filenames:
514 name += suffix
519 name += suffix
515 if suffix:
520 if suffix:
516 self.registertmp(name, location=location)
521 self.registertmp(name, location=location)
517 checkambig = False
522 checkambig = False
518 else:
523 else:
519 self.addbackup(name, location=location)
524 self.addbackup(name, location=location)
520 checkambig = (name, location) in self._checkambigfiles
525 checkambig = (name, location) in self._checkambigfiles
521 files.append(
526 files.append(
522 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
527 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
523 )
528 )
524 genfunc(*files)
529 genfunc(*files)
525 for f in files:
530 for f in files:
526 f.close()
531 f.close()
527 # skip discard() loop since we're sure no open file remains
532 # skip discard() loop since we're sure no open file remains
528 del files[:]
533 del files[:]
529 finally:
534 finally:
530 for f in files:
535 for f in files:
531 f.discard()
536 f.discard()
532 return any
537 return any
533
538
534 @active
539 @active
535 def findoffset(self, file):
540 def findoffset(self, file):
536 if file in self._newfiles:
541 if file in self._newfiles:
537 return 0
542 return 0
538 return self._offsetmap.get(file)
543 return self._offsetmap.get(file)
539
544
540 @active
545 @active
541 def readjournal(self):
546 def readjournal(self):
542 self._file.seek(0)
547 self._file.seek(0)
543 entries = []
548 entries = []
544 for l in self._file.readlines():
549 for l in self._file.readlines():
545 file, troffset = l.split(b'\0')
550 file, troffset = l.split(b'\0')
546 entries.append((file, int(troffset)))
551 entries.append((file, int(troffset)))
547 return entries
552 return entries
548
553
549 @active
554 @active
550 def replace(self, file, offset):
555 def replace(self, file, offset):
551 """
556 """
552 replace can only replace already committed entries
557 replace can only replace already committed entries
553 that are not pending in the queue
558 that are not pending in the queue
554 """
559 """
555 if file in self._newfiles:
560 if file in self._newfiles:
556 if not offset:
561 if not offset:
557 return
562 return
558 self._newfiles.remove(file)
563 self._newfiles.remove(file)
559 self._offsetmap[file] = offset
564 self._offsetmap[file] = offset
560 elif file in self._offsetmap:
565 elif file in self._offsetmap:
561 if not offset:
566 if not offset:
562 del self._offsetmap[file]
567 del self._offsetmap[file]
563 self._newfiles.add(file)
568 self._newfiles.add(file)
564 else:
569 else:
565 self._offsetmap[file] = offset
570 self._offsetmap[file] = offset
566 else:
571 else:
567 raise KeyError(file)
572 raise KeyError(file)
568 self._file.write(b"%s\0%d\n" % (file, offset))
573 self._file.write(b"%s\0%d\n" % (file, offset))
569 self._file.flush()
574 self._file.flush()
570
575
571 @active
576 @active
572 def nest(self, name='<unnamed>'):
577 def nest(self, name='<unnamed>'):
573 self._count += 1
578 self._count += 1
574 self._usages += 1
579 self._usages += 1
575 self._names.append(name)
580 self._names.append(name)
576 return self
581 return self
577
582
578 def release(self):
583 def release(self):
579 if self._count > 0:
584 if self._count > 0:
580 self._usages -= 1
585 self._usages -= 1
581 if self._names:
586 if self._names:
582 self._names.pop()
587 self._names.pop()
583 # if the transaction scopes are left without being closed, fail
588 # if the transaction scopes are left without being closed, fail
584 if self._count > 0 and self._usages == 0:
589 if self._count > 0 and self._usages == 0:
585 self._abort()
590 self._abort()
586
591
587 def running(self):
592 def running(self):
588 return self._count > 0
593 return self._count > 0
589
594
590 def addpending(self, category, callback):
595 def addpending(self, category, callback):
591 """add a callback to be called when the transaction is pending
596 """add a callback to be called when the transaction is pending
592
597
593 The transaction will be given as callback's first argument.
598 The transaction will be given as callback's first argument.
594
599
595 Category is a unique identifier to allow overwriting an old callback
600 Category is a unique identifier to allow overwriting an old callback
596 with a newer callback.
601 with a newer callback.
597 """
602 """
598 self._pendingcallback[category] = callback
603 self._pendingcallback[category] = callback
599
604
600 @active
605 @active
601 def writepending(self):
606 def writepending(self):
602 """write pending file to temporary version
607 """write pending file to temporary version
603
608
604 This is used to allow hooks to view a transaction before commit"""
609 This is used to allow hooks to view a transaction before commit"""
605 categories = sorted(self._pendingcallback)
610 categories = sorted(self._pendingcallback)
606 for cat in categories:
611 for cat in categories:
607 # remove callback since the data will have been flushed
612 # remove callback since the data will have been flushed
608 any = self._pendingcallback.pop(cat)(self)
613 any = self._pendingcallback.pop(cat)(self)
609 self._anypending = self._anypending or any
614 self._anypending = self._anypending or any
610 self._anypending |= self._generatefiles(suffix=b'.pending')
615 self._anypending |= self._generatefiles(suffix=b'.pending')
611 return self._anypending
616 return self._anypending
612
617
613 @active
618 @active
614 def hasfinalize(self, category):
619 def hasfinalize(self, category):
615 """check is a callback already exist for a category"""
620 """check is a callback already exist for a category"""
616 return category in self._finalizecallback
621 return category in self._finalizecallback
617
622
618 @active
623 @active
619 def addfinalize(self, category, callback):
624 def addfinalize(self, category, callback):
620 """add a callback to be called when the transaction is closed
625 """add a callback to be called when the transaction is closed
621
626
622 The transaction will be given as callback's first argument.
627 The transaction will be given as callback's first argument.
623
628
624 Category is a unique identifier to allow overwriting old callbacks with
629 Category is a unique identifier to allow overwriting old callbacks with
625 newer callbacks.
630 newer callbacks.
626 """
631 """
627 self._finalizecallback[category] = callback
632 self._finalizecallback[category] = callback
628
633
629 @active
634 @active
630 def addpostclose(self, category, callback):
635 def addpostclose(self, category, callback):
631 """add or replace a callback to be called after the transaction closed
636 """add or replace a callback to be called after the transaction closed
632
637
633 The transaction will be given as callback's first argument.
638 The transaction will be given as callback's first argument.
634
639
635 Category is a unique identifier to allow overwriting an old callback
640 Category is a unique identifier to allow overwriting an old callback
636 with a newer callback.
641 with a newer callback.
637 """
642 """
638 self._postclosecallback[category] = callback
643 self._postclosecallback[category] = callback
639
644
640 @active
645 @active
641 def getpostclose(self, category):
646 def getpostclose(self, category):
642 """return a postclose callback added before, or None"""
647 """return a postclose callback added before, or None"""
643 return self._postclosecallback.get(category, None)
648 return self._postclosecallback.get(category, None)
644
649
645 @active
650 @active
646 def addabort(self, category, callback):
651 def addabort(self, category, callback):
647 """add a callback to be called when the transaction is aborted.
652 """add a callback to be called when the transaction is aborted.
648
653
649 The transaction will be given as the first argument to the callback.
654 The transaction will be given as the first argument to the callback.
650
655
651 Category is a unique identifier to allow overwriting an old callback
656 Category is a unique identifier to allow overwriting an old callback
652 with a newer callback.
657 with a newer callback.
653 """
658 """
654 self._abortcallback[category] = callback
659 self._abortcallback[category] = callback
655
660
656 @active
661 @active
657 def addvalidator(self, category, callback):
662 def addvalidator(self, category, callback):
658 """adds a callback to be called when validating the transaction.
663 """adds a callback to be called when validating the transaction.
659
664
660 The transaction will be given as the first argument to the callback.
665 The transaction will be given as the first argument to the callback.
661
666
662 callback should raise exception if to abort transaction"""
667 callback should raise exception if to abort transaction"""
663 self._validatecallback[category] = callback
668 self._validatecallback[category] = callback
664
669
665 @active
670 @active
666 def close(self):
671 def close(self):
667 '''commit the transaction'''
672 '''commit the transaction'''
668 if self._count == 1:
673 if self._count == 1:
669 for category in sorted(self._validatecallback):
674 for category in sorted(self._validatecallback):
670 self._validatecallback[category](self)
675 self._validatecallback[category](self)
671 self._validatecallback = None # Help prevent cycles.
676 self._validatecallback = None # Help prevent cycles.
672 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
677 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
673 while self._finalizecallback:
678 while self._finalizecallback:
674 callbacks = self._finalizecallback
679 callbacks = self._finalizecallback
675 self._finalizecallback = {}
680 self._finalizecallback = {}
676 categories = sorted(callbacks)
681 categories = sorted(callbacks)
677 for cat in categories:
682 for cat in categories:
678 callbacks[cat](self)
683 callbacks[cat](self)
679 # Prevent double usage and help clear cycles.
684 # Prevent double usage and help clear cycles.
680 self._finalizecallback = None
685 self._finalizecallback = None
681 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
686 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
682
687
683 self._count -= 1
688 self._count -= 1
684 if self._count != 0:
689 if self._count != 0:
685 return
690 return
686 self._file.close()
691 self._file.close()
687 self._backupsfile.close()
692 self._backupsfile.close()
688 # cleanup temporary files
693 # cleanup temporary files
689 for l, f, b, c in self._backupentries:
694 for l, f, b, c in self._backupentries:
690 if l not in self._vfsmap and c:
695 if l not in self._vfsmap and c:
691 self._report(
696 self._report(
692 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
697 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
693 )
698 )
694 continue
699 continue
695 vfs = self._vfsmap[l]
700 vfs = self._vfsmap[l]
696 if not f and b and vfs.exists(b):
701 if not f and b and vfs.exists(b):
697 try:
702 try:
698 vfs.unlink(b)
703 vfs.unlink(b)
699 except (IOError, OSError, error.Abort) as inst:
704 except (IOError, OSError, error.Abort) as inst:
700 if not c:
705 if not c:
701 raise
706 raise
702 # Abort may be raise by read only opener
707 # Abort may be raise by read only opener
703 self._report(
708 self._report(
704 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
709 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
705 )
710 )
706 self._offsetmap = {}
711 self._offsetmap = {}
707 self._newfiles = set()
712 self._newfiles = set()
708 self._writeundo()
713 self._writeundo()
709 if self._after:
714 if self._after:
710 self._after()
715 self._after()
711 self._after = None # Help prevent cycles.
716 self._after = None # Help prevent cycles.
712 if self._opener.isfile(self._backupjournal):
717 if self._opener.isfile(self._backupjournal):
713 self._opener.unlink(self._backupjournal)
718 self._opener.unlink(self._backupjournal)
714 if self._opener.isfile(self._journal):
719 if self._opener.isfile(self._journal):
715 self._opener.unlink(self._journal)
720 self._opener.unlink(self._journal)
716 for l, _f, b, c in self._backupentries:
721 for l, _f, b, c in self._backupentries:
717 if l not in self._vfsmap and c:
722 if l not in self._vfsmap and c:
718 self._report(
723 self._report(
719 b"couldn't remove %s: unknown cache location"
724 b"couldn't remove %s: unknown cache location"
720 b"%s\n" % (b, l)
725 b"%s\n" % (b, l)
721 )
726 )
722 continue
727 continue
723 vfs = self._vfsmap[l]
728 vfs = self._vfsmap[l]
724 if b and vfs.exists(b):
729 if b and vfs.exists(b):
725 try:
730 try:
726 vfs.unlink(b)
731 vfs.unlink(b)
727 except (IOError, OSError, error.Abort) as inst:
732 except (IOError, OSError, error.Abort) as inst:
728 if not c:
733 if not c:
729 raise
734 raise
730 # Abort may be raise by read only opener
735 # Abort may be raise by read only opener
731 self._report(
736 self._report(
732 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
737 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
733 )
738 )
734 self._backupentries = []
739 self._backupentries = []
735 self._journal = None
740 self._journal = None
736
741
737 self._releasefn(self, True) # notify success of closing transaction
742 self._releasefn(self, True) # notify success of closing transaction
738 self._releasefn = None # Help prevent cycles.
743 self._releasefn = None # Help prevent cycles.
739
744
740 # run post close action
745 # run post close action
741 categories = sorted(self._postclosecallback)
746 categories = sorted(self._postclosecallback)
742 for cat in categories:
747 for cat in categories:
743 self._postclosecallback[cat](self)
748 self._postclosecallback[cat](self)
744 # Prevent double usage and help clear cycles.
749 # Prevent double usage and help clear cycles.
745 self._postclosecallback = None
750 self._postclosecallback = None
746
751
747 @active
752 @active
748 def abort(self):
753 def abort(self):
749 """abort the transaction (generally called on error, or when the
754 """abort the transaction (generally called on error, or when the
750 transaction is not explicitly committed before going out of
755 transaction is not explicitly committed before going out of
751 scope)"""
756 scope)"""
752 self._abort()
757 self._abort()
753
758
754 @active
759 @active
755 def add_journal(self, vfs_id, path):
760 def add_journal(self, vfs_id, path):
756 self._journal_files.append((vfs_id, path))
761 self._journal_files.append((vfs_id, path))
757
762
758 def _writeundo(self):
763 def _writeundo(self):
759 """write transaction data for possible future undo call"""
764 """write transaction data for possible future undo call"""
760 if self._undoname is None:
765 if self._undoname is None:
761 return
766 return
762 cleanup_undo_files(
767 cleanup_undo_files(
763 self._report,
768 self._report,
764 self._vfsmap,
769 self._vfsmap,
765 undo_prefix=self._undoname,
770 undo_prefix=self._undoname,
766 )
771 )
767
772
768 def undoname(fn: bytes) -> bytes:
773 def undoname(fn: bytes) -> bytes:
769 base, name = os.path.split(fn)
774 base, name = os.path.split(fn)
770 assert name.startswith(self._journal)
775 assert name.startswith(self._journal)
771 new_name = name.replace(self._journal, self._undoname, 1)
776 new_name = name.replace(self._journal, self._undoname, 1)
772 return os.path.join(base, new_name)
777 return os.path.join(base, new_name)
773
778
774 undo_backup_path = b"%s.backupfiles" % self._undoname
779 undo_backup_path = b"%s.backupfiles" % self._undoname
775 undobackupfile = self._opener.open(undo_backup_path, b'w')
780 undobackupfile = self._opener.open(undo_backup_path, b'w')
776 undobackupfile.write(b'%d\n' % version)
781 undobackupfile.write(b'%d\n' % version)
777 for l, f, b, c in self._backupentries:
782 for l, f, b, c in self._backupentries:
778 if not f: # temporary file
783 if not f: # temporary file
779 continue
784 continue
780 if not b:
785 if not b:
781 u = b''
786 u = b''
782 else:
787 else:
783 if l not in self._vfsmap and c:
788 if l not in self._vfsmap and c:
784 self._report(
789 self._report(
785 b"couldn't remove %s: unknown cache location"
790 b"couldn't remove %s: unknown cache location"
786 b"%s\n" % (b, l)
791 b"%s\n" % (b, l)
787 )
792 )
788 continue
793 continue
789 vfs = self._vfsmap[l]
794 vfs = self._vfsmap[l]
790 u = undoname(b)
795 u = undoname(b)
791 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
796 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
792 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
797 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
793 undobackupfile.close()
798 undobackupfile.close()
794 for vfs, src in self._journal_files:
799 for vfs, src in self._journal_files:
795 dest = undoname(src)
800 dest = undoname(src)
796 # if src and dest refer to a same file, vfs.rename is a no-op,
801 # if src and dest refer to a same file, vfs.rename is a no-op,
797 # leaving both src and dest on disk. delete dest to make sure
802 # leaving both src and dest on disk. delete dest to make sure
798 # the rename couldn't be such a no-op.
803 # the rename couldn't be such a no-op.
799 vfs.tryunlink(dest)
804 vfs.tryunlink(dest)
800 try:
805 try:
801 vfs.rename(src, dest)
806 vfs.rename(src, dest)
802 except FileNotFoundError: # journal file does not yet exist
807 except FileNotFoundError: # journal file does not yet exist
803 pass
808 pass
804
809
805 def _abort(self):
810 def _abort(self):
806 entries = self.readjournal()
811 entries = self.readjournal()
807 self._count = 0
812 self._count = 0
808 self._usages = 0
813 self._usages = 0
809 self._file.close()
814 self._file.close()
810 self._backupsfile.close()
815 self._backupsfile.close()
811
816
812 quick = self._can_quick_abort(entries)
817 quick = self._can_quick_abort(entries)
813 try:
818 try:
814 if not quick:
819 if not quick:
815 self._report(_(b"transaction abort!\n"))
820 self._report(_(b"transaction abort!\n"))
816 for cat in sorted(self._abortcallback):
821 for cat in sorted(self._abortcallback):
817 self._abortcallback[cat](self)
822 self._abortcallback[cat](self)
818 # Prevent double usage and help clear cycles.
823 # Prevent double usage and help clear cycles.
819 self._abortcallback = None
824 self._abortcallback = None
820 if quick:
825 if quick:
821 self._do_quick_abort(entries)
826 self._do_quick_abort(entries)
822 else:
827 else:
823 self._do_full_abort(entries)
828 self._do_full_abort(entries)
824 finally:
829 finally:
825 self._journal = None
830 self._journal = None
826 self._releasefn(self, False) # notify failure of transaction
831 self._releasefn(self, False) # notify failure of transaction
827 self._releasefn = None # Help prevent cycles.
832 self._releasefn = None # Help prevent cycles.
828
833
829 def _can_quick_abort(self, entries):
834 def _can_quick_abort(self, entries):
830 """False if any semantic content have been written on disk
835 """False if any semantic content have been written on disk
831
836
832 True if nothing, except temporary files has been writen on disk."""
837 True if nothing, except temporary files has been writen on disk."""
833 if entries:
838 if entries:
834 return False
839 return False
835 for e in self._backupentries:
840 for e in self._backupentries:
836 if e[1]:
841 if e[1]:
837 return False
842 return False
838 return True
843 return True
839
844
840 def _do_quick_abort(self, entries):
845 def _do_quick_abort(self, entries):
841 """(Silently) do a quick cleanup (see _can_quick_abort)"""
846 """(Silently) do a quick cleanup (see _can_quick_abort)"""
842 assert self._can_quick_abort(entries)
847 assert self._can_quick_abort(entries)
843 tmp_files = [e for e in self._backupentries if not e[1]]
848 tmp_files = [e for e in self._backupentries if not e[1]]
844 for vfs_id, old_path, tmp_path, xxx in tmp_files:
849 for vfs_id, old_path, tmp_path, xxx in tmp_files:
845 vfs = self._vfsmap[vfs_id]
850 vfs = self._vfsmap[vfs_id]
846 try:
851 try:
847 vfs.unlink(tmp_path)
852 vfs.unlink(tmp_path)
848 except FileNotFoundError:
853 except FileNotFoundError:
849 pass
854 pass
850 if self._backupjournal:
855 if self._backupjournal:
851 self._opener.unlink(self._backupjournal)
856 self._opener.unlink(self._backupjournal)
852 if self._journal:
857 if self._journal:
853 self._opener.unlink(self._journal)
858 self._opener.unlink(self._journal)
854
859
855 def _do_full_abort(self, entries):
860 def _do_full_abort(self, entries):
856 """(Noisily) rollback all the change introduced by the transaction"""
861 """(Noisily) rollback all the change introduced by the transaction"""
857 try:
862 try:
858 _playback(
863 _playback(
859 self._journal,
864 self._journal,
860 self._report,
865 self._report,
861 self._opener,
866 self._opener,
862 self._vfsmap,
867 self._vfsmap,
863 entries,
868 entries,
864 self._backupentries,
869 self._backupentries,
865 False,
870 False,
866 checkambigfiles=self._checkambigfiles,
871 checkambigfiles=self._checkambigfiles,
867 )
872 )
868 self._report(_(b"rollback completed\n"))
873 self._report(_(b"rollback completed\n"))
869 except BaseException as exc:
874 except BaseException as exc:
870 self._report(_(b"rollback failed - please run hg recover\n"))
875 self._report(_(b"rollback failed - please run hg recover\n"))
871 self._report(
876 self._report(
872 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
877 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
873 )
878 )
874
879
875
880
876 BAD_VERSION_MSG = _(
881 BAD_VERSION_MSG = _(
877 b"journal was created by a different version of Mercurial\n"
882 b"journal was created by a different version of Mercurial\n"
878 )
883 )
879
884
880
885
881 def read_backup_files(report, fp):
886 def read_backup_files(report, fp):
882 """parse an (already open) backup file an return contained backup entries
887 """parse an (already open) backup file an return contained backup entries
883
888
884 entries are in the form: (location, file, backupfile, xxx)
889 entries are in the form: (location, file, backupfile, xxx)
885
890
886 :location: the vfs identifier (vfsmap's key)
891 :location: the vfs identifier (vfsmap's key)
887 :file: original file path (in the vfs)
892 :file: original file path (in the vfs)
888 :backupfile: path of the backup (in the vfs)
893 :backupfile: path of the backup (in the vfs)
889 :cache: a boolean currently always set to False
894 :cache: a boolean currently always set to False
890 """
895 """
891 lines = fp.readlines()
896 lines = fp.readlines()
892 backupentries = []
897 backupentries = []
893 if lines:
898 if lines:
894 ver = lines[0][:-1]
899 ver = lines[0][:-1]
895 if ver != (b'%d' % version):
900 if ver != (b'%d' % version):
896 report(BAD_VERSION_MSG)
901 report(BAD_VERSION_MSG)
897 else:
902 else:
898 for line in lines[1:]:
903 for line in lines[1:]:
899 if line:
904 if line:
900 # Shave off the trailing newline
905 # Shave off the trailing newline
901 line = line[:-1]
906 line = line[:-1]
902 l, f, b, c = line.split(b'\0')
907 l, f, b, c = line.split(b'\0')
903 backupentries.append((l, f, b, bool(c)))
908 backupentries.append((l, f, b, bool(c)))
904 return backupentries
909 return backupentries


def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
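
As a companion illustration: the journal that `rollback` reads is just
newline-separated `file\0offset` records, mirrored by this standalone sketch
(the sample content is invented for the example):

    sample_journal = b"data/file.i\x001174\ndata/file.d\x000\n"

    entries = []
    for line in sample_journal.splitlines():
        f, o = line.split(b'\x00')
        entries.append((f, int(o)))
    # -> [(b'data/file.i', 1174), (b'data/file.d', 0)]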
@@ -1,448 +1,454 b''
Test correctness of revlog inline -> non-inline transition
----------------------------------------------------------

We test various file lengths and naming patterns, as these have caused
issues in the past.

Helper extensions to intercept renames and kill the process

  $ cat > $TESTTMP/intercept_before_rename.py << EOF
  > import os
  > import signal
  > from mercurial import extensions, util
  >
  > def extsetup(ui):
  >     def rename(orig, src, dest, *args, **kwargs):
  >         path = util.normpath(dest)
  >         if path.endswith(b'data/file.i'):
  >             os.kill(os.getpid(), signal.SIGKILL)
  >         return orig(src, dest, *args, **kwargs)
  >     extensions.wrapfunction(util, 'rename', rename)
  > EOF

  $ cat > $TESTTMP/intercept_after_rename.py << EOF
  > import os
  > import signal
  > from mercurial import extensions, util
  >
  > def extsetup(ui):
  >     def rename(orig, src, dest, *args, **kwargs):
  >         path = util.normpath(dest)
  >         r = orig(src, dest, *args, **kwargs)
  >         if path.endswith(b'data/file.i'):
  >             os.kill(os.getpid(), signal.SIGKILL)
  >         return r
  >     extensions.wrapfunction(util, 'rename', rename)
  > EOF

  $ cat > $TESTTMP/killme.py << EOF
  > import os
  > import signal
  >
  > def killme(ui, repo, hooktype, **kwargs):
  >     os.kill(os.getpid(), signal.SIGKILL)
  > EOF

  $ cat > $TESTTMP/reader_wait_split.py << EOF
  > import os
  > import signal
  > from mercurial import extensions, revlog, testing
  > def _wait_post_load(orig, self, *args, **kwargs):
  >     wait = b'data/file' in self.radix
  >     if wait:
  >         testing.wait_file(b"$TESTTMP/writer-revlog-split")
  >     r = orig(self, *args, **kwargs)
  >     if wait:
  >         testing.write_file(b"$TESTTMP/reader-index-read")
  >         testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
  >     return r
  >
  > def extsetup(ui):
  >     extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
  > EOF

Setup a repository for tests
----------------------------

  $ cat >> $HGRCPATH << EOF
  > [format]
  > revlog-compression=none
  > EOF

  $ hg init troffset-computation
  $ cd troffset-computation
  $ files="
  > file
  > Directory_With,Special%Char/Complex_File.babar
  > foo/bar/babar_celeste/foo
  > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
  > "
  $ for f in $files; do
  > mkdir -p `dirname $f`
  > done
  $ for f in $files; do
  > printf '%20d' '1' > $f
  > done
  $ hg commit -Aqma
  $ for f in $files; do
  > printf '%1024d' '1' > $f
  > done
  $ hg commit -Aqmb
  $ for f in $files; do
  > printf '%20d' '1' > $f
  > done
  $ hg commit -Aqmc
  $ for f in $files; do
  > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
  > done
  $ hg commit -AqmD --traceback
+  $ for f in $files; do
+  > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
+  > done
+  $ hg commit -AqmD --traceback

Reference size:
  $ f -s file
-  file: size=131072
+  file: size=135168
  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
-  .hg/store/data/file.i: size=256
+  .hg/store/data/file.d: size=267307
+  .hg/store/data/file.i: size=320

  $ cd ..


Test a hard crash after the file was split but before the transaction was committed
===================================================================================

Test offset computation to correctly factor in the index entries themselves.
Also test that the new data file has the correct size if the transaction is aborted
after the index has been replaced.

The test repo has commits a, b, c, D, where D is large (grows the revlog enough that
it transitions to non-inline storage). The clone initially has changes a, b
and will transition to non-inline storage when adding c, D.

If the transaction adding c, D is rolled back, then we don't undo the revlog split,
but truncate the index and the data to remove both c and D; a conceptual sketch of
that truncation follows.

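A minimal, illustrative Python sketch of that truncation idea, assuming plain
files and the offsets recorded in the journal (this helper is hypothetical,
not Mercurial's actual recovery code):

    def truncate_to_journaled_size(path, offset):
        # Discard everything the aborted transaction appended past `offset`.
        with open(path, "r+b") as fp:
            fp.truncate(offset)

    # e.g. the journal entry "data/file.i 1174" seen below restores the
    # index file to its pre-transaction size of 1174 bytes
    truncate_to_journaled_size(".hg/store/data/file.i", 1174)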

  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
  $ cd troffset-computation-copy

Reference size:
  $ f -s file
  file: size=1024
  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174

  $ cat > .hg/hgrc <<EOF
  > [hooks]
  > pretxnchangegroup = python:$TESTTMP/killme.py:killme
  > EOF
#if chg
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  [255]
#else
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  *Killed* (glob)
  [137]
#endif


The inline revlog still exists, but a split version exists next to it

+  $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
+  [1]
  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
+  .hg/store/data/file.d: size=267307
  .hg/store/data/file.i: size=132395
-  .hg/store/data/file.i.s: size=256
+  .hg/store/data/file.i.s: size=320


The first file.i entry should match the "Reference size" above.
The first file.d entry is the temporary record written during the split.

A "temporary file" entry exists for the split index.

  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
  data/file.i 1174
  data/file.d 0
  $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep data/file
  data/file.i data/journal.backup.file.i 0
  data/file.i.s 0
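
Note that the data/file.i.s entry above has no backup path: on rollback such
an entry is treated as a transient file and removed outright, which is why
the split index disappears after hg recover below. A hypothetical sketch of
that rule (illustrative only, not Mercurial's implementation):

    import os

    def playback_backup_entry(target, backupfile):
        if backupfile:
            # a backup was recorded: restore the original content
            os.replace(backupfile, target)
        elif os.path.exists(target):
            # no backup recorded: the file is transient, drop it
            os.unlink(target)

    playback_backup_entry(".hg/store/data/file.i.s", "")  # removes the split index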

recover rolls the split back; the fncache is still valid

  $ hg recover
  rolling back interrupted transaction
  (verify step skipped, run `hg verify` to check your repository content)
  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174
  $ hg tip
  changeset:   1:cc8dfb126534
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     b

  $ hg verify -q
  $ hg debugrebuildfncache --only-data
  fncache already up to date
  $ hg verify -q
  $ cd ..

Test a hard crash right before the index is moved into place
=============================================================

Now retry the procedure but intercept the rename of the index and check that
the journal does not contain the new index size. This demonstrates the edge case
where the data file is left as garbage.

  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
  $ cd troffset-computation-copy2

Reference size:
  $ f -s file
  file: size=1024
  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174

  $ cat > .hg/hgrc <<EOF
  > [extensions]
  > intercept_rename = $TESTTMP/intercept_before_rename.py
  > EOF
#if chg
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  [255]
#else
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  *Killed* (glob)
  [137]
#endif

The inline revlog still exists, but a split version exists next to it

  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
+  .hg/store/data/file.d: size=267307
  .hg/store/data/file.i: size=132395
-  .hg/store/data/file.i.s: size=256
+  .hg/store/data/file.i.s: size=320

  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
  data/file.i 1174
  data/file.d 0

recover rolls the split back; the fncache is still valid

  $ hg recover
  rolling back interrupted transaction
  (verify step skipped, run `hg verify` to check your repository content)
  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174
  $ hg tip
  changeset:   1:cc8dfb126534
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     b

  $ hg verify -q
  $ cd ..

Test a hard crash right after the index is moved into place
===========================================================

Now retry the procedure but intercept the rename of the index.

  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
  $ cd troffset-computation-crash-after-rename

Reference size:
  $ f -s file
  file: size=1024
  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174

  $ cat > .hg/hgrc <<EOF
  > [extensions]
  > intercept_rename = $TESTTMP/intercept_after_rename.py
  > EOF
#if chg
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  [255]
#else
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  *Killed* (glob)
  [137]
#endif

The inline revlog was overwritten on disk

  $ f -s .hg/store/data/file*
-  .hg/store/data/file.d: size=132139
-  .hg/store/data/file.i: size=256
+  .hg/store/data/file.d: size=267307
+  .hg/store/data/file.i: size=320

  $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
  data/file.i 1174
  data/file.d 0

recover rolls the split back; the fncache is still valid

  $ hg recover
  rolling back interrupted transaction
  (verify step skipped, run `hg verify` to check your repository content)
  $ f -s .hg/store/data/file*
  .hg/store/data/file.i: size=1174
  $ hg tip
  changeset:   1:cc8dfb126534
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     b

  $ hg verify -q
  $ cd ..

Have the transaction roll back itself without any hard crash
=============================================================


Repeat the original test but let hg roll back the transaction.

  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
  $ cd troffset-computation-copy-rb
  $ cat > .hg/hgrc <<EOF
  > [hooks]
  > pretxnchangegroup = false
  > EOF
  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  transaction abort!
  rollback completed
  abort: pretxnchangegroup hook exited with status 1
  [40]

The split was rolled back

  $ f -s .hg/store/data/file*
  .hg/store/data/file.d: size=0
  .hg/store/data/file.i: size=1174


  $ hg tip
  changeset:   1:cc8dfb126534
  tag:         tip
  user:        test
  date:        Thu Jan 01 00:00:00 1970 +0000
  summary:     b

  $ hg verify -q
  $ cd ..

Read race
=========

We check that a client that starts reading a revlog (its index) after the
split and finishes reading it (the data) after the rollback is fine

  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
  $ cd troffset-computation-race
  $ cat > .hg/hgrc <<EOF
  > [hooks]
  > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
  > pretxnclose = false
  > EOF

Start a reader

  $ hg cat --rev 0 file \
  > --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
  > 2> $TESTTMP/reader.stderr \
  > > $TESTTMP/reader.stdout &

Do a failed pull in parallel

  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  transaction abort!
  rollback completed
  abort: pretxnclose hook exited with status 1
  [40]
  $ touch $TESTTMP/writer-revlog-unsplit
  $ wait

The reader should be fine
  $ cat $TESTTMP/reader.stderr
  $ cat $TESTTMP/reader.stdout
  1 (no-eol)
  $ cd ..

Pending hooks
=============

We check that hooks properly see the inside of the transaction, while other
processes don't.

  $ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
  $ cd troffset-computation-hooks
  $ cat > .hg/hgrc <<EOF
  > [hooks]
  > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
  > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
  > pretxnclose.03-abort = false
  > EOF

  $ (
  > $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
  > hg cat -r 'max(all())' file | f --size;\
  > touch $TESTTMP/hook-done
  > ) >stdout 2>stderr &

  $ hg pull ../troffset-computation
  pulling from ../troffset-computation
  searching for changes
  adding changesets
  adding manifests
  adding file changes
-  size=131072
+  size=135168
  transaction abort!
  rollback completed
  abort: pretxnclose.03-abort hook exited with status 1
  [40]

  $ cat stdout
  size=1024
  $ cat stderr


  $ cd ..