##// END OF EJS Templates
transaction: actually delete file created during the transaction on rollback...
marmoute -
r51703:5c3d0795 default
parent child Browse files
Show More
@@ -1,965 +1,965 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 import errno
14 import errno
15 import os
15 import os
16
16
17 from .i18n import _
17 from .i18n import _
18 from . import (
18 from . import (
19 error,
19 error,
20 pycompat,
20 pycompat,
21 util,
21 util,
22 )
22 )
23 from .utils import stringutil
23 from .utils import stringutil
24
24
25 version = 2
25 version = 2
26
26
27 GEN_GROUP_ALL = b'all'
27 GEN_GROUP_ALL = b'all'
28 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
28 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
29 GEN_GROUP_POST_FINALIZE = b'postfinalize'
29 GEN_GROUP_POST_FINALIZE = b'postfinalize'
30
30
31
31
def active(func):
    """Decorator ensuring a transaction is still live before running `func`.

    Wrapped methods raise a ProgrammingError when invoked after the
    transaction has been committed or aborted (i.e. `_count` reached 0).
    """

    def _active(self, *args, **kwds):
        if not self._count:
            msg = b'cannot use transaction when it is already committed/aborted'
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwds)

    return _active
41
41
42
42
43 UNDO_BACKUP = b'%s.backupfiles'
43 UNDO_BACKUP = b'%s.backupfiles'
44
44
45 UNDO_FILES_MAY_NEED_CLEANUP = [
45 UNDO_FILES_MAY_NEED_CLEANUP = [
46 # legacy entries that might exists on disk from previous version:
46 # legacy entries that might exists on disk from previous version:
47 (b'store', b'%s.narrowspec'),
47 (b'store', b'%s.narrowspec'),
48 (b'plain', b'%s.narrowspec.dirstate'),
48 (b'plain', b'%s.narrowspec.dirstate'),
49 (b'plain', b'%s.branch'),
49 (b'plain', b'%s.branch'),
50 (b'plain', b'%s.bookmarks'),
50 (b'plain', b'%s.bookmarks'),
51 (b'store', b'%s.phaseroots'),
51 (b'store', b'%s.phaseroots'),
52 (b'plain', b'%s.dirstate'),
52 (b'plain', b'%s.dirstate'),
53 # files actually in uses today:
53 # files actually in uses today:
54 (b'plain', b'%s.desc'),
54 (b'plain', b'%s.desc'),
55 # Always delete undo last to make sure we detect that a clean up is needed if
55 # Always delete undo last to make sure we detect that a clean up is needed if
56 # the process is interrupted.
56 # the process is interrupted.
57 (b'store', b'%s'),
57 (b'store', b'%s'),
58 ]
58 ]
59
59
60
60
def cleanup_undo_files(report, vfsmap, undo_prefix=b'undo'):
    """remove "undo" files used by the rollback logic

    This is useful to prevent rollback running in situation were it does not
    make sense. For example after a strip.
    """
    listing_name = UNDO_BACKUP % undo_prefix
    store_vfs = vfsmap[b'store']

    entries = []
    try:
        with store_vfs(listing_name) as fp:
            entries = read_backup_files(report, fp)
    except OSError as exc:
        # a missing backup listing simply means there is nothing to clean up
        if exc.errno != errno.ENOENT:
            msg = _(b'could not read %s: %s\n')
            msg %= (store_vfs.join(listing_name), stringutil.forcebytestr(exc))
            report(msg)

    # every backup file referenced by the listing (when its location is known)
    to_delete = [
        (vfsmap[location], backup_path)
        for location, _path, backup_path, _cache in entries
        if location in vfsmap and backup_path
    ]
    # then the listing itself, followed by the known undo files (the "undo"
    # file itself goes last so an interruption remains detectable)
    to_delete.append((store_vfs, listing_name))
    for location, pattern in UNDO_FILES_MAY_NEED_CLEANUP:
        to_delete.append((vfsmap[location], pattern % undo_prefix))

    for vfs, path in to_delete:
        try:
            vfs.unlink(path)
        except OSError as exc:
            if exc.errno != errno.ENOENT:
                msg = _(b'error removing %s: %s\n')
                msg %= (vfs.join(path), stringutil.forcebytestr(exc))
                report(msg)
96
96
97
97
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """rollback a transaction :
    - truncate files that have been appended to
    - restore file backups
    - delete temporary files

    `journal`: path (relative to `opener`) of the journal file
    `report`: callable used to emit user-facing warnings
    `opener`: vfs for the store content
    `vfsmap`: {location -> vfs} map used to resolve backup entries
    `entries`: iterable of (file, offset) pairs to truncate back to
    `backupentries`: (location, path, backuppath, cache) tuples
    `unlink`: when True, files with offset 0 are deleted instead of truncated
    `checkambigfiles`: set of (path, location) pairs needing stat-ambiguity
    avoidance on restore
    """
    # backup files successfully restored; removed at the very end only
    backupfiles = []

    def restore_one_backup(vfs, f, b, checkambig):
        # copy backup `b` over `f`; record success so `b` can be cleaned up
        filepath = vfs.join(f)
        backuppath = vfs.join(b)
        try:
            util.copyfile(backuppath, filepath, checkambig=checkambig)
            backupfiles.append((vfs, b))
        except IOError as exc:
            e_msg = stringutil.forcebytestr(exc)
            report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            raise

    # gather all backup files that impact the store
    # (we need this to detect files that are both backed up and truncated)
    store_backup = {}
    for entry in backupentries:
        location, file_path, backup_path, cache = entry
        vfs = vfsmap[location]
        is_store = vfs.join(b'') == opener.join(b'')
        if is_store and file_path and backup_path:
            store_backup[file_path] = entry
    copy_done = set()

    # truncate all file `f` to offset `o`
    for f, o in sorted(dict(entries).items()):
        # if we have a backup for `f`, we should restore it first and truncate
        # the restored file
        bck_entry = store_backup.get(f)
        if bck_entry is not None:
            location, file_path, backup_path, cache = bck_entry
            checkambig = False
            if checkambigfiles:
                checkambig = (file_path, location) in checkambigfiles
            restore_one_backup(opener, file_path, backup_path, checkambig)
            copy_done.add(bck_entry)
        # truncate the file to its pre-transaction size
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    # the file is shorter than the recorded offset: we cannot
                    # "truncate up"; something is deeply wrong, bail out
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            # delete empty file
            try:
                opener.unlink(f)
            except FileNotFoundError:
                pass
    # restore backed up files and clean up temporary files
    for entry in backupentries:
        if entry in copy_done:
            continue
        l, f, b, c = entry
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        # NOTE(review): if `l` is not in vfsmap the next line raises KeyError
        # (the report above does not `continue`) — confirm this is intended
        vfs = vfsmap[l]
        try:
            checkambig = checkambigfiles and (f, l) in checkambigfiles
            if f and b:
                restore_one_backup(vfs, f, b, checkambig)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except FileNotFoundError:
                    # This is fine because
                    #
                    # either we are trying to delete the main file, and it is
                    # already deleted.
                    #
                    # or we are trying to delete a temporary file and it is
                    # already deleted.
                    #
                    # in both case, our target result (delete the file) is
                    # already achieved.
                    pass
        except (IOError, OSError, error.Abort):
            # cache files are best-effort; anything else must propagate
            if not c:
                raise

    # cleanup transaction state file and the backups file
    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for vfs, f in backupfiles:
            if vfs.exists(f):
                vfs.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup file remains, it is safe to ignore any error
        pass
217
217
218
218
219 class transaction(util.transactional):
219 class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        * `validator`: registered as the first validation callback; validation
          callbacks should raise if anything is wrong

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # {file -> pre-transaction offset} for appended files
        self._offsetmap = {}
        # files created during the transaction (no pre-existing content)
        self._newfiles = set()
        self._journal = journalname
        self._journal_files = []
        self._undoname = undoname
        # stack of pending entry groups (see startgroup/endgroup)
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)
        # the set of temporary files
        self._tmp_files = set()

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # holds callback to call when validating the transaction
        # should raise exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
317
317
318 def __repr__(self):
318 def __repr__(self):
319 name = b'/'.join(self._names)
319 name = b'/'.join(self._names)
320 return '<transaction name=%s, count=%d, usages=%d>' % (
320 return '<transaction name=%s, count=%d, usages=%d>' % (
321 name,
321 name,
322 self._count,
322 self._count,
323 self._usages,
323 self._usages,
324 )
324 )
325
325
326 def __del__(self):
326 def __del__(self):
327 if self._journal:
327 if self._journal:
328 self._abort()
328 self._abort()
329
329
330 @property
330 @property
331 def finalized(self):
331 def finalized(self):
332 return self._finalizecallback is None
332 return self._finalizecallback is None
333
333
334 @active
334 @active
335 def startgroup(self):
335 def startgroup(self):
336 """delay registration of file entry
336 """delay registration of file entry
337
337
338 This is used by strip to delay vision of strip offset. The transaction
338 This is used by strip to delay vision of strip offset. The transaction
339 sees either none or all of the strip actions to be done."""
339 sees either none or all of the strip actions to be done."""
340 self._queue.append([])
340 self._queue.append([])
341
341
342 @active
342 @active
343 def endgroup(self):
343 def endgroup(self):
344 """apply delayed registration of file entry.
344 """apply delayed registration of file entry.
345
345
346 This is used by strip to delay vision of strip offset. The transaction
346 This is used by strip to delay vision of strip offset. The transaction
347 sees either none or all of the strip actions to be done."""
347 sees either none or all of the strip actions to be done."""
348 q = self._queue.pop()
348 q = self._queue.pop()
349 for f, o in q:
349 for f, o in q:
350 self._addentry(f, o)
350 self._addentry(f, o)
351
351
352 @active
352 @active
353 def add(self, file, offset):
353 def add(self, file, offset):
354 """record the state of an append-only file before update"""
354 """record the state of an append-only file before update"""
355 if (
355 if (
356 file in self._newfiles
356 file in self._newfiles
357 or file in self._offsetmap
357 or file in self._offsetmap
358 or file in self._backupmap
358 or file in self._backupmap
359 or file in self._tmp_files
359 or file in self._tmp_files
360 ):
360 ):
361 return
361 return
362 if self._queue:
362 if self._queue:
363 self._queue[-1].append((file, offset))
363 self._queue[-1].append((file, offset))
364 return
364 return
365
365
366 self._addentry(file, offset)
366 self._addentry(file, offset)
367
367
368 def _addentry(self, file, offset):
368 def _addentry(self, file, offset):
369 """add a append-only entry to memory and on-disk state"""
369 """add a append-only entry to memory and on-disk state"""
370 if (
370 if (
371 file in self._newfiles
371 file in self._newfiles
372 or file in self._offsetmap
372 or file in self._offsetmap
373 or file in self._backupmap
373 or file in self._backupmap
374 or file in self._tmp_files
374 or file in self._tmp_files
375 ):
375 ):
376 return
376 return
377 if offset:
377 if offset:
378 self._offsetmap[file] = offset
378 self._offsetmap[file] = offset
379 else:
379 else:
380 self._newfiles.add(file)
380 self._newfiles.add(file)
381 # add enough data to the journal to do the truncate
381 # add enough data to the journal to do the truncate
382 self._file.write(b"%s\0%d\n" % (file, offset))
382 self._file.write(b"%s\0%d\n" % (file, offset))
383 self._file.flush()
383 self._file.flush()
384
384
385 @active
385 @active
386 def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
386 def addbackup(self, file, hardlink=True, location=b'', for_offset=False):
387 """Adds a backup of the file to the transaction
387 """Adds a backup of the file to the transaction
388
388
389 Calling addbackup() creates a hardlink backup of the specified file
389 Calling addbackup() creates a hardlink backup of the specified file
390 that is used to recover the file in the event of the transaction
390 that is used to recover the file in the event of the transaction
391 aborting.
391 aborting.
392
392
393 * `file`: the file path, relative to .hg/store
393 * `file`: the file path, relative to .hg/store
394 * `hardlink`: use a hardlink to quickly create the backup
394 * `hardlink`: use a hardlink to quickly create the backup
395
395
396 If `for_offset` is set, we expect a offset for this file to have been previously recorded
396 If `for_offset` is set, we expect a offset for this file to have been previously recorded
397 """
397 """
398 if self._queue:
398 if self._queue:
399 msg = b'cannot use transaction.addbackup inside "group"'
399 msg = b'cannot use transaction.addbackup inside "group"'
400 raise error.ProgrammingError(msg)
400 raise error.ProgrammingError(msg)
401
401
402 if file in self._newfiles or file in self._backupmap:
402 if file in self._newfiles or file in self._backupmap:
403 return
403 return
404 elif file in self._offsetmap and not for_offset:
404 elif file in self._offsetmap and not for_offset:
405 return
405 return
406 elif for_offset and file not in self._offsetmap:
406 elif for_offset and file not in self._offsetmap:
407 msg = (
407 msg = (
408 'calling `addbackup` with `for_offmap=True`, '
408 'calling `addbackup` with `for_offmap=True`, '
409 'but no offset recorded: [%r] %r'
409 'but no offset recorded: [%r] %r'
410 )
410 )
411 msg %= (location, file)
411 msg %= (location, file)
412 raise error.ProgrammingError(msg)
412 raise error.ProgrammingError(msg)
413
413
414 vfs = self._vfsmap[location]
414 vfs = self._vfsmap[location]
415 dirname, filename = vfs.split(file)
415 dirname, filename = vfs.split(file)
416 backupfilename = b"%s.backup.%s.bck" % (self._journal, filename)
416 backupfilename = b"%s.backup.%s.bck" % (self._journal, filename)
417 backupfile = vfs.reljoin(dirname, backupfilename)
417 backupfile = vfs.reljoin(dirname, backupfilename)
418 if vfs.exists(file):
418 if vfs.exists(file):
419 filepath = vfs.join(file)
419 filepath = vfs.join(file)
420 backuppath = vfs.join(backupfile)
420 backuppath = vfs.join(backupfile)
421 # store encoding may result in different directory here.
421 # store encoding may result in different directory here.
422 # so we have to ensure the destination directory exist
422 # so we have to ensure the destination directory exist
423 final_dir_name = os.path.dirname(backuppath)
423 final_dir_name = os.path.dirname(backuppath)
424 util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
424 util.makedirs(final_dir_name, mode=vfs.createmode, notindexed=True)
425 # then we can copy the backup
425 # then we can copy the backup
426 util.copyfile(filepath, backuppath, hardlink=hardlink)
426 util.copyfile(filepath, backuppath, hardlink=hardlink)
427 else:
427 else:
428 backupfile = b''
428 backupfile = b''
429
429
430 self._addbackupentry((location, file, backupfile, False))
430 self._addbackupentry((location, file, backupfile, False))
431
431
432 def _addbackupentry(self, entry):
432 def _addbackupentry(self, entry):
433 """register a new backup entry and write it to disk"""
433 """register a new backup entry and write it to disk"""
434 self._backupentries.append(entry)
434 self._backupentries.append(entry)
435 self._backupmap[entry[1]] = len(self._backupentries) - 1
435 self._backupmap[entry[1]] = len(self._backupentries) - 1
436 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
436 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
437 self._backupsfile.flush()
437 self._backupsfile.flush()
438
438
439 @active
439 @active
440 def registertmp(self, tmpfile, location=b''):
440 def registertmp(self, tmpfile, location=b''):
441 """register a temporary transaction file
441 """register a temporary transaction file
442
442
443 Such files will be deleted when the transaction exits (on both
443 Such files will be deleted when the transaction exits (on both
444 failure and success).
444 failure and success).
445 """
445 """
446 self._tmp_files.add(tmpfile)
446 self._tmp_files.add(tmpfile)
447 self._addbackupentry((location, b'', tmpfile, False))
447 self._addbackupentry((location, b'', tmpfile, False))
448
448
449 @active
449 @active
450 def addfilegenerator(
450 def addfilegenerator(
451 self,
451 self,
452 genid,
452 genid,
453 filenames,
453 filenames,
454 genfunc,
454 genfunc,
455 order=0,
455 order=0,
456 location=b'',
456 location=b'',
457 post_finalize=False,
457 post_finalize=False,
458 ):
458 ):
459 """add a function to generates some files at transaction commit
459 """add a function to generates some files at transaction commit
460
460
461 The `genfunc` argument is a function capable of generating proper
461 The `genfunc` argument is a function capable of generating proper
462 content of each entry in the `filename` tuple.
462 content of each entry in the `filename` tuple.
463
463
464 At transaction close time, `genfunc` will be called with one file
464 At transaction close time, `genfunc` will be called with one file
465 object argument per entries in `filenames`.
465 object argument per entries in `filenames`.
466
466
467 The transaction itself is responsible for the backup, creation and
467 The transaction itself is responsible for the backup, creation and
468 final write of such file.
468 final write of such file.
469
469
470 The `genid` argument is used to ensure the same set of file is only
470 The `genid` argument is used to ensure the same set of file is only
471 generated once. Call to `addfilegenerator` for a `genid` already
471 generated once. Call to `addfilegenerator` for a `genid` already
472 present will overwrite the old entry.
472 present will overwrite the old entry.
473
473
474 The `order` argument may be used to control the order in which multiple
474 The `order` argument may be used to control the order in which multiple
475 generator will be executed.
475 generator will be executed.
476
476
477 The `location` arguments may be used to indicate the files are located
477 The `location` arguments may be used to indicate the files are located
478 outside of the the standard directory for transaction. It should match
478 outside of the the standard directory for transaction. It should match
479 one of the key of the `transaction.vfsmap` dictionary.
479 one of the key of the `transaction.vfsmap` dictionary.
480
480
481 The `post_finalize` argument can be set to `True` for file generation
481 The `post_finalize` argument can be set to `True` for file generation
482 that must be run after the transaction has been finalized.
482 that must be run after the transaction has been finalized.
483 """
483 """
484 # For now, we are unable to do proper backup and restore of custom vfs
484 # For now, we are unable to do proper backup and restore of custom vfs
485 # but for bookmarks that are handled outside this mechanism.
485 # but for bookmarks that are handled outside this mechanism.
486 entry = (order, filenames, genfunc, location, post_finalize)
486 entry = (order, filenames, genfunc, location, post_finalize)
487 self._filegenerators[genid] = entry
487 self._filegenerators[genid] = entry
488
488
489 @active
489 @active
490 def removefilegenerator(self, genid):
490 def removefilegenerator(self, genid):
491 """reverse of addfilegenerator, remove a file generator function"""
491 """reverse of addfilegenerator, remove a file generator function"""
492 if genid in self._filegenerators:
492 if genid in self._filegenerators:
493 del self._filegenerators[genid]
493 del self._filegenerators[genid]
494
494
495 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
495 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
496 # write files registered for generation
496 # write files registered for generation
497 any = False
497 any = False
498
498
499 if group == GEN_GROUP_ALL:
499 if group == GEN_GROUP_ALL:
500 skip_post = skip_pre = False
500 skip_post = skip_pre = False
501 else:
501 else:
502 skip_pre = group == GEN_GROUP_POST_FINALIZE
502 skip_pre = group == GEN_GROUP_POST_FINALIZE
503 skip_post = group == GEN_GROUP_PRE_FINALIZE
503 skip_post = group == GEN_GROUP_PRE_FINALIZE
504
504
505 for id, entry in sorted(self._filegenerators.items()):
505 for id, entry in sorted(self._filegenerators.items()):
506 any = True
506 any = True
507 order, filenames, genfunc, location, post_finalize = entry
507 order, filenames, genfunc, location, post_finalize = entry
508
508
509 # for generation at closing, check if it's before or after finalize
509 # for generation at closing, check if it's before or after finalize
510 if skip_post and post_finalize:
510 if skip_post and post_finalize:
511 continue
511 continue
512 elif skip_pre and not post_finalize:
512 elif skip_pre and not post_finalize:
513 continue
513 continue
514
514
515 vfs = self._vfsmap[location]
515 vfs = self._vfsmap[location]
516 files = []
516 files = []
517 try:
517 try:
518 for name in filenames:
518 for name in filenames:
519 name += suffix
519 name += suffix
520 if suffix:
520 if suffix:
521 self.registertmp(name, location=location)
521 self.registertmp(name, location=location)
522 checkambig = False
522 checkambig = False
523 else:
523 else:
524 self.addbackup(name, location=location)
524 self.addbackup(name, location=location)
525 checkambig = (name, location) in self._checkambigfiles
525 checkambig = (name, location) in self._checkambigfiles
526 files.append(
526 files.append(
527 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
527 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
528 )
528 )
529 genfunc(*files)
529 genfunc(*files)
530 for f in files:
530 for f in files:
531 f.close()
531 f.close()
532 # skip discard() loop since we're sure no open file remains
532 # skip discard() loop since we're sure no open file remains
533 del files[:]
533 del files[:]
534 finally:
534 finally:
535 for f in files:
535 for f in files:
536 f.discard()
536 f.discard()
537 return any
537 return any
538
538
539 @active
539 @active
540 def findoffset(self, file):
540 def findoffset(self, file):
541 if file in self._newfiles:
541 if file in self._newfiles:
542 return 0
542 return 0
543 return self._offsetmap.get(file)
543 return self._offsetmap.get(file)
544
544
545 @active
545 @active
546 def readjournal(self):
546 def readjournal(self):
547 self._file.seek(0)
547 self._file.seek(0)
548 entries = []
548 entries = []
549 for l in self._file.readlines():
549 for l in self._file.readlines():
550 file, troffset = l.split(b'\0')
550 file, troffset = l.split(b'\0')
551 entries.append((file, int(troffset)))
551 entries.append((file, int(troffset)))
552 return entries
552 return entries
553
553
554 @active
554 @active
555 def replace(self, file, offset):
555 def replace(self, file, offset):
556 """
556 """
557 replace can only replace already committed entries
557 replace can only replace already committed entries
558 that are not pending in the queue
558 that are not pending in the queue
559 """
559 """
560 if file in self._newfiles:
560 if file in self._newfiles:
561 if not offset:
561 if not offset:
562 return
562 return
563 self._newfiles.remove(file)
563 self._newfiles.remove(file)
564 self._offsetmap[file] = offset
564 self._offsetmap[file] = offset
565 elif file in self._offsetmap:
565 elif file in self._offsetmap:
566 if not offset:
566 if not offset:
567 del self._offsetmap[file]
567 del self._offsetmap[file]
568 self._newfiles.add(file)
568 self._newfiles.add(file)
569 else:
569 else:
570 self._offsetmap[file] = offset
570 self._offsetmap[file] = offset
571 else:
571 else:
572 raise KeyError(file)
572 raise KeyError(file)
573 self._file.write(b"%s\0%d\n" % (file, offset))
573 self._file.write(b"%s\0%d\n" % (file, offset))
574 self._file.flush()
574 self._file.flush()
575
575
576 @active
576 @active
577 def nest(self, name='<unnamed>'):
577 def nest(self, name='<unnamed>'):
578 self._count += 1
578 self._count += 1
579 self._usages += 1
579 self._usages += 1
580 self._names.append(name)
580 self._names.append(name)
581 return self
581 return self
582
582
583 def release(self):
583 def release(self):
584 if self._count > 0:
584 if self._count > 0:
585 self._usages -= 1
585 self._usages -= 1
586 if self._names:
586 if self._names:
587 self._names.pop()
587 self._names.pop()
588 # if the transaction scopes are left without being closed, fail
588 # if the transaction scopes are left without being closed, fail
589 if self._count > 0 and self._usages == 0:
589 if self._count > 0 and self._usages == 0:
590 self._abort()
590 self._abort()
591
591
592 def running(self):
592 def running(self):
593 return self._count > 0
593 return self._count > 0
594
594
595 def addpending(self, category, callback):
595 def addpending(self, category, callback):
596 """add a callback to be called when the transaction is pending
596 """add a callback to be called when the transaction is pending
597
597
598 The transaction will be given as callback's first argument.
598 The transaction will be given as callback's first argument.
599
599
600 Category is a unique identifier to allow overwriting an old callback
600 Category is a unique identifier to allow overwriting an old callback
601 with a newer callback.
601 with a newer callback.
602 """
602 """
603 self._pendingcallback[category] = callback
603 self._pendingcallback[category] = callback
604
604
605 @active
605 @active
606 def writepending(self):
606 def writepending(self):
607 """write pending file to temporary version
607 """write pending file to temporary version
608
608
609 This is used to allow hooks to view a transaction before commit"""
609 This is used to allow hooks to view a transaction before commit"""
610 categories = sorted(self._pendingcallback)
610 categories = sorted(self._pendingcallback)
611 for cat in categories:
611 for cat in categories:
612 # remove callback since the data will have been flushed
612 # remove callback since the data will have been flushed
613 any = self._pendingcallback.pop(cat)(self)
613 any = self._pendingcallback.pop(cat)(self)
614 self._anypending = self._anypending or any
614 self._anypending = self._anypending or any
615 self._anypending |= self._generatefiles(suffix=b'.pending')
615 self._anypending |= self._generatefiles(suffix=b'.pending')
616 return self._anypending
616 return self._anypending
617
617
618 @active
618 @active
619 def hasfinalize(self, category):
619 def hasfinalize(self, category):
620 """check is a callback already exist for a category"""
620 """check is a callback already exist for a category"""
621 return category in self._finalizecallback
621 return category in self._finalizecallback
622
622
623 @active
623 @active
624 def addfinalize(self, category, callback):
624 def addfinalize(self, category, callback):
625 """add a callback to be called when the transaction is closed
625 """add a callback to be called when the transaction is closed
626
626
627 The transaction will be given as callback's first argument.
627 The transaction will be given as callback's first argument.
628
628
629 Category is a unique identifier to allow overwriting old callbacks with
629 Category is a unique identifier to allow overwriting old callbacks with
630 newer callbacks.
630 newer callbacks.
631 """
631 """
632 self._finalizecallback[category] = callback
632 self._finalizecallback[category] = callback
633
633
634 @active
634 @active
635 def addpostclose(self, category, callback):
635 def addpostclose(self, category, callback):
636 """add or replace a callback to be called after the transaction closed
636 """add or replace a callback to be called after the transaction closed
637
637
638 The transaction will be given as callback's first argument.
638 The transaction will be given as callback's first argument.
639
639
640 Category is a unique identifier to allow overwriting an old callback
640 Category is a unique identifier to allow overwriting an old callback
641 with a newer callback.
641 with a newer callback.
642 """
642 """
643 self._postclosecallback[category] = callback
643 self._postclosecallback[category] = callback
644
644
645 @active
645 @active
646 def getpostclose(self, category):
646 def getpostclose(self, category):
647 """return a postclose callback added before, or None"""
647 """return a postclose callback added before, or None"""
648 return self._postclosecallback.get(category, None)
648 return self._postclosecallback.get(category, None)
649
649
650 @active
650 @active
651 def addabort(self, category, callback):
651 def addabort(self, category, callback):
652 """add a callback to be called when the transaction is aborted.
652 """add a callback to be called when the transaction is aborted.
653
653
654 The transaction will be given as the first argument to the callback.
654 The transaction will be given as the first argument to the callback.
655
655
656 Category is a unique identifier to allow overwriting an old callback
656 Category is a unique identifier to allow overwriting an old callback
657 with a newer callback.
657 with a newer callback.
658 """
658 """
659 self._abortcallback[category] = callback
659 self._abortcallback[category] = callback
660
660
661 @active
661 @active
662 def addvalidator(self, category, callback):
662 def addvalidator(self, category, callback):
663 """adds a callback to be called when validating the transaction.
663 """adds a callback to be called when validating the transaction.
664
664
665 The transaction will be given as the first argument to the callback.
665 The transaction will be given as the first argument to the callback.
666
666
667 callback should raise exception if to abort transaction"""
667 callback should raise exception if to abort transaction"""
668 self._validatecallback[category] = callback
668 self._validatecallback[category] = callback
669
669
    @active
    def close(self):
        '''commit the transaction

        Only the outermost scope performs the actual commit: validators
        run first, then pre-finalize generators, finalize callbacks
        (which may register more finalize callbacks, hence the loop),
        post-finalize generators, backup/journal cleanup, undo data,
        and finally the postclose callbacks.
        '''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # finalize callbacks may themselves add finalize callbacks;
            # keep draining until none are left
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            # inner scope of a nested transaction: nothing more to do
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files (entries with no original file recorded)
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._offsetmap = {}
        self._newfiles = set()
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # second pass: remove the backup copies themselves
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
751
751
752 @active
752 @active
753 def abort(self):
753 def abort(self):
754 """abort the transaction (generally called on error, or when the
754 """abort the transaction (generally called on error, or when the
755 transaction is not explicitly committed before going out of
755 transaction is not explicitly committed before going out of
756 scope)"""
756 scope)"""
757 self._abort()
757 self._abort()
758
758
759 @active
759 @active
760 def add_journal(self, vfs_id, path):
760 def add_journal(self, vfs_id, path):
761 self._journal_files.append((vfs_id, path))
761 self._journal_files.append((vfs_id, path))
762
762
    def _writeundo(self):
        """write transaction data for possible future undo call

        Copies the backup files and renames the journal files into their
        `undo`-prefixed counterparts so a later `hg rollback`/recover can
        use them.  No-op when no undo name is configured.
        """
        if self._undoname is None:
            return
        # drop stale undo files from any previous transaction first
        cleanup_undo_files(
            self._report,
            self._vfsmap,
            undo_prefix=self._undoname,
        )

        def undoname(fn: bytes) -> bytes:
            # map a journal-prefixed path to its undo-prefixed equivalent
            base, name = os.path.split(fn)
            assert name.startswith(self._journal)
            new_name = name.replace(self._journal, self._undoname, 1)
            return os.path.join(base, new_name)

        undo_backup_path = b"%s.backupfiles" % self._undoname
        undobackupfile = self._opener.open(undo_backup_path, b'w')
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup copy exists for this entry
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                u = undoname(b)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
        for vfs, src in self._journal_files:
            dest = undoname(src)
            # if src and dest refer to a same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except FileNotFoundError:  # journal file does not yet exist
                pass
809
809
    def _abort(self):
        """Roll back the transaction (internal entry point).

        Chooses between a quick cleanup (nothing semantic was written)
        and a full, noisy playback-based rollback; always releases the
        transaction, even if the rollback itself fails.
        """
        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        quick = self._can_quick_abort(entries)
        try:
            if not quick:
                self._report(_(b"transaction abort!\n"))
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            if quick:
                self._do_quick_abort(entries)
            else:
                self._do_full_abort(entries)
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
833
833
834 def _can_quick_abort(self, entries):
834 def _can_quick_abort(self, entries):
835 """False if any semantic content have been written on disk
835 """False if any semantic content have been written on disk
836
836
837 True if nothing, except temporary files has been writen on disk."""
837 True if nothing, except temporary files has been writen on disk."""
838 if entries:
838 if entries:
839 return False
839 return False
840 for e in self._backupentries:
840 for e in self._backupentries:
841 if e[1]:
841 if e[1]:
842 return False
842 return False
843 return True
843 return True
844
844
    def _do_quick_abort(self, entries):
        """(Silently) do a quick cleanup (see _can_quick_abort)"""
        assert self._can_quick_abort(entries)
        # only temporary files exist (entries with no original path recorded)
        tmp_files = [e for e in self._backupentries if not e[1]]
        for vfs_id, old_path, tmp_path, xxx in tmp_files:
            vfs = self._vfsmap[vfs_id]
            try:
                vfs.unlink(tmp_path)
            except FileNotFoundError:
                # already gone; nothing to clean up
                pass
        if self._backupjournal:
            self._opener.unlink(self._backupjournal)
        if self._journal:
            self._opener.unlink(self._journal)
859
859
    def _do_full_abort(self, entries):
        """(Noisily) rollback all the change introduced by the transaction"""
        try:
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                entries,
                self._backupentries,
                # unlink=True so files created during the transaction are
                # actually deleted on rollback (see changeset 5c3d0795)
                unlink=True,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        # BaseException: even an interrupt during rollback must be reported
        except BaseException as exc:
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )
879
879
880
880
# Message reported when a backup-journal's version stamp does not match
# the `version` constant of this module.
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)
884
884
885
885
def read_backup_files(report, fp):
    """parse an (already open) backup file and return contained backup entries

    entries are in the form: (location, file, backupfile, xxx)

    :location: the vfs identifier (vfsmap's key)
    :file: original file path (in the vfs)
    :backupfile: path of the backup (in the vfs)
    :cache: a boolean currently always set to False
    """
    lines = fp.readlines()
    backupentries = []
    if lines:
        # first line is the format version stamp
        ver = lines[0][:-1]
        if ver != (b'%d' % version):
            report(BAD_VERSION_MSG)
        else:
            for line in lines[1:]:
                if line:
                    # Shave off the trailing newline
                    line = line[:-1]
                    l, f, b, c = line.split(b'\0')
                    # NOTE(review): `c` is the raw bytes of the cache field,
                    # so bool(c) is True even for b'0' — confirm that this
                    # is intended before relying on the flag's value.
                    backupentries.append((l, f, b, bool(c)))
    return backupentries
910
910
911
911
def rollback(
    opener,
    vfsmap,
    file,
    report,
    checkambigfiles=None,
    skip_journal_pattern=None,
):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.

    `skip_journal_pattern`, when given, filters out backup entries whose
    original path it matches, so those files are not restored.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            # a malformed line is reported but does not stop the rollback
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        with opener.open(backupjournal) as fp:
            backupentries = read_backup_files(report, fp)
    if skip_journal_pattern is not None:
        keep = lambda x: not skip_journal_pattern.match(x[1])
        backupentries = [x for x in backupentries if keep(x)]

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
@@ -1,497 +1,496 b''
1 Test correctness of revlog inline -> non-inline transition
1 Test correctness of revlog inline -> non-inline transition
2 ----------------------------------------------------------
2 ----------------------------------------------------------
3
3
4 We test various file length and naming pattern as this created issue in the
4 We test various file length and naming pattern as this created issue in the
5 past.
5 past.
6
6
7 Helper extension to intercept renames and kill process
7 Helper extension to intercept renames and kill process
8
8
9 $ cat > $TESTTMP/intercept_before_rename.py << EOF
9 $ cat > $TESTTMP/intercept_before_rename.py << EOF
10 > import os
10 > import os
11 > import signal
11 > import signal
12 > from mercurial import extensions, util
12 > from mercurial import extensions, util
13 >
13 >
14 > def extsetup(ui):
14 > def extsetup(ui):
15 > def rename(orig, src, dest, *args, **kwargs):
15 > def rename(orig, src, dest, *args, **kwargs):
16 > path = util.normpath(dest)
16 > path = util.normpath(dest)
17 > if path.endswith(b'data/file.i'):
17 > if path.endswith(b'data/file.i'):
18 > os.kill(os.getpid(), signal.SIGKILL)
18 > os.kill(os.getpid(), signal.SIGKILL)
19 > return orig(src, dest, *args, **kwargs)
19 > return orig(src, dest, *args, **kwargs)
20 > extensions.wrapfunction(util, 'rename', rename)
20 > extensions.wrapfunction(util, 'rename', rename)
21 > EOF
21 > EOF
22
22
23 $ cat > $TESTTMP/intercept_after_rename.py << EOF
23 $ cat > $TESTTMP/intercept_after_rename.py << EOF
24 > import os
24 > import os
25 > import signal
25 > import signal
26 > from mercurial import extensions, util
26 > from mercurial import extensions, util
27 >
27 >
28 > def extsetup(ui):
28 > def extsetup(ui):
29 > def close(orig, *args, **kwargs):
29 > def close(orig, *args, **kwargs):
30 > path = util.normpath(args[0]._atomictempfile__name)
30 > path = util.normpath(args[0]._atomictempfile__name)
31 > r = orig(*args, **kwargs)
31 > r = orig(*args, **kwargs)
32 > if path.endswith(b'/.hg/store/data/file.i'):
32 > if path.endswith(b'/.hg/store/data/file.i'):
33 > os.kill(os.getpid(), signal.SIGKILL)
33 > os.kill(os.getpid(), signal.SIGKILL)
34 > return r
34 > return r
35 > extensions.wrapfunction(util.atomictempfile, 'close', close)
35 > extensions.wrapfunction(util.atomictempfile, 'close', close)
36 > def extsetup(ui):
36 > def extsetup(ui):
37 > def rename(orig, src, dest, *args, **kwargs):
37 > def rename(orig, src, dest, *args, **kwargs):
38 > path = util.normpath(dest)
38 > path = util.normpath(dest)
39 > r = orig(src, dest, *args, **kwargs)
39 > r = orig(src, dest, *args, **kwargs)
40 > if path.endswith(b'data/file.i'):
40 > if path.endswith(b'data/file.i'):
41 > os.kill(os.getpid(), signal.SIGKILL)
41 > os.kill(os.getpid(), signal.SIGKILL)
42 > return r
42 > return r
43 > extensions.wrapfunction(util, 'rename', rename)
43 > extensions.wrapfunction(util, 'rename', rename)
44 > EOF
44 > EOF
45
45
46 $ cat > $TESTTMP/killme.py << EOF
46 $ cat > $TESTTMP/killme.py << EOF
47 > import os
47 > import os
48 > import signal
48 > import signal
49 >
49 >
50 > def killme(ui, repo, hooktype, **kwargs):
50 > def killme(ui, repo, hooktype, **kwargs):
51 > os.kill(os.getpid(), signal.SIGKILL)
51 > os.kill(os.getpid(), signal.SIGKILL)
52 > EOF
52 > EOF
53
53
54 $ cat > $TESTTMP/reader_wait_split.py << EOF
54 $ cat > $TESTTMP/reader_wait_split.py << EOF
55 > import os
55 > import os
56 > import signal
56 > import signal
57 > from mercurial import extensions, revlog, testing
57 > from mercurial import extensions, revlog, testing
58 > def _wait_post_load(orig, self, *args, **kwargs):
58 > def _wait_post_load(orig, self, *args, **kwargs):
59 > wait = b'data/file' in self.radix
59 > wait = b'data/file' in self.radix
60 > if wait:
60 > if wait:
61 > testing.wait_file(b"$TESTTMP/writer-revlog-split")
61 > testing.wait_file(b"$TESTTMP/writer-revlog-split")
62 > r = orig(self, *args, **kwargs)
62 > r = orig(self, *args, **kwargs)
63 > if wait:
63 > if wait:
64 > testing.write_file(b"$TESTTMP/reader-index-read")
64 > testing.write_file(b"$TESTTMP/reader-index-read")
65 > testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
65 > testing.wait_file(b"$TESTTMP/writer-revlog-unsplit")
66 > return r
66 > return r
67 >
67 >
68 > def extsetup(ui):
68 > def extsetup(ui):
69 > extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
69 > extensions.wrapfunction(revlog.revlog, '_loadindex', _wait_post_load)
70 > EOF
70 > EOF
71
71
72 setup a repository for tests
72 setup a repository for tests
73 ----------------------------
73 ----------------------------
74
74
75 $ cat >> $HGRCPATH << EOF
75 $ cat >> $HGRCPATH << EOF
76 > [format]
76 > [format]
77 > revlog-compression=none
77 > revlog-compression=none
78 > EOF
78 > EOF
79
79
80 $ hg init troffset-computation
80 $ hg init troffset-computation
81 $ cd troffset-computation
81 $ cd troffset-computation
82 $ files="
82 $ files="
83 > file
83 > file
84 > Directory_With,Special%Char/Complex_File.babar
84 > Directory_With,Special%Char/Complex_File.babar
85 > foo/bar/babar_celeste/foo
85 > foo/bar/babar_celeste/foo
86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
86 > 1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/1234567890/f
87 > some_dir/sub_dir/foo_bar
87 > some_dir/sub_dir/foo_bar
88 > some_dir/sub_dir/foo_bar.i.s/tutu
88 > some_dir/sub_dir/foo_bar.i.s/tutu
89 > "
89 > "
90 $ for f in $files; do
90 $ for f in $files; do
91 > mkdir -p `dirname $f`
91 > mkdir -p `dirname $f`
92 > done
92 > done
93 $ for f in $files; do
93 $ for f in $files; do
94 > printf '%20d' '1' > $f
94 > printf '%20d' '1' > $f
95 > done
95 > done
96 $ hg commit -Aqma
96 $ hg commit -Aqma
97 $ for f in $files; do
97 $ for f in $files; do
98 > printf '%1024d' '1' > $f
98 > printf '%1024d' '1' > $f
99 > done
99 > done
100 $ hg commit -Aqmb
100 $ hg commit -Aqmb
101 $ for f in $files; do
101 $ for f in $files; do
102 > printf '%20d' '1' > $f
102 > printf '%20d' '1' > $f
103 > done
103 > done
104 $ hg commit -Aqmc
104 $ hg commit -Aqmc
105 $ for f in $files; do
105 $ for f in $files; do
106 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
106 > dd if=/dev/zero of=$f bs=1k count=128 > /dev/null 2>&1
107 > done
107 > done
108 $ hg commit -AqmD --traceback
108 $ hg commit -AqmD --traceback
109 $ for f in $files; do
109 $ for f in $files; do
110 > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
110 > dd if=/dev/zero of=$f bs=1k count=132 > /dev/null 2>&1
111 > done
111 > done
112 $ hg commit -AqmD --traceback
112 $ hg commit -AqmD --traceback
113
113
114 Reference size:
114 Reference size:
115 $ f -s file
115 $ f -s file
116 file: size=135168
116 file: size=135168
117 $ f -s .hg/store/data*/file*
117 $ f -s .hg/store/data*/file*
118 .hg/store/data/file.d: size=267307
118 .hg/store/data/file.d: size=267307
119 .hg/store/data/file.i: size=320
119 .hg/store/data/file.i: size=320
120
120
121 $ cd ..
121 $ cd ..
122
122
123 Test a succesful pull
123 Test a succesful pull
124 =====================
124 =====================
125
125
126 Make sure everything goes though as expect if we don't do any crash
126 Make sure everything goes though as expect if we don't do any crash
127
127
128 $ hg clone --quiet --rev 1 troffset-computation troffset-success
128 $ hg clone --quiet --rev 1 troffset-computation troffset-success
129 $ cd troffset-success
129 $ cd troffset-success
130
130
131 Reference size:
131 Reference size:
132 $ f -s file
132 $ f -s file
133 file: size=1024
133 file: size=1024
134 $ f -s .hg/store/data/file*
134 $ f -s .hg/store/data/file*
135 .hg/store/data/file.i: size=1174
135 .hg/store/data/file.i: size=1174
136
136
137 $ hg pull ../troffset-computation
137 $ hg pull ../troffset-computation
138 pulling from ../troffset-computation
138 pulling from ../troffset-computation
139 searching for changes
139 searching for changes
140 adding changesets
140 adding changesets
141 adding manifests
141 adding manifests
142 adding file changes
142 adding file changes
143 added 3 changesets with 18 changes to 6 files
143 added 3 changesets with 18 changes to 6 files
144 new changesets c99a94cae9b1:64874a3b0160
144 new changesets c99a94cae9b1:64874a3b0160
145 (run 'hg update' to get a working copy)
145 (run 'hg update' to get a working copy)
146
146
147
147
148 The inline revlog has been replaced
148 The inline revlog has been replaced
149
149
150 $ f -s .hg/store/data/file*
150 $ f -s .hg/store/data/file*
151 .hg/store/data/file.d: size=267307
151 .hg/store/data/file.d: size=267307
152 .hg/store/data/file.i: size=320
152 .hg/store/data/file.i: size=320
153
153
154
154
155 $ hg verify -q
155 $ hg verify -q
156 $ cd ..
156 $ cd ..
157
157
158
158
159 Test a hard crash after the file was split but before the transaction was committed
159 Test a hard crash after the file was split but before the transaction was committed
160 ===================================================================================
160 ===================================================================================
161
161
162 Test offset computation to correctly factor in the index entries themselves.
162 Test offset computation to correctly factor in the index entries themselves.
163 Also test that the new data size has the correct size if the transaction is aborted
163 Also test that the new data size has the correct size if the transaction is aborted
164 after the index has been replaced.
164 after the index has been replaced.
165
165
166 Test repo has commits a, b, c, D, where D is large (grows the revlog enough that it
166 Test repo has commits a, b, c, D, where D is large (grows the revlog enough that it
167 transitions to non-inline storage). The clone initially has changes a, b
167 transitions to non-inline storage). The clone initially has changes a, b
168 and will transition to non-inline storage when adding c, D.
168 and will transition to non-inline storage when adding c, D.
169
169
170 If the transaction adding c, D is rolled back, then we don't undo the revlog split,
170 If the transaction adding c, D is rolled back, then we don't undo the revlog split,
171 but truncate the index and the data to remove both c and D.
171 but truncate the index and the data to remove both c and D.
172
172
173
173
174 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
174 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy
175 $ cd troffset-computation-copy
175 $ cd troffset-computation-copy
176
176
177 Reference size:
177 Reference size:
178 $ f -s file
178 $ f -s file
179 file: size=1024
179 file: size=1024
180 $ f -s .hg/store/data*/file*
180 $ f -s .hg/store/data*/file*
181 .hg/store/data/file.i: size=1174
181 .hg/store/data/file.i: size=1174
182
182
183 $ cat > .hg/hgrc <<EOF
183 $ cat > .hg/hgrc <<EOF
184 > [hooks]
184 > [hooks]
185 > pretxnchangegroup = python:$TESTTMP/killme.py:killme
185 > pretxnchangegroup = python:$TESTTMP/killme.py:killme
186 > EOF
186 > EOF
187 #if chg
187 #if chg
188 $ hg pull ../troffset-computation
188 $ hg pull ../troffset-computation
189 pulling from ../troffset-computation
189 pulling from ../troffset-computation
190 [255]
190 [255]
191 #else
191 #else
192 $ hg pull ../troffset-computation
192 $ hg pull ../troffset-computation
193 pulling from ../troffset-computation
193 pulling from ../troffset-computation
194 *Killed* (glob)
194 *Killed* (glob)
195 [137]
195 [137]
196 #endif
196 #endif
197
197
198
198
199 The inline revlog still exist, but a split version exist next to it
199 The inline revlog still exist, but a split version exist next to it
200
200
201 $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
201 $ cat .hg/store/journal | tr '\0' ' ' | grep '\.s'
202 data/some_dir/sub_dir/foo_bar.i.s/tutu.i 1174
202 data/some_dir/sub_dir/foo_bar.i.s/tutu.i 1174
203 data/some_dir/sub_dir/foo_bar.i.s/tutu.d 0
203 data/some_dir/sub_dir/foo_bar.i.s/tutu.d 0
204 $ f -s .hg/store/data*/file*
204 $ f -s .hg/store/data*/file*
205 .hg/store/data-s/file: size=320
205 .hg/store/data-s/file: size=320
206 .hg/store/data/file.d: size=267307
206 .hg/store/data/file.d: size=267307
207 .hg/store/data/file.i: size=132395
207 .hg/store/data/file.i: size=132395
208
208
209
209
210 The first file.i entry should match the "Reference size" above.
210 The first file.i entry should match the "Reference size" above.
211 The first file.d entry is the temporary record during the split,
211 The first file.d entry is the temporary record during the split,
212
212
213 A "temporary file" entry exist for the split index.
213 A "temporary file" entry exist for the split index.
214
214
215 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
215 $ cat .hg/store/journal | tr -s '\000' ' ' | grep data/file
216 data/file.i 1174
216 data/file.i 1174
217 data/file.d 0
217 data/file.d 0
218 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep 'data.*/file'
218 $ cat .hg/store/journal.backupfiles | tr -s '\000' ' ' | tr -s '\00' ' '| grep 'data.*/file'
219 data/file.i data/journal.backup.file.i.bck 0
219 data/file.i data/journal.backup.file.i.bck 0
220 data-s/file 0
220 data-s/file 0
221
221
222 recover is rolling the split back, the fncache is still valid
222 recover is rolling the split back, the fncache is still valid
223
223
224 $ hg recover
224 $ hg recover
225 rolling back interrupted transaction
225 rolling back interrupted transaction
226 (verify step skipped, run `hg verify` to check your repository content)
226 (verify step skipped, run `hg verify` to check your repository content)
227 $ f -s .hg/store/data*/file*
227 $ f -s .hg/store/data*/file*
228 .hg/store/data/file.i: size=1174
228 .hg/store/data/file.i: size=1174
229 $ hg tip
229 $ hg tip
230 changeset: 1:64b04c8dc267
230 changeset: 1:64b04c8dc267
231 tag: tip
231 tag: tip
232 user: test
232 user: test
233 date: Thu Jan 01 00:00:00 1970 +0000
233 date: Thu Jan 01 00:00:00 1970 +0000
234 summary: b
234 summary: b
235
235
236 $ hg verify -q
236 $ hg verify -q
237 $ hg debugrebuildfncache --only-data
237 $ hg debugrebuildfncache --only-data
238 fncache already up to date
238 fncache already up to date
239 $ hg verify -q
239 $ hg verify -q
240 $ cd ..
240 $ cd ..
241
241
242 Test a hard crash right before the index is move into place
242 Test a hard crash right before the index is move into place
243 ===========================================================
243 ===========================================================
244
244
245 Now retry the procedure but intercept the rename of the index and check that
245 Now retry the procedure but intercept the rename of the index and check that
246 the journal does not contain the new index size. This demonstrates the edge case
246 the journal does not contain the new index size. This demonstrates the edge case
247 where the data file is left as garbage.
247 where the data file is left as garbage.
248
248
249 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
249 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy2
250 $ cd troffset-computation-copy2
250 $ cd troffset-computation-copy2
251
251
252 Reference size:
252 Reference size:
253 $ f -s file
253 $ f -s file
254 file: size=1024
254 file: size=1024
255 $ f -s .hg/store/data*/file*
255 $ f -s .hg/store/data*/file*
256 .hg/store/data/file.i: size=1174
256 .hg/store/data/file.i: size=1174
257
257
258 $ cat > .hg/hgrc <<EOF
258 $ cat > .hg/hgrc <<EOF
259 > [extensions]
259 > [extensions]
260 > intercept_rename = $TESTTMP/intercept_before_rename.py
260 > intercept_rename = $TESTTMP/intercept_before_rename.py
261 > EOF
261 > EOF
262 #if chg
262 #if chg
263 $ hg pull ../troffset-computation
263 $ hg pull ../troffset-computation
264 pulling from ../troffset-computation
264 pulling from ../troffset-computation
265 searching for changes
265 searching for changes
266 adding changesets
266 adding changesets
267 adding manifests
267 adding manifests
268 adding file changes
268 adding file changes
269 [255]
269 [255]
270 #else
270 #else
271 $ hg pull ../troffset-computation
271 $ hg pull ../troffset-computation
272 pulling from ../troffset-computation
272 pulling from ../troffset-computation
273 searching for changes
273 searching for changes
274 adding changesets
274 adding changesets
275 adding manifests
275 adding manifests
276 adding file changes
276 adding file changes
277 *Killed* (glob)
277 *Killed* (glob)
278 [137]
278 [137]
279 #endif
279 #endif
280
280
281 The inline revlog still exist, but a split version exist next to it
281 The inline revlog still exist, but a split version exist next to it
282
282
283 $ f -s .hg/store/data*/file*
283 $ f -s .hg/store/data*/file*
284 .hg/store/data-s/file: size=320
284 .hg/store/data-s/file: size=320
285 .hg/store/data/file.d: size=267307
285 .hg/store/data/file.d: size=267307
286 .hg/store/data/file.i: size=132395
286 .hg/store/data/file.i: size=132395
287
287
288 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
288 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
289 data/file.i 1174
289 data/file.i 1174
290 data/file.d 0
290 data/file.d 0
291
291
292 recover is rolling the split back, the fncache is still valid
292 recover is rolling the split back, the fncache is still valid
293
293
294 $ hg recover
294 $ hg recover
295 rolling back interrupted transaction
295 rolling back interrupted transaction
296 (verify step skipped, run `hg verify` to check your repository content)
296 (verify step skipped, run `hg verify` to check your repository content)
297 $ f -s .hg/store/data*/file*
297 $ f -s .hg/store/data*/file*
298 .hg/store/data/file.i: size=1174
298 .hg/store/data/file.i: size=1174
299 $ hg tip
299 $ hg tip
300 changeset: 1:64b04c8dc267
300 changeset: 1:64b04c8dc267
301 tag: tip
301 tag: tip
302 user: test
302 user: test
303 date: Thu Jan 01 00:00:00 1970 +0000
303 date: Thu Jan 01 00:00:00 1970 +0000
304 summary: b
304 summary: b
305
305
306 $ hg verify -q
306 $ hg verify -q
307 $ cd ..
307 $ cd ..
308
308
309 Test a hard crash right after the index is move into place
309 Test a hard crash right after the index is move into place
310 ===========================================================
310 ===========================================================
311
311
312 Now retry the procedure but intercept the rename of the index.
312 Now retry the procedure but intercept the rename of the index.
313
313
314 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
314 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-crash-after-rename
315 $ cd troffset-computation-crash-after-rename
315 $ cd troffset-computation-crash-after-rename
316
316
317 Reference size:
317 Reference size:
318 $ f -s file
318 $ f -s file
319 file: size=1024
319 file: size=1024
320 $ f -s .hg/store/data*/file*
320 $ f -s .hg/store/data*/file*
321 .hg/store/data/file.i: size=1174
321 .hg/store/data/file.i: size=1174
322
322
323 $ cat > .hg/hgrc <<EOF
323 $ cat > .hg/hgrc <<EOF
324 > [extensions]
324 > [extensions]
325 > intercept_rename = $TESTTMP/intercept_after_rename.py
325 > intercept_rename = $TESTTMP/intercept_after_rename.py
326 > EOF
326 > EOF
327 #if chg
327 #if chg
328 $ hg pull ../troffset-computation
328 $ hg pull ../troffset-computation
329 pulling from ../troffset-computation
329 pulling from ../troffset-computation
330 searching for changes
330 searching for changes
331 adding changesets
331 adding changesets
332 adding manifests
332 adding manifests
333 adding file changes
333 adding file changes
334 [255]
334 [255]
335 #else
335 #else
336 $ hg pull ../troffset-computation
336 $ hg pull ../troffset-computation
337 pulling from ../troffset-computation
337 pulling from ../troffset-computation
338 searching for changes
338 searching for changes
339 adding changesets
339 adding changesets
340 adding manifests
340 adding manifests
341 adding file changes
341 adding file changes
342 *Killed* (glob)
342 *Killed* (glob)
343 [137]
343 [137]
344 #endif
344 #endif
345
345
346 The inline revlog was over written on disk
346 The inline revlog was over written on disk
347
347
348 $ f -s .hg/store/data*/file*
348 $ f -s .hg/store/data*/file*
349 .hg/store/data/file.d: size=267307
349 .hg/store/data/file.d: size=267307
350 .hg/store/data/file.i: size=320
350 .hg/store/data/file.i: size=320
351
351
352 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
352 $ cat .hg/store/journal | tr -s '\000' ' ' | grep 'data.*/file'
353 data/file.i 1174
353 data/file.i 1174
354 data/file.d 0
354 data/file.d 0
355
355
356 recover is rolling the split back, the fncache is still valid
356 recover is rolling the split back, the fncache is still valid
357
357
358 $ hg recover
358 $ hg recover
359 rolling back interrupted transaction
359 rolling back interrupted transaction
360 (verify step skipped, run `hg verify` to check your repository content)
360 (verify step skipped, run `hg verify` to check your repository content)
361 $ f -s .hg/store/data*/file*
361 $ f -s .hg/store/data*/file*
362 .hg/store/data/file.i: size=1174
362 .hg/store/data/file.i: size=1174
363 $ hg tip
363 $ hg tip
364 changeset: 1:64b04c8dc267
364 changeset: 1:64b04c8dc267
365 tag: tip
365 tag: tip
366 user: test
366 user: test
367 date: Thu Jan 01 00:00:00 1970 +0000
367 date: Thu Jan 01 00:00:00 1970 +0000
368 summary: b
368 summary: b
369
369
370 $ hg verify -q
370 $ hg verify -q
371 $ cd ..
371 $ cd ..
372
372
373 Have the transaction rollback itself without any hard crash
373 Have the transaction rollback itself without any hard crash
374 ===========================================================
374 ===========================================================
375
375
376
376
377 Repeat the original test but let hg rollback the transaction.
377 Repeat the original test but let hg rollback the transaction.
378
378
379 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
379 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-copy-rb
380 $ cd troffset-computation-copy-rb
380 $ cd troffset-computation-copy-rb
381 $ cat > .hg/hgrc <<EOF
381 $ cat > .hg/hgrc <<EOF
382 > [hooks]
382 > [hooks]
383 > pretxnchangegroup = false
383 > pretxnchangegroup = false
384 > EOF
384 > EOF
385 $ hg pull ../troffset-computation
385 $ hg pull ../troffset-computation
386 pulling from ../troffset-computation
386 pulling from ../troffset-computation
387 searching for changes
387 searching for changes
388 adding changesets
388 adding changesets
389 adding manifests
389 adding manifests
390 adding file changes
390 adding file changes
391 transaction abort!
391 transaction abort!
392 rollback completed
392 rollback completed
393 abort: pretxnchangegroup hook exited with status 1
393 abort: pretxnchangegroup hook exited with status 1
394 [40]
394 [40]
395
395
396 The split was rollback
396 The split was rollback
397
397
398 $ f -s .hg/store/data*/file*
398 $ f -s .hg/store/data*/file*
399 .hg/store/data/file.d: size=0
400 .hg/store/data/file.i: size=1174
399 .hg/store/data/file.i: size=1174
401
400
402
401
403 $ hg tip
402 $ hg tip
404 changeset: 1:64b04c8dc267
403 changeset: 1:64b04c8dc267
405 tag: tip
404 tag: tip
406 user: test
405 user: test
407 date: Thu Jan 01 00:00:00 1970 +0000
406 date: Thu Jan 01 00:00:00 1970 +0000
408 summary: b
407 summary: b
409
408
410 $ hg verify -q
409 $ hg verify -q
411 $ cd ..
410 $ cd ..
412
411
413 Read race
412 Read race
414 =========
413 =========
415
414
416 We check that a client that started reading a revlog (its index) after the
415 We check that a client that started reading a revlog (its index) after the
417 split and end reading (the data) after the rollback should be fine
416 split and end reading (the data) after the rollback should be fine
418
417
419 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
418 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-race
420 $ cd troffset-computation-race
419 $ cd troffset-computation-race
421 $ cat > .hg/hgrc <<EOF
420 $ cat > .hg/hgrc <<EOF
422 > [hooks]
421 > [hooks]
423 > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
422 > pretxnchangegroup=$RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/reader-index-read $TESTTMP/writer-revlog-split
424 > pretxnclose = false
423 > pretxnclose = false
425 > EOF
424 > EOF
426
425
427 start a reader
426 start a reader
428
427
429 $ hg cat --rev 0 file \
428 $ hg cat --rev 0 file \
430 > --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
429 > --config "extensions.wait_read=$TESTTMP/reader_wait_split.py" \
431 > 2> $TESTTMP/reader.stderr \
430 > 2> $TESTTMP/reader.stderr \
432 > > $TESTTMP/reader.stdout &
431 > > $TESTTMP/reader.stdout &
433
432
434 Do a failed pull in //
433 Do a failed pull in //
435
434
436 $ hg pull ../troffset-computation
435 $ hg pull ../troffset-computation
437 pulling from ../troffset-computation
436 pulling from ../troffset-computation
438 searching for changes
437 searching for changes
439 adding changesets
438 adding changesets
440 adding manifests
439 adding manifests
441 adding file changes
440 adding file changes
442 transaction abort!
441 transaction abort!
443 rollback completed
442 rollback completed
444 abort: pretxnclose hook exited with status 1
443 abort: pretxnclose hook exited with status 1
445 [40]
444 [40]
446 $ touch $TESTTMP/writer-revlog-unsplit
445 $ touch $TESTTMP/writer-revlog-unsplit
447 $ wait
446 $ wait
448
447
449 The reader should be fine
448 The reader should be fine
450 $ cat $TESTTMP/reader.stderr
449 $ cat $TESTTMP/reader.stderr
451 $ cat $TESTTMP/reader.stdout
450 $ cat $TESTTMP/reader.stdout
452 1 (no-eol)
451 1 (no-eol)
453
452
454 $ hg verify -q
453 $ hg verify -q
455
454
456 $ cd ..
455 $ cd ..
457
456
458 pending hooks
457 pending hooks
459 =============
458 =============
460
459
461 We checks that hooks properly see the inside of the transaction, while other process don't.
460 We checks that hooks properly see the inside of the transaction, while other process don't.
462
461
463 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
462 $ hg clone --quiet --rev 1 troffset-computation troffset-computation-hooks
464 $ cd troffset-computation-hooks
463 $ cd troffset-computation-hooks
465 $ cat > .hg/hgrc <<EOF
464 $ cat > .hg/hgrc <<EOF
466 > [hooks]
465 > [hooks]
467 > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
466 > pretxnclose.01-echo = hg cat -r 'max(all())' file | f --size
468 > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
467 > pretxnclose.02-echo = $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-done $TESTTMP/hook-tr-ready
469 > pretxnclose.03-abort = false
468 > pretxnclose.03-abort = false
470 > EOF
469 > EOF
471
470
472 $ (
471 $ (
473 > $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
472 > $RUNTESTDIR/testlib/wait-on-file 5 $TESTTMP/hook-tr-ready;\
474 > hg cat -r 'max(all())' file | f --size;\
473 > hg cat -r 'max(all())' file | f --size;\
475 > touch $TESTTMP/hook-done
474 > touch $TESTTMP/hook-done
476 > ) >stdout 2>stderr &
475 > ) >stdout 2>stderr &
477
476
478 $ hg pull ../troffset-computation
477 $ hg pull ../troffset-computation
479 pulling from ../troffset-computation
478 pulling from ../troffset-computation
480 searching for changes
479 searching for changes
481 adding changesets
480 adding changesets
482 adding manifests
481 adding manifests
483 adding file changes
482 adding file changes
484 size=135168
483 size=135168
485 transaction abort!
484 transaction abort!
486 rollback completed
485 rollback completed
487 abort: pretxnclose.03-abort hook exited with status 1
486 abort: pretxnclose.03-abort hook exited with status 1
488 [40]
487 [40]
489
488
490 $ cat stdout
489 $ cat stdout
491 size=1024
490 size=1024
492 $ cat stderr
491 $ cat stderr
493
492
494 $ hg verify -q
493 $ hg verify -q
495
494
496
495
497 $ cd ..
496 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now