##// END OF EJS Templates
transaction: display data about why the transaction failed to rollback...
Boris Feld -
r40614:aca09df3 default
parent child Browse files
Show More
@@ -1,640 +1,645 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import (
25 stringutil,
26 )
24
27
# on-disk journal format version
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}

# generation-group selectors for _generatefiles()
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
38
41
def active(func):
    """Decorator: only allow *func* to run on a live transaction.

    Raises error.Abort when the transaction has already been
    committed or aborted (i.e. its count has dropped to zero).
    """
    def _active(self, *args, **kwds):
        if not self._count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
46
49
47 def _playback(journal, report, opener, vfsmap, entries, backupentries,
50 def _playback(journal, report, opener, vfsmap, entries, backupentries,
48 unlink=True, checkambigfiles=None):
51 unlink=True, checkambigfiles=None):
49 for f, o, _ignore in entries:
52 for f, o, _ignore in entries:
50 if o or not unlink:
53 if o or not unlink:
51 checkambig = checkambigfiles and (f, '') in checkambigfiles
54 checkambig = checkambigfiles and (f, '') in checkambigfiles
52 try:
55 try:
53 fp = opener(f, 'a', checkambig=checkambig)
56 fp = opener(f, 'a', checkambig=checkambig)
54 fp.truncate(o)
57 fp.truncate(o)
55 fp.close()
58 fp.close()
56 except IOError:
59 except IOError:
57 report(_("failed to truncate %s\n") % f)
60 report(_("failed to truncate %s\n") % f)
58 raise
61 raise
59 else:
62 else:
60 try:
63 try:
61 opener.unlink(f)
64 opener.unlink(f)
62 except (IOError, OSError) as inst:
65 except (IOError, OSError) as inst:
63 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
64 raise
67 raise
65
68
66 backupfiles = []
69 backupfiles = []
67 for l, f, b, c in backupentries:
70 for l, f, b, c in backupentries:
68 if l not in vfsmap and c:
71 if l not in vfsmap and c:
69 report("couldn't handle %s: unknown cache location %s\n"
72 report("couldn't handle %s: unknown cache location %s\n"
70 % (b, l))
73 % (b, l))
71 vfs = vfsmap[l]
74 vfs = vfsmap[l]
72 try:
75 try:
73 if f and b:
76 if f and b:
74 filepath = vfs.join(f)
77 filepath = vfs.join(f)
75 backuppath = vfs.join(b)
78 backuppath = vfs.join(b)
76 checkambig = checkambigfiles and (f, l) in checkambigfiles
79 checkambig = checkambigfiles and (f, l) in checkambigfiles
77 try:
80 try:
78 util.copyfile(backuppath, filepath, checkambig=checkambig)
81 util.copyfile(backuppath, filepath, checkambig=checkambig)
79 backupfiles.append(b)
82 backupfiles.append(b)
80 except IOError:
83 except IOError:
81 report(_("failed to recover %s\n") % f)
84 report(_("failed to recover %s\n") % f)
82 else:
85 else:
83 target = f or b
86 target = f or b
84 try:
87 try:
85 vfs.unlink(target)
88 vfs.unlink(target)
86 except (IOError, OSError) as inst:
89 except (IOError, OSError) as inst:
87 if inst.errno != errno.ENOENT:
90 if inst.errno != errno.ENOENT:
88 raise
91 raise
89 except (IOError, OSError, error.Abort) as inst:
92 except (IOError, OSError, error.Abort) as inst:
90 if not c:
93 if not c:
91 raise
94 raise
92
95
93 backuppath = "%s.backupfiles" % journal
96 backuppath = "%s.backupfiles" % journal
94 if opener.exists(backuppath):
97 if opener.exists(backuppath):
95 opener.unlink(backuppath)
98 opener.unlink(backuppath)
96 opener.unlink(journal)
99 opener.unlink(journal)
97 try:
100 try:
98 for f in backupfiles:
101 for f in backupfiles:
99 if opener.exists(f):
102 if opener.exists(f):
100 opener.unlink(f)
103 opener.unlink(f)
101 except (IOError, OSError, error.Abort) as inst:
104 except (IOError, OSError, error.Abort) as inst:
102 # only pure backup file remains, it is sage to ignore any error
105 # only pure backup file remains, it is sage to ignore any error
103 pass
106 pass
104
107
105 class transaction(util.transactional):
108 class transaction(util.transactional):
106 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
109 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
107 after=None, createmode=None, validator=None, releasefn=None,
110 after=None, createmode=None, validator=None, releasefn=None,
108 checkambigfiles=None, name=r'<unnamed>'):
111 checkambigfiles=None, name=r'<unnamed>'):
109 """Begin a new transaction
112 """Begin a new transaction
110
113
111 Begins a new transaction that allows rolling back writes in the event of
114 Begins a new transaction that allows rolling back writes in the event of
112 an exception.
115 an exception.
113
116
114 * `after`: called after the transaction has been committed
117 * `after`: called after the transaction has been committed
115 * `createmode`: the mode of the journal file that will be created
118 * `createmode`: the mode of the journal file that will be created
116 * `releasefn`: called after releasing (with transaction and result)
119 * `releasefn`: called after releasing (with transaction and result)
117
120
118 `checkambigfiles` is a set of (path, vfs-location) tuples,
121 `checkambigfiles` is a set of (path, vfs-location) tuples,
119 which determine whether file stat ambiguity should be avoided
122 which determine whether file stat ambiguity should be avoided
120 for corresponded files.
123 for corresponded files.
121 """
124 """
122 self._count = 1
125 self._count = 1
123 self._usages = 1
126 self._usages = 1
124 self._report = report
127 self._report = report
125 # a vfs to the store content
128 # a vfs to the store content
126 self._opener = opener
129 self._opener = opener
127 # a map to access file in various {location -> vfs}
130 # a map to access file in various {location -> vfs}
128 vfsmap = vfsmap.copy()
131 vfsmap = vfsmap.copy()
129 vfsmap[''] = opener # set default value
132 vfsmap[''] = opener # set default value
130 self._vfsmap = vfsmap
133 self._vfsmap = vfsmap
131 self._after = after
134 self._after = after
132 self._entries = []
135 self._entries = []
133 self._map = {}
136 self._map = {}
134 self._journal = journalname
137 self._journal = journalname
135 self._undoname = undoname
138 self._undoname = undoname
136 self._queue = []
139 self._queue = []
137 # A callback to validate transaction content before closing it.
140 # A callback to validate transaction content before closing it.
138 # should raise exception is anything is wrong.
141 # should raise exception is anything is wrong.
139 # target user is repository hooks.
142 # target user is repository hooks.
140 if validator is None:
143 if validator is None:
141 validator = lambda tr: None
144 validator = lambda tr: None
142 self._validator = validator
145 self._validator = validator
143 # A callback to do something just after releasing transaction.
146 # A callback to do something just after releasing transaction.
144 if releasefn is None:
147 if releasefn is None:
145 releasefn = lambda tr, success: None
148 releasefn = lambda tr, success: None
146 self._releasefn = releasefn
149 self._releasefn = releasefn
147
150
148 self._checkambigfiles = set()
151 self._checkambigfiles = set()
149 if checkambigfiles:
152 if checkambigfiles:
150 self._checkambigfiles.update(checkambigfiles)
153 self._checkambigfiles.update(checkambigfiles)
151
154
152 self._names = [name]
155 self._names = [name]
153
156
154 # A dict dedicated to precisely tracking the changes introduced in the
157 # A dict dedicated to precisely tracking the changes introduced in the
155 # transaction.
158 # transaction.
156 self.changes = {}
159 self.changes = {}
157
160
158 # a dict of arguments to be passed to hooks
161 # a dict of arguments to be passed to hooks
159 self.hookargs = {}
162 self.hookargs = {}
160 self._file = opener.open(self._journal, "w")
163 self._file = opener.open(self._journal, "w")
161
164
162 # a list of ('location', 'path', 'backuppath', cache) entries.
165 # a list of ('location', 'path', 'backuppath', cache) entries.
163 # - if 'backuppath' is empty, no file existed at backup time
166 # - if 'backuppath' is empty, no file existed at backup time
164 # - if 'path' is empty, this is a temporary transaction file
167 # - if 'path' is empty, this is a temporary transaction file
165 # - if 'location' is not empty, the path is outside main opener reach.
168 # - if 'location' is not empty, the path is outside main opener reach.
166 # use 'location' value as a key in a vfsmap to find the right 'vfs'
169 # use 'location' value as a key in a vfsmap to find the right 'vfs'
167 # (cache is currently unused)
170 # (cache is currently unused)
168 self._backupentries = []
171 self._backupentries = []
169 self._backupmap = {}
172 self._backupmap = {}
170 self._backupjournal = "%s.backupfiles" % self._journal
173 self._backupjournal = "%s.backupfiles" % self._journal
171 self._backupsfile = opener.open(self._backupjournal, 'w')
174 self._backupsfile = opener.open(self._backupjournal, 'w')
172 self._backupsfile.write('%d\n' % version)
175 self._backupsfile.write('%d\n' % version)
173
176
174 if createmode is not None:
177 if createmode is not None:
175 opener.chmod(self._journal, createmode & 0o666)
178 opener.chmod(self._journal, createmode & 0o666)
176 opener.chmod(self._backupjournal, createmode & 0o666)
179 opener.chmod(self._backupjournal, createmode & 0o666)
177
180
178 # hold file generations to be performed on commit
181 # hold file generations to be performed on commit
179 self._filegenerators = {}
182 self._filegenerators = {}
180 # hold callback to write pending data for hooks
183 # hold callback to write pending data for hooks
181 self._pendingcallback = {}
184 self._pendingcallback = {}
182 # True is any pending data have been written ever
185 # True is any pending data have been written ever
183 self._anypending = False
186 self._anypending = False
184 # holds callback to call when writing the transaction
187 # holds callback to call when writing the transaction
185 self._finalizecallback = {}
188 self._finalizecallback = {}
186 # hold callback for post transaction close
189 # hold callback for post transaction close
187 self._postclosecallback = {}
190 self._postclosecallback = {}
188 # holds callbacks to call during abort
191 # holds callbacks to call during abort
189 self._abortcallback = {}
192 self._abortcallback = {}
190
193
191 def __repr__(self):
194 def __repr__(self):
192 name = r'/'.join(self._names)
195 name = r'/'.join(self._names)
193 return (r'<transaction name=%s, count=%d, usages=%d>' %
196 return (r'<transaction name=%s, count=%d, usages=%d>' %
194 (name, self._count, self._usages))
197 (name, self._count, self._usages))
195
198
196 def __del__(self):
199 def __del__(self):
197 if self._journal:
200 if self._journal:
198 self._abort()
201 self._abort()
199
202
200 @active
203 @active
201 def startgroup(self):
204 def startgroup(self):
202 """delay registration of file entry
205 """delay registration of file entry
203
206
204 This is used by strip to delay vision of strip offset. The transaction
207 This is used by strip to delay vision of strip offset. The transaction
205 sees either none or all of the strip actions to be done."""
208 sees either none or all of the strip actions to be done."""
206 self._queue.append([])
209 self._queue.append([])
207
210
208 @active
211 @active
209 def endgroup(self):
212 def endgroup(self):
210 """apply delayed registration of file entry.
213 """apply delayed registration of file entry.
211
214
212 This is used by strip to delay vision of strip offset. The transaction
215 This is used by strip to delay vision of strip offset. The transaction
213 sees either none or all of the strip actions to be done."""
216 sees either none or all of the strip actions to be done."""
214 q = self._queue.pop()
217 q = self._queue.pop()
215 for f, o, data in q:
218 for f, o, data in q:
216 self._addentry(f, o, data)
219 self._addentry(f, o, data)
217
220
218 @active
221 @active
219 def add(self, file, offset, data=None):
222 def add(self, file, offset, data=None):
220 """record the state of an append-only file before update"""
223 """record the state of an append-only file before update"""
221 if file in self._map or file in self._backupmap:
224 if file in self._map or file in self._backupmap:
222 return
225 return
223 if self._queue:
226 if self._queue:
224 self._queue[-1].append((file, offset, data))
227 self._queue[-1].append((file, offset, data))
225 return
228 return
226
229
227 self._addentry(file, offset, data)
230 self._addentry(file, offset, data)
228
231
229 def _addentry(self, file, offset, data):
232 def _addentry(self, file, offset, data):
230 """add a append-only entry to memory and on-disk state"""
233 """add a append-only entry to memory and on-disk state"""
231 if file in self._map or file in self._backupmap:
234 if file in self._map or file in self._backupmap:
232 return
235 return
233 self._entries.append((file, offset, data))
236 self._entries.append((file, offset, data))
234 self._map[file] = len(self._entries) - 1
237 self._map[file] = len(self._entries) - 1
235 # add enough data to the journal to do the truncate
238 # add enough data to the journal to do the truncate
236 self._file.write("%s\0%d\n" % (file, offset))
239 self._file.write("%s\0%d\n" % (file, offset))
237 self._file.flush()
240 self._file.flush()
238
241
239 @active
242 @active
240 def addbackup(self, file, hardlink=True, location=''):
243 def addbackup(self, file, hardlink=True, location=''):
241 """Adds a backup of the file to the transaction
244 """Adds a backup of the file to the transaction
242
245
243 Calling addbackup() creates a hardlink backup of the specified file
246 Calling addbackup() creates a hardlink backup of the specified file
244 that is used to recover the file in the event of the transaction
247 that is used to recover the file in the event of the transaction
245 aborting.
248 aborting.
246
249
247 * `file`: the file path, relative to .hg/store
250 * `file`: the file path, relative to .hg/store
248 * `hardlink`: use a hardlink to quickly create the backup
251 * `hardlink`: use a hardlink to quickly create the backup
249 """
252 """
250 if self._queue:
253 if self._queue:
251 msg = 'cannot use transaction.addbackup inside "group"'
254 msg = 'cannot use transaction.addbackup inside "group"'
252 raise error.ProgrammingError(msg)
255 raise error.ProgrammingError(msg)
253
256
254 if file in self._map or file in self._backupmap:
257 if file in self._map or file in self._backupmap:
255 return
258 return
256 vfs = self._vfsmap[location]
259 vfs = self._vfsmap[location]
257 dirname, filename = vfs.split(file)
260 dirname, filename = vfs.split(file)
258 backupfilename = "%s.backup.%s" % (self._journal, filename)
261 backupfilename = "%s.backup.%s" % (self._journal, filename)
259 backupfile = vfs.reljoin(dirname, backupfilename)
262 backupfile = vfs.reljoin(dirname, backupfilename)
260 if vfs.exists(file):
263 if vfs.exists(file):
261 filepath = vfs.join(file)
264 filepath = vfs.join(file)
262 backuppath = vfs.join(backupfile)
265 backuppath = vfs.join(backupfile)
263 util.copyfile(filepath, backuppath, hardlink=hardlink)
266 util.copyfile(filepath, backuppath, hardlink=hardlink)
264 else:
267 else:
265 backupfile = ''
268 backupfile = ''
266
269
267 self._addbackupentry((location, file, backupfile, False))
270 self._addbackupentry((location, file, backupfile, False))
268
271
269 def _addbackupentry(self, entry):
272 def _addbackupentry(self, entry):
270 """register a new backup entry and write it to disk"""
273 """register a new backup entry and write it to disk"""
271 self._backupentries.append(entry)
274 self._backupentries.append(entry)
272 self._backupmap[entry[1]] = len(self._backupentries) - 1
275 self._backupmap[entry[1]] = len(self._backupentries) - 1
273 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
276 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
274 self._backupsfile.flush()
277 self._backupsfile.flush()
275
278
276 @active
279 @active
277 def registertmp(self, tmpfile, location=''):
280 def registertmp(self, tmpfile, location=''):
278 """register a temporary transaction file
281 """register a temporary transaction file
279
282
280 Such files will be deleted when the transaction exits (on both
283 Such files will be deleted when the transaction exits (on both
281 failure and success).
284 failure and success).
282 """
285 """
283 self._addbackupentry((location, '', tmpfile, False))
286 self._addbackupentry((location, '', tmpfile, False))
284
287
285 @active
288 @active
286 def addfilegenerator(self, genid, filenames, genfunc, order=0,
289 def addfilegenerator(self, genid, filenames, genfunc, order=0,
287 location=''):
290 location=''):
288 """add a function to generates some files at transaction commit
291 """add a function to generates some files at transaction commit
289
292
290 The `genfunc` argument is a function capable of generating proper
293 The `genfunc` argument is a function capable of generating proper
291 content of each entry in the `filename` tuple.
294 content of each entry in the `filename` tuple.
292
295
293 At transaction close time, `genfunc` will be called with one file
296 At transaction close time, `genfunc` will be called with one file
294 object argument per entries in `filenames`.
297 object argument per entries in `filenames`.
295
298
296 The transaction itself is responsible for the backup, creation and
299 The transaction itself is responsible for the backup, creation and
297 final write of such file.
300 final write of such file.
298
301
299 The `genid` argument is used to ensure the same set of file is only
302 The `genid` argument is used to ensure the same set of file is only
300 generated once. Call to `addfilegenerator` for a `genid` already
303 generated once. Call to `addfilegenerator` for a `genid` already
301 present will overwrite the old entry.
304 present will overwrite the old entry.
302
305
303 The `order` argument may be used to control the order in which multiple
306 The `order` argument may be used to control the order in which multiple
304 generator will be executed.
307 generator will be executed.
305
308
306 The `location` arguments may be used to indicate the files are located
309 The `location` arguments may be used to indicate the files are located
307 outside of the the standard directory for transaction. It should match
310 outside of the the standard directory for transaction. It should match
308 one of the key of the `transaction.vfsmap` dictionary.
311 one of the key of the `transaction.vfsmap` dictionary.
309 """
312 """
310 # For now, we are unable to do proper backup and restore of custom vfs
313 # For now, we are unable to do proper backup and restore of custom vfs
311 # but for bookmarks that are handled outside this mechanism.
314 # but for bookmarks that are handled outside this mechanism.
312 self._filegenerators[genid] = (order, filenames, genfunc, location)
315 self._filegenerators[genid] = (order, filenames, genfunc, location)
313
316
314 @active
317 @active
315 def removefilegenerator(self, genid):
318 def removefilegenerator(self, genid):
316 """reverse of addfilegenerator, remove a file generator function"""
319 """reverse of addfilegenerator, remove a file generator function"""
317 if genid in self._filegenerators:
320 if genid in self._filegenerators:
318 del self._filegenerators[genid]
321 del self._filegenerators[genid]
319
322
320 def _generatefiles(self, suffix='', group=gengroupall):
323 def _generatefiles(self, suffix='', group=gengroupall):
321 # write files registered for generation
324 # write files registered for generation
322 any = False
325 any = False
323 for id, entry in sorted(self._filegenerators.iteritems()):
326 for id, entry in sorted(self._filegenerators.iteritems()):
324 any = True
327 any = True
325 order, filenames, genfunc, location = entry
328 order, filenames, genfunc, location = entry
326
329
327 # for generation at closing, check if it's before or after finalize
330 # for generation at closing, check if it's before or after finalize
328 postfinalize = group == gengrouppostfinalize
331 postfinalize = group == gengrouppostfinalize
329 if (group != gengroupall and
332 if (group != gengroupall and
330 (id in postfinalizegenerators) != (postfinalize)):
333 (id in postfinalizegenerators) != (postfinalize)):
331 continue
334 continue
332
335
333 vfs = self._vfsmap[location]
336 vfs = self._vfsmap[location]
334 files = []
337 files = []
335 try:
338 try:
336 for name in filenames:
339 for name in filenames:
337 name += suffix
340 name += suffix
338 if suffix:
341 if suffix:
339 self.registertmp(name, location=location)
342 self.registertmp(name, location=location)
340 checkambig = False
343 checkambig = False
341 else:
344 else:
342 self.addbackup(name, location=location)
345 self.addbackup(name, location=location)
343 checkambig = (name, location) in self._checkambigfiles
346 checkambig = (name, location) in self._checkambigfiles
344 files.append(vfs(name, 'w', atomictemp=True,
347 files.append(vfs(name, 'w', atomictemp=True,
345 checkambig=checkambig))
348 checkambig=checkambig))
346 genfunc(*files)
349 genfunc(*files)
347 finally:
350 finally:
348 for f in files:
351 for f in files:
349 f.close()
352 f.close()
350 return any
353 return any
351
354
352 @active
355 @active
353 def find(self, file):
356 def find(self, file):
354 if file in self._map:
357 if file in self._map:
355 return self._entries[self._map[file]]
358 return self._entries[self._map[file]]
356 if file in self._backupmap:
359 if file in self._backupmap:
357 return self._backupentries[self._backupmap[file]]
360 return self._backupentries[self._backupmap[file]]
358 return None
361 return None
359
362
360 @active
363 @active
361 def replace(self, file, offset, data=None):
364 def replace(self, file, offset, data=None):
362 '''
365 '''
363 replace can only replace already committed entries
366 replace can only replace already committed entries
364 that are not pending in the queue
367 that are not pending in the queue
365 '''
368 '''
366
369
367 if file not in self._map:
370 if file not in self._map:
368 raise KeyError(file)
371 raise KeyError(file)
369 index = self._map[file]
372 index = self._map[file]
370 self._entries[index] = (file, offset, data)
373 self._entries[index] = (file, offset, data)
371 self._file.write("%s\0%d\n" % (file, offset))
374 self._file.write("%s\0%d\n" % (file, offset))
372 self._file.flush()
375 self._file.flush()
373
376
374 @active
377 @active
375 def nest(self, name=r'<unnamed>'):
378 def nest(self, name=r'<unnamed>'):
376 self._count += 1
379 self._count += 1
377 self._usages += 1
380 self._usages += 1
378 self._names.append(name)
381 self._names.append(name)
379 return self
382 return self
380
383
381 def release(self):
384 def release(self):
382 if self._count > 0:
385 if self._count > 0:
383 self._usages -= 1
386 self._usages -= 1
384 if self._names:
387 if self._names:
385 self._names.pop()
388 self._names.pop()
386 # if the transaction scopes are left without being closed, fail
389 # if the transaction scopes are left without being closed, fail
387 if self._count > 0 and self._usages == 0:
390 if self._count > 0 and self._usages == 0:
388 self._abort()
391 self._abort()
389
392
390 def running(self):
393 def running(self):
391 return self._count > 0
394 return self._count > 0
392
395
393 def addpending(self, category, callback):
396 def addpending(self, category, callback):
394 """add a callback to be called when the transaction is pending
397 """add a callback to be called when the transaction is pending
395
398
396 The transaction will be given as callback's first argument.
399 The transaction will be given as callback's first argument.
397
400
398 Category is a unique identifier to allow overwriting an old callback
401 Category is a unique identifier to allow overwriting an old callback
399 with a newer callback.
402 with a newer callback.
400 """
403 """
401 self._pendingcallback[category] = callback
404 self._pendingcallback[category] = callback
402
405
403 @active
406 @active
404 def writepending(self):
407 def writepending(self):
405 '''write pending file to temporary version
408 '''write pending file to temporary version
406
409
407 This is used to allow hooks to view a transaction before commit'''
410 This is used to allow hooks to view a transaction before commit'''
408 categories = sorted(self._pendingcallback)
411 categories = sorted(self._pendingcallback)
409 for cat in categories:
412 for cat in categories:
410 # remove callback since the data will have been flushed
413 # remove callback since the data will have been flushed
411 any = self._pendingcallback.pop(cat)(self)
414 any = self._pendingcallback.pop(cat)(self)
412 self._anypending = self._anypending or any
415 self._anypending = self._anypending or any
413 self._anypending |= self._generatefiles(suffix='.pending')
416 self._anypending |= self._generatefiles(suffix='.pending')
414 return self._anypending
417 return self._anypending
415
418
416 @active
419 @active
417 def addfinalize(self, category, callback):
420 def addfinalize(self, category, callback):
418 """add a callback to be called when the transaction is closed
421 """add a callback to be called when the transaction is closed
419
422
420 The transaction will be given as callback's first argument.
423 The transaction will be given as callback's first argument.
421
424
422 Category is a unique identifier to allow overwriting old callbacks with
425 Category is a unique identifier to allow overwriting old callbacks with
423 newer callbacks.
426 newer callbacks.
424 """
427 """
425 self._finalizecallback[category] = callback
428 self._finalizecallback[category] = callback
426
429
427 @active
430 @active
428 def addpostclose(self, category, callback):
431 def addpostclose(self, category, callback):
429 """add or replace a callback to be called after the transaction closed
432 """add or replace a callback to be called after the transaction closed
430
433
431 The transaction will be given as callback's first argument.
434 The transaction will be given as callback's first argument.
432
435
433 Category is a unique identifier to allow overwriting an old callback
436 Category is a unique identifier to allow overwriting an old callback
434 with a newer callback.
437 with a newer callback.
435 """
438 """
436 self._postclosecallback[category] = callback
439 self._postclosecallback[category] = callback
437
440
438 @active
441 @active
439 def getpostclose(self, category):
442 def getpostclose(self, category):
440 """return a postclose callback added before, or None"""
443 """return a postclose callback added before, or None"""
441 return self._postclosecallback.get(category, None)
444 return self._postclosecallback.get(category, None)
442
445
443 @active
446 @active
444 def addabort(self, category, callback):
447 def addabort(self, category, callback):
445 """add a callback to be called when the transaction is aborted.
448 """add a callback to be called when the transaction is aborted.
446
449
447 The transaction will be given as the first argument to the callback.
450 The transaction will be given as the first argument to the callback.
448
451
449 Category is a unique identifier to allow overwriting an old callback
452 Category is a unique identifier to allow overwriting an old callback
450 with a newer callback.
453 with a newer callback.
451 """
454 """
452 self._abortcallback[category] = callback
455 self._abortcallback[category] = callback
453
456
454 @active
457 @active
455 def close(self):
458 def close(self):
456 '''commit the transaction'''
459 '''commit the transaction'''
457 if self._count == 1:
460 if self._count == 1:
458 self._validator(self) # will raise exception if needed
461 self._validator(self) # will raise exception if needed
459 self._validator = None # Help prevent cycles.
462 self._validator = None # Help prevent cycles.
460 self._generatefiles(group=gengroupprefinalize)
463 self._generatefiles(group=gengroupprefinalize)
461 categories = sorted(self._finalizecallback)
464 categories = sorted(self._finalizecallback)
462 for cat in categories:
465 for cat in categories:
463 self._finalizecallback[cat](self)
466 self._finalizecallback[cat](self)
464 # Prevent double usage and help clear cycles.
467 # Prevent double usage and help clear cycles.
465 self._finalizecallback = None
468 self._finalizecallback = None
466 self._generatefiles(group=gengrouppostfinalize)
469 self._generatefiles(group=gengrouppostfinalize)
467
470
468 self._count -= 1
471 self._count -= 1
469 if self._count != 0:
472 if self._count != 0:
470 return
473 return
471 self._file.close()
474 self._file.close()
472 self._backupsfile.close()
475 self._backupsfile.close()
473 # cleanup temporary files
476 # cleanup temporary files
474 for l, f, b, c in self._backupentries:
477 for l, f, b, c in self._backupentries:
475 if l not in self._vfsmap and c:
478 if l not in self._vfsmap and c:
476 self._report("couldn't remove %s: unknown cache location %s\n"
479 self._report("couldn't remove %s: unknown cache location %s\n"
477 % (b, l))
480 % (b, l))
478 continue
481 continue
479 vfs = self._vfsmap[l]
482 vfs = self._vfsmap[l]
480 if not f and b and vfs.exists(b):
483 if not f and b and vfs.exists(b):
481 try:
484 try:
482 vfs.unlink(b)
485 vfs.unlink(b)
483 except (IOError, OSError, error.Abort) as inst:
486 except (IOError, OSError, error.Abort) as inst:
484 if not c:
487 if not c:
485 raise
488 raise
486 # Abort may be raise by read only opener
489 # Abort may be raise by read only opener
487 self._report("couldn't remove %s: %s\n"
490 self._report("couldn't remove %s: %s\n"
488 % (vfs.join(b), inst))
491 % (vfs.join(b), inst))
489 self._entries = []
492 self._entries = []
490 self._writeundo()
493 self._writeundo()
491 if self._after:
494 if self._after:
492 self._after()
495 self._after()
493 self._after = None # Help prevent cycles.
496 self._after = None # Help prevent cycles.
494 if self._opener.isfile(self._backupjournal):
497 if self._opener.isfile(self._backupjournal):
495 self._opener.unlink(self._backupjournal)
498 self._opener.unlink(self._backupjournal)
496 if self._opener.isfile(self._journal):
499 if self._opener.isfile(self._journal):
497 self._opener.unlink(self._journal)
500 self._opener.unlink(self._journal)
498 for l, _f, b, c in self._backupentries:
501 for l, _f, b, c in self._backupentries:
499 if l not in self._vfsmap and c:
502 if l not in self._vfsmap and c:
500 self._report("couldn't remove %s: unknown cache location"
503 self._report("couldn't remove %s: unknown cache location"
501 "%s\n" % (b, l))
504 "%s\n" % (b, l))
502 continue
505 continue
503 vfs = self._vfsmap[l]
506 vfs = self._vfsmap[l]
504 if b and vfs.exists(b):
507 if b and vfs.exists(b):
505 try:
508 try:
506 vfs.unlink(b)
509 vfs.unlink(b)
507 except (IOError, OSError, error.Abort) as inst:
510 except (IOError, OSError, error.Abort) as inst:
508 if not c:
511 if not c:
509 raise
512 raise
510 # Abort may be raise by read only opener
513 # Abort may be raise by read only opener
511 self._report("couldn't remove %s: %s\n"
514 self._report("couldn't remove %s: %s\n"
512 % (vfs.join(b), inst))
515 % (vfs.join(b), inst))
513 self._backupentries = []
516 self._backupentries = []
514 self._journal = None
517 self._journal = None
515
518
516 self._releasefn(self, True) # notify success of closing transaction
519 self._releasefn(self, True) # notify success of closing transaction
517 self._releasefn = None # Help prevent cycles.
520 self._releasefn = None # Help prevent cycles.
518
521
519 # run post close action
522 # run post close action
520 categories = sorted(self._postclosecallback)
523 categories = sorted(self._postclosecallback)
521 for cat in categories:
524 for cat in categories:
522 self._postclosecallback[cat](self)
525 self._postclosecallback[cat](self)
523 # Prevent double usage and help clear cycles.
526 # Prevent double usage and help clear cycles.
524 self._postclosecallback = None
527 self._postclosecallback = None
525
528
526 @active
529 @active
527 def abort(self):
530 def abort(self):
528 '''abort the transaction (generally called on error, or when the
531 '''abort the transaction (generally called on error, or when the
529 transaction is not explicitly committed before going out of
532 transaction is not explicitly committed before going out of
530 scope)'''
533 scope)'''
531 self._abort()
534 self._abort()
532
535
533 def _writeundo(self):
536 def _writeundo(self):
534 """write transaction data for possible future undo call"""
537 """write transaction data for possible future undo call"""
535 if self._undoname is None:
538 if self._undoname is None:
536 return
539 return
537 undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
540 undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
538 'w')
541 'w')
539 undobackupfile.write('%d\n' % version)
542 undobackupfile.write('%d\n' % version)
540 for l, f, b, c in self._backupentries:
543 for l, f, b, c in self._backupentries:
541 if not f: # temporary file
544 if not f: # temporary file
542 continue
545 continue
543 if not b:
546 if not b:
544 u = ''
547 u = ''
545 else:
548 else:
546 if l not in self._vfsmap and c:
549 if l not in self._vfsmap and c:
547 self._report("couldn't remove %s: unknown cache location"
550 self._report("couldn't remove %s: unknown cache location"
548 "%s\n" % (b, l))
551 "%s\n" % (b, l))
549 continue
552 continue
550 vfs = self._vfsmap[l]
553 vfs = self._vfsmap[l]
551 base, name = vfs.split(b)
554 base, name = vfs.split(b)
552 assert name.startswith(self._journal), name
555 assert name.startswith(self._journal), name
553 uname = name.replace(self._journal, self._undoname, 1)
556 uname = name.replace(self._journal, self._undoname, 1)
554 u = vfs.reljoin(base, uname)
557 u = vfs.reljoin(base, uname)
555 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
558 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
556 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
559 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
557 undobackupfile.close()
560 undobackupfile.close()
558
561
559
562
560 def _abort(self):
563 def _abort(self):
561 self._count = 0
564 self._count = 0
562 self._usages = 0
565 self._usages = 0
563 self._file.close()
566 self._file.close()
564 self._backupsfile.close()
567 self._backupsfile.close()
565
568
566 try:
569 try:
567 if not self._entries and not self._backupentries:
570 if not self._entries and not self._backupentries:
568 if self._backupjournal:
571 if self._backupjournal:
569 self._opener.unlink(self._backupjournal)
572 self._opener.unlink(self._backupjournal)
570 if self._journal:
573 if self._journal:
571 self._opener.unlink(self._journal)
574 self._opener.unlink(self._journal)
572 return
575 return
573
576
574 self._report(_("transaction abort!\n"))
577 self._report(_("transaction abort!\n"))
575
578
576 try:
579 try:
577 for cat in sorted(self._abortcallback):
580 for cat in sorted(self._abortcallback):
578 self._abortcallback[cat](self)
581 self._abortcallback[cat](self)
579 # Prevent double usage and help clear cycles.
582 # Prevent double usage and help clear cycles.
580 self._abortcallback = None
583 self._abortcallback = None
581 _playback(self._journal, self._report, self._opener,
584 _playback(self._journal, self._report, self._opener,
582 self._vfsmap, self._entries, self._backupentries,
585 self._vfsmap, self._entries, self._backupentries,
583 False, checkambigfiles=self._checkambigfiles)
586 False, checkambigfiles=self._checkambigfiles)
584 self._report(_("rollback completed\n"))
587 self._report(_("rollback completed\n"))
585 except BaseException:
588 except BaseException as exc:
586 self._report(_("rollback failed - please run hg recover\n"))
589 self._report(_("rollback failed - please run hg recover\n"))
590 self._report(_("(failure reason: %s)\n")
591 % stringutil.forcebytestr(exc))
587 finally:
592 finally:
588 self._journal = None
593 self._journal = None
589 self._releasefn(self, False) # notify failure of transaction
594 self._releasefn(self, False) # notify failure of transaction
590 self._releasefn = None # Help prevent cycles.
595 self._releasefn = None # Help prevent cycles.
591
596
592 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
597 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
593 """Rolls back the transaction contained in the given file
598 """Rolls back the transaction contained in the given file
594
599
595 Reads the entries in the specified file, and the corresponding
600 Reads the entries in the specified file, and the corresponding
596 '*.backupfiles' file, to recover from an incomplete transaction.
601 '*.backupfiles' file, to recover from an incomplete transaction.
597
602
598 * `file`: a file containing a list of entries, specifying where
603 * `file`: a file containing a list of entries, specifying where
599 to truncate each file. The file should contain a list of
604 to truncate each file. The file should contain a list of
600 file\0offset pairs, delimited by newlines. The corresponding
605 file\0offset pairs, delimited by newlines. The corresponding
601 '*.backupfiles' file should contain a list of file\0backupfile
606 '*.backupfiles' file should contain a list of file\0backupfile
602 pairs, delimited by \0.
607 pairs, delimited by \0.
603
608
604 `checkambigfiles` is a set of (path, vfs-location) tuples,
609 `checkambigfiles` is a set of (path, vfs-location) tuples,
605 which determine whether file stat ambiguity should be avoided at
610 which determine whether file stat ambiguity should be avoided at
606 restoring corresponded files.
611 restoring corresponded files.
607 """
612 """
608 entries = []
613 entries = []
609 backupentries = []
614 backupentries = []
610
615
611 fp = opener.open(file)
616 fp = opener.open(file)
612 lines = fp.readlines()
617 lines = fp.readlines()
613 fp.close()
618 fp.close()
614 for l in lines:
619 for l in lines:
615 try:
620 try:
616 f, o = l.split('\0')
621 f, o = l.split('\0')
617 entries.append((f, int(o), None))
622 entries.append((f, int(o), None))
618 except ValueError:
623 except ValueError:
619 report(
624 report(
620 _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
625 _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
621
626
622 backupjournal = "%s.backupfiles" % file
627 backupjournal = "%s.backupfiles" % file
623 if opener.exists(backupjournal):
628 if opener.exists(backupjournal):
624 fp = opener.open(backupjournal)
629 fp = opener.open(backupjournal)
625 lines = fp.readlines()
630 lines = fp.readlines()
626 if lines:
631 if lines:
627 ver = lines[0][:-1]
632 ver = lines[0][:-1]
628 if ver == (b'%d' % version):
633 if ver == (b'%d' % version):
629 for line in lines[1:]:
634 for line in lines[1:]:
630 if line:
635 if line:
631 # Shave off the trailing newline
636 # Shave off the trailing newline
632 line = line[:-1]
637 line = line[:-1]
633 l, f, b, c = line.split('\0')
638 l, f, b, c = line.split('\0')
634 backupentries.append((l, f, b, bool(c)))
639 backupentries.append((l, f, b, bool(c)))
635 else:
640 else:
636 report(_("journal was created by a different version of "
641 report(_("journal was created by a different version of "
637 "Mercurial\n"))
642 "Mercurial\n"))
638
643
639 _playback(file, report, opener, vfsmap, entries, backupentries,
644 _playback(file, report, opener, vfsmap, entries, backupentries,
640 checkambigfiles=checkambigfiles)
645 checkambigfiles=checkambigfiles)
@@ -1,139 +1,141 b''
1 #require unix-permissions no-root reporevlogstore
1 #require unix-permissions no-root reporevlogstore
2
2
3 $ cat > $TESTTMP/dumpjournal.py <<EOF
3 $ cat > $TESTTMP/dumpjournal.py <<EOF
4 > import sys
4 > import sys
5 > for entry in sys.stdin.read().split('\n'):
5 > for entry in sys.stdin.read().split('\n'):
6 > if entry:
6 > if entry:
7 > print(entry.split('\x00')[0])
7 > print(entry.split('\x00')[0])
8 > EOF
8 > EOF
9
9
10 $ echo "[extensions]" >> $HGRCPATH
10 $ echo "[extensions]" >> $HGRCPATH
11 $ echo "mq=">> $HGRCPATH
11 $ echo "mq=">> $HGRCPATH
12
12
13 $ teststrip() {
13 $ teststrip() {
14 > hg -q up -C $1
14 > hg -q up -C $1
15 > echo % before update $1, strip $2
15 > echo % before update $1, strip $2
16 > hg parents
16 > hg parents
17 > chmod -$3 $4
17 > chmod -$3 $4
18 > hg strip $2 2>&1 | sed 's/\(bundle\).*/\1/' | sed 's/Permission denied.*\.hg\/store\/\(.*\)/Permission denied \.hg\/store\/\1/'
18 > hg strip $2 2>&1 | sed 's/\(bundle\).*/\1/' | sed 's/Permission denied.*\.hg\/store\/\(.*\)/Permission denied \.hg\/store\/\1/'
19 > echo % after update $1, strip $2
19 > echo % after update $1, strip $2
20 > chmod +$3 $4
20 > chmod +$3 $4
21 > hg verify
21 > hg verify
22 > echo % journal contents
22 > echo % journal contents
23 > if [ -f .hg/store/journal ]; then
23 > if [ -f .hg/store/journal ]; then
24 > cat .hg/store/journal | "$PYTHON" $TESTTMP/dumpjournal.py
24 > cat .hg/store/journal | "$PYTHON" $TESTTMP/dumpjournal.py
25 > else
25 > else
26 > echo "(no journal)"
26 > echo "(no journal)"
27 > fi
27 > fi
28 > ls .hg/store/journal >/dev/null 2>&1 && hg recover
28 > ls .hg/store/journal >/dev/null 2>&1 && hg recover
29 > ls .hg/strip-backup/* >/dev/null 2>&1 && hg unbundle -q .hg/strip-backup/*
29 > ls .hg/strip-backup/* >/dev/null 2>&1 && hg unbundle -q .hg/strip-backup/*
30 > rm -rf .hg/strip-backup
30 > rm -rf .hg/strip-backup
31 > }
31 > }
32
32
33 $ hg init test
33 $ hg init test
34 $ cd test
34 $ cd test
35 $ echo a > a
35 $ echo a > a
36 $ hg -q ci -m "a" -A
36 $ hg -q ci -m "a" -A
37 $ echo b > b
37 $ echo b > b
38 $ hg -q ci -m "b" -A
38 $ hg -q ci -m "b" -A
39 $ echo b2 >> b
39 $ echo b2 >> b
40 $ hg -q ci -m "b2" -A
40 $ hg -q ci -m "b2" -A
41 $ echo c > c
41 $ echo c > c
42 $ hg -q ci -m "c" -A
42 $ hg -q ci -m "c" -A
43 $ teststrip 0 2 w .hg/store/data/b.i
43 $ teststrip 0 2 w .hg/store/data/b.i
44 % before update 0, strip 2
44 % before update 0, strip 2
45 changeset: 0:cb9a9f314b8b
45 changeset: 0:cb9a9f314b8b
46 user: test
46 user: test
47 date: Thu Jan 01 00:00:00 1970 +0000
47 date: Thu Jan 01 00:00:00 1970 +0000
48 summary: a
48 summary: a
49
49
50 saved backup bundle
50 saved backup bundle
51 transaction abort!
51 transaction abort!
52 failed to truncate data/b.i
52 failed to truncate data/b.i
53 rollback failed - please run hg recover
53 rollback failed - please run hg recover
54 (failure reason: [Errno 13] Permission denied .hg/store/data/b.i')
54 strip failed, backup bundle
55 strip failed, backup bundle
55 abort: Permission denied .hg/store/data/b.i
56 abort: Permission denied .hg/store/data/b.i
56 % after update 0, strip 2
57 % after update 0, strip 2
57 abandoned transaction found - run hg recover
58 abandoned transaction found - run hg recover
58 checking changesets
59 checking changesets
59 checking manifests
60 checking manifests
60 crosschecking files in changesets and manifests
61 crosschecking files in changesets and manifests
61 checking files
62 checking files
62 b@?: rev 1 points to nonexistent changeset 2
63 b@?: rev 1 points to nonexistent changeset 2
63 (expected 1)
64 (expected 1)
64 b@?: 736c29771fba not in manifests
65 b@?: 736c29771fba not in manifests
65 warning: orphan data file 'data/c.i'
66 warning: orphan data file 'data/c.i'
66 checked 2 changesets with 3 changes to 2 files
67 checked 2 changesets with 3 changes to 2 files
67 2 warnings encountered!
68 2 warnings encountered!
68 2 integrity errors encountered!
69 2 integrity errors encountered!
69 % journal contents
70 % journal contents
70 00changelog.i
71 00changelog.i
71 00manifest.i
72 00manifest.i
72 data/b.i
73 data/b.i
73 data/c.i
74 data/c.i
74 rolling back interrupted transaction
75 rolling back interrupted transaction
75 checking changesets
76 checking changesets
76 checking manifests
77 checking manifests
77 crosschecking files in changesets and manifests
78 crosschecking files in changesets and manifests
78 checking files
79 checking files
79 checked 2 changesets with 2 changes to 2 files
80 checked 2 changesets with 2 changes to 2 files
80 $ teststrip 0 2 r .hg/store/data/b.i
81 $ teststrip 0 2 r .hg/store/data/b.i
81 % before update 0, strip 2
82 % before update 0, strip 2
82 changeset: 0:cb9a9f314b8b
83 changeset: 0:cb9a9f314b8b
83 user: test
84 user: test
84 date: Thu Jan 01 00:00:00 1970 +0000
85 date: Thu Jan 01 00:00:00 1970 +0000
85 summary: a
86 summary: a
86
87
87 abort: Permission denied .hg/store/data/b.i
88 abort: Permission denied .hg/store/data/b.i
88 % after update 0, strip 2
89 % after update 0, strip 2
89 checking changesets
90 checking changesets
90 checking manifests
91 checking manifests
91 crosschecking files in changesets and manifests
92 crosschecking files in changesets and manifests
92 checking files
93 checking files
93 checked 4 changesets with 4 changes to 3 files
94 checked 4 changesets with 4 changes to 3 files
94 % journal contents
95 % journal contents
95 (no journal)
96 (no journal)
96 $ teststrip 0 2 w .hg/store/00manifest.i
97 $ teststrip 0 2 w .hg/store/00manifest.i
97 % before update 0, strip 2
98 % before update 0, strip 2
98 changeset: 0:cb9a9f314b8b
99 changeset: 0:cb9a9f314b8b
99 user: test
100 user: test
100 date: Thu Jan 01 00:00:00 1970 +0000
101 date: Thu Jan 01 00:00:00 1970 +0000
101 summary: a
102 summary: a
102
103
103 saved backup bundle
104 saved backup bundle
104 transaction abort!
105 transaction abort!
105 failed to truncate 00manifest.i
106 failed to truncate 00manifest.i
106 rollback failed - please run hg recover
107 rollback failed - please run hg recover
108 (failure reason: [Errno 13] Permission denied .hg/store/00manifest.i')
107 strip failed, backup bundle
109 strip failed, backup bundle
108 abort: Permission denied .hg/store/00manifest.i
110 abort: Permission denied .hg/store/00manifest.i
109 % after update 0, strip 2
111 % after update 0, strip 2
110 abandoned transaction found - run hg recover
112 abandoned transaction found - run hg recover
111 checking changesets
113 checking changesets
112 checking manifests
114 checking manifests
113 manifest@?: rev 2 points to nonexistent changeset 2
115 manifest@?: rev 2 points to nonexistent changeset 2
114 manifest@?: 3362547cdf64 not in changesets
116 manifest@?: 3362547cdf64 not in changesets
115 manifest@?: rev 3 points to nonexistent changeset 3
117 manifest@?: rev 3 points to nonexistent changeset 3
116 manifest@?: 265a85892ecb not in changesets
118 manifest@?: 265a85892ecb not in changesets
117 crosschecking files in changesets and manifests
119 crosschecking files in changesets and manifests
118 c@3: in manifest but not in changeset
120 c@3: in manifest but not in changeset
119 checking files
121 checking files
120 b@?: rev 1 points to nonexistent changeset 2
122 b@?: rev 1 points to nonexistent changeset 2
121 (expected 1)
123 (expected 1)
122 c@?: rev 0 points to nonexistent changeset 3
124 c@?: rev 0 points to nonexistent changeset 3
123 checked 2 changesets with 4 changes to 3 files
125 checked 2 changesets with 4 changes to 3 files
124 1 warnings encountered!
126 1 warnings encountered!
125 7 integrity errors encountered!
127 7 integrity errors encountered!
126 (first damaged changeset appears to be 3)
128 (first damaged changeset appears to be 3)
127 % journal contents
129 % journal contents
128 00changelog.i
130 00changelog.i
129 00manifest.i
131 00manifest.i
130 data/b.i
132 data/b.i
131 data/c.i
133 data/c.i
132 rolling back interrupted transaction
134 rolling back interrupted transaction
133 checking changesets
135 checking changesets
134 checking manifests
136 checking manifests
135 crosschecking files in changesets and manifests
137 crosschecking files in changesets and manifests
136 checking files
138 checking files
137 checked 2 changesets with 2 changes to 2 files
139 checked 2 changesets with 2 changes to 2 files
138
140
139 $ cd ..
141 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now