transaction: document the contents of `tr.backupentries`...
Pierre-Yves David
r23248:e754b623 default
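The comment added by this changeset documents the shape of `tr.backupentries`. As a rough illustration (not part of the commit; the file names below are hypothetical stand-ins), each entry pairs a store-relative path with the path of its backup copy, and `_playback()` restores each original from its backup on rollback:

    # Sketch only: example entries of the kind written by addbackup(), which
    # names backups "%s.backup.%s" % (journal, file).
    backupentries = [
        ('phaseroots', 'journal.backup.phaseroots'),
        ('00changelog.i', 'journal.backup.00changelog.i'),
    ]

    # Rollback direction, mirroring _playback()'s util.copyfile(backuppath, filepath):
    for f, b in backupentries:
        print('restore %s from %s' % (f, b))  # the real code copies b over f via the vfs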
@@ -1,421 +1,422 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 import errno
 import error, util

 version = 1

 def active(func):
     def _active(self, *args, **kwds):
         if self.count == 0:
             raise error.Abort(_(
                 'cannot use transaction when it is already committed/aborted'))
         return func(self, *args, **kwds)
     return _active

 def _playback(journal, report, opener, entries, backupentries, unlink=True):
     for f, o, _ignore in entries:
         if o or not unlink:
             try:
                 fp = opener(f, 'a')
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_("failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError), inst:
                 if inst.errno != errno.ENOENT:
                     raise

     backupfiles = []
     for f, b in backupentries:
         filepath = opener.join(f)
         backuppath = opener.join(b)
         try:
             util.copyfile(backuppath, filepath)
             backupfiles.append(b)
         except IOError:
             report(_("failed to recover %s\n") % f)
             raise

     opener.unlink(journal)
     backuppath = "%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     for f in backupfiles:
         opener.unlink(f)

 class transaction(object):
     def __init__(self, report, opener, journal, after=None, createmode=None,
                  onclose=None, onabort=None):
         """Begin a new transaction

         Begins a new transaction that allows rolling back writes in the event of
         an exception.

         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `onclose`: called as the transaction is closing, but before it is
           closed
         * `onabort`: called as the transaction is aborting, but before any files
           have been truncated
         """
         self.count = 1
         self.usages = 1
         self.report = report
         self.opener = opener
         self.after = after
         self.onclose = onclose
         self.onabort = onabort
         self.entries = []
+        # a list of ('path', 'backuppath') entries.
         self.backupentries = []
         self.map = {}
         self.backupmap = {}
         self.journal = journal
         self._queue = []
         # a dict of arguments to be passed to hooks
         self.hookargs = {}

         self.backupjournal = "%s.backupfiles" % journal
         self.file = opener.open(self.journal, "w")
         self.backupsfile = opener.open(self.backupjournal, 'w')
         self.backupsfile.write('%d\n' % version)
         if createmode is not None:
             opener.chmod(self.journal, createmode & 0666)
             opener.chmod(self.backupjournal, createmode & 0666)

         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callbacks to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data have ever been written
         self._anypending = False
         # hold callbacks to call when writing the transaction
         self._finalizecallback = {}
         # hold callbacks for post transaction close
         self._postclosecallback = {}

     def __del__(self):
         if self.journal:
             self._abort()

     @active
     def startgroup(self):
         self._queue.append(([], []))

     @active
     def endgroup(self):
         q = self._queue.pop()
         self.entries.extend(q[0])
         self.backupentries.extend(q[1])

         offsets = []
         backups = []
         for f, o, _data in q[0]:
             offsets.append((f, o))

         for f, b in q[1]:
             backups.append((f, b))

         d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
         self.file.write(d)
         self.file.flush()

         d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
         self.backupsfile.write(d)
         self.backupsfile.flush()

     @active
     def add(self, file, offset, data=None):
         if file in self.map or file in self.backupmap:
             return
         if self._queue:
             self._queue[-1][0].append((file, offset, data))
             return

         self.entries.append((file, offset, data))
         self.map[file] = len(self.entries) - 1
         # add enough data to the journal to do the truncate
         self.file.write("%s\0%d\n" % (file, offset))
         self.file.flush()

     @active
     def addbackup(self, file, hardlink=True, vfs=None):
         """Adds a backup of the file to the transaction

         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.

         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """

         if file in self.map or file in self.backupmap:
             return
         backupfile = "%s.backup.%s" % (self.journal, file)
         if vfs is None:
             vfs = self.opener
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = self.opener.join(backupfile)
             util.copyfiles(filepath, backuppath, hardlink=hardlink)
         else:
             self.add(file, 0)
             return

         if self._queue:
             self._queue[-1][1].append((file, backupfile))
             return

         self.backupentries.append((file, backupfile))
         self.backupmap[file] = len(self.backupentries) - 1
         self.backupsfile.write("%s\0%s\n" % (file, backupfile))
         self.backupsfile.flush()

     @active
     def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
         """add a function to generate some files at transaction commit

         The `genfunc` argument is a function capable of generating proper
         content for each entry in the `filenames` tuple.

         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.

         The transaction itself is responsible for the backup, creation and
         final write of such files.

         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.

         The `order` argument may be used to control the order in which multiple
         generators will be executed.
         """
         # For now, we are unable to do proper backup and restore of custom vfs,
         # except for bookmarks, which are handled outside this mechanism.
         assert vfs is None or filenames == ('bookmarks',)
         self._filegenerators[genid] = (order, filenames, genfunc, vfs)

     def _generatefiles(self):
         # write files registered for generation
         for entry in sorted(self._filegenerators.values()):
             order, filenames, genfunc, vfs = entry
             if vfs is None:
                 vfs = self.opener
             files = []
             try:
                 for name in filenames:
                     # Some files are already backed up when creating the
                     # localrepo. Until this is properly fixed we disable the
                     # backup for them.
                     if name not in ('phaseroots', 'bookmarks'):
                         self.addbackup(name)
                     files.append(vfs(name, 'w', atomictemp=True))
                 genfunc(*files)
             finally:
                 for f in files:
                     f.close()

     @active
     def find(self, file):
         if file in self.map:
             return self.entries[self.map[file]]
         if file in self.backupmap:
             return self.backupentries[self.backupmap[file]]
         return None

     @active
     def replace(self, file, offset, data=None):
         '''
         replace can only replace already committed entries
         that are not pending in the queue
         '''

         if file not in self.map:
             raise KeyError(file)
         index = self.map[file]
         self.entries[index] = (file, offset, data)
         self.file.write("%s\0%d\n" % (file, offset))
         self.file.flush()

     @active
     def nest(self):
         self.count += 1
         self.usages += 1
         return self

     def release(self):
         if self.count > 0:
             self.usages -= 1
         # if the transaction scopes are left without being closed, fail
         if self.count > 0 and self.usages == 0:
             self._abort()

     def running(self):
         return self.count > 0

     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback

     @active
     def writepending(self):
         '''write pending file to temporary version

         This is used to allow hooks to view a transaction before commit'''
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)()
             self._anypending = self._anypending or any
         return self._anypending

     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed

         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback

     @active
     def addpostclose(self, category, callback):
         """add a callback to be called after the transaction is closed

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback

     @active
     def close(self):
         '''commit the transaction'''
         if self.count == 1 and self.onclose is not None:
             self._generatefiles()
             categories = sorted(self._finalizecallback)
             for cat in categories:
                 self._finalizecallback[cat]()
             self.onclose()

         self.count -= 1
         if self.count != 0:
             return
         self.file.close()
         self.backupsfile.close()
         self.entries = []
         if self.after:
             self.after()
         if self.opener.isfile(self.journal):
             self.opener.unlink(self.journal)
         if self.opener.isfile(self.backupjournal):
             self.opener.unlink(self.backupjournal)
             for _f, b in self.backupentries:
                 self.opener.unlink(b)
         self.backupentries = []
         self.journal = None
         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat]()

     @active
     def abort(self):
         '''abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)'''
         self._abort()

     def _abort(self):
         self.count = 0
         self.usages = 0
         self.file.close()
         self.backupsfile.close()

         if self.onabort is not None:
             self.onabort()

         try:
             if not self.entries and not self.backupentries:
                 if self.journal:
                     self.opener.unlink(self.journal)
                 if self.backupjournal:
                     self.opener.unlink(self.backupjournal)
                 return

             self.report(_("transaction abort!\n"))

             try:
                 _playback(self.journal, self.report, self.opener,
                           self.entries, self.backupentries, False)
                 self.report(_("rollback completed\n"))
             except Exception:
                 self.report(_("rollback failed - please run hg recover\n"))
         finally:
             self.journal = None


 def rollback(opener, file, report):
     """Rolls back the transaction contained in the given file

     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.

     * `file`: a file containing a list of entries, specifying where
       to truncate each file. The file should contain a list of
       file\0offset pairs, delimited by newlines. The corresponding
       '*.backupfiles' file should contain a list of file\0backupfile
       pairs, delimited by \0.
     """
     entries = []
     backupentries = []

     fp = opener.open(file)
     lines = fp.readlines()
     fp.close()
     for l in lines:
         try:
             f, o = l.split('\0')
             entries.append((f, int(o), None))
         except ValueError:
             report(_("couldn't read journal entry %r!\n") % l)

     backupjournal = "%s.backupfiles" % file
     if opener.exists(backupjournal):
         fp = opener.open(backupjournal)
         lines = fp.readlines()
         if lines:
             ver = lines[0][:-1]
             if ver == str(version):
                 for line in lines[1:]:
                     if line:
                         # Shave off the trailing newline
                         line = line[:-1]
                         f, b = line.split('\0')
                         backupentries.append((f, b))
             else:
                 report(_("journal was created by a newer version of "
                          "Mercurial"))

     _playback(file, report, opener, entries, backupentries)
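For orientation, a minimal usage sketch of the API defined above, assuming a vfs-style `opener` rooted at .hg/store and a `report` callable; the variable names and the `offset` value are illustrative placeholders, not taken from a real caller:

    tr = transaction(report, opener, "journal")
    try:
        tr.add("00changelog.i", offset)  # record the truncation offset for an append
        tr.addbackup("phaseroots")       # whole-file backup, restored if we abort
        # ... write data through `opener` ...
        tr.close()                       # commit: journal and backup files are removed
    except:
        tr.abort()                       # rollback, replaying the journal via _playback()
        raise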