##// END OF EJS Templates
transaction: document `tr.add`
Pierre-Yves David -
r23252:70809438 default
parent child Browse files
Show More
@@ -1,420 +1,421
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
13
13
from i18n import _
import errno
import error, util

# journal format version; bumped whenever the on-disk journal layout changes
version = 1
19
19
def active(func):
    """Decorator: only allow `func` to run while the transaction is live.

    Once ``self.count`` has dropped to zero the transaction has already
    been committed or aborted, so any further use is a caller bug and is
    rejected loudly rather than risking journal corruption.
    """
    def checked(self, *args, **kwargs):
        if self.count != 0:
            return func(self, *args, **kwargs)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return checked
27
27
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 for f, o, _ignore in entries:
29 for f, o, _ignore in entries:
30 if o or not unlink:
30 if o or not unlink:
31 try:
31 try:
32 fp = opener(f, 'a')
32 fp = opener(f, 'a')
33 fp.truncate(o)
33 fp.truncate(o)
34 fp.close()
34 fp.close()
35 except IOError:
35 except IOError:
36 report(_("failed to truncate %s\n") % f)
36 report(_("failed to truncate %s\n") % f)
37 raise
37 raise
38 else:
38 else:
39 try:
39 try:
40 opener.unlink(f)
40 opener.unlink(f)
41 except (IOError, OSError), inst:
41 except (IOError, OSError), inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44
44
45 backupfiles = []
45 backupfiles = []
46 for f, b in backupentries:
46 for f, b in backupentries:
47 filepath = opener.join(f)
47 filepath = opener.join(f)
48 backuppath = opener.join(b)
48 backuppath = opener.join(b)
49 try:
49 try:
50 util.copyfile(backuppath, filepath)
50 util.copyfile(backuppath, filepath)
51 backupfiles.append(b)
51 backupfiles.append(b)
52 except IOError:
52 except IOError:
53 report(_("failed to recover %s\n") % f)
53 report(_("failed to recover %s\n") % f)
54 raise
54 raise
55
55
56 opener.unlink(journal)
56 opener.unlink(journal)
57 backuppath = "%s.backupfiles" % journal
57 backuppath = "%s.backupfiles" % journal
58 if opener.exists(backuppath):
58 if opener.exists(backuppath):
59 opener.unlink(backuppath)
59 opener.unlink(backuppath)
60 for f in backupfiles:
60 for f in backupfiles:
61 opener.unlink(f)
61 opener.unlink(f)
62
62
class transaction(object):
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        self._backupentries = []
        self._backupmap = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            # 0o666 is the modern spelling of the historical 0666 literal
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # abort any transaction still open when the object is reclaimed
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        self.entries.extend(q)

        offsets = []
        for f, o, _data in q:
            offsets.append((f, o))

        # flush the whole group to the journal in one write
        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: journal a truncate-to-zero instead
            self.add(file, 0)
            return

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, or None"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # enter a nested scope; every nest() needs a matching close()/release()
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            # (renamed from `any`, which shadowed the builtin)
            anypending = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or anypending
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            # NOTE(review): file generation and finalize callbacks only run
            # when an `onclose` hook was supplied -- confirm that all callers
            # pass one before relying on finalizers firing at commit.
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still inside a nested scope; outermost close() does the work
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        """roll everything back and mark the transaction dead"""
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was journaled; just discard the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
376
377
377
378
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # a torn write can leave a malformed last line; skip it
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # fix: this handle was previously never closed (resource leak)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now