##// END OF EJS Templates
transaction: factorise append-only file registration...
Pierre-Yves David -
r23253:8d84b7a2 default
parent child Browse files
Show More
@@ -1,421 +1,420 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
18 version = 1
18 version = 1
19
19
def active(func):
    """Decorator for transaction methods that require a live transaction.

    Wraps *func* so that invoking it after the transaction has been
    committed or aborted (i.e. ``self.count`` has dropped to zero) raises
    ``error.Abort`` instead of silently operating on dead state.
    """
    def _checked(self, *args, **kwargs):
        if not self.count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwargs)
    return _checked
27
27
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 for f, o, _ignore in entries:
29 for f, o, _ignore in entries:
30 if o or not unlink:
30 if o or not unlink:
31 try:
31 try:
32 fp = opener(f, 'a')
32 fp = opener(f, 'a')
33 fp.truncate(o)
33 fp.truncate(o)
34 fp.close()
34 fp.close()
35 except IOError:
35 except IOError:
36 report(_("failed to truncate %s\n") % f)
36 report(_("failed to truncate %s\n") % f)
37 raise
37 raise
38 else:
38 else:
39 try:
39 try:
40 opener.unlink(f)
40 opener.unlink(f)
41 except (IOError, OSError), inst:
41 except (IOError, OSError), inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44
44
45 backupfiles = []
45 backupfiles = []
46 for f, b in backupentries:
46 for f, b in backupentries:
47 filepath = opener.join(f)
47 filepath = opener.join(f)
48 backuppath = opener.join(b)
48 backuppath = opener.join(b)
49 try:
49 try:
50 util.copyfile(backuppath, filepath)
50 util.copyfile(backuppath, filepath)
51 backupfiles.append(b)
51 backupfiles.append(b)
52 except IOError:
52 except IOError:
53 report(_("failed to recover %s\n") % f)
53 report(_("failed to recover %s\n") % f)
54 raise
54 raise
55
55
56 opener.unlink(journal)
56 opener.unlink(journal)
57 backuppath = "%s.backupfiles" % journal
57 backuppath = "%s.backupfiles" % journal
58 if opener.exists(backuppath):
58 if opener.exists(backuppath):
59 opener.unlink(backuppath)
59 opener.unlink(backuppath)
60 for f in backupfiles:
60 for f in backupfiles:
61 opener.unlink(f)
61 opener.unlink(f)
62
62
class transaction(object):
    """Journal-backed transaction over a set of append-only files.

    While open, the transaction records in an on-disk journal enough
    information (truncation offsets, backup copies) to roll back every
    file it touched, so an abort or crash can be recovered from.
    """

    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        self._backupentries = []
        self._backupmap = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            # 0o666 (py2.6+/py3 spelling of 0666): journal must not be
            # executable, and the umask still applies
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a transaction garbage-collected without close() is an abort
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: record it for plain unlink on abort
            self.add(file, 0)
            return

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, if any"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested scope; close()/release() must balance this"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            # (local renamed from 'any', which shadowed the builtin)
            wrotepending = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or wrotepending
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still inside a nested scope: outermost close() finishes up
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just drop the (empty) journals
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
377
376
378
377
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # malformed line (e.g. truncated write): skip but warn
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # bug fix: the backup journal handle was previously never closed
        fp.close()
        if lines:
            # first line carries the journal format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now