##// END OF EJS Templates
transaction: handle missing file in backupentries (instead of using entries)...
Pierre-Yves David -
r23278:aa194327 default
parent child Browse files
Show More
@@ -1,420 +1,428
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
18 version = 1
18 version = 1
19
19
def active(func):
    """Decorator restricting a transaction method to an open transaction.

    The wrapped method runs normally while ``self.count`` is non-zero;
    once the transaction has been committed or aborted (count == 0) any
    further call aborts with an explanatory message.
    """
    def _active(self, *args, **kwds):
        if self.count:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
27
27
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 for f, o, _ignore in entries:
29 for f, o, _ignore in entries:
30 if o or not unlink:
30 if o or not unlink:
31 try:
31 try:
32 fp = opener(f, 'a')
32 fp = opener(f, 'a')
33 fp.truncate(o)
33 fp.truncate(o)
34 fp.close()
34 fp.close()
35 except IOError:
35 except IOError:
36 report(_("failed to truncate %s\n") % f)
36 report(_("failed to truncate %s\n") % f)
37 raise
37 raise
38 else:
38 else:
39 try:
39 try:
40 opener.unlink(f)
40 opener.unlink(f)
41 except (IOError, OSError), inst:
41 except (IOError, OSError), inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44
44
45 backupfiles = []
45 backupfiles = []
46 for f, b in backupentries:
46 for f, b in backupentries:
47 filepath = opener.join(f)
47 if b:
48 backuppath = opener.join(b)
48 filepath = opener.join(f)
49 try:
49 backuppath = opener.join(b)
50 util.copyfile(backuppath, filepath)
50 try:
51 backupfiles.append(b)
51 util.copyfile(backuppath, filepath)
52 except IOError:
52 backupfiles.append(b)
53 report(_("failed to recover %s\n") % f)
53 except IOError:
54 raise
54 report(_("failed to recover %s\n") % f)
55 raise
56 else:
57 try:
58 opener.unlink(f)
59 except (IOError, OSError), inst:
60 if inst.errno != errno.ENOENT:
61 raise
55
62
56 opener.unlink(journal)
63 opener.unlink(journal)
57 backuppath = "%s.backupfiles" % journal
64 backuppath = "%s.backupfiles" % journal
58 if opener.exists(backuppath):
65 if opener.exists(backuppath):
59 opener.unlink(backuppath)
66 opener.unlink(backuppath)
60 for f in backupfiles:
67 for f in backupfiles:
61 opener.unlink(f)
68 opener.unlink(f)
62
69
class transaction(object):
    """A journal-based transaction over a set of append-only files.

    Truncation offsets are recorded in a journal file and full-file
    backups in a companion '<journal>.backupfiles' file, so that an
    interrupted transaction can be rolled back (see _playback/rollback).
    """
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable invoked with status/error messages
        * `opener`: vfs-like object used for all file access
        * `journal`: path of the journal file to create
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        # journalled truncation records: list of (file, offset, data)
        self.entries = []
        # file -> index into self.entries
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        # if 'backuppath' is empty, no file existed at backup time
        self._backupentries = []
        # file -> index into self._backupentries
        self._backupmap = {}
        self.journal = journal
        # stack of pending entry groups opened by startgroup()
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        # first line of the backup journal is its format version
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a non-None journal means the transaction was neither closed nor
        # aborted explicitly; roll it back now
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # an empty backup path records that the file did not exist
            # when it was backed up (see _playback)
            backupfile = ''

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        # look file up among truncation entries first, then among backups
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # re-journal the new offset; playback uses the last record written
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested transaction scope; returns self"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """release one usage of the transaction (pairs with nest())"""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        """True while the transaction is neither committed nor aborted"""
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or any
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still nested: the outermost close() does the real work
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            # an empty backup path means no backup file was created
            if b:
                self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            # nothing was journalled: just remove the journal files
            if not self.entries and not self._backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
376
384
377
385
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `opener`: vfs-like object used for all file access
    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a version number on its first
    line followed by file\\0backupfile pairs, also delimited by newlines.
    * `report`: callable invoked with status/error messages
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # fix: close the backup journal; it was previously left open
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now