##// END OF EJS Templates
transaction: document startgroup and endgroup...
Pierre-Yves David -
r23250:8919dc7f default
parent child Browse files
Show More
@@ -1,422 +1,430 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
# Journal format version: written as the first line of the '*.backupfiles'
# journal and checked by rollback() before trusting its contents.
version = 1

def active(func):
    """Decorator: refuse to run *func* once the transaction is finished.

    A transaction is live while ``self.count > 0``; after a commit or an
    abort the count drops to 0 and any further use is a programming error,
    reported via ``error.Abort``.
    """
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
27
27
def _playback(journal, report, opener, entries, backupentries, unlink=True):
    """Replay a journal, undoing the writes of an interrupted transaction.

    * `journal`: name of the journal file; removed when playback succeeds
    * `report`: callable used to emit progress/error messages
    * `opener`: vfs-like object used for all file access
    * `entries`: (file, offset, data) triples; each file is truncated back
      to `offset`
    * `backupentries`: (file, backupfile) pairs; each file is restored by
      copying its backup over it
    * `unlink`: when True, files registered with offset 0 are removed
      outright instead of truncated
    """
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file may already be gone; only a real error matters
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for f, b in backupentries:
        filepath = opener.join(f)
        backuppath = opener.join(b)
        try:
            util.copyfile(backuppath, filepath)
            backupfiles.append(b)
        except IOError:
            report(_("failed to recover %s\n") % f)
            raise

    # everything restored: remove the journals and the backup copies
    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    for f in backupfiles:
        opener.unlink(f)
62
62
class transaction(object):
    """A journaled group of file writes that can be rolled back on error."""

    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        self._backupentries = []
        self._backupmap = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            # mask out execute/special bits; 0o666 spelling is valid on
            # Python 2.6+ and required on Python 3
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a still-set journal means close() never ran: roll back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append(([], []))

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        self.entries.extend(q[0])
        self._backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b in q[1]:
            backups.append((f, b))

        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
        self._backupsfile.write(d)
        self._backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        """record the current size of `file` so rollback can truncate it"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: registering it with offset 0 makes
            # rollback remove it instead of restoring it
            self.add(file, 0)
            return

        if self._queue:
            self._queue[-1][1].append((file, backupfile))
            return

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        """return the journal or backup entry registered for `file`, if any"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested scope; close() must then be called one extra time"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """drop one usage reference, aborting if scopes were left unclosed"""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        """True while the transaction has not been committed or aborted"""
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            anypending = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or anypending
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still inside a nested scope; outermost close() finishes the job
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        # mark the transaction dead before touching any file so re-entrant
        # use trips the @active guard
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just discard the empty journals
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
378
386
379
387
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by newlines.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            # int() tolerates the trailing newline left on the offset field
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            # first line carries the format version (see module-level
            # `version`); refuse journals from the future
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now