transaction: extract backupentry registration in a dedicated function...
Pierre-Yves David
r23283:b04263c3 default
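
This change pulls the bookkeeping for a backup entry (append to the in-memory list, update the lookup map, and append a record to the `<journal>.backupfiles` sidecar) out of `addbackup()` into a dedicated `_addbackupentry()` helper. For context, the sidecar format that helper writes is a version header line followed by one `path\0backuppath` record per line. Below is a minimal illustrative sketch of that round trip using plain file I/O; the function names and the use of `open()` instead of Mercurial's vfs layer are assumptions made for illustration only and are not part of this changeset.

import os

version = 1

def writebackupjournal(path, backupentries):
    # Hypothetical helper: write a '<journal>.backupfiles' sidecar for a
    # list of (path, backuppath) tuples. An empty backuppath means the
    # file did not exist when the backup was taken.
    fp = open(path, 'w')
    try:
        fp.write('%d\n' % version)
        for entry in backupentries:
            fp.write('%s\0%s\n' % entry)
    finally:
        fp.close()

def readbackupjournal(path):
    # Hypothetical helper: parse the sidecar back into (path, backuppath)
    # tuples, mirroring the parsing loop in rollback() in the diff below.
    entries = []
    if not os.path.exists(path):
        return entries
    fp = open(path)
    try:
        lines = fp.readlines()
    finally:
        fp.close()
    if lines and lines[0][:-1] == str(version):
        for line in lines[1:]:
            if line:
                f, b = line[:-1].split('\0')
                entries.append((f, b))
    return entries

# Example round trip with made-up paths:
# writebackupjournal('journal.backupfiles',
#                    [('data/foo.i', 'journal.backup.data/foo.i'),
#                     ('phaseroots', '')])
# readbackupjournal('journal.backupfiles')
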
@@ -1,435 +1,439 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from i18n import _
 import errno
 import error, util
 
 version = 1
 
 def active(func):
     def _active(self, *args, **kwds):
         if self.count == 0:
             raise error.Abort(_(
                 'cannot use transaction when it is already committed/aborted'))
         return func(self, *args, **kwds)
     return _active
 
 def _playback(journal, report, opener, entries, backupentries, unlink=True):
     for f, o, _ignore in entries:
         if o or not unlink:
             try:
                 fp = opener(f, 'a')
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_("failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError), inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
     backupfiles = []
     for f, b in backupentries:
         if b:
             filepath = opener.join(f)
             backuppath = opener.join(b)
             try:
                 util.copyfile(backuppath, filepath)
                 backupfiles.append(b)
             except IOError:
                 report(_("failed to recover %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError), inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
     opener.unlink(journal)
     backuppath = "%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     for f in backupfiles:
         opener.unlink(f)
 
 class transaction(object):
     def __init__(self, report, opener, journal, after=None, createmode=None,
                  onclose=None, onabort=None):
         """Begin a new transaction
 
         Begins a new transaction that allows rolling back writes in the event of
         an exception.
 
         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `onclose`: called as the transaction is closing, but before it is
         closed
         * `onabort`: called as the transaction is aborting, but before any files
         have been truncated
         """
         self.count = 1
         self.usages = 1
         self.report = report
         self.opener = opener
         self.after = after
         self.onclose = onclose
         self.onabort = onabort
         self.entries = []
         self.map = {}
         self.journal = journal
         self._queue = []
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self.file = opener.open(self.journal, "w")
 
         # a list of ('path', 'backuppath') entries.
         # if 'backuppath' is empty, no file existed at backup time
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = "%s.backupfiles" % journal
         self._backupsfile = opener.open(self._backupjournal, 'w')
         self._backupsfile.write('%d\n' % version)
 
         if createmode is not None:
             opener.chmod(self.journal, createmode & 0666)
             opener.chmod(self._backupjournal, createmode & 0666)
 
         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has ever been written
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # hold callback for post transaction close
         self._postclosecallback = {}
 
     def __del__(self):
         if self.journal:
             self._abort()
 
     @active
     def startgroup(self):
         """delay registration of file entry
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])
 
     @active
     def endgroup(self):
         """apply delayed registration of file entry.
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o, data in q:
             self._addentry(f, o, data)
 
     @active
     def add(self, file, offset, data=None):
         """record the state of an append-only file before update"""
         if file in self.map or file in self._backupmap:
             return
         if self._queue:
             self._queue[-1].append((file, offset, data))
             return
 
         self._addentry(file, offset, data)
 
     def _addentry(self, file, offset, data):
157 """add a append-only entry to memory and on-disk state"""
157 """add a append-only entry to memory and on-disk state"""
         if file in self.map or file in self._backupmap:
             return
         self.entries.append((file, offset, data))
         self.map[file] = len(self.entries) - 1
         # add enough data to the journal to do the truncate
         self.file.write("%s\0%d\n" % (file, offset))
         self.file.flush()
 
     @active
     def addbackup(self, file, hardlink=True, vfs=None):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = 'cannot use transaction.addbackup inside "group"'
             raise RuntimeError(msg)
 
         if file in self.map or file in self._backupmap:
             return
         backupfile = "%s.backup.%s" % (self.journal, file)
         if vfs is None:
             vfs = self.opener
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = self.opener.join(backupfile)
             util.copyfiles(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = ''
 
-        self._backupentries.append((file, backupfile))
+        self._addbackupentry((file, backupfile))
+
+    def _addbackupentry(self, entry):
+        """register a new backup entry and write it to disk"""
+        self._backupentries.append(entry)
         self._backupmap[file] = len(self._backupentries) - 1
-        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
+        self._backupsfile.write("%s\0%s\n" % entry)
         self._backupsfile.flush()
 
     @active
     def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
         """add a function to generate some files at transaction commit
 
         The `genfunc` argument is a function capable of generating proper
         content of each entry in the `filenames` tuple.
 
         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.
 
         The transaction itself is responsible for the backup, creation and
         final write of such files.
 
         The `genid` argument is used to ensure the same set of files is only
         generated once. Calls to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.
 
         The `order` argument may be used to control the order in which multiple
         generators will be executed.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
         assert vfs is None or filenames == ('bookmarks',)
         self._filegenerators[genid] = (order, filenames, genfunc, vfs)
 
     def _generatefiles(self):
         # write files registered for generation
         for entry in sorted(self._filegenerators.values()):
             order, filenames, genfunc, vfs = entry
             if vfs is None:
                 vfs = self.opener
             files = []
             try:
                 for name in filenames:
                     # Some files are already backed up when creating the
                     # localrepo. Until this is properly fixed we disable the
                     # backup for them.
                     if name not in ('phaseroots', 'bookmarks'):
                         self.addbackup(name)
                     files.append(vfs(name, 'w', atomictemp=True))
                 genfunc(*files)
             finally:
                 for f in files:
                     f.close()
 
     @active
     def find(self, file):
         if file in self.map:
             return self.entries[self.map[file]]
         if file in self._backupmap:
             return self._backupentries[self._backupmap[file]]
         return None
 
     @active
     def replace(self, file, offset, data=None):
         '''
         replace can only replace already committed entries
         that are not pending in the queue
         '''
 
         if file not in self.map:
             raise KeyError(file)
         index = self.map[file]
         self.entries[index] = (file, offset, data)
         self.file.write("%s\0%d\n" % (file, offset))
         self.file.flush()
 
     @active
     def nest(self):
         self.count += 1
         self.usages += 1
         return self
 
     def release(self):
         if self.count > 0:
             self.usages -= 1
         # if the transaction scopes are left without being closed, fail
         if self.count > 0 and self.usages == 0:
             self._abort()
 
     def running(self):
         return self.count > 0
 
     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending
 
         The transaction will be given as callback's first argument.
 
         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback
 
     @active
     def writepending(self):
         '''write pending file to temporary version
 
         This is used to allow hooks to view a transaction before commit'''
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         return self._anypending
 
     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed
 
         The transaction will be given as callback's first argument.
 
         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback
 
     @active
     def addpostclose(self, category, callback):
         """add a callback to be called after the transaction is closed
 
         The transaction will be given as callback's first argument.
 
         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback
 
     @active
     def close(self):
         '''commit the transaction'''
         if self.count == 1 and self.onclose is not None:
             self._generatefiles()
             categories = sorted(self._finalizecallback)
             for cat in categories:
                 self._finalizecallback[cat](self)
             self.onclose()
 
         self.count -= 1
         if self.count != 0:
             return
         self.file.close()
         self._backupsfile.close()
         self.entries = []
         if self.after:
             self.after()
         if self.opener.isfile(self.journal):
             self.opener.unlink(self.journal)
         if self.opener.isfile(self._backupjournal):
             self.opener.unlink(self._backupjournal)
         for _f, b in self._backupentries:
             if b:
                 self.opener.unlink(b)
         self._backupentries = []
         self.journal = None
         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
 
     @active
     def abort(self):
         '''abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)'''
         self._abort()
 
     def _abort(self):
         self.count = 0
         self.usages = 0
         self.file.close()
         self._backupsfile.close()
 
         if self.onabort is not None:
             self.onabort()
 
         try:
             if not self.entries and not self._backupentries:
                 if self.journal:
                     self.opener.unlink(self.journal)
                 if self._backupjournal:
                     self.opener.unlink(self._backupjournal)
                 return
 
             self.report(_("transaction abort!\n"))
 
             try:
                 _playback(self.journal, self.report, self.opener,
                           self.entries, self._backupentries, False)
                 self.report(_("rollback completed\n"))
             except Exception:
                 self.report(_("rollback failed - please run hg recover\n"))
         finally:
             self.journal = None
 
 
 def rollback(opener, file, report):
     """Rolls back the transaction contained in the given file
 
     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.
 
     * `file`: a file containing a list of entries, specifying where
     to truncate each file. The file should contain a list of
     file\0offset pairs, delimited by newlines. The corresponding
     '*.backupfiles' file should contain a list of file\0backupfile
     pairs, delimited by newlines.
404 """
408 """
405 entries = []
409 entries = []
406 backupentries = []
410 backupentries = []
407
411
408 fp = opener.open(file)
412 fp = opener.open(file)
409 lines = fp.readlines()
413 lines = fp.readlines()
410 fp.close()
414 fp.close()
411 for l in lines:
415 for l in lines:
412 try:
416 try:
413 f, o = l.split('\0')
417 f, o = l.split('\0')
414 entries.append((f, int(o), None))
418 entries.append((f, int(o), None))
415 except ValueError:
419 except ValueError:
416 report(_("couldn't read journal entry %r!\n") % l)
420 report(_("couldn't read journal entry %r!\n") % l)
417
421
418 backupjournal = "%s.backupfiles" % file
422 backupjournal = "%s.backupfiles" % file
419 if opener.exists(backupjournal):
423 if opener.exists(backupjournal):
420 fp = opener.open(backupjournal)
424 fp = opener.open(backupjournal)
421 lines = fp.readlines()
425 lines = fp.readlines()
422 if lines:
426 if lines:
423 ver = lines[0][:-1]
427 ver = lines[0][:-1]
424 if ver == str(version):
428 if ver == str(version):
425 for line in lines[1:]:
429 for line in lines[1:]:
426 if line:
430 if line:
427 # Shave off the trailing newline
431 # Shave off the trailing newline
428 line = line[:-1]
432 line = line[:-1]
429 f, b = line.split('\0')
433 f, b = line.split('\0')
430 backupentries.append((f, b))
434 backupentries.append((f, b))
431 else:
435 else:
432 report(_("journal was created by a newer version of "
436 report(_("journal was created by a newer version of "
433 "Mercurial"))
437 "Mercurial"))
434
438
435 _playback(file, report, opener, entries, backupentries)
439 _playback(file, report, opener, entries, backupentries)