transaction: always generate file on close...
Pierre-Yves David
r23290:59513ec7 default
@@ -1,439 +1,440 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from i18n import _
 import errno
 import error, util

 version = 1

 def active(func):
     def _active(self, *args, **kwds):
         if self.count == 0:
             raise error.Abort(_(
                 'cannot use transaction when it is already committed/aborted'))
         return func(self, *args, **kwds)
     return _active

 def _playback(journal, report, opener, entries, backupentries, unlink=True):
     for f, o, _ignore in entries:
         if o or not unlink:
             try:
                 fp = opener(f, 'a')
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_("failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError), inst:
                 if inst.errno != errno.ENOENT:
                     raise

     backupfiles = []
     for f, b in backupentries:
         if b:
             filepath = opener.join(f)
             backuppath = opener.join(b)
             try:
                 util.copyfile(backuppath, filepath)
                 backupfiles.append(b)
             except IOError:
                 report(_("failed to recover %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError), inst:
                 if inst.errno != errno.ENOENT:
                     raise

     opener.unlink(journal)
     backuppath = "%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     for f in backupfiles:
         opener.unlink(f)

 class transaction(object):
     def __init__(self, report, opener, journal, after=None, createmode=None,
                  onclose=None, onabort=None):
         """Begin a new transaction

         Begins a new transaction that allows rolling back writes in the event of
         an exception.

         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `onclose`: called as the transaction is closing, but before it is
         closed
         * `onabort`: called as the transaction is aborting, but before any files
         have been truncated
         """
         self.count = 1
         self.usages = 1
         self.report = report
         self.opener = opener
         self.after = after
         self.onclose = onclose
         self.onabort = onabort
         self.entries = []
         self.map = {}
         self.journal = journal
         self._queue = []
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self.file = opener.open(self.journal, "w")

         # a list of ('path', 'backuppath') entries.
         # if 'backuppath' is empty, no file existed at backup time
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = "%s.backupfiles" % journal
         self._backupsfile = opener.open(self._backupjournal, 'w')
         self._backupsfile.write('%d\n' % version)

         if createmode is not None:
             opener.chmod(self.journal, createmode & 0666)
             opener.chmod(self._backupjournal, createmode & 0666)

         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has been written ever
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # hold callback for post transaction close
         self._postclosecallback = {}

     def __del__(self):
         if self.journal:
             self._abort()

     @active
     def startgroup(self):
         """delay registration of file entry

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])

     @active
     def endgroup(self):
         """apply delayed registration of file entry.

         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o, data in q:
             self._addentry(f, o, data)

     @active
     def add(self, file, offset, data=None):
         """record the state of an append-only file before update"""
         if file in self.map or file in self._backupmap:
             return
         if self._queue:
             self._queue[-1].append((file, offset, data))
             return

         self._addentry(file, offset, data)

     def _addentry(self, file, offset, data):
         """add an append-only entry to memory and on-disk state"""
         if file in self.map or file in self._backupmap:
             return
         self.entries.append((file, offset, data))
         self.map[file] = len(self.entries) - 1
         # add enough data to the journal to do the truncate
         self.file.write("%s\0%d\n" % (file, offset))
         self.file.flush()

     @active
     def addbackup(self, file, hardlink=True, vfs=None):
         """Adds a backup of the file to the transaction

         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.

         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = 'cannot use transaction.addbackup inside "group"'
             raise RuntimeError(msg)

         if file in self.map or file in self._backupmap:
             return
         backupfile = "%s.backup.%s" % (self.journal, file)
         if vfs is None:
             vfs = self.opener
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = self.opener.join(backupfile)
             util.copyfiles(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = ''

         self._addbackupentry((file, backupfile))

     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[file] = len(self._backupentries) - 1
         self._backupsfile.write("%s\0%s\n" % entry)
         self._backupsfile.flush()

     @active
     def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
         """add a function to generate some files at transaction commit

         The `genfunc` argument is a function capable of generating proper
         content for each entry in the `filenames` tuple.

         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.

         The transaction itself is responsible for the backup, creation and
         final write of such files.

         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.

         The `order` argument may be used to control the order in which multiple
         generators will be executed.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
         assert vfs is None or filenames == ('bookmarks',)
         self._filegenerators[genid] = (order, filenames, genfunc, vfs)

     def _generatefiles(self):
         # write files registered for generation
         for entry in sorted(self._filegenerators.values()):
             order, filenames, genfunc, vfs = entry
             if vfs is None:
                 vfs = self.opener
             files = []
             try:
                 for name in filenames:
                     # Some files are already backed up when creating the
                     # localrepo. Until this is properly fixed we disable the
                     # backup for them.
                     if name not in ('phaseroots', 'bookmarks'):
                         self.addbackup(name)
                     files.append(vfs(name, 'w', atomictemp=True))
                 genfunc(*files)
             finally:
                 for f in files:
                     f.close()

     @active
     def find(self, file):
         if file in self.map:
             return self.entries[self.map[file]]
         if file in self._backupmap:
             return self._backupentries[self._backupmap[file]]
         return None

     @active
     def replace(self, file, offset, data=None):
         '''
         replace can only replace already committed entries
         that are not pending in the queue
         '''

         if file not in self.map:
             raise KeyError(file)
         index = self.map[file]
         self.entries[index] = (file, offset, data)
         self.file.write("%s\0%d\n" % (file, offset))
         self.file.flush()

     @active
     def nest(self):
         self.count += 1
         self.usages += 1
         return self

     def release(self):
         if self.count > 0:
             self.usages -= 1
         # if the transaction scopes are left without being closed, fail
         if self.count > 0 and self.usages == 0:
             self._abort()

     def running(self):
         return self.count > 0

     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback

     @active
     def writepending(self):
         '''write pending file to temporary version

         This is used to allow hooks to view a transaction before commit'''
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         return self._anypending

     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback

     @active
     def addpostclose(self, category, callback):
         """add a callback to be called after the transaction is closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback

     @active
     def close(self):
         '''commit the transaction'''
-        if self.count == 1 and self.onclose is not None:
+        if self.count == 1:
             self._generatefiles()
             categories = sorted(self._finalizecallback)
             for cat in categories:
                 self._finalizecallback[cat](self)
-            self.onclose()
+            if self.onclose is not None:
+                self.onclose()

         self.count -= 1
         if self.count != 0:
             return
         self.file.close()
         self._backupsfile.close()
         self.entries = []
         if self.after:
             self.after()
         if self.opener.isfile(self.journal):
             self.opener.unlink(self.journal)
         if self.opener.isfile(self._backupjournal):
             self.opener.unlink(self._backupjournal)
         for _f, b in self._backupentries:
             if b:
                 self.opener.unlink(b)
         self._backupentries = []
         self.journal = None
         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)

     @active
     def abort(self):
         '''abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)'''
         self._abort()

     def _abort(self):
         self.count = 0
         self.usages = 0
         self.file.close()
         self._backupsfile.close()

         if self.onabort is not None:
             self.onabort()

         try:
             if not self.entries and not self._backupentries:
                 if self.journal:
                     self.opener.unlink(self.journal)
                 if self._backupjournal:
                     self.opener.unlink(self._backupjournal)
                 return

             self.report(_("transaction abort!\n"))

             try:
                 _playback(self.journal, self.report, self.opener,
                           self.entries, self._backupentries, False)
                 self.report(_("rollback completed\n"))
             except Exception:
                 self.report(_("rollback failed - please run hg recover\n"))
         finally:
             self.journal = None


 def rollback(opener, file, report):
     """Rolls back the transaction contained in the given file

     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.

     * `file`: a file containing a list of entries, specifying where
     to truncate each file. The file should contain a list of
     file\0offset pairs, delimited by newlines. The corresponding
     '*.backupfiles' file should contain a list of file\0backupfile
     pairs, delimited by \0.
     """
     entries = []
     backupentries = []

     fp = opener.open(file)
     lines = fp.readlines()
     fp.close()
     for l in lines:
         try:
             f, o = l.split('\0')
             entries.append((f, int(o), None))
         except ValueError:
             report(_("couldn't read journal entry %r!\n") % l)

     backupjournal = "%s.backupfiles" % file
     if opener.exists(backupjournal):
         fp = opener.open(backupjournal)
         lines = fp.readlines()
         if lines:
             ver = lines[0][:-1]
             if ver == str(version):
                 for line in lines[1:]:
                     if line:
                         # Shave off the trailing newline
                         line = line[:-1]
                         f, b = line.split('\0')
                         backupentries.append((f, b))
             else:
                 report(_("journal was created by a newer version of "
                          "Mercurial"))

     _playback(file, report, opener, entries, backupentries)
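
The effect of the hunk in close() is that _generatefiles() and the finalize callbacks now run whenever the outermost transaction is closed, not only when an onclose callback was supplied. Below is a minimal sketch of what that enables; it is not part of this changeset, and the temporary directory, the 'counter' file name, the writecounter helper, and the use of scmutil.vfs as the opener are illustrative assumptions rather than anything taken from the commit.

import sys
import tempfile

from mercurial import scmutil, transaction

def report(msg):
    # the transaction reports progress and errors through this callable
    sys.stderr.write(msg)

# assumption: a plain directory-backed vfs standing in for .hg/store
opener = scmutil.vfs(tempfile.mkdtemp())

def writecounter(fp):
    # called by the transaction itself at close time, with one file
    # object per name passed to addfilegenerator()
    fp.write('42\n')

# note: no onclose callback is passed here
tr = transaction.transaction(report, opener, 'journal')
try:
    tr.addfilegenerator('counter', ('counter',), writecounter)
    tr.close()    # with this change, 'counter' is still generated here
finally:
    tr.release()

assert opener('counter').read() == '42\n'

Before this change, a transaction created without an onclose callback skipped the whole block, so registered file generators and finalize callbacks never ran on close; after it, only the onclose call itself remains conditional.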