##// END OF EJS Templates
transaction: support cache file in backupentries...
Pierre-Yves David -
r23312:006e9ef0 default
parent child Browse files
Show More
@@ -1,468 +1,500 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
18 version = 0
18 version = 0
19
19
20 def active(func):
20 def active(func):
21 def _active(self, *args, **kwds):
21 def _active(self, *args, **kwds):
22 if self.count == 0:
22 if self.count == 0:
23 raise error.Abort(_(
23 raise error.Abort(_(
24 'cannot use transaction when it is already committed/aborted'))
24 'cannot use transaction when it is already committed/aborted'))
25 return func(self, *args, **kwds)
25 return func(self, *args, **kwds)
26 return _active
26 return _active
27
27
28 def _playback(journal, report, opener, vfsmap, entries, backupentries,
28 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 unlink=True):
29 unlink=True):
30 for f, o, _ignore in entries:
30 for f, o, _ignore in entries:
31 if o or not unlink:
31 if o or not unlink:
32 try:
32 try:
33 fp = opener(f, 'a')
33 fp = opener(f, 'a')
34 fp.truncate(o)
34 fp.truncate(o)
35 fp.close()
35 fp.close()
36 except IOError:
36 except IOError:
37 report(_("failed to truncate %s\n") % f)
37 report(_("failed to truncate %s\n") % f)
38 raise
38 raise
39 else:
39 else:
40 try:
40 try:
41 opener.unlink(f)
41 opener.unlink(f)
42 except (IOError, OSError), inst:
42 except (IOError, OSError), inst:
43 if inst.errno != errno.ENOENT:
43 if inst.errno != errno.ENOENT:
44 raise
44 raise
45
45
46 backupfiles = []
46 backupfiles = []
47 for l, f, b, c in backupentries:
47 for l, f, b, c in backupentries:
48 if l not in vfsmap and c:
49 report("couldn't handle %s: unknown cache location %s\n"
50 % (b, l))
48 vfs = vfsmap[l]
51 vfs = vfsmap[l]
49 if f and b:
52 try:
50 filepath = vfs.join(f)
53 if f and b:
51 backuppath = vfs.join(b)
54 filepath = vfs.join(f)
52 try:
55 backuppath = vfs.join(b)
53 util.copyfile(backuppath, filepath)
56 try:
54 backupfiles.append(b)
57 util.copyfile(backuppath, filepath)
55 except IOError:
58 backupfiles.append(b)
56 report(_("failed to recover %s\n") % f)
59 except IOError:
60 report(_("failed to recover %s\n") % f)
61 else:
62 target = f or b
63 try:
64 vfs.unlink(target)
65 except (IOError, OSError), inst:
66 if inst.errno != errno.ENOENT:
67 raise
68 except (IOError, OSError, util.Abort), inst:
69 if not c:
57 raise
70 raise
58 else:
59 target = f or b
60 try:
61 vfs.unlink(target)
62 except (IOError, OSError), inst:
63 if inst.errno != errno.ENOENT:
64 raise
65
71
66 opener.unlink(journal)
72 opener.unlink(journal)
67 backuppath = "%s.backupfiles" % journal
73 backuppath = "%s.backupfiles" % journal
68 if opener.exists(backuppath):
74 if opener.exists(backuppath):
69 opener.unlink(backuppath)
75 opener.unlink(backuppath)
70 for f in backupfiles:
76 try:
71 if opener.exists(f):
77 for f in backupfiles:
72 opener.unlink(f)
78 if opener.exists(f):
79 opener.unlink(f)
80 except (IOError, OSError, util.Abort), inst:
81 # only pure backup file remains, it is safe to ignore any error
82 pass
73
83
74 class transaction(object):
84 class transaction(object):
75 def __init__(self, report, opener, vfsmap, journal, after=None,
85 def __init__(self, report, opener, vfsmap, journal, after=None,
76 createmode=None, onclose=None, onabort=None):
86 createmode=None, onclose=None, onabort=None):
77 """Begin a new transaction
87 """Begin a new transaction
78
88
79 Begins a new transaction that allows rolling back writes in the event of
89 Begins a new transaction that allows rolling back writes in the event of
80 an exception.
90 an exception.
81
91
82 * `after`: called after the transaction has been committed
92 * `after`: called after the transaction has been committed
83 * `createmode`: the mode of the journal file that will be created
93 * `createmode`: the mode of the journal file that will be created
84 * `onclose`: called as the transaction is closing, but before it is
94 * `onclose`: called as the transaction is closing, but before it is
85 closed
95 closed
86 * `onabort`: called as the transaction is aborting, but before any files
96 * `onabort`: called as the transaction is aborting, but before any files
87 have been truncated
97 have been truncated
88 """
98 """
89 self.count = 1
99 self.count = 1
90 self.usages = 1
100 self.usages = 1
91 self.report = report
101 self.report = report
92 # a vfs to the store content
102 # a vfs to the store content
93 self.opener = opener
103 self.opener = opener
94 # a map to access file in various {location -> vfs}
104 # a map to access file in various {location -> vfs}
95 vfsmap = vfsmap.copy()
105 vfsmap = vfsmap.copy()
96 vfsmap[''] = opener # set default value
106 vfsmap[''] = opener # set default value
97 self._vfsmap = vfsmap
107 self._vfsmap = vfsmap
98 self.after = after
108 self.after = after
99 self.onclose = onclose
109 self.onclose = onclose
100 self.onabort = onabort
110 self.onabort = onabort
101 self.entries = []
111 self.entries = []
102 self.map = {}
112 self.map = {}
103 self.journal = journal
113 self.journal = journal
104 self._queue = []
114 self._queue = []
105 # a dict of arguments to be passed to hooks
115 # a dict of arguments to be passed to hooks
106 self.hookargs = {}
116 self.hookargs = {}
107 self.file = opener.open(self.journal, "w")
117 self.file = opener.open(self.journal, "w")
108
118
109 # a list of ('location', 'path', 'backuppath', cache) entries.
119 # a list of ('location', 'path', 'backuppath', cache) entries.
110 # - if 'backuppath' is empty, no file existed at backup time
120 # - if 'backuppath' is empty, no file existed at backup time
111 # - if 'path' is empty, this is a temporary transaction file
121 # - if 'path' is empty, this is a temporary transaction file
112 # - if 'location' is not empty, the path is outside main opener reach.
122 # - if 'location' is not empty, the path is outside main opener reach.
113 # use 'location' value as a key in a vfsmap to find the right 'vfs'
123 # use 'location' value as a key in a vfsmap to find the right 'vfs'
114 # (cache is currently unused)
124 # (cache is currently unused)
115 self._backupentries = []
125 self._backupentries = []
116 self._backupmap = {}
126 self._backupmap = {}
117 self._backupjournal = "%s.backupfiles" % journal
127 self._backupjournal = "%s.backupfiles" % journal
118 self._backupsfile = opener.open(self._backupjournal, 'w')
128 self._backupsfile = opener.open(self._backupjournal, 'w')
119 self._backupsfile.write('%d\n' % version)
129 self._backupsfile.write('%d\n' % version)
120
130
121 if createmode is not None:
131 if createmode is not None:
122 opener.chmod(self.journal, createmode & 0666)
132 opener.chmod(self.journal, createmode & 0666)
123 opener.chmod(self._backupjournal, createmode & 0666)
133 opener.chmod(self._backupjournal, createmode & 0666)
124
134
125 # hold file generations to be performed on commit
135 # hold file generations to be performed on commit
126 self._filegenerators = {}
136 self._filegenerators = {}
127 # hold callback to write pending data for hooks
137 # hold callback to write pending data for hooks
128 self._pendingcallback = {}
138 self._pendingcallback = {}
129 # True if any pending data has ever been written
139 # True if any pending data has ever been written
130 self._anypending = False
140 self._anypending = False
131 # holds callback to call when writing the transaction
141 # holds callback to call when writing the transaction
132 self._finalizecallback = {}
142 self._finalizecallback = {}
133 # hold callback for post transaction close
143 # hold callback for post transaction close
134 self._postclosecallback = {}
144 self._postclosecallback = {}
135
145
136 def __del__(self):
146 def __del__(self):
137 if self.journal:
147 if self.journal:
138 self._abort()
148 self._abort()
139
149
140 @active
150 @active
141 def startgroup(self):
151 def startgroup(self):
142 """delay registration of file entry
152 """delay registration of file entry
143
153
144 This is used by strip to delay vision of strip offset. The transaction
154 This is used by strip to delay vision of strip offset. The transaction
145 sees either none or all of the strip actions to be done."""
155 sees either none or all of the strip actions to be done."""
146 self._queue.append([])
156 self._queue.append([])
147
157
148 @active
158 @active
149 def endgroup(self):
159 def endgroup(self):
150 """apply delayed registration of file entry.
160 """apply delayed registration of file entry.
151
161
152 This is used by strip to delay vision of strip offset. The transaction
162 This is used by strip to delay vision of strip offset. The transaction
153 sees either none or all of the strip actions to be done."""
163 sees either none or all of the strip actions to be done."""
154 q = self._queue.pop()
164 q = self._queue.pop()
155 for f, o, data in q:
165 for f, o, data in q:
156 self._addentry(f, o, data)
166 self._addentry(f, o, data)
157
167
158 @active
168 @active
159 def add(self, file, offset, data=None):
169 def add(self, file, offset, data=None):
160 """record the state of an append-only file before update"""
170 """record the state of an append-only file before update"""
161 if file in self.map or file in self._backupmap:
171 if file in self.map or file in self._backupmap:
162 return
172 return
163 if self._queue:
173 if self._queue:
164 self._queue[-1].append((file, offset, data))
174 self._queue[-1].append((file, offset, data))
165 return
175 return
166
176
167 self._addentry(file, offset, data)
177 self._addentry(file, offset, data)
168
178
169 def _addentry(self, file, offset, data):
179 def _addentry(self, file, offset, data):
170 """add an append-only entry to memory and on-disk state"""
180 """add an append-only entry to memory and on-disk state"""
171 if file in self.map or file in self._backupmap:
181 if file in self.map or file in self._backupmap:
172 return
182 return
173 self.entries.append((file, offset, data))
183 self.entries.append((file, offset, data))
174 self.map[file] = len(self.entries) - 1
184 self.map[file] = len(self.entries) - 1
175 # add enough data to the journal to do the truncate
185 # add enough data to the journal to do the truncate
176 self.file.write("%s\0%d\n" % (file, offset))
186 self.file.write("%s\0%d\n" % (file, offset))
177 self.file.flush()
187 self.file.flush()
178
188
179 @active
189 @active
180 def addbackup(self, file, hardlink=True, vfs=None):
190 def addbackup(self, file, hardlink=True, vfs=None):
181 """Adds a backup of the file to the transaction
191 """Adds a backup of the file to the transaction
182
192
183 Calling addbackup() creates a hardlink backup of the specified file
193 Calling addbackup() creates a hardlink backup of the specified file
184 that is used to recover the file in the event of the transaction
194 that is used to recover the file in the event of the transaction
185 aborting.
195 aborting.
186
196
187 * `file`: the file path, relative to .hg/store
197 * `file`: the file path, relative to .hg/store
188 * `hardlink`: use a hardlink to quickly create the backup
198 * `hardlink`: use a hardlink to quickly create the backup
189 """
199 """
190 if self._queue:
200 if self._queue:
191 msg = 'cannot use transaction.addbackup inside "group"'
201 msg = 'cannot use transaction.addbackup inside "group"'
192 raise RuntimeError(msg)
202 raise RuntimeError(msg)
193
203
194 if file in self.map or file in self._backupmap:
204 if file in self.map or file in self._backupmap:
195 return
205 return
196 backupfile = "%s.backup.%s" % (self.journal, file)
206 backupfile = "%s.backup.%s" % (self.journal, file)
197 if vfs is None:
207 if vfs is None:
198 vfs = self.opener
208 vfs = self.opener
199 if vfs.exists(file):
209 if vfs.exists(file):
200 filepath = vfs.join(file)
210 filepath = vfs.join(file)
201 backuppath = self.opener.join(backupfile)
211 backuppath = self.opener.join(backupfile)
202 util.copyfiles(filepath, backuppath, hardlink=hardlink)
212 util.copyfiles(filepath, backuppath, hardlink=hardlink)
203 else:
213 else:
204 backupfile = ''
214 backupfile = ''
205
215
206 self._addbackupentry(('', file, backupfile, False))
216 self._addbackupentry(('', file, backupfile, False))
207
217
208 def _addbackupentry(self, entry):
218 def _addbackupentry(self, entry):
209 """register a new backup entry and write it to disk"""
219 """register a new backup entry and write it to disk"""
210 self._backupentries.append(entry)
220 self._backupentries.append(entry)
211 self._backupmap[file] = len(self._backupentries) - 1
221 self._backupmap[file] = len(self._backupentries) - 1
212 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
222 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
213 self._backupsfile.flush()
223 self._backupsfile.flush()
214
224
215 @active
225 @active
216 def registertmp(self, tmpfile):
226 def registertmp(self, tmpfile):
217 """register a temporary transaction file
227 """register a temporary transaction file
218
228
219 Such files will be deleted when the transaction exits (on both failure and
229 Such files will be deleted when the transaction exits (on both failure and
220 success).
230 success).
221 """
231 """
222 self._addbackupentry(('', '', tmpfile, False))
232 self._addbackupentry(('', '', tmpfile, False))
223
233
224 @active
234 @active
225 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
235 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
226 """add a function to generate some files at transaction commit
236 """add a function to generate some files at transaction commit
227
237
228 The `genfunc` argument is a function capable of generating proper
238 The `genfunc` argument is a function capable of generating proper
229 content of each entry in the `filename` tuple.
239 content of each entry in the `filename` tuple.
230
240
231 At transaction close time, `genfunc` will be called with one file
241 At transaction close time, `genfunc` will be called with one file
232 object argument per entries in `filenames`.
242 object argument per entries in `filenames`.
233
243
234 The transaction itself is responsible for the backup, creation and
244 The transaction itself is responsible for the backup, creation and
235 final write of such file.
245 final write of such file.
236
246
237 The `genid` argument is used to ensure the same set of file is only
247 The `genid` argument is used to ensure the same set of file is only
238 generated once. Call to `addfilegenerator` for a `genid` already
248 generated once. Call to `addfilegenerator` for a `genid` already
239 present will overwrite the old entry.
249 present will overwrite the old entry.
240
250
241 The `order` argument may be used to control the order in which multiple
251 The `order` argument may be used to control the order in which multiple
242 generator will be executed.
252 generator will be executed.
243 """
253 """
244 # For now, we are unable to do proper backup and restore of custom vfs
254 # For now, we are unable to do proper backup and restore of custom vfs
245 # but for bookmarks that are handled outside this mechanism.
255 # but for bookmarks that are handled outside this mechanism.
246 assert vfs is None or filenames == ('bookmarks',)
256 assert vfs is None or filenames == ('bookmarks',)
247 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
257 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
248
258
249 def _generatefiles(self):
259 def _generatefiles(self):
250 # write files registered for generation
260 # write files registered for generation
251 for entry in sorted(self._filegenerators.values()):
261 for entry in sorted(self._filegenerators.values()):
252 order, filenames, genfunc, vfs = entry
262 order, filenames, genfunc, vfs = entry
253 if vfs is None:
263 if vfs is None:
254 vfs = self.opener
264 vfs = self.opener
255 files = []
265 files = []
256 try:
266 try:
257 for name in filenames:
267 for name in filenames:
258 # Some files are already backed up when creating the
268 # Some files are already backed up when creating the
259 # localrepo. Until this is properly fixed we disable the
269 # localrepo. Until this is properly fixed we disable the
260 # backup for them.
270 # backup for them.
261 if name not in ('phaseroots', 'bookmarks'):
271 if name not in ('phaseroots', 'bookmarks'):
262 self.addbackup(name)
272 self.addbackup(name)
263 files.append(vfs(name, 'w', atomictemp=True))
273 files.append(vfs(name, 'w', atomictemp=True))
264 genfunc(*files)
274 genfunc(*files)
265 finally:
275 finally:
266 for f in files:
276 for f in files:
267 f.close()
277 f.close()
268
278
269 @active
279 @active
270 def find(self, file):
280 def find(self, file):
271 if file in self.map:
281 if file in self.map:
272 return self.entries[self.map[file]]
282 return self.entries[self.map[file]]
273 if file in self._backupmap:
283 if file in self._backupmap:
274 return self._backupentries[self._backupmap[file]]
284 return self._backupentries[self._backupmap[file]]
275 return None
285 return None
276
286
277 @active
287 @active
278 def replace(self, file, offset, data=None):
288 def replace(self, file, offset, data=None):
279 '''
289 '''
280 replace can only replace already committed entries
290 replace can only replace already committed entries
281 that are not pending in the queue
291 that are not pending in the queue
282 '''
292 '''
283
293
284 if file not in self.map:
294 if file not in self.map:
285 raise KeyError(file)
295 raise KeyError(file)
286 index = self.map[file]
296 index = self.map[file]
287 self.entries[index] = (file, offset, data)
297 self.entries[index] = (file, offset, data)
288 self.file.write("%s\0%d\n" % (file, offset))
298 self.file.write("%s\0%d\n" % (file, offset))
289 self.file.flush()
299 self.file.flush()
290
300
291 @active
301 @active
292 def nest(self):
302 def nest(self):
293 self.count += 1
303 self.count += 1
294 self.usages += 1
304 self.usages += 1
295 return self
305 return self
296
306
297 def release(self):
307 def release(self):
298 if self.count > 0:
308 if self.count > 0:
299 self.usages -= 1
309 self.usages -= 1
300 # if the transaction scopes are left without being closed, fail
310 # if the transaction scopes are left without being closed, fail
301 if self.count > 0 and self.usages == 0:
311 if self.count > 0 and self.usages == 0:
302 self._abort()
312 self._abort()
303
313
304 def running(self):
314 def running(self):
305 return self.count > 0
315 return self.count > 0
306
316
307 def addpending(self, category, callback):
317 def addpending(self, category, callback):
308 """add a callback to be called when the transaction is pending
318 """add a callback to be called when the transaction is pending
309
319
310 The transaction will be given as callback's first argument.
320 The transaction will be given as callback's first argument.
311
321
312 Category is a unique identifier to allow overwriting an old callback
322 Category is a unique identifier to allow overwriting an old callback
313 with a newer callback.
323 with a newer callback.
314 """
324 """
315 self._pendingcallback[category] = callback
325 self._pendingcallback[category] = callback
316
326
317 @active
327 @active
318 def writepending(self):
328 def writepending(self):
319 '''write pending file to temporary version
329 '''write pending file to temporary version
320
330
321 This is used to allow hooks to view a transaction before commit'''
331 This is used to allow hooks to view a transaction before commit'''
322 categories = sorted(self._pendingcallback)
332 categories = sorted(self._pendingcallback)
323 for cat in categories:
333 for cat in categories:
324 # remove callback since the data will have been flushed
334 # remove callback since the data will have been flushed
325 any = self._pendingcallback.pop(cat)(self)
335 any = self._pendingcallback.pop(cat)(self)
326 self._anypending = self._anypending or any
336 self._anypending = self._anypending or any
327 return self._anypending
337 return self._anypending
328
338
329 @active
339 @active
330 def addfinalize(self, category, callback):
340 def addfinalize(self, category, callback):
331 """add a callback to be called when the transaction is closed
341 """add a callback to be called when the transaction is closed
332
342
333 The transaction will be given as callback's first argument.
343 The transaction will be given as callback's first argument.
334
344
335 Category is a unique identifier to allow overwriting old callbacks with
345 Category is a unique identifier to allow overwriting old callbacks with
336 newer callbacks.
346 newer callbacks.
337 """
347 """
338 self._finalizecallback[category] = callback
348 self._finalizecallback[category] = callback
339
349
340 @active
350 @active
341 def addpostclose(self, category, callback):
351 def addpostclose(self, category, callback):
342 """add a callback to be called after the transaction is closed
352 """add a callback to be called after the transaction is closed
343
353
344 The transaction will be given as callback's first argument.
354 The transaction will be given as callback's first argument.
345
355
346 Category is a unique identifier to allow overwriting an old callback
356 Category is a unique identifier to allow overwriting an old callback
347 with a newer callback.
357 with a newer callback.
348 """
358 """
349 self._postclosecallback[category] = callback
359 self._postclosecallback[category] = callback
350
360
351 @active
361 @active
352 def close(self):
362 def close(self):
353 '''commit the transaction'''
363 '''commit the transaction'''
354 if self.count == 1:
364 if self.count == 1:
355 self._generatefiles()
365 self._generatefiles()
356 categories = sorted(self._finalizecallback)
366 categories = sorted(self._finalizecallback)
357 for cat in categories:
367 for cat in categories:
358 self._finalizecallback[cat](self)
368 self._finalizecallback[cat](self)
359 if self.onclose is not None:
369 if self.onclose is not None:
360 self.onclose()
370 self.onclose()
361
371
362 self.count -= 1
372 self.count -= 1
363 if self.count != 0:
373 if self.count != 0:
364 return
374 return
365 self.file.close()
375 self.file.close()
366 self._backupsfile.close()
376 self._backupsfile.close()
367 # cleanup temporary files
377 # cleanup temporary files
368 for l, f, b, _c in self._backupentries:
378 for l, f, b, c in self._backupentries:
379 if l not in self._vfsmap and c:
380 self.report("couldn't remove %s: unknown cache location %s\n"
381 % (b, l))
382 continue
369 vfs = self._vfsmap[l]
383 vfs = self._vfsmap[l]
370 if not f and b and vfs.exists(b):
384 if not f and b and vfs.exists(b):
371 vfs.unlink(b)
385 try:
386 vfs.unlink(b)
387 except (IOError, OSError, util.Abort), inst:
388 if not c:
389 raise
390 # Abort may be raised by a read-only opener
391 self.report("couldn't remove %s: %s\n"
392 % (vfs.join(b), inst))
372 self.entries = []
393 self.entries = []
373 if self.after:
394 if self.after:
374 self.after()
395 self.after()
375 if self.opener.isfile(self.journal):
396 if self.opener.isfile(self.journal):
376 self.opener.unlink(self.journal)
397 self.opener.unlink(self.journal)
377 if self.opener.isfile(self._backupjournal):
398 if self.opener.isfile(self._backupjournal):
378 self.opener.unlink(self._backupjournal)
399 self.opener.unlink(self._backupjournal)
379 for _l, _f, b, _c in self._backupentries:
400 for _l, _f, b, c in self._backupentries:
401 if l not in self._vfsmap and c:
402 self.report("couldn't remove %s: unknown cache location"
403 " %s\n" % (b, l))
404 continue
380 vfs = self._vfsmap[l]
405 vfs = self._vfsmap[l]
381 if b and vfs.exists(b):
406 if b and vfs.exists(b):
382 vfs.unlink(b)
407 try:
408 vfs.unlink(b)
409 except (IOError, OSError, util.Abort), inst:
410 if not c:
411 raise
412 # Abort may be raised by a read-only opener
413 self.report("couldn't remove %s: %s\n"
414 % (vfs.join(b), inst))
383 self._backupentries = []
415 self._backupentries = []
384 self.journal = None
416 self.journal = None
385 # run post close action
417 # run post close action
386 categories = sorted(self._postclosecallback)
418 categories = sorted(self._postclosecallback)
387 for cat in categories:
419 for cat in categories:
388 self._postclosecallback[cat](self)
420 self._postclosecallback[cat](self)
389
421
390 @active
422 @active
391 def abort(self):
423 def abort(self):
392 '''abort the transaction (generally called on error, or when the
424 '''abort the transaction (generally called on error, or when the
393 transaction is not explicitly committed before going out of
425 transaction is not explicitly committed before going out of
394 scope)'''
426 scope)'''
395 self._abort()
427 self._abort()
396
428
397 def _abort(self):
429 def _abort(self):
398 self.count = 0
430 self.count = 0
399 self.usages = 0
431 self.usages = 0
400 self.file.close()
432 self.file.close()
401 self._backupsfile.close()
433 self._backupsfile.close()
402
434
403 if self.onabort is not None:
435 if self.onabort is not None:
404 self.onabort()
436 self.onabort()
405
437
406 try:
438 try:
407 if not self.entries and not self._backupentries:
439 if not self.entries and not self._backupentries:
408 if self.journal:
440 if self.journal:
409 self.opener.unlink(self.journal)
441 self.opener.unlink(self.journal)
410 if self._backupjournal:
442 if self._backupjournal:
411 self.opener.unlink(self._backupjournal)
443 self.opener.unlink(self._backupjournal)
412 return
444 return
413
445
414 self.report(_("transaction abort!\n"))
446 self.report(_("transaction abort!\n"))
415
447
416 try:
448 try:
417 _playback(self.journal, self.report, self.opener, self._vfsmap,
449 _playback(self.journal, self.report, self.opener, self._vfsmap,
418 self.entries, self._backupentries, False)
450 self.entries, self._backupentries, False)
419 self.report(_("rollback completed\n"))
451 self.report(_("rollback completed\n"))
420 except Exception:
452 except Exception:
421 self.report(_("rollback failed - please run hg recover\n"))
453 self.report(_("rollback failed - please run hg recover\n"))
422 finally:
454 finally:
423 self.journal = None
455 self.journal = None
424
456
425
457
426 def rollback(opener, vfsmap, file, report):
458 def rollback(opener, vfsmap, file, report):
427 """Rolls back the transaction contained in the given file
459 """Rolls back the transaction contained in the given file
428
460
429 Reads the entries in the specified file, and the corresponding
461 Reads the entries in the specified file, and the corresponding
430 '*.backupfiles' file, to recover from an incomplete transaction.
462 '*.backupfiles' file, to recover from an incomplete transaction.
431
463
432 * `file`: a file containing a list of entries, specifying where
464 * `file`: a file containing a list of entries, specifying where
433 to truncate each file. The file should contain a list of
465 to truncate each file. The file should contain a list of
434 file\0offset pairs, delimited by newlines. The corresponding
466 file\0offset pairs, delimited by newlines. The corresponding
435 '*.backupfiles' file should contain a list of file\0backupfile
467 '*.backupfiles' file should contain a list of file\0backupfile
436 pairs, delimited by \0.
468 pairs, delimited by \0.
437 """
469 """
438 entries = []
470 entries = []
439 backupentries = []
471 backupentries = []
440
472
441 fp = opener.open(file)
473 fp = opener.open(file)
442 lines = fp.readlines()
474 lines = fp.readlines()
443 fp.close()
475 fp.close()
444 for l in lines:
476 for l in lines:
445 try:
477 try:
446 f, o = l.split('\0')
478 f, o = l.split('\0')
447 entries.append((f, int(o), None))
479 entries.append((f, int(o), None))
448 except ValueError:
480 except ValueError:
449 report(_("couldn't read journal entry %r!\n") % l)
481 report(_("couldn't read journal entry %r!\n") % l)
450
482
451 backupjournal = "%s.backupfiles" % file
483 backupjournal = "%s.backupfiles" % file
452 if opener.exists(backupjournal):
484 if opener.exists(backupjournal):
453 fp = opener.open(backupjournal)
485 fp = opener.open(backupjournal)
454 lines = fp.readlines()
486 lines = fp.readlines()
455 if lines:
487 if lines:
456 ver = lines[0][:-1]
488 ver = lines[0][:-1]
457 if ver == str(version):
489 if ver == str(version):
458 for line in lines[1:]:
490 for line in lines[1:]:
459 if line:
491 if line:
460 # Shave off the trailing newline
492 # Shave off the trailing newline
461 line = line[:-1]
493 line = line[:-1]
462 l, f, b, c = line.split('\0')
494 l, f, b, c = line.split('\0')
463 backupentries.append((l, f, b, bool(c)))
495 backupentries.append((l, f, b, bool(c)))
464 else:
496 else:
465 report(_("journal was created by a different version of "
497 report(_("journal was created by a different version of "
466 "Mercurial"))
498 "Mercurial"))
467
499
468 _playback(file, report, opener, vfsmap, entries, backupentries)
500 _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now