##// END OF EJS Templates
transaction: clear callback instances after usage...
Gregory Szorc -
r28960:14e683d6 default
parent child Browse files
Show More
@@ -1,593 +1,599
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 util,
21 util,
22 )
22 )
23
23
# Format version written at the top of the journal backup files (see
# transaction.__init__ and _writeundo).
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = set([
    'bookmarks',
    'dirstate'
])
33
33
class GenerationGroup(object):
    """Namespace of constants naming the file-generation phases.

    Used as the `group` argument of transaction._generatefiles to select
    which registered generators run relative to the finalizers.
    """
    ALL = 'all'
    PREFINALIZE = 'prefinalize'
    POSTFINALIZE = 'postfinalize'
38
38
def active(func):
    """Decorator: only run *func* while the transaction is still open.

    Raises error.Abort when the transaction has already been
    committed or aborted (i.e. its scope count dropped to zero).
    """
    def _active(self, *args, **kwds):
        if self.count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
46
46
47 def _playback(journal, report, opener, vfsmap, entries, backupentries,
47 def _playback(journal, report, opener, vfsmap, entries, backupentries,
48 unlink=True):
48 unlink=True):
49 for f, o, _ignore in entries:
49 for f, o, _ignore in entries:
50 if o or not unlink:
50 if o or not unlink:
51 try:
51 try:
52 fp = opener(f, 'a')
52 fp = opener(f, 'a')
53 fp.truncate(o)
53 fp.truncate(o)
54 fp.close()
54 fp.close()
55 except IOError:
55 except IOError:
56 report(_("failed to truncate %s\n") % f)
56 report(_("failed to truncate %s\n") % f)
57 raise
57 raise
58 else:
58 else:
59 try:
59 try:
60 opener.unlink(f)
60 opener.unlink(f)
61 except (IOError, OSError) as inst:
61 except (IOError, OSError) as inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64
64
65 backupfiles = []
65 backupfiles = []
66 for l, f, b, c in backupentries:
66 for l, f, b, c in backupentries:
67 if l not in vfsmap and c:
67 if l not in vfsmap and c:
68 report("couldn't handle %s: unknown cache location %s\n"
68 report("couldn't handle %s: unknown cache location %s\n"
69 % (b, l))
69 % (b, l))
70 vfs = vfsmap[l]
70 vfs = vfsmap[l]
71 try:
71 try:
72 if f and b:
72 if f and b:
73 filepath = vfs.join(f)
73 filepath = vfs.join(f)
74 backuppath = vfs.join(b)
74 backuppath = vfs.join(b)
75 try:
75 try:
76 util.copyfile(backuppath, filepath)
76 util.copyfile(backuppath, filepath)
77 backupfiles.append(b)
77 backupfiles.append(b)
78 except IOError:
78 except IOError:
79 report(_("failed to recover %s\n") % f)
79 report(_("failed to recover %s\n") % f)
80 else:
80 else:
81 target = f or b
81 target = f or b
82 try:
82 try:
83 vfs.unlink(target)
83 vfs.unlink(target)
84 except (IOError, OSError) as inst:
84 except (IOError, OSError) as inst:
85 if inst.errno != errno.ENOENT:
85 if inst.errno != errno.ENOENT:
86 raise
86 raise
87 except (IOError, OSError, error.Abort) as inst:
87 except (IOError, OSError, error.Abort) as inst:
88 if not c:
88 if not c:
89 raise
89 raise
90
90
91 backuppath = "%s.backupfiles" % journal
91 backuppath = "%s.backupfiles" % journal
92 if opener.exists(backuppath):
92 if opener.exists(backuppath):
93 opener.unlink(backuppath)
93 opener.unlink(backuppath)
94 opener.unlink(journal)
94 opener.unlink(journal)
95 try:
95 try:
96 for f in backupfiles:
96 for f in backupfiles:
97 if opener.exists(f):
97 if opener.exists(f):
98 opener.unlink(f)
98 opener.unlink(f)
99 except (IOError, OSError, error.Abort) as inst:
99 except (IOError, OSError, error.Abort) as inst:
100 # only pure backup file remains, it is sage to ignore any error
100 # only pure backup file remains, it is sage to ignore any error
101 pass
101 pass
102
102
103 class transaction(object):
103 class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit user-facing messages
        * `opener`: vfs to the store content (also the default vfs)
        * `vfsmap`: {location -> vfs} for files outside the store
        * `journalname`: path of the journal file to create
        * `undoname`: base name for undo files written at close time
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `validator`: called with the transaction before closing it
        * `releasefn`: called after releasing (with transaction and result)
        """
        # `count` tracks open transaction scopes (decremented by close());
        # `usages` tracks handed-out references (decremented by release()).
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        # journaled (file, offset, data) records and {file -> index} into them
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        # stack of buffered entry groups (see startgroup/endgroup)
        self._queue = []
        # A callback to validate transaction content before closing it.
        # Should raise an exception if anything is wrong.
        # Target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
173
173
    def __del__(self):
        # Last-resort rollback: close() sets self.journal to None on success,
        # so a non-None journal at finalization means the transaction was
        # never committed and must be aborted.
        if self.journal:
            self._abort()
177
177
178 @active
178 @active
179 def startgroup(self):
179 def startgroup(self):
180 """delay registration of file entry
180 """delay registration of file entry
181
181
182 This is used by strip to delay vision of strip offset. The transaction
182 This is used by strip to delay vision of strip offset. The transaction
183 sees either none or all of the strip actions to be done."""
183 sees either none or all of the strip actions to be done."""
184 self._queue.append([])
184 self._queue.append([])
185
185
186 @active
186 @active
187 def endgroup(self):
187 def endgroup(self):
188 """apply delayed registration of file entry.
188 """apply delayed registration of file entry.
189
189
190 This is used by strip to delay vision of strip offset. The transaction
190 This is used by strip to delay vision of strip offset. The transaction
191 sees either none or all of the strip actions to be done."""
191 sees either none or all of the strip actions to be done."""
192 q = self._queue.pop()
192 q = self._queue.pop()
193 for f, o, data in q:
193 for f, o, data in q:
194 self._addentry(f, o, data)
194 self._addentry(f, o, data)
195
195
196 @active
196 @active
197 def add(self, file, offset, data=None):
197 def add(self, file, offset, data=None):
198 """record the state of an append-only file before update"""
198 """record the state of an append-only file before update"""
199 if file in self.map or file in self._backupmap:
199 if file in self.map or file in self._backupmap:
200 return
200 return
201 if self._queue:
201 if self._queue:
202 self._queue[-1].append((file, offset, data))
202 self._queue[-1].append((file, offset, data))
203 return
203 return
204
204
205 self._addentry(file, offset, data)
205 self._addentry(file, offset, data)
206
206
207 def _addentry(self, file, offset, data):
207 def _addentry(self, file, offset, data):
208 """add a append-only entry to memory and on-disk state"""
208 """add a append-only entry to memory and on-disk state"""
209 if file in self.map or file in self._backupmap:
209 if file in self.map or file in self._backupmap:
210 return
210 return
211 self.entries.append((file, offset, data))
211 self.entries.append((file, offset, data))
212 self.map[file] = len(self.entries) - 1
212 self.map[file] = len(self.entries) - 1
213 # add enough data to the journal to do the truncate
213 # add enough data to the journal to do the truncate
214 self.file.write("%s\0%d\n" % (file, offset))
214 self.file.write("%s\0%d\n" % (file, offset))
215 self.file.flush()
215 self.file.flush()
216
216
217 @active
217 @active
218 def addbackup(self, file, hardlink=True, location=''):
218 def addbackup(self, file, hardlink=True, location=''):
219 """Adds a backup of the file to the transaction
219 """Adds a backup of the file to the transaction
220
220
221 Calling addbackup() creates a hardlink backup of the specified file
221 Calling addbackup() creates a hardlink backup of the specified file
222 that is used to recover the file in the event of the transaction
222 that is used to recover the file in the event of the transaction
223 aborting.
223 aborting.
224
224
225 * `file`: the file path, relative to .hg/store
225 * `file`: the file path, relative to .hg/store
226 * `hardlink`: use a hardlink to quickly create the backup
226 * `hardlink`: use a hardlink to quickly create the backup
227 """
227 """
228 if self._queue:
228 if self._queue:
229 msg = 'cannot use transaction.addbackup inside "group"'
229 msg = 'cannot use transaction.addbackup inside "group"'
230 raise RuntimeError(msg)
230 raise RuntimeError(msg)
231
231
232 if file in self.map or file in self._backupmap:
232 if file in self.map or file in self._backupmap:
233 return
233 return
234 vfs = self._vfsmap[location]
234 vfs = self._vfsmap[location]
235 dirname, filename = vfs.split(file)
235 dirname, filename = vfs.split(file)
236 backupfilename = "%s.backup.%s" % (self.journal, filename)
236 backupfilename = "%s.backup.%s" % (self.journal, filename)
237 backupfile = vfs.reljoin(dirname, backupfilename)
237 backupfile = vfs.reljoin(dirname, backupfilename)
238 if vfs.exists(file):
238 if vfs.exists(file):
239 filepath = vfs.join(file)
239 filepath = vfs.join(file)
240 backuppath = vfs.join(backupfile)
240 backuppath = vfs.join(backupfile)
241 util.copyfile(filepath, backuppath, hardlink=hardlink)
241 util.copyfile(filepath, backuppath, hardlink=hardlink)
242 else:
242 else:
243 backupfile = ''
243 backupfile = ''
244
244
245 self._addbackupentry((location, file, backupfile, False))
245 self._addbackupentry((location, file, backupfile, False))
246
246
247 def _addbackupentry(self, entry):
247 def _addbackupentry(self, entry):
248 """register a new backup entry and write it to disk"""
248 """register a new backup entry and write it to disk"""
249 self._backupentries.append(entry)
249 self._backupentries.append(entry)
250 self._backupmap[entry[1]] = len(self._backupentries) - 1
250 self._backupmap[entry[1]] = len(self._backupentries) - 1
251 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
251 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
252 self._backupsfile.flush()
252 self._backupsfile.flush()
253
253
254 @active
254 @active
255 def registertmp(self, tmpfile, location=''):
255 def registertmp(self, tmpfile, location=''):
256 """register a temporary transaction file
256 """register a temporary transaction file
257
257
258 Such files will be deleted when the transaction exits (on both
258 Such files will be deleted when the transaction exits (on both
259 failure and success).
259 failure and success).
260 """
260 """
261 self._addbackupentry((location, '', tmpfile, False))
261 self._addbackupentry((location, '', tmpfile, False))
262
262
    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)
291
291
292 def _generatefiles(self, suffix='', group=GenerationGroup.ALL):
292 def _generatefiles(self, suffix='', group=GenerationGroup.ALL):
293 # write files registered for generation
293 # write files registered for generation
294 any = False
294 any = False
295 for id, entry in sorted(self._filegenerators.iteritems()):
295 for id, entry in sorted(self._filegenerators.iteritems()):
296 any = True
296 any = True
297 order, filenames, genfunc, location = entry
297 order, filenames, genfunc, location = entry
298
298
299 # for generation at closing, check if it's before or after finalize
299 # for generation at closing, check if it's before or after finalize
300 postfinalize = group == GenerationGroup.POSTFINALIZE
300 postfinalize = group == GenerationGroup.POSTFINALIZE
301 if (group != GenerationGroup.ALL and
301 if (group != GenerationGroup.ALL and
302 (id in postfinalizegenerators) != (postfinalize)):
302 (id in postfinalizegenerators) != (postfinalize)):
303 continue
303 continue
304
304
305 vfs = self._vfsmap[location]
305 vfs = self._vfsmap[location]
306 files = []
306 files = []
307 try:
307 try:
308 for name in filenames:
308 for name in filenames:
309 name += suffix
309 name += suffix
310 if suffix:
310 if suffix:
311 self.registertmp(name, location=location)
311 self.registertmp(name, location=location)
312 else:
312 else:
313 self.addbackup(name, location=location)
313 self.addbackup(name, location=location)
314 files.append(vfs(name, 'w', atomictemp=True))
314 files.append(vfs(name, 'w', atomictemp=True))
315 genfunc(*files)
315 genfunc(*files)
316 finally:
316 finally:
317 for f in files:
317 for f in files:
318 f.close()
318 f.close()
319 return any
319 return any
320
320
321 @active
321 @active
322 def find(self, file):
322 def find(self, file):
323 if file in self.map:
323 if file in self.map:
324 return self.entries[self.map[file]]
324 return self.entries[self.map[file]]
325 if file in self._backupmap:
325 if file in self._backupmap:
326 return self._backupentries[self._backupmap[file]]
326 return self._backupentries[self._backupmap[file]]
327 return None
327 return None
328
328
329 @active
329 @active
330 def replace(self, file, offset, data=None):
330 def replace(self, file, offset, data=None):
331 '''
331 '''
332 replace can only replace already committed entries
332 replace can only replace already committed entries
333 that are not pending in the queue
333 that are not pending in the queue
334 '''
334 '''
335
335
336 if file not in self.map:
336 if file not in self.map:
337 raise KeyError(file)
337 raise KeyError(file)
338 index = self.map[file]
338 index = self.map[file]
339 self.entries[index] = (file, offset, data)
339 self.entries[index] = (file, offset, data)
340 self.file.write("%s\0%d\n" % (file, offset))
340 self.file.write("%s\0%d\n" % (file, offset))
341 self.file.flush()
341 self.file.flush()
342
342
343 @active
343 @active
344 def nest(self):
344 def nest(self):
345 self.count += 1
345 self.count += 1
346 self.usages += 1
346 self.usages += 1
347 return self
347 return self
348
348
    def release(self):
        # Drop one usage. No @active guard: calling release() after the
        # transaction committed/aborted (count == 0) is a harmless no-op.
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()
355
355
    def __enter__(self):
        # context manager protocol: the transaction itself is the managed
        # object; cleanup happens in __exit__ via close()/release()
        return self
358
358
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit only on a clean exit; always release(). On an exception the
        # transaction is left unclosed, so release() can take the abort path
        # once the usage count reaches zero.
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
365
365
    def running(self):
        # True while at least one transaction scope is still open
        return self.count > 0
368
368
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # callbacks are consumed (popped) by writepending()
        self._pendingcallback[category] = callback
378
378
379 @active
379 @active
380 def writepending(self):
380 def writepending(self):
381 '''write pending file to temporary version
381 '''write pending file to temporary version
382
382
383 This is used to allow hooks to view a transaction before commit'''
383 This is used to allow hooks to view a transaction before commit'''
384 categories = sorted(self._pendingcallback)
384 categories = sorted(self._pendingcallback)
385 for cat in categories:
385 for cat in categories:
386 # remove callback since the data will have been flushed
386 # remove callback since the data will have been flushed
387 any = self._pendingcallback.pop(cat)(self)
387 any = self._pendingcallback.pop(cat)(self)
388 self._anypending = self._anypending or any
388 self._anypending = self._anypending or any
389 self._anypending |= self._generatefiles(suffix='.pending')
389 self._anypending |= self._generatefiles(suffix='.pending')
390 return self._anypending
390 return self._anypending
391
391
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        # finalizers run in close(), between the PREFINALIZE and POSTFINALIZE
        # file-generation phases, in sorted category order
        self._finalizecallback[category] = callback
402
402
    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # run at the very end of close(), after releasefn, in sorted order
        self._postclosecallback[category] = callback
413
413
    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
424
424
    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            self._generatefiles(group=GenerationGroup.PREFINALIZE)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GenerationGroup.POSTFINALIZE)

        self.count -= 1
        if self.count != 0:
            # nested scopes remain open; the outermost close() finishes up
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            # entries with an empty 'path' are temporary transaction files
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        # record undo information before the journal files disappear
        self._writeundo()
        if self.after:
            self.after()
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        # second pass: remove the remaining backup files themselves
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        # journal = None marks the transaction as successfully committed
        # (checked by __del__)
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
489
493
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        # delegates to _abort(), which __del__ also invokes as a last resort
        self._abort()
496
500
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed at backup time: record an empty path
                u = ''
            else:
                if l not in self._vfsmap and c:
                    # NOTE(review): message says "remove" but this loop copies
                    # backups -- text likely copy/pasted from close(); confirm
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                # derive the undo-file name from the journal-backup name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
521
525
522
526
    def _abort(self):
        """Roll back the transaction and release all associated resources.

        Marks the transaction inactive, removes the journal files when
        nothing was written, and otherwise replays the journal/backup
        entries to restore the pre-transaction state. ``releasefn`` is
        always notified of failure, even if the rollback itself fails.
        """
        # mark the transaction as no longer active/usable
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just drop the (empty) journal files
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # run registered abort callbacks before replaying the journal
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                # rollback itself failed; the on-disk journal is kept so the
                # user can run 'hg recover' later
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction
550
556
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    # read the truncation journal: one "file\0offset" entry per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the file handle (was previously leaked)
        fp.close()
        if lines:
            # first line is the on-disk format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # The cache flag is serialized with %d, so it is
                        # the string '0' or '1'. bool(c) was always True
                        # (bool('0') is True); parse the integer instead.
                        backupentries.append((l, f, b, bool(int(c))))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now