##// END OF EJS Templates
transaction: delete callbacks after use...
Gregory Szorc -
r32558:aa91085c default
parent child Browse files
Show More
@@ -1,603 +1,607 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 util,
21 util,
22 )
22 )
23
23
# on-disk format version of the journal/backup files written by this module
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

# group names used to select which registered file generators
# _generatefiles() should run
gengroupall='all'
gengroupprefinalize='prefinalize'
gengrouppostfinalize='postfinalize'
37
37
def active(func):
    """Decorator: refuse to run *func* once the transaction has ended.

    A transaction whose ``count`` has dropped to zero has already been
    committed or aborted; invoking a wrapped method then raises Abort.
    """
    def wrapper(self, *args, **kwargs):
        if not self.count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwargs)
    return wrapper
45
45
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
47 unlink=True):
47 unlink=True):
48 for f, o, _ignore in entries:
48 for f, o, _ignore in entries:
49 if o or not unlink:
49 if o or not unlink:
50 try:
50 try:
51 fp = opener(f, 'a', checkambig=True)
51 fp = opener(f, 'a', checkambig=True)
52 fp.truncate(o)
52 fp.truncate(o)
53 fp.close()
53 fp.close()
54 except IOError:
54 except IOError:
55 report(_("failed to truncate %s\n") % f)
55 report(_("failed to truncate %s\n") % f)
56 raise
56 raise
57 else:
57 else:
58 try:
58 try:
59 opener.unlink(f)
59 opener.unlink(f)
60 except (IOError, OSError) as inst:
60 except (IOError, OSError) as inst:
61 if inst.errno != errno.ENOENT:
61 if inst.errno != errno.ENOENT:
62 raise
62 raise
63
63
64 backupfiles = []
64 backupfiles = []
65 for l, f, b, c in backupentries:
65 for l, f, b, c in backupentries:
66 if l not in vfsmap and c:
66 if l not in vfsmap and c:
67 report("couldn't handle %s: unknown cache location %s\n"
67 report("couldn't handle %s: unknown cache location %s\n"
68 % (b, l))
68 % (b, l))
69 vfs = vfsmap[l]
69 vfs = vfsmap[l]
70 try:
70 try:
71 if f and b:
71 if f and b:
72 filepath = vfs.join(f)
72 filepath = vfs.join(f)
73 backuppath = vfs.join(b)
73 backuppath = vfs.join(b)
74 try:
74 try:
75 util.copyfile(backuppath, filepath, checkambig=True)
75 util.copyfile(backuppath, filepath, checkambig=True)
76 backupfiles.append(b)
76 backupfiles.append(b)
77 except IOError:
77 except IOError:
78 report(_("failed to recover %s\n") % f)
78 report(_("failed to recover %s\n") % f)
79 else:
79 else:
80 target = f or b
80 target = f or b
81 try:
81 try:
82 vfs.unlink(target)
82 vfs.unlink(target)
83 except (IOError, OSError) as inst:
83 except (IOError, OSError) as inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86 except (IOError, OSError, error.Abort) as inst:
86 except (IOError, OSError, error.Abort) as inst:
87 if not c:
87 if not c:
88 raise
88 raise
89
89
90 backuppath = "%s.backupfiles" % journal
90 backuppath = "%s.backupfiles" % journal
91 if opener.exists(backuppath):
91 if opener.exists(backuppath):
92 opener.unlink(backuppath)
92 opener.unlink(backuppath)
93 opener.unlink(journal)
93 opener.unlink(journal)
94 try:
94 try:
95 for f in backupfiles:
95 for f in backupfiles:
96 if opener.exists(f):
96 if opener.exists(f):
97 opener.unlink(f)
97 opener.unlink(f)
98 except (IOError, OSError, error.Abort) as inst:
98 except (IOError, OSError, error.Abort) as inst:
99 # only pure backup file remains, it is sage to ignore any error
99 # only pure backup file remains, it is sage to ignore any error
100 pass
100 pass
101
101
102 class transaction(object):
102 class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit warning/error messages
        * `opener`: vfs rooted at the store; used for the journal files
        * `vfsmap`: {location: vfs} map for files outside the main opener
        * `journalname`: name of the journal file to create
        * `undoname`: optional name used by _writeundo() for undo data
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `validator`: called with the transaction before close; should
          raise to veto it (target user is repository hooks)
        * `releasefn`: called after releasing (with transaction and result)
        """
        self.count = 1   # nesting depth; drops to 0 on commit/abort
        self.usages = 1  # number of scopes currently holding the transaction
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []       # (file, offset, data) journalled truncations
        self.map = {}           # file -> index into self.entries
        self.journal = journalname
        self.undoname = undoname
        self._queue = []        # stack of buffered entries (see startgroup)
        # A callback to validate transaction content before closing it.
        # It should raise an exception if anything is wrong.
        # Target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
176
176
177 def __del__(self):
177 def __del__(self):
178 if self.journal:
178 if self.journal:
179 self._abort()
179 self._abort()
180
180
181 @active
181 @active
182 def startgroup(self):
182 def startgroup(self):
183 """delay registration of file entry
183 """delay registration of file entry
184
184
185 This is used by strip to delay vision of strip offset. The transaction
185 This is used by strip to delay vision of strip offset. The transaction
186 sees either none or all of the strip actions to be done."""
186 sees either none or all of the strip actions to be done."""
187 self._queue.append([])
187 self._queue.append([])
188
188
189 @active
189 @active
190 def endgroup(self):
190 def endgroup(self):
191 """apply delayed registration of file entry.
191 """apply delayed registration of file entry.
192
192
193 This is used by strip to delay vision of strip offset. The transaction
193 This is used by strip to delay vision of strip offset. The transaction
194 sees either none or all of the strip actions to be done."""
194 sees either none or all of the strip actions to be done."""
195 q = self._queue.pop()
195 q = self._queue.pop()
196 for f, o, data in q:
196 for f, o, data in q:
197 self._addentry(f, o, data)
197 self._addentry(f, o, data)
198
198
199 @active
199 @active
200 def add(self, file, offset, data=None):
200 def add(self, file, offset, data=None):
201 """record the state of an append-only file before update"""
201 """record the state of an append-only file before update"""
202 if file in self.map or file in self._backupmap:
202 if file in self.map or file in self._backupmap:
203 return
203 return
204 if self._queue:
204 if self._queue:
205 self._queue[-1].append((file, offset, data))
205 self._queue[-1].append((file, offset, data))
206 return
206 return
207
207
208 self._addentry(file, offset, data)
208 self._addentry(file, offset, data)
209
209
210 def _addentry(self, file, offset, data):
210 def _addentry(self, file, offset, data):
211 """add a append-only entry to memory and on-disk state"""
211 """add a append-only entry to memory and on-disk state"""
212 if file in self.map or file in self._backupmap:
212 if file in self.map or file in self._backupmap:
213 return
213 return
214 self.entries.append((file, offset, data))
214 self.entries.append((file, offset, data))
215 self.map[file] = len(self.entries) - 1
215 self.map[file] = len(self.entries) - 1
216 # add enough data to the journal to do the truncate
216 # add enough data to the journal to do the truncate
217 self.file.write("%s\0%d\n" % (file, offset))
217 self.file.write("%s\0%d\n" % (file, offset))
218 self.file.flush()
218 self.file.flush()
219
219
220 @active
220 @active
221 def addbackup(self, file, hardlink=True, location=''):
221 def addbackup(self, file, hardlink=True, location=''):
222 """Adds a backup of the file to the transaction
222 """Adds a backup of the file to the transaction
223
223
224 Calling addbackup() creates a hardlink backup of the specified file
224 Calling addbackup() creates a hardlink backup of the specified file
225 that is used to recover the file in the event of the transaction
225 that is used to recover the file in the event of the transaction
226 aborting.
226 aborting.
227
227
228 * `file`: the file path, relative to .hg/store
228 * `file`: the file path, relative to .hg/store
229 * `hardlink`: use a hardlink to quickly create the backup
229 * `hardlink`: use a hardlink to quickly create the backup
230 """
230 """
231 if self._queue:
231 if self._queue:
232 msg = 'cannot use transaction.addbackup inside "group"'
232 msg = 'cannot use transaction.addbackup inside "group"'
233 raise error.ProgrammingError(msg)
233 raise error.ProgrammingError(msg)
234
234
235 if file in self.map or file in self._backupmap:
235 if file in self.map or file in self._backupmap:
236 return
236 return
237 vfs = self._vfsmap[location]
237 vfs = self._vfsmap[location]
238 dirname, filename = vfs.split(file)
238 dirname, filename = vfs.split(file)
239 backupfilename = "%s.backup.%s" % (self.journal, filename)
239 backupfilename = "%s.backup.%s" % (self.journal, filename)
240 backupfile = vfs.reljoin(dirname, backupfilename)
240 backupfile = vfs.reljoin(dirname, backupfilename)
241 if vfs.exists(file):
241 if vfs.exists(file):
242 filepath = vfs.join(file)
242 filepath = vfs.join(file)
243 backuppath = vfs.join(backupfile)
243 backuppath = vfs.join(backupfile)
244 util.copyfile(filepath, backuppath, hardlink=hardlink)
244 util.copyfile(filepath, backuppath, hardlink=hardlink)
245 else:
245 else:
246 backupfile = ''
246 backupfile = ''
247
247
248 self._addbackupentry((location, file, backupfile, False))
248 self._addbackupentry((location, file, backupfile, False))
249
249
250 def _addbackupentry(self, entry):
250 def _addbackupentry(self, entry):
251 """register a new backup entry and write it to disk"""
251 """register a new backup entry and write it to disk"""
252 self._backupentries.append(entry)
252 self._backupentries.append(entry)
253 self._backupmap[entry[1]] = len(self._backupentries) - 1
253 self._backupmap[entry[1]] = len(self._backupentries) - 1
254 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
254 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
255 self._backupsfile.flush()
255 self._backupsfile.flush()
256
256
257 @active
257 @active
258 def registertmp(self, tmpfile, location=''):
258 def registertmp(self, tmpfile, location=''):
259 """register a temporary transaction file
259 """register a temporary transaction file
260
260
261 Such files will be deleted when the transaction exits (on both
261 Such files will be deleted when the transaction exits (on both
262 failure and success).
262 failure and success).
263 """
263 """
264 self._addbackupentry((location, '', tmpfile, False))
264 self._addbackupentry((location, '', tmpfile, False))
265
265
266 @active
266 @active
267 def addfilegenerator(self, genid, filenames, genfunc, order=0,
267 def addfilegenerator(self, genid, filenames, genfunc, order=0,
268 location=''):
268 location=''):
269 """add a function to generates some files at transaction commit
269 """add a function to generates some files at transaction commit
270
270
271 The `genfunc` argument is a function capable of generating proper
271 The `genfunc` argument is a function capable of generating proper
272 content of each entry in the `filename` tuple.
272 content of each entry in the `filename` tuple.
273
273
274 At transaction close time, `genfunc` will be called with one file
274 At transaction close time, `genfunc` will be called with one file
275 object argument per entries in `filenames`.
275 object argument per entries in `filenames`.
276
276
277 The transaction itself is responsible for the backup, creation and
277 The transaction itself is responsible for the backup, creation and
278 final write of such file.
278 final write of such file.
279
279
280 The `genid` argument is used to ensure the same set of file is only
280 The `genid` argument is used to ensure the same set of file is only
281 generated once. Call to `addfilegenerator` for a `genid` already
281 generated once. Call to `addfilegenerator` for a `genid` already
282 present will overwrite the old entry.
282 present will overwrite the old entry.
283
283
284 The `order` argument may be used to control the order in which multiple
284 The `order` argument may be used to control the order in which multiple
285 generator will be executed.
285 generator will be executed.
286
286
287 The `location` arguments may be used to indicate the files are located
287 The `location` arguments may be used to indicate the files are located
288 outside of the the standard directory for transaction. It should match
288 outside of the the standard directory for transaction. It should match
289 one of the key of the `transaction.vfsmap` dictionary.
289 one of the key of the `transaction.vfsmap` dictionary.
290 """
290 """
291 # For now, we are unable to do proper backup and restore of custom vfs
291 # For now, we are unable to do proper backup and restore of custom vfs
292 # but for bookmarks that are handled outside this mechanism.
292 # but for bookmarks that are handled outside this mechanism.
293 self._filegenerators[genid] = (order, filenames, genfunc, location)
293 self._filegenerators[genid] = (order, filenames, genfunc, location)
294
294
295 def _generatefiles(self, suffix='', group=gengroupall):
295 def _generatefiles(self, suffix='', group=gengroupall):
296 # write files registered for generation
296 # write files registered for generation
297 any = False
297 any = False
298 for id, entry in sorted(self._filegenerators.iteritems()):
298 for id, entry in sorted(self._filegenerators.iteritems()):
299 any = True
299 any = True
300 order, filenames, genfunc, location = entry
300 order, filenames, genfunc, location = entry
301
301
302 # for generation at closing, check if it's before or after finalize
302 # for generation at closing, check if it's before or after finalize
303 postfinalize = group == gengrouppostfinalize
303 postfinalize = group == gengrouppostfinalize
304 if (group != gengroupall and
304 if (group != gengroupall and
305 (id in postfinalizegenerators) != (postfinalize)):
305 (id in postfinalizegenerators) != (postfinalize)):
306 continue
306 continue
307
307
308 vfs = self._vfsmap[location]
308 vfs = self._vfsmap[location]
309 files = []
309 files = []
310 try:
310 try:
311 for name in filenames:
311 for name in filenames:
312 name += suffix
312 name += suffix
313 if suffix:
313 if suffix:
314 self.registertmp(name, location=location)
314 self.registertmp(name, location=location)
315 else:
315 else:
316 self.addbackup(name, location=location)
316 self.addbackup(name, location=location)
317 files.append(vfs(name, 'w', atomictemp=True,
317 files.append(vfs(name, 'w', atomictemp=True,
318 checkambig=not suffix))
318 checkambig=not suffix))
319 genfunc(*files)
319 genfunc(*files)
320 finally:
320 finally:
321 for f in files:
321 for f in files:
322 f.close()
322 f.close()
323 return any
323 return any
324
324
325 @active
325 @active
326 def find(self, file):
326 def find(self, file):
327 if file in self.map:
327 if file in self.map:
328 return self.entries[self.map[file]]
328 return self.entries[self.map[file]]
329 if file in self._backupmap:
329 if file in self._backupmap:
330 return self._backupentries[self._backupmap[file]]
330 return self._backupentries[self._backupmap[file]]
331 return None
331 return None
332
332
333 @active
333 @active
334 def replace(self, file, offset, data=None):
334 def replace(self, file, offset, data=None):
335 '''
335 '''
336 replace can only replace already committed entries
336 replace can only replace already committed entries
337 that are not pending in the queue
337 that are not pending in the queue
338 '''
338 '''
339
339
340 if file not in self.map:
340 if file not in self.map:
341 raise KeyError(file)
341 raise KeyError(file)
342 index = self.map[file]
342 index = self.map[file]
343 self.entries[index] = (file, offset, data)
343 self.entries[index] = (file, offset, data)
344 self.file.write("%s\0%d\n" % (file, offset))
344 self.file.write("%s\0%d\n" % (file, offset))
345 self.file.flush()
345 self.file.flush()
346
346
347 @active
347 @active
348 def nest(self):
348 def nest(self):
349 self.count += 1
349 self.count += 1
350 self.usages += 1
350 self.usages += 1
351 return self
351 return self
352
352
353 def release(self):
353 def release(self):
354 if self.count > 0:
354 if self.count > 0:
355 self.usages -= 1
355 self.usages -= 1
356 # if the transaction scopes are left without being closed, fail
356 # if the transaction scopes are left without being closed, fail
357 if self.count > 0 and self.usages == 0:
357 if self.count > 0 and self.usages == 0:
358 self._abort()
358 self._abort()
359
359
    def __enter__(self):
        # context-manager entry: the transaction itself is the target
        return self
362
362
    def __exit__(self, exc_type, exc_val, exc_tb):
        # close() only on a clean exit; release() always runs -- even if
        # close() raised -- so the usage bookkeeping stays balanced
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
369
369
    def running(self):
        # the transaction is live until count drops to zero (commit/abort)
        return self.count > 0
372
372
373 def addpending(self, category, callback):
373 def addpending(self, category, callback):
374 """add a callback to be called when the transaction is pending
374 """add a callback to be called when the transaction is pending
375
375
376 The transaction will be given as callback's first argument.
376 The transaction will be given as callback's first argument.
377
377
378 Category is a unique identifier to allow overwriting an old callback
378 Category is a unique identifier to allow overwriting an old callback
379 with a newer callback.
379 with a newer callback.
380 """
380 """
381 self._pendingcallback[category] = callback
381 self._pendingcallback[category] = callback
382
382
383 @active
383 @active
384 def writepending(self):
384 def writepending(self):
385 '''write pending file to temporary version
385 '''write pending file to temporary version
386
386
387 This is used to allow hooks to view a transaction before commit'''
387 This is used to allow hooks to view a transaction before commit'''
388 categories = sorted(self._pendingcallback)
388 categories = sorted(self._pendingcallback)
389 for cat in categories:
389 for cat in categories:
390 # remove callback since the data will have been flushed
390 # remove callback since the data will have been flushed
391 any = self._pendingcallback.pop(cat)(self)
391 any = self._pendingcallback.pop(cat)(self)
392 self._anypending = self._anypending or any
392 self._anypending = self._anypending or any
393 self._anypending |= self._generatefiles(suffix='.pending')
393 self._anypending |= self._generatefiles(suffix='.pending')
394 return self._anypending
394 return self._anypending
395
395
396 @active
396 @active
397 def addfinalize(self, category, callback):
397 def addfinalize(self, category, callback):
398 """add a callback to be called when the transaction is closed
398 """add a callback to be called when the transaction is closed
399
399
400 The transaction will be given as callback's first argument.
400 The transaction will be given as callback's first argument.
401
401
402 Category is a unique identifier to allow overwriting old callbacks with
402 Category is a unique identifier to allow overwriting old callbacks with
403 newer callbacks.
403 newer callbacks.
404 """
404 """
405 self._finalizecallback[category] = callback
405 self._finalizecallback[category] = callback
406
406
407 @active
407 @active
408 def addpostclose(self, category, callback):
408 def addpostclose(self, category, callback):
409 """add a callback to be called after the transaction is closed
409 """add a callback to be called after the transaction is closed
410
410
411 The transaction will be given as callback's first argument.
411 The transaction will be given as callback's first argument.
412
412
413 Category is a unique identifier to allow overwriting an old callback
413 Category is a unique identifier to allow overwriting an old callback
414 with a newer callback.
414 with a newer callback.
415 """
415 """
416 self._postclosecallback[category] = callback
416 self._postclosecallback[category] = callback
417
417
418 @active
418 @active
419 def addabort(self, category, callback):
419 def addabort(self, category, callback):
420 """add a callback to be called when the transaction is aborted.
420 """add a callback to be called when the transaction is aborted.
421
421
422 The transaction will be given as the first argument to the callback.
422 The transaction will be given as the first argument to the callback.
423
423
424 Category is a unique identifier to allow overwriting an old callback
424 Category is a unique identifier to allow overwriting an old callback
425 with a newer callback.
425 with a newer callback.
426 """
426 """
427 self._abortcallback[category] = callback
427 self._abortcallback[category] = callback
428
428
    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            # outermost scope: validate, run prefinalize generators,
            # finalizers, then postfinalize generators, in that order
            self.validator(self) # will raise exception if needed
            self.validator = None # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            # still nested: only the outermost close() commits
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files (entries with an empty 'file' field)
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
            self.after = None # Help prevent cycles.
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                # NOTE(review): the concatenated literals below produce
                # "location%s" with no space before the value -- confirm
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True) # notify success of closing transaction
        self.releasefn = None # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
497
500
498 @active
501 @active
499 def abort(self):
502 def abort(self):
500 '''abort the transaction (generally called on error, or when the
503 '''abort the transaction (generally called on error, or when the
501 transaction is not explicitly committed before going out of
504 transaction is not explicitly committed before going out of
502 scope)'''
505 scope)'''
503 self._abort()
506 self._abort()
504
507
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        # Copy each backup file made during this transaction to a parallel
        # "<undoname>"-based name, and record the mapping in
        # "<undoname>.backupfiles", so a later undo can restore the
        # pre-transaction state.
        if self.undoname is None:
            # undo support is disabled for this transaction
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        # first line of the backup-files journal is the format version
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            # l: vfs location key, f: original file name, b: backup file
            # name, c: cache flag (failures on cache entries are non-fatal)
            if not f: # temporary file
                continue
            if not b:
                # no backup was made for this entry; record an empty name
                u = ''
            else:
                if l not in self._vfsmap and c:
                    # NOTE(review): message says "remove" but nothing is
                    # removed here; wording appears copied from the cleanup
                    # path — confirm intent.
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                # backup names are derived from the journal name, so the
                # undo name can be derived by substitution
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
529
532
530
533
    def _abort(self):
        """Roll back the transaction and clean up its journal files.

        Resets the activity counters, replays the journal (when anything
        was written) to undo partially-applied changes, and always
        notifies ``releasefn`` of the failure.
        """
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just remove the (empty) journal
                # files and skip playback entirely
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # let registered subsystems undo their own state before
                # the file-level playback
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                # playback failed; the journal is not unlinked here, so a
                # later `hg recover` can retry the rollback
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction
            self.releasefn = None # Help prevent cycles.
560
564
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `opener`: a vfs used to open, read and unlink the journal files
    * `vfsmap`: map of vfs location keys to vfs objects, forwarded to
      _playback so backups are restored from the right root
    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.
    * `report`: callable taking a message string, used for user feedback
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # malformed line: warn and keep going so the rest of the
            # journal can still be replayed
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the backup journal as well; the previous code leaked
        # this file handle
        fp.close()
        if lines:
            # first line carries the on-disk format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # NOTE(review): c is the textual cache flag ('0'
                        # or '1'), so bool(c) is True for both; confirm
                        # whether bool(int(c)) was intended.
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now