##// END OF EJS Templates
transaction: avoid ambiguity of file stat at closing transaction...
FUJIWARA Katsunori -
r29299:76b07a5c default
parent child Browse files
Show More
@@ -1,598 +1,599
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 util,
21 util,
22 )
22 )
23
23
# journal/backup-file format version written at the top of the backups file
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}

# names of the file-generation groups (see transaction._generatefiles)
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
37
37
def active(func):
    """Decorator: restrict *func* to a live (not committed/aborted) txn."""
    def _active(self, *args, **kwds):
        if self.count:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
45
45
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
47 unlink=True):
47 unlink=True):
48 for f, o, _ignore in entries:
48 for f, o, _ignore in entries:
49 if o or not unlink:
49 if o or not unlink:
50 try:
50 try:
51 fp = opener(f, 'a')
51 fp = opener(f, 'a')
52 fp.truncate(o)
52 fp.truncate(o)
53 fp.close()
53 fp.close()
54 except IOError:
54 except IOError:
55 report(_("failed to truncate %s\n") % f)
55 report(_("failed to truncate %s\n") % f)
56 raise
56 raise
57 else:
57 else:
58 try:
58 try:
59 opener.unlink(f)
59 opener.unlink(f)
60 except (IOError, OSError) as inst:
60 except (IOError, OSError) as inst:
61 if inst.errno != errno.ENOENT:
61 if inst.errno != errno.ENOENT:
62 raise
62 raise
63
63
64 backupfiles = []
64 backupfiles = []
65 for l, f, b, c in backupentries:
65 for l, f, b, c in backupentries:
66 if l not in vfsmap and c:
66 if l not in vfsmap and c:
67 report("couldn't handle %s: unknown cache location %s\n"
67 report("couldn't handle %s: unknown cache location %s\n"
68 % (b, l))
68 % (b, l))
69 vfs = vfsmap[l]
69 vfs = vfsmap[l]
70 try:
70 try:
71 if f and b:
71 if f and b:
72 filepath = vfs.join(f)
72 filepath = vfs.join(f)
73 backuppath = vfs.join(b)
73 backuppath = vfs.join(b)
74 try:
74 try:
75 util.copyfile(backuppath, filepath)
75 util.copyfile(backuppath, filepath)
76 backupfiles.append(b)
76 backupfiles.append(b)
77 except IOError:
77 except IOError:
78 report(_("failed to recover %s\n") % f)
78 report(_("failed to recover %s\n") % f)
79 else:
79 else:
80 target = f or b
80 target = f or b
81 try:
81 try:
82 vfs.unlink(target)
82 vfs.unlink(target)
83 except (IOError, OSError) as inst:
83 except (IOError, OSError) as inst:
84 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
85 raise
85 raise
86 except (IOError, OSError, error.Abort) as inst:
86 except (IOError, OSError, error.Abort) as inst:
87 if not c:
87 if not c:
88 raise
88 raise
89
89
90 backuppath = "%s.backupfiles" % journal
90 backuppath = "%s.backupfiles" % journal
91 if opener.exists(backuppath):
91 if opener.exists(backuppath):
92 opener.unlink(backuppath)
92 opener.unlink(backuppath)
93 opener.unlink(journal)
93 opener.unlink(journal)
94 try:
94 try:
95 for f in backupfiles:
95 for f in backupfiles:
96 if opener.exists(f):
96 if opener.exists(f):
97 opener.unlink(f)
97 opener.unlink(f)
98 except (IOError, OSError, error.Abort) as inst:
98 except (IOError, OSError, error.Abort) as inst:
99 # only pure backup file remains, it is sage to ignore any error
99 # only pure backup file remains, it is sage to ignore any error
100 pass
100 pass
101
101
102 class transaction(object):
102 class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        """
        # count: nesting depth; usages: outstanding release() calls expected
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        # journaled (file, offset, data) truncation records and an index map
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # Should raise an exception if anything is wrong.
        # Target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        # the on-disk journal recording truncation offsets
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            # mirror the repository's file mode on both journal files
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True is any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
172
172
    def __del__(self):
        # a non-None journal means the transaction was never closed or
        # aborted explicitly; roll it back as a last resort
        if self.journal:
            self._abort()
176
176
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        # entries added while the group is open accumulate here until
        # endgroup() flushes them
        self._queue.append([])
184
184
185 @active
185 @active
186 def endgroup(self):
186 def endgroup(self):
187 """apply delayed registration of file entry.
187 """apply delayed registration of file entry.
188
188
189 This is used by strip to delay vision of strip offset. The transaction
189 This is used by strip to delay vision of strip offset. The transaction
190 sees either none or all of the strip actions to be done."""
190 sees either none or all of the strip actions to be done."""
191 q = self._queue.pop()
191 q = self._queue.pop()
192 for f, o, data in q:
192 for f, o, data in q:
193 self._addentry(f, o, data)
193 self._addentry(f, o, data)
194
194
    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        # already journaled or backed up: nothing to do
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)
205
205
    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()
215
215
    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        # already journaled or backed up: nothing to do
        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records that the file did not exist yet
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))
245
245
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        # entry is (location, path, backuppath, cache); see __init__
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()
252
252
    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'path' marks the entry as a temporary file (see __init__)
        self._addbackupentry((location, '', tmpfile, False))
261
261
    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)
290
290
    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation; returns True if any
        # generator ran
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # '.pending' files are temporary: delete on exit
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    # checkambig only for the final (non-pending) write, to
                    # avoid ambiguity of the file stat at transaction close
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=not suffix))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any
319
320
320 @active
321 @active
321 def find(self, file):
322 def find(self, file):
322 if file in self.map:
323 if file in self.map:
323 return self.entries[self.map[file]]
324 return self.entries[self.map[file]]
324 if file in self._backupmap:
325 if file in self._backupmap:
325 return self._backupentries[self._backupmap[file]]
326 return self._backupentries[self._backupmap[file]]
326 return None
327 return None
327
328
    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue

        Raises KeyError if `file` was never registered with add().
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # append the updated offset to the on-disk journal as well
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()
341
342
    @active
    def nest(self):
        # open a nested scope that shares this transaction object;
        # close()/release() only take effect once every scope is done
        self.count += 1
        self.usages += 1
        return self
347
348
    def release(self):
        # drop one outstanding usage of the transaction
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()
354
355
    def __enter__(self):
        # context-manager support: ``with tr: ...``
        return self
357
358
    def __exit__(self, exc_type, exc_val, exc_tb):
        # commit on clean exit; always release (which aborts if not closed)
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
364
365
    def running(self):
        # True while at least one (possibly nested) scope is still open
        return self.count > 0
367
368
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback
377
378
    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit.

        Returns True if any pending data has ever been written.'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending
390
391
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
401
402
    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
412
413
    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
423
424
    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            # outermost scope: validate, run finalizers, generate files
            self.validator(self)  # will raise exception if needed
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self.count -= 1
        if self.count != 0:
            # nested scopes remain open; real commit happens later
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files (entries with an empty 'path')
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
        # remove both journal files now that the transaction succeeded
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        # remove remaining backup files (no longer needed after success)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        # journal=None marks the transaction as committed (see __del__)
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
492
493
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()
499
500
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # no backup existed; record an empty undo path
                u = ''
            else:
                if l not in self._vfsmap and c:
                    # NOTE(review): message says "remove" but this loop only
                    # copies backups for undo — possibly a stale message;
                    # confirm against upstream intent
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                # rename journal-prefixed backup to its undo-prefixed twin
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
524
525
525
526
    def _abort(self):
        """roll the transaction back and release resources

        Closes the journal and backup-journal file handles, replays the
        recorded backups to undo partial writes, and always notifies
        ``releasefn`` of failure, even if the rollback itself fails.
        """
        # mark the transaction as no longer active/usable
        self.count = 0
        self.usages = 0
        # close the journal handles before touching the files on disk
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just remove the (empty) journal
                # files, no playback needed
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # run abort callbacks (sorted by category for a
                # deterministic order) before replaying the journal
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                # replay the journal to restore the pre-transaction state;
                # False: do not unlink backup files (kept for 'hg recover')
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                # rollback itself failed; leave the journal in place so
                # 'hg recover' can retry later
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction
555
556
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    * `opener`/`vfsmap`: vfs objects used to read the journals and to
    replay the recorded truncations and backup restores.
    * `report`: callable used to emit warnings about unreadable or
    version-mismatched journal entries.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # malformed line: warn and keep going, recover what we can
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # fix: this handle was previously never closed (leak)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # fix: the cache flag is serialized with "%d", so
                        # c is the string '0' or '1'; bool('0') is True,
                        # which wrongly marked every entry as cache (and
                        # silenced restore errors). Compare against '0'.
                        backupentries.append((l, f, b, c != '0'))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now