##// END OF EJS Templates
style: remove namespace class...
Pierre-Yves David -
r29297:50fef825 default
parent child Browse files
Show More
@@ -1,599 +1,598
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 util,
21 util,
22 )
22 )
23
23
# Format version of the journal / backup-journal files written by this module.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}
33
33
# Names of the generation groups used by transaction._generatefiles():
# run every registered generator ('all'), only those that must run before
# the finalizers ('prefinalize'), or only those that must run after them
# ('postfinalize', see postfinalizegenerators above).
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
38
37
def active(func):
    """Decorator: only allow *func* to run while the transaction is live.

    The wrapped method raises ``error.Abort`` once ``self.count`` has
    dropped to zero, i.e. after the transaction was committed or aborted.
    """
    def wrapper(self, *args, **kwds):
        if self.count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return wrapper
46
45
47 def _playback(journal, report, opener, vfsmap, entries, backupentries,
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
48 unlink=True):
47 unlink=True):
49 for f, o, _ignore in entries:
48 for f, o, _ignore in entries:
50 if o or not unlink:
49 if o or not unlink:
51 try:
50 try:
52 fp = opener(f, 'a')
51 fp = opener(f, 'a')
53 fp.truncate(o)
52 fp.truncate(o)
54 fp.close()
53 fp.close()
55 except IOError:
54 except IOError:
56 report(_("failed to truncate %s\n") % f)
55 report(_("failed to truncate %s\n") % f)
57 raise
56 raise
58 else:
57 else:
59 try:
58 try:
60 opener.unlink(f)
59 opener.unlink(f)
61 except (IOError, OSError) as inst:
60 except (IOError, OSError) as inst:
62 if inst.errno != errno.ENOENT:
61 if inst.errno != errno.ENOENT:
63 raise
62 raise
64
63
65 backupfiles = []
64 backupfiles = []
66 for l, f, b, c in backupentries:
65 for l, f, b, c in backupentries:
67 if l not in vfsmap and c:
66 if l not in vfsmap and c:
68 report("couldn't handle %s: unknown cache location %s\n"
67 report("couldn't handle %s: unknown cache location %s\n"
69 % (b, l))
68 % (b, l))
70 vfs = vfsmap[l]
69 vfs = vfsmap[l]
71 try:
70 try:
72 if f and b:
71 if f and b:
73 filepath = vfs.join(f)
72 filepath = vfs.join(f)
74 backuppath = vfs.join(b)
73 backuppath = vfs.join(b)
75 try:
74 try:
76 util.copyfile(backuppath, filepath)
75 util.copyfile(backuppath, filepath)
77 backupfiles.append(b)
76 backupfiles.append(b)
78 except IOError:
77 except IOError:
79 report(_("failed to recover %s\n") % f)
78 report(_("failed to recover %s\n") % f)
80 else:
79 else:
81 target = f or b
80 target = f or b
82 try:
81 try:
83 vfs.unlink(target)
82 vfs.unlink(target)
84 except (IOError, OSError) as inst:
83 except (IOError, OSError) as inst:
85 if inst.errno != errno.ENOENT:
84 if inst.errno != errno.ENOENT:
86 raise
85 raise
87 except (IOError, OSError, error.Abort) as inst:
86 except (IOError, OSError, error.Abort) as inst:
88 if not c:
87 if not c:
89 raise
88 raise
90
89
91 backuppath = "%s.backupfiles" % journal
90 backuppath = "%s.backupfiles" % journal
92 if opener.exists(backuppath):
91 if opener.exists(backuppath):
93 opener.unlink(backuppath)
92 opener.unlink(backuppath)
94 opener.unlink(journal)
93 opener.unlink(journal)
95 try:
94 try:
96 for f in backupfiles:
95 for f in backupfiles:
97 if opener.exists(f):
96 if opener.exists(f):
98 opener.unlink(f)
97 opener.unlink(f)
99 except (IOError, OSError, error.Abort) as inst:
98 except (IOError, OSError, error.Abort) as inst:
100 # only pure backup file remains, it is sage to ignore any error
99 # only pure backup file remains, it is sage to ignore any error
101 pass
100 pass
102
101
103 class transaction(object):
102 class transaction(object):
104 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
103 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
105 after=None, createmode=None, validator=None, releasefn=None):
104 after=None, createmode=None, validator=None, releasefn=None):
106 """Begin a new transaction
105 """Begin a new transaction
107
106
108 Begins a new transaction that allows rolling back writes in the event of
107 Begins a new transaction that allows rolling back writes in the event of
109 an exception.
108 an exception.
110
109
111 * `after`: called after the transaction has been committed
110 * `after`: called after the transaction has been committed
112 * `createmode`: the mode of the journal file that will be created
111 * `createmode`: the mode of the journal file that will be created
113 * `releasefn`: called after releasing (with transaction and result)
112 * `releasefn`: called after releasing (with transaction and result)
114 """
113 """
115 self.count = 1
114 self.count = 1
116 self.usages = 1
115 self.usages = 1
117 self.report = report
116 self.report = report
118 # a vfs to the store content
117 # a vfs to the store content
119 self.opener = opener
118 self.opener = opener
120 # a map to access file in various {location -> vfs}
119 # a map to access file in various {location -> vfs}
121 vfsmap = vfsmap.copy()
120 vfsmap = vfsmap.copy()
122 vfsmap[''] = opener # set default value
121 vfsmap[''] = opener # set default value
123 self._vfsmap = vfsmap
122 self._vfsmap = vfsmap
124 self.after = after
123 self.after = after
125 self.entries = []
124 self.entries = []
126 self.map = {}
125 self.map = {}
127 self.journal = journalname
126 self.journal = journalname
128 self.undoname = undoname
127 self.undoname = undoname
129 self._queue = []
128 self._queue = []
130 # A callback to validate transaction content before closing it.
129 # A callback to validate transaction content before closing it.
131 # should raise exception is anything is wrong.
130 # should raise exception is anything is wrong.
132 # target user is repository hooks.
131 # target user is repository hooks.
133 if validator is None:
132 if validator is None:
134 validator = lambda tr: None
133 validator = lambda tr: None
135 self.validator = validator
134 self.validator = validator
136 # A callback to do something just after releasing transaction.
135 # A callback to do something just after releasing transaction.
137 if releasefn is None:
136 if releasefn is None:
138 releasefn = lambda tr, success: None
137 releasefn = lambda tr, success: None
139 self.releasefn = releasefn
138 self.releasefn = releasefn
140
139
141 # a dict of arguments to be passed to hooks
140 # a dict of arguments to be passed to hooks
142 self.hookargs = {}
141 self.hookargs = {}
143 self.file = opener.open(self.journal, "w")
142 self.file = opener.open(self.journal, "w")
144
143
145 # a list of ('location', 'path', 'backuppath', cache) entries.
144 # a list of ('location', 'path', 'backuppath', cache) entries.
146 # - if 'backuppath' is empty, no file existed at backup time
145 # - if 'backuppath' is empty, no file existed at backup time
147 # - if 'path' is empty, this is a temporary transaction file
146 # - if 'path' is empty, this is a temporary transaction file
148 # - if 'location' is not empty, the path is outside main opener reach.
147 # - if 'location' is not empty, the path is outside main opener reach.
149 # use 'location' value as a key in a vfsmap to find the right 'vfs'
148 # use 'location' value as a key in a vfsmap to find the right 'vfs'
150 # (cache is currently unused)
149 # (cache is currently unused)
151 self._backupentries = []
150 self._backupentries = []
152 self._backupmap = {}
151 self._backupmap = {}
153 self._backupjournal = "%s.backupfiles" % self.journal
152 self._backupjournal = "%s.backupfiles" % self.journal
154 self._backupsfile = opener.open(self._backupjournal, 'w')
153 self._backupsfile = opener.open(self._backupjournal, 'w')
155 self._backupsfile.write('%d\n' % version)
154 self._backupsfile.write('%d\n' % version)
156
155
157 if createmode is not None:
156 if createmode is not None:
158 opener.chmod(self.journal, createmode & 0o666)
157 opener.chmod(self.journal, createmode & 0o666)
159 opener.chmod(self._backupjournal, createmode & 0o666)
158 opener.chmod(self._backupjournal, createmode & 0o666)
160
159
161 # hold file generations to be performed on commit
160 # hold file generations to be performed on commit
162 self._filegenerators = {}
161 self._filegenerators = {}
163 # hold callback to write pending data for hooks
162 # hold callback to write pending data for hooks
164 self._pendingcallback = {}
163 self._pendingcallback = {}
165 # True is any pending data have been written ever
164 # True is any pending data have been written ever
166 self._anypending = False
165 self._anypending = False
167 # holds callback to call when writing the transaction
166 # holds callback to call when writing the transaction
168 self._finalizecallback = {}
167 self._finalizecallback = {}
169 # hold callback for post transaction close
168 # hold callback for post transaction close
170 self._postclosecallback = {}
169 self._postclosecallback = {}
171 # holds callbacks to call during abort
170 # holds callbacks to call during abort
172 self._abortcallback = {}
171 self._abortcallback = {}
173
172
174 def __del__(self):
173 def __del__(self):
175 if self.journal:
174 if self.journal:
176 self._abort()
175 self._abort()
177
176
178 @active
177 @active
179 def startgroup(self):
178 def startgroup(self):
180 """delay registration of file entry
179 """delay registration of file entry
181
180
182 This is used by strip to delay vision of strip offset. The transaction
181 This is used by strip to delay vision of strip offset. The transaction
183 sees either none or all of the strip actions to be done."""
182 sees either none or all of the strip actions to be done."""
184 self._queue.append([])
183 self._queue.append([])
185
184
186 @active
185 @active
187 def endgroup(self):
186 def endgroup(self):
188 """apply delayed registration of file entry.
187 """apply delayed registration of file entry.
189
188
190 This is used by strip to delay vision of strip offset. The transaction
189 This is used by strip to delay vision of strip offset. The transaction
191 sees either none or all of the strip actions to be done."""
190 sees either none or all of the strip actions to be done."""
192 q = self._queue.pop()
191 q = self._queue.pop()
193 for f, o, data in q:
192 for f, o, data in q:
194 self._addentry(f, o, data)
193 self._addentry(f, o, data)
195
194
196 @active
195 @active
197 def add(self, file, offset, data=None):
196 def add(self, file, offset, data=None):
198 """record the state of an append-only file before update"""
197 """record the state of an append-only file before update"""
199 if file in self.map or file in self._backupmap:
198 if file in self.map or file in self._backupmap:
200 return
199 return
201 if self._queue:
200 if self._queue:
202 self._queue[-1].append((file, offset, data))
201 self._queue[-1].append((file, offset, data))
203 return
202 return
204
203
205 self._addentry(file, offset, data)
204 self._addentry(file, offset, data)
206
205
207 def _addentry(self, file, offset, data):
206 def _addentry(self, file, offset, data):
208 """add a append-only entry to memory and on-disk state"""
207 """add a append-only entry to memory and on-disk state"""
209 if file in self.map or file in self._backupmap:
208 if file in self.map or file in self._backupmap:
210 return
209 return
211 self.entries.append((file, offset, data))
210 self.entries.append((file, offset, data))
212 self.map[file] = len(self.entries) - 1
211 self.map[file] = len(self.entries) - 1
213 # add enough data to the journal to do the truncate
212 # add enough data to the journal to do the truncate
214 self.file.write("%s\0%d\n" % (file, offset))
213 self.file.write("%s\0%d\n" % (file, offset))
215 self.file.flush()
214 self.file.flush()
216
215
217 @active
216 @active
218 def addbackup(self, file, hardlink=True, location=''):
217 def addbackup(self, file, hardlink=True, location=''):
219 """Adds a backup of the file to the transaction
218 """Adds a backup of the file to the transaction
220
219
221 Calling addbackup() creates a hardlink backup of the specified file
220 Calling addbackup() creates a hardlink backup of the specified file
222 that is used to recover the file in the event of the transaction
221 that is used to recover the file in the event of the transaction
223 aborting.
222 aborting.
224
223
225 * `file`: the file path, relative to .hg/store
224 * `file`: the file path, relative to .hg/store
226 * `hardlink`: use a hardlink to quickly create the backup
225 * `hardlink`: use a hardlink to quickly create the backup
227 """
226 """
228 if self._queue:
227 if self._queue:
229 msg = 'cannot use transaction.addbackup inside "group"'
228 msg = 'cannot use transaction.addbackup inside "group"'
230 raise RuntimeError(msg)
229 raise RuntimeError(msg)
231
230
232 if file in self.map or file in self._backupmap:
231 if file in self.map or file in self._backupmap:
233 return
232 return
234 vfs = self._vfsmap[location]
233 vfs = self._vfsmap[location]
235 dirname, filename = vfs.split(file)
234 dirname, filename = vfs.split(file)
236 backupfilename = "%s.backup.%s" % (self.journal, filename)
235 backupfilename = "%s.backup.%s" % (self.journal, filename)
237 backupfile = vfs.reljoin(dirname, backupfilename)
236 backupfile = vfs.reljoin(dirname, backupfilename)
238 if vfs.exists(file):
237 if vfs.exists(file):
239 filepath = vfs.join(file)
238 filepath = vfs.join(file)
240 backuppath = vfs.join(backupfile)
239 backuppath = vfs.join(backupfile)
241 util.copyfile(filepath, backuppath, hardlink=hardlink)
240 util.copyfile(filepath, backuppath, hardlink=hardlink)
242 else:
241 else:
243 backupfile = ''
242 backupfile = ''
244
243
245 self._addbackupentry((location, file, backupfile, False))
244 self._addbackupentry((location, file, backupfile, False))
246
245
247 def _addbackupentry(self, entry):
246 def _addbackupentry(self, entry):
248 """register a new backup entry and write it to disk"""
247 """register a new backup entry and write it to disk"""
249 self._backupentries.append(entry)
248 self._backupentries.append(entry)
250 self._backupmap[entry[1]] = len(self._backupentries) - 1
249 self._backupmap[entry[1]] = len(self._backupentries) - 1
251 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
250 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
252 self._backupsfile.flush()
251 self._backupsfile.flush()
253
252
254 @active
253 @active
255 def registertmp(self, tmpfile, location=''):
254 def registertmp(self, tmpfile, location=''):
256 """register a temporary transaction file
255 """register a temporary transaction file
257
256
258 Such files will be deleted when the transaction exits (on both
257 Such files will be deleted when the transaction exits (on both
259 failure and success).
258 failure and success).
260 """
259 """
261 self._addbackupentry((location, '', tmpfile, False))
260 self._addbackupentry((location, '', tmpfile, False))
262
261
263 @active
262 @active
264 def addfilegenerator(self, genid, filenames, genfunc, order=0,
263 def addfilegenerator(self, genid, filenames, genfunc, order=0,
265 location=''):
264 location=''):
266 """add a function to generates some files at transaction commit
265 """add a function to generates some files at transaction commit
267
266
268 The `genfunc` argument is a function capable of generating proper
267 The `genfunc` argument is a function capable of generating proper
269 content of each entry in the `filename` tuple.
268 content of each entry in the `filename` tuple.
270
269
271 At transaction close time, `genfunc` will be called with one file
270 At transaction close time, `genfunc` will be called with one file
272 object argument per entries in `filenames`.
271 object argument per entries in `filenames`.
273
272
274 The transaction itself is responsible for the backup, creation and
273 The transaction itself is responsible for the backup, creation and
275 final write of such file.
274 final write of such file.
276
275
277 The `genid` argument is used to ensure the same set of file is only
276 The `genid` argument is used to ensure the same set of file is only
278 generated once. Call to `addfilegenerator` for a `genid` already
277 generated once. Call to `addfilegenerator` for a `genid` already
279 present will overwrite the old entry.
278 present will overwrite the old entry.
280
279
281 The `order` argument may be used to control the order in which multiple
280 The `order` argument may be used to control the order in which multiple
282 generator will be executed.
281 generator will be executed.
283
282
284 The `location` arguments may be used to indicate the files are located
283 The `location` arguments may be used to indicate the files are located
285 outside of the the standard directory for transaction. It should match
284 outside of the the standard directory for transaction. It should match
286 one of the key of the `transaction.vfsmap` dictionary.
285 one of the key of the `transaction.vfsmap` dictionary.
287 """
286 """
288 # For now, we are unable to do proper backup and restore of custom vfs
287 # For now, we are unable to do proper backup and restore of custom vfs
289 # but for bookmarks that are handled outside this mechanism.
288 # but for bookmarks that are handled outside this mechanism.
290 self._filegenerators[genid] = (order, filenames, genfunc, location)
289 self._filegenerators[genid] = (order, filenames, genfunc, location)
291
290
292 def _generatefiles(self, suffix='', group=GenerationGroup.all):
291 def _generatefiles(self, suffix='', group=gengroupall):
293 # write files registered for generation
292 # write files registered for generation
294 any = False
293 any = False
295 for id, entry in sorted(self._filegenerators.iteritems()):
294 for id, entry in sorted(self._filegenerators.iteritems()):
296 any = True
295 any = True
297 order, filenames, genfunc, location = entry
296 order, filenames, genfunc, location = entry
298
297
299 # for generation at closing, check if it's before or after finalize
298 # for generation at closing, check if it's before or after finalize
300 postfinalize = group == GenerationGroup.postfinalize
299 postfinalize = group == gengrouppostfinalize
301 if (group != GenerationGroup.all and
300 if (group != gengroupall and
302 (id in postfinalizegenerators) != (postfinalize)):
301 (id in postfinalizegenerators) != (postfinalize)):
303 continue
302 continue
304
303
305 vfs = self._vfsmap[location]
304 vfs = self._vfsmap[location]
306 files = []
305 files = []
307 try:
306 try:
308 for name in filenames:
307 for name in filenames:
309 name += suffix
308 name += suffix
310 if suffix:
309 if suffix:
311 self.registertmp(name, location=location)
310 self.registertmp(name, location=location)
312 else:
311 else:
313 self.addbackup(name, location=location)
312 self.addbackup(name, location=location)
314 files.append(vfs(name, 'w', atomictemp=True))
313 files.append(vfs(name, 'w', atomictemp=True))
315 genfunc(*files)
314 genfunc(*files)
316 finally:
315 finally:
317 for f in files:
316 for f in files:
318 f.close()
317 f.close()
319 return any
318 return any
320
319
321 @active
320 @active
322 def find(self, file):
321 def find(self, file):
323 if file in self.map:
322 if file in self.map:
324 return self.entries[self.map[file]]
323 return self.entries[self.map[file]]
325 if file in self._backupmap:
324 if file in self._backupmap:
326 return self._backupentries[self._backupmap[file]]
325 return self._backupentries[self._backupmap[file]]
327 return None
326 return None
328
327
329 @active
328 @active
330 def replace(self, file, offset, data=None):
329 def replace(self, file, offset, data=None):
331 '''
330 '''
332 replace can only replace already committed entries
331 replace can only replace already committed entries
333 that are not pending in the queue
332 that are not pending in the queue
334 '''
333 '''
335
334
336 if file not in self.map:
335 if file not in self.map:
337 raise KeyError(file)
336 raise KeyError(file)
338 index = self.map[file]
337 index = self.map[file]
339 self.entries[index] = (file, offset, data)
338 self.entries[index] = (file, offset, data)
340 self.file.write("%s\0%d\n" % (file, offset))
339 self.file.write("%s\0%d\n" % (file, offset))
341 self.file.flush()
340 self.file.flush()
342
341
343 @active
342 @active
344 def nest(self):
343 def nest(self):
345 self.count += 1
344 self.count += 1
346 self.usages += 1
345 self.usages += 1
347 return self
346 return self
348
347
349 def release(self):
348 def release(self):
350 if self.count > 0:
349 if self.count > 0:
351 self.usages -= 1
350 self.usages -= 1
352 # if the transaction scopes are left without being closed, fail
351 # if the transaction scopes are left without being closed, fail
353 if self.count > 0 and self.usages == 0:
352 if self.count > 0 and self.usages == 0:
354 self._abort()
353 self._abort()
355
354
356 def __enter__(self):
355 def __enter__(self):
357 return self
356 return self
358
357
359 def __exit__(self, exc_type, exc_val, exc_tb):
358 def __exit__(self, exc_type, exc_val, exc_tb):
360 try:
359 try:
361 if exc_type is None:
360 if exc_type is None:
362 self.close()
361 self.close()
363 finally:
362 finally:
364 self.release()
363 self.release()
365
364
366 def running(self):
365 def running(self):
367 return self.count > 0
366 return self.count > 0
368
367
369 def addpending(self, category, callback):
368 def addpending(self, category, callback):
370 """add a callback to be called when the transaction is pending
369 """add a callback to be called when the transaction is pending
371
370
372 The transaction will be given as callback's first argument.
371 The transaction will be given as callback's first argument.
373
372
374 Category is a unique identifier to allow overwriting an old callback
373 Category is a unique identifier to allow overwriting an old callback
375 with a newer callback.
374 with a newer callback.
376 """
375 """
377 self._pendingcallback[category] = callback
376 self._pendingcallback[category] = callback
378
377
379 @active
378 @active
380 def writepending(self):
379 def writepending(self):
381 '''write pending file to temporary version
380 '''write pending file to temporary version
382
381
383 This is used to allow hooks to view a transaction before commit'''
382 This is used to allow hooks to view a transaction before commit'''
384 categories = sorted(self._pendingcallback)
383 categories = sorted(self._pendingcallback)
385 for cat in categories:
384 for cat in categories:
386 # remove callback since the data will have been flushed
385 # remove callback since the data will have been flushed
387 any = self._pendingcallback.pop(cat)(self)
386 any = self._pendingcallback.pop(cat)(self)
388 self._anypending = self._anypending or any
387 self._anypending = self._anypending or any
389 self._anypending |= self._generatefiles(suffix='.pending')
388 self._anypending |= self._generatefiles(suffix='.pending')
390 return self._anypending
389 return self._anypending
391
390
392 @active
391 @active
393 def addfinalize(self, category, callback):
392 def addfinalize(self, category, callback):
394 """add a callback to be called when the transaction is closed
393 """add a callback to be called when the transaction is closed
395
394
396 The transaction will be given as callback's first argument.
395 The transaction will be given as callback's first argument.
397
396
398 Category is a unique identifier to allow overwriting old callbacks with
397 Category is a unique identifier to allow overwriting old callbacks with
399 newer callbacks.
398 newer callbacks.
400 """
399 """
401 self._finalizecallback[category] = callback
400 self._finalizecallback[category] = callback
402
401
403 @active
402 @active
404 def addpostclose(self, category, callback):
403 def addpostclose(self, category, callback):
405 """add a callback to be called after the transaction is closed
404 """add a callback to be called after the transaction is closed
406
405
407 The transaction will be given as callback's first argument.
406 The transaction will be given as callback's first argument.
408
407
409 Category is a unique identifier to allow overwriting an old callback
408 Category is a unique identifier to allow overwriting an old callback
410 with a newer callback.
409 with a newer callback.
411 """
410 """
412 self._postclosecallback[category] = callback
411 self._postclosecallback[category] = callback
413
412
414 @active
413 @active
415 def addabort(self, category, callback):
414 def addabort(self, category, callback):
416 """add a callback to be called when the transaction is aborted.
415 """add a callback to be called when the transaction is aborted.
417
416
418 The transaction will be given as the first argument to the callback.
417 The transaction will be given as the first argument to the callback.
419
418
420 Category is a unique identifier to allow overwriting an old callback
419 Category is a unique identifier to allow overwriting an old callback
421 with a newer callback.
420 with a newer callback.
422 """
421 """
423 self._abortcallback[category] = callback
422 self._abortcallback[category] = callback
424
423
425 @active
424 @active
426 def close(self):
425 def close(self):
427 '''commit the transaction'''
426 '''commit the transaction'''
428 if self.count == 1:
427 if self.count == 1:
429 self.validator(self) # will raise exception if needed
428 self.validator(self) # will raise exception if needed
430 self._generatefiles(group=GenerationGroup.prefinalize)
429 self._generatefiles(group=gengroupprefinalize)
431 categories = sorted(self._finalizecallback)
430 categories = sorted(self._finalizecallback)
432 for cat in categories:
431 for cat in categories:
433 self._finalizecallback[cat](self)
432 self._finalizecallback[cat](self)
434 # Prevent double usage and help clear cycles.
433 # Prevent double usage and help clear cycles.
435 self._finalizecallback = None
434 self._finalizecallback = None
436 self._generatefiles(group=GenerationGroup.postfinalize)
435 self._generatefiles(group=gengrouppostfinalize)
437
436
438 self.count -= 1
437 self.count -= 1
439 if self.count != 0:
438 if self.count != 0:
440 return
439 return
441 self.file.close()
440 self.file.close()
442 self._backupsfile.close()
441 self._backupsfile.close()
443 # cleanup temporary files
442 # cleanup temporary files
444 for l, f, b, c in self._backupentries:
443 for l, f, b, c in self._backupentries:
445 if l not in self._vfsmap and c:
444 if l not in self._vfsmap and c:
446 self.report("couldn't remove %s: unknown cache location %s\n"
445 self.report("couldn't remove %s: unknown cache location %s\n"
447 % (b, l))
446 % (b, l))
448 continue
447 continue
449 vfs = self._vfsmap[l]
448 vfs = self._vfsmap[l]
450 if not f and b and vfs.exists(b):
449 if not f and b and vfs.exists(b):
451 try:
450 try:
452 vfs.unlink(b)
451 vfs.unlink(b)
453 except (IOError, OSError, error.Abort) as inst:
452 except (IOError, OSError, error.Abort) as inst:
454 if not c:
453 if not c:
455 raise
454 raise
456 # Abort may be raise by read only opener
455 # Abort may be raise by read only opener
457 self.report("couldn't remove %s: %s\n"
456 self.report("couldn't remove %s: %s\n"
458 % (vfs.join(b), inst))
457 % (vfs.join(b), inst))
459 self.entries = []
458 self.entries = []
460 self._writeundo()
459 self._writeundo()
461 if self.after:
460 if self.after:
462 self.after()
461 self.after()
463 if self.opener.isfile(self._backupjournal):
462 if self.opener.isfile(self._backupjournal):
464 self.opener.unlink(self._backupjournal)
463 self.opener.unlink(self._backupjournal)
465 if self.opener.isfile(self.journal):
464 if self.opener.isfile(self.journal):
466 self.opener.unlink(self.journal)
465 self.opener.unlink(self.journal)
467 for l, _f, b, c in self._backupentries:
466 for l, _f, b, c in self._backupentries:
468 if l not in self._vfsmap and c:
467 if l not in self._vfsmap and c:
469 self.report("couldn't remove %s: unknown cache location"
468 self.report("couldn't remove %s: unknown cache location"
470 "%s\n" % (b, l))
469 "%s\n" % (b, l))
471 continue
470 continue
472 vfs = self._vfsmap[l]
471 vfs = self._vfsmap[l]
473 if b and vfs.exists(b):
472 if b and vfs.exists(b):
474 try:
473 try:
475 vfs.unlink(b)
474 vfs.unlink(b)
476 except (IOError, OSError, error.Abort) as inst:
475 except (IOError, OSError, error.Abort) as inst:
477 if not c:
476 if not c:
478 raise
477 raise
479 # Abort may be raise by read only opener
478 # Abort may be raise by read only opener
480 self.report("couldn't remove %s: %s\n"
479 self.report("couldn't remove %s: %s\n"
481 % (vfs.join(b), inst))
480 % (vfs.join(b), inst))
482 self._backupentries = []
481 self._backupentries = []
483 self.journal = None
482 self.journal = None
484
483
485 self.releasefn(self, True) # notify success of closing transaction
484 self.releasefn(self, True) # notify success of closing transaction
486
485
487 # run post close action
486 # run post close action
488 categories = sorted(self._postclosecallback)
487 categories = sorted(self._postclosecallback)
489 for cat in categories:
488 for cat in categories:
490 self._postclosecallback[cat](self)
489 self._postclosecallback[cat](self)
491 # Prevent double usage and help clear cycles.
490 # Prevent double usage and help clear cycles.
492 self._postclosecallback = None
491 self._postclosecallback = None
493
492
494 @active
493 @active
495 def abort(self):
494 def abort(self):
496 '''abort the transaction (generally called on error, or when the
495 '''abort the transaction (generally called on error, or when the
497 transaction is not explicitly committed before going out of
496 transaction is not explicitly committed before going out of
498 scope)'''
497 scope)'''
499 self._abort()
498 self._abort()
500
499
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        # Nothing to do unless this transaction was configured with an
        # undo name (see the constructor's 'undoname' argument).
        if self.undoname is None:
            return
        # The undo backup journal mirrors the live "<journal>.backupfiles"
        # file: a version header followed by one l\0f\0u\0c record per line.
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f: # temporary file
                continue
            if not b:
                # No backup file was recorded for this entry; store an
                # empty backup path in the undo journal.
                # NOTE(review): in this branch 'vfs' below is unbound (or
                # stale from a previous iteration) when copyfile runs —
                # presumably b is never empty here in practice; confirm.
                u = ''
            else:
                if l not in self._vfsmap and c:
                    # Unknown cache location: best-effort entries ('c' set)
                    # are only warned about, never fatal.
                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                # Rename the backup from the live journal namespace into
                # the undo namespace (e.g. journal.backup.X -> undo.backup.X).
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
            # Hardlink when possible: the undo copy shares storage with
            # the original backup instead of duplicating it.
            util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
525
524
526
525
    def _abort(self):
        # Mark the transaction as no longer active before doing anything
        # else, so re-entry (and the @active decorator) see it as dead.
        self.count = 0
        self.usages = 0
        # Close the journal and backup-journal file handles; their on-disk
        # content is what _playback (or a later 'hg recover') consumes.
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self.backupentries if False else (not self.entries and not self._backupentries):
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # Run registered abort callbacks in deterministic
                # (sorted-by-category) order before replaying the journal.
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                # Rollback itself failed; leave the journal on disk so the
                # user can run 'hg recover' to finish the job.
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction
556
555
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        try:
            lines = fp.readlines()
        finally:
            # fix: this handle was previously leaked (never closed)
            fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # fix: the writer serializes the cache flag with
                        # "%d" (so c is '0' or '1'); bool(c) was always
                        # True because bool('0') is True in Python.
                        backupentries.append((l, f, b, bool(int(c))))
            else:
                report(_("journal was created by a different version of "
                        "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now