transaction: allow finalizer to add finalizer

changeset:   r44556:2f1d6180
branch:      default
author:      marmoute
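This changeset lets a finalizer registered with addfinalize() itself register another finalizer. Previously, close() iterated over a single sorted snapshot of self._finalizecallback, so a callback added during finalization was silently dropped; close() now drains the dict in rounds until no callback remains. A minimal standalone sketch of the new pattern (illustrative only; the names below are invented and not part of Mercurial):

callbacks = {}

def addfinalize(category, callback):
    callbacks[category] = callback

def run_finalizers():
    global callbacks
    while callbacks:  # keep draining while finalizers keep arriving
        pending = callbacks
        callbacks = {}  # callbacks registered from here on start a new round
        for cat in sorted(pending):
            pending[cat](None)

addfinalize(b'outer', lambda tr: addfinalize(b'inner', lambda tr: print('inner ran')))
run_finalizers()  # prints 'inner ran'; the old single-pass loop never called it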
@@ -1,718 +1,721 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

gengroupall = b'all'
gengroupprefinalize = b'prefinalize'
gengrouppostfinalize = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.Abort(
                _(
                    b'cannot use transaction when it is already committed/aborted'
                )
            )
        return func(self, *args, **kwds)

    return _active


def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_(b"failed to recover %s\n") % f)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only the pure backup files remain; it is safe to ignore any error
        pass


class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for the corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # It should raise an exception if anything is wrong.
        # The target users are repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self._map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
264 """add a append-only entry to memory and on-disk state"""
264 """add a append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))

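    # Illustrative usage (comment not in the original file): a caller about
    # to rewrite a file typically protects it first, e.g.
    #     tr.addbackup(b'phaseroots')
    # which copies or hardlinks it to <journal>.backup.phaseroots so that an
    # abort can restore it.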
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, b'', tmpfile, False))

    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
323 """add a function to generates some files at transaction commit
323 """add a function to generates some files at transaction commit
324
324
325 The `genfunc` argument is a function capable of generating proper
325 The `genfunc` argument is a function capable of generating proper
326 content of each entry in the `filename` tuple.
326 content of each entry in the `filename` tuple.
327
327
328 At transaction close time, `genfunc` will be called with one file
328 At transaction close time, `genfunc` will be called with one file
329 object argument per entries in `filenames`.
329 object argument per entries in `filenames`.
330
330
331 The transaction itself is responsible for the backup, creation and
331 The transaction itself is responsible for the backup, creation and
332 final write of such file.
332 final write of such file.
333
333
334 The `genid` argument is used to ensure the same set of file is only
334 The `genid` argument is used to ensure the same set of file is only
335 generated once. Call to `addfilegenerator` for a `genid` already
335 generated once. Call to `addfilegenerator` for a `genid` already
336 present will overwrite the old entry.
336 present will overwrite the old entry.
337
337
338 The `order` argument may be used to control the order in which multiple
338 The `order` argument may be used to control the order in which multiple
339 generator will be executed.
339 generator will be executed.
340
340
341 The `location` arguments may be used to indicate the files are located
341 The `location` arguments may be used to indicate the files are located
342 outside of the the standard directory for transaction. It should match
342 outside of the the standard directory for transaction. It should match
343 one of the key of the `transaction.vfsmap` dictionary.
343 one of the key of the `transaction.vfsmap` dictionary.
344 """
344 """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks, which are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

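    # Illustrative usage (comment not in the original file; names are
    # hypothetical): registering a generator whose output the transaction
    # backs up, writes atomically, and restores on abort:
    #
    #     def writebookmarks(f):
    #         f.write(bookmarkdata)
    #     tr.addfilegenerator(b'bookmarks', (b'bookmarks',), writebookmarks,
    #                         location=b'plain')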
    @active
    def removefilegenerator(self, genid):
        """reverse of addfilegenerator, remove a file generator function"""
        if genid in self._filegenerators:
            del self._filegenerators[genid]

    def _generatefiles(self, suffix=b'', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (
                group != gengroupall
                and (id in postfinalizegenerators) != postfinalize
            ):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(
                        vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                    )
                genfunc(*files)
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                for f in files:
                    f.discard()
        return any

    @active
    def find(self, file):
        if file in self._map:
            return self._entries[self._map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

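    # Illustrative nesting semantics (comment not in the original file):
    # nest() returns the same object with bumped counters, so only the
    # outermost scope actually commits or aborts:
    #
    #     tr2 = tr.nest(name='inner')   # same transaction, _count == 2
    #     tr2.close()                   # _count -> 1, nothing written yet
    #     tr.close()                    # _count -> 0, transaction commits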
    def running(self):
        return self._count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

    @active
    def hasfinalize(self, category):
460 """check is a callback already exist for a category
460 """check is a callback already exist for a category
461 """
461 """
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            self._validator(self)  # will raise exception if needed
            self._validator = None  # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
-            categories = sorted(self._finalizecallback)
-            for cat in categories:
-                self._finalizecallback[cat](self)
+            while self._finalizecallback:
+                callbacks = self._finalizecallback
+                self._finalizecallback = {}
+                categories = sorted(callbacks)
+                for cat in categories:
+                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)
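            # Note (editorial comment, not in the original file): the while
            # loop added above snapshots and clears the callback dict each
            # round, so a finalizer that registers another finalizer gets it
            # run in a later round instead of silently dropped.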

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # clean up temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
551 b"couldn't remove %s: unknown cache location"
554 b"couldn't remove %s: unknown cache location"
552 b"%s\n" % (b, l)
555 b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
602 b"couldn't remove %s: unknown cache location"
605 b"couldn't remove %s: unknown cache location"
603 b"%s\n" % (b, l)
606 b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.


def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
673 """
676 """
674 entries = []
677 entries = []
675 backupentries = []
678 backupentries = []
676
679
677 fp = opener.open(file)
680 fp = opener.open(file)
678 lines = fp.readlines()
681 lines = fp.readlines()
679 fp.close()
682 fp.close()
680 for l in lines:
683 for l in lines:
681 try:
684 try:
682 f, o = l.split(b'\0')
685 f, o = l.split(b'\0')
683 entries.append((f, int(o), None))
686 entries.append((f, int(o), None))
684 except ValueError:
687 except ValueError:
685 report(
688 report(
686 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
689 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
687 )
690 )
688
691
689 backupjournal = b"%s.backupfiles" % file
692 backupjournal = b"%s.backupfiles" % file
690 if opener.exists(backupjournal):
693 if opener.exists(backupjournal):
691 fp = opener.open(backupjournal)
694 fp = opener.open(backupjournal)
692 lines = fp.readlines()
695 lines = fp.readlines()
693 if lines:
696 if lines:
694 ver = lines[0][:-1]
697 ver = lines[0][:-1]
695 if ver == (b'%d' % version):
698 if ver == (b'%d' % version):
696 for line in lines[1:]:
699 for line in lines[1:]:
697 if line:
700 if line:
698 # Shave off the trailing newline
701 # Shave off the trailing newline
699 line = line[:-1]
702 line = line[:-1]
700 l, f, b, c = line.split(b'\0')
703 l, f, b, c = line.split(b'\0')
701 backupentries.append((l, f, b, bool(c)))
704 backupentries.append((l, f, b, bool(c)))
702 else:
705 else:
703 report(
706 report(
704 _(
707 _(
705 b"journal was created by a different version of "
708 b"journal was created by a different version of "
706 b"Mercurial\n"
709 b"Mercurial\n"
707 )
710 )
708 )
711 )
709
712
710 _playback(
713 _playback(
711 file,
714 file,
712 report,
715 report,
713 opener,
716 opener,
714 vfsmap,
717 vfsmap,
715 entries,
718 entries,
716 backupentries,
719 backupentries,
717 checkambigfiles=checkambigfiles,
720 checkambigfiles=checkambigfiles,
718 )
721 )