##// END OF EJS Templates
transaction: add a `hasfinalize` method...
marmoute -
r44508:8e095512 default
parent child Browse files
Show More
@@ -1,712 +1,718 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import stringutil
24 from .utils import stringutil
25
25
26 version = 2
26 version = 2
27
27
28 # These are the file generators that should only be executed after the
28 # These are the file generators that should only be executed after the
29 # finalizers are done, since they rely on the output of the finalizers (like
29 # finalizers are done, since they rely on the output of the finalizers (like
30 # the changelog having been written).
30 # the changelog having been written).
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
32
32
33 gengroupall = b'all'
33 gengroupall = b'all'
34 gengroupprefinalize = b'prefinalize'
34 gengroupprefinalize = b'prefinalize'
35 gengrouppostfinalize = b'postfinalize'
35 gengrouppostfinalize = b'postfinalize'
36
36
37
37
def active(func):
    """Decorator: restrict *func* to a live transaction.

    The wrapped method raises ``error.Abort`` when invoked after the
    transaction has already been committed or aborted (``_count == 0``).
    """

    def _active(self, *args, **kwds):
        if self._count == 0:
            msg = _(
                b'cannot use transaction when it is already committed/aborted'
            )
            raise error.Abort(msg)
        return func(self, *args, **kwds)

    return _active
49
49
50
50
51 def _playback(
51 def _playback(
52 journal,
52 journal,
53 report,
53 report,
54 opener,
54 opener,
55 vfsmap,
55 vfsmap,
56 entries,
56 entries,
57 backupentries,
57 backupentries,
58 unlink=True,
58 unlink=True,
59 checkambigfiles=None,
59 checkambigfiles=None,
60 ):
60 ):
61 for f, o, _ignore in entries:
61 for f, o, _ignore in entries:
62 if o or not unlink:
62 if o or not unlink:
63 checkambig = checkambigfiles and (f, b'') in checkambigfiles
63 checkambig = checkambigfiles and (f, b'') in checkambigfiles
64 try:
64 try:
65 fp = opener(f, b'a', checkambig=checkambig)
65 fp = opener(f, b'a', checkambig=checkambig)
66 if fp.tell() < o:
66 if fp.tell() < o:
67 raise error.Abort(
67 raise error.Abort(
68 _(
68 _(
69 b"attempted to truncate %s to %d bytes, but it was "
69 b"attempted to truncate %s to %d bytes, but it was "
70 b"already %d bytes\n"
70 b"already %d bytes\n"
71 )
71 )
72 % (f, o, fp.tell())
72 % (f, o, fp.tell())
73 )
73 )
74 fp.truncate(o)
74 fp.truncate(o)
75 fp.close()
75 fp.close()
76 except IOError:
76 except IOError:
77 report(_(b"failed to truncate %s\n") % f)
77 report(_(b"failed to truncate %s\n") % f)
78 raise
78 raise
79 else:
79 else:
80 try:
80 try:
81 opener.unlink(f)
81 opener.unlink(f)
82 except (IOError, OSError) as inst:
82 except (IOError, OSError) as inst:
83 if inst.errno != errno.ENOENT:
83 if inst.errno != errno.ENOENT:
84 raise
84 raise
85
85
86 backupfiles = []
86 backupfiles = []
87 for l, f, b, c in backupentries:
87 for l, f, b, c in backupentries:
88 if l not in vfsmap and c:
88 if l not in vfsmap and c:
89 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
89 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
90 vfs = vfsmap[l]
90 vfs = vfsmap[l]
91 try:
91 try:
92 if f and b:
92 if f and b:
93 filepath = vfs.join(f)
93 filepath = vfs.join(f)
94 backuppath = vfs.join(b)
94 backuppath = vfs.join(b)
95 checkambig = checkambigfiles and (f, l) in checkambigfiles
95 checkambig = checkambigfiles and (f, l) in checkambigfiles
96 try:
96 try:
97 util.copyfile(backuppath, filepath, checkambig=checkambig)
97 util.copyfile(backuppath, filepath, checkambig=checkambig)
98 backupfiles.append(b)
98 backupfiles.append(b)
99 except IOError:
99 except IOError:
100 report(_(b"failed to recover %s\n") % f)
100 report(_(b"failed to recover %s\n") % f)
101 else:
101 else:
102 target = f or b
102 target = f or b
103 try:
103 try:
104 vfs.unlink(target)
104 vfs.unlink(target)
105 except (IOError, OSError) as inst:
105 except (IOError, OSError) as inst:
106 if inst.errno != errno.ENOENT:
106 if inst.errno != errno.ENOENT:
107 raise
107 raise
108 except (IOError, OSError, error.Abort):
108 except (IOError, OSError, error.Abort):
109 if not c:
109 if not c:
110 raise
110 raise
111
111
112 backuppath = b"%s.backupfiles" % journal
112 backuppath = b"%s.backupfiles" % journal
113 if opener.exists(backuppath):
113 if opener.exists(backuppath):
114 opener.unlink(backuppath)
114 opener.unlink(backuppath)
115 opener.unlink(journal)
115 opener.unlink(journal)
116 try:
116 try:
117 for f in backupfiles:
117 for f in backupfiles:
118 if opener.exists(f):
118 if opener.exists(f):
119 opener.unlink(f)
119 opener.unlink(f)
120 except (IOError, OSError, error.Abort):
120 except (IOError, OSError, error.Abort):
121 # only pure backup file remains, it is sage to ignore any error
121 # only pure backup file remains, it is sage to ignore any error
122 pass
122 pass
123
123
124
124
125 class transaction(util.transactional):
125 class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # nesting depth (see nest()/close()) and live-usage refcount
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        # journaled (file, offset, data) entries and an index by file path
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        # pending entry groups opened by startgroup() (see add())
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        # names of this transaction and any nested scopes (for __repr__)
        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        # the on-disk journal recording (file, offset) pairs for rollback
        self._file = opener.open(self._journal, b"w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
221
221
222 def __repr__(self):
222 def __repr__(self):
223 name = '/'.join(self._names)
223 name = '/'.join(self._names)
224 return '<transaction name=%s, count=%d, usages=%d>' % (
224 return '<transaction name=%s, count=%d, usages=%d>' % (
225 name,
225 name,
226 self._count,
226 self._count,
227 self._usages,
227 self._usages,
228 )
228 )
229
229
    def __del__(self):
        # Last-resort rollback: if the transaction object is garbage
        # collected while its journal is still set, abort so on-disk state
        # is rolled back instead of being left half-written.
        if self._journal:
            self._abort()
233
233
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        # push a fresh pending list; entries recorded via add() accumulate
        # there until endgroup() flushes them all at once
        self._queue.append([])
241
241
242 @active
242 @active
243 def endgroup(self):
243 def endgroup(self):
244 """apply delayed registration of file entry.
244 """apply delayed registration of file entry.
245
245
246 This is used by strip to delay vision of strip offset. The transaction
246 This is used by strip to delay vision of strip offset. The transaction
247 sees either none or all of the strip actions to be done."""
247 sees either none or all of the strip actions to be done."""
248 q = self._queue.pop()
248 q = self._queue.pop()
249 for f, o, data in q:
249 for f, o, data in q:
250 self._addentry(f, o, data)
250 self._addentry(f, o, data)
251
251
252 @active
252 @active
253 def add(self, file, offset, data=None):
253 def add(self, file, offset, data=None):
254 """record the state of an append-only file before update"""
254 """record the state of an append-only file before update"""
255 if file in self._map or file in self._backupmap:
255 if file in self._map or file in self._backupmap:
256 return
256 return
257 if self._queue:
257 if self._queue:
258 self._queue[-1].append((file, offset, data))
258 self._queue[-1].append((file, offset, data))
259 return
259 return
260
260
261 self._addentry(file, offset, data)
261 self._addentry(file, offset, data)
262
262
    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        # already journaled or backed up: nothing more to record
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
272
272
    @active
    def addbackup(self, file, hardlink=True, location=b''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = b'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        # already journaled or backed up: nothing more to record
        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = b"%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # an empty 'backuppath' records that no file existed at backup
            # time (see the entry-format comment in __init__)
            backupfile = b''

        self._addbackupentry((location, file, backupfile, False))
302
302
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        # entry is (location, file, backuppath, cache); index by file path
        # so add()/addbackup() can detect duplicates
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()
309
309
    @active
    def registertmp(self, tmpfile, location=b''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # an empty 'path' marks the entry as temporary; 'backuppath' carries
        # the temp file itself so cleanup simply unlinks it
        self._addbackupentry((location, b'', tmpfile, False))
318
318
    @active
    def addfilegenerator(
        self, genid, filenames, genfunc, order=0, location=b''
    ):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)
348
348
349 @active
349 @active
350 def removefilegenerator(self, genid):
350 def removefilegenerator(self, genid):
351 """reverse of addfilegenerator, remove a file generator function"""
351 """reverse of addfilegenerator, remove a file generator function"""
352 if genid in self._filegenerators:
352 if genid in self._filegenerators:
353 del self._filegenerators[genid]
353 del self._filegenerators[genid]
354
354
355 def _generatefiles(self, suffix=b'', group=gengroupall):
355 def _generatefiles(self, suffix=b'', group=gengroupall):
356 # write files registered for generation
356 # write files registered for generation
357 any = False
357 any = False
358 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
358 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
359 any = True
359 any = True
360 order, filenames, genfunc, location = entry
360 order, filenames, genfunc, location = entry
361
361
362 # for generation at closing, check if it's before or after finalize
362 # for generation at closing, check if it's before or after finalize
363 postfinalize = group == gengrouppostfinalize
363 postfinalize = group == gengrouppostfinalize
364 if (
364 if (
365 group != gengroupall
365 group != gengroupall
366 and (id in postfinalizegenerators) != postfinalize
366 and (id in postfinalizegenerators) != postfinalize
367 ):
367 ):
368 continue
368 continue
369
369
370 vfs = self._vfsmap[location]
370 vfs = self._vfsmap[location]
371 files = []
371 files = []
372 try:
372 try:
373 for name in filenames:
373 for name in filenames:
374 name += suffix
374 name += suffix
375 if suffix:
375 if suffix:
376 self.registertmp(name, location=location)
376 self.registertmp(name, location=location)
377 checkambig = False
377 checkambig = False
378 else:
378 else:
379 self.addbackup(name, location=location)
379 self.addbackup(name, location=location)
380 checkambig = (name, location) in self._checkambigfiles
380 checkambig = (name, location) in self._checkambigfiles
381 files.append(
381 files.append(
382 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
382 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
383 )
383 )
384 genfunc(*files)
384 genfunc(*files)
385 for f in files:
385 for f in files:
386 f.close()
386 f.close()
387 # skip discard() loop since we're sure no open file remains
387 # skip discard() loop since we're sure no open file remains
388 del files[:]
388 del files[:]
389 finally:
389 finally:
390 for f in files:
390 for f in files:
391 f.discard()
391 f.discard()
392 return any
392 return any
393
393
394 @active
394 @active
395 def find(self, file):
395 def find(self, file):
396 if file in self._map:
396 if file in self._map:
397 return self._entries[self._map[file]]
397 return self._entries[self._map[file]]
398 if file in self._backupmap:
398 if file in self._backupmap:
399 return self._backupentries[self._backupmap[file]]
399 return self._backupentries[self._backupmap[file]]
400 return None
400 return None
401
401
    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        # re-record the (file, offset) pair so journal replay truncates to
        # the new offset
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()
415
415
416 @active
416 @active
417 def nest(self, name='<unnamed>'):
417 def nest(self, name='<unnamed>'):
418 self._count += 1
418 self._count += 1
419 self._usages += 1
419 self._usages += 1
420 self._names.append(name)
420 self._names.append(name)
421 return self
421 return self
422
422
    def release(self):
        # drop one usage reference and pop the matching scope name
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()
431
431
432 def running(self):
432 def running(self):
433 return self._count > 0
433 return self._count > 0
434
434
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        # consumed (and removed) by writepending(), in sorted category order
        self._pendingcallback[category] = callback
444
444
445 @active
445 @active
446 def writepending(self):
446 def writepending(self):
447 '''write pending file to temporary version
447 '''write pending file to temporary version
448
448
449 This is used to allow hooks to view a transaction before commit'''
449 This is used to allow hooks to view a transaction before commit'''
450 categories = sorted(self._pendingcallback)
450 categories = sorted(self._pendingcallback)
451 for cat in categories:
451 for cat in categories:
452 # remove callback since the data will have been flushed
452 # remove callback since the data will have been flushed
453 any = self._pendingcallback.pop(cat)(self)
453 any = self._pendingcallback.pop(cat)(self)
454 self._anypending = self._anypending or any
454 self._anypending = self._anypending or any
455 self._anypending |= self._generatefiles(suffix=b'.pending')
455 self._anypending |= self._generatefiles(suffix=b'.pending')
456 return self._anypending
456 return self._anypending
457
457
    @active
    def hasfinalize(self, category):
        """check whether a finalize callback already exists for a category
        """
        return category in self._finalizecallback
463
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        # run by close(), in sorted category order
        self._finalizecallback[category] = callback
468
474
    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
479
485
480 @active
486 @active
481 def getpostclose(self, category):
487 def getpostclose(self, category):
482 """return a postclose callback added before, or None"""
488 """return a postclose callback added before, or None"""
483 return self._postclosecallback.get(category, None)
489 return self._postclosecallback.get(category, None)
484
490
    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
495
501
    @active
    def close(self):
        '''commit the transaction

        On the outermost nesting level this runs the validator, flushes the
        pre- and post-finalize file generators, runs the finalize callbacks,
        removes temporary and backup files, writes the undo journal and
        finally runs the post-close callbacks.  Inner (nested) calls only
        decrement the usage count.
        '''
        if self._count == 1:
            self._validator(self)  # will raise exception if needed
            self._validator = None  # Help prevent cycles.
            # file generators that the finalizers depend on must run first
            self._generatefiles(group=gengroupprefinalize)
            # sorted() gives a deterministic callback order across runs
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            # generators that rely on the finalizers' output run last
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            # still nested inside an outer transaction scope; defer cleanup
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        # (entries with an empty 'f' field are temporary-file backups)
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener; 'c' marks
                    # cache entries where removal failure is non-fatal.
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        # preserve backup files for a later 'hg rollback' before deleting
        # the journals
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # second pass: remove the remaining (non-temporary) backup files
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
572
578
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        # All the rollback work lives in _abort(); this public wrapper only
        # exists so the @active guard applies to explicit aborts.
        self._abort()
579
585
580 def _writeundo(self):
586 def _writeundo(self):
581 """write transaction data for possible future undo call"""
587 """write transaction data for possible future undo call"""
582 if self._undoname is None:
588 if self._undoname is None:
583 return
589 return
584 undobackupfile = self._opener.open(
590 undobackupfile = self._opener.open(
585 b"%s.backupfiles" % self._undoname, b'w'
591 b"%s.backupfiles" % self._undoname, b'w'
586 )
592 )
587 undobackupfile.write(b'%d\n' % version)
593 undobackupfile.write(b'%d\n' % version)
588 for l, f, b, c in self._backupentries:
594 for l, f, b, c in self._backupentries:
589 if not f: # temporary file
595 if not f: # temporary file
590 continue
596 continue
591 if not b:
597 if not b:
592 u = b''
598 u = b''
593 else:
599 else:
594 if l not in self._vfsmap and c:
600 if l not in self._vfsmap and c:
595 self._report(
601 self._report(
596 b"couldn't remove %s: unknown cache location"
602 b"couldn't remove %s: unknown cache location"
597 b"%s\n" % (b, l)
603 b"%s\n" % (b, l)
598 )
604 )
599 continue
605 continue
600 vfs = self._vfsmap[l]
606 vfs = self._vfsmap[l]
601 base, name = vfs.split(b)
607 base, name = vfs.split(b)
602 assert name.startswith(self._journal), name
608 assert name.startswith(self._journal), name
603 uname = name.replace(self._journal, self._undoname, 1)
609 uname = name.replace(self._journal, self._undoname, 1)
604 u = vfs.reljoin(base, uname)
610 u = vfs.reljoin(base, uname)
605 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
611 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
606 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
612 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
607 undobackupfile.close()
613 undobackupfile.close()
608
614
    def _abort(self):
        # Roll the transaction back: run abort callbacks, replay the
        # journal backups via _playback(), and release the lock callback.
        # Zeroing the counters first marks the transaction inactive
        # (presumably so the @active-guarded methods refuse further use --
        # the decorator's definition is outside this view; confirm).
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            # Fast path: nothing was written, so there is nothing to roll
            # back -- just delete the (empty) journal files.
            if not self._entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            # BaseException: even KeyboardInterrupt/SystemExit during the
            # replay should produce the "run hg recover" hint before
            # propagating.
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            # Always notify failure and drop references, whether the
            # rollback succeeded, failed, or was skipped by the fast path.
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
650
656
651
657
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # read the truncation journal: one "file\0offset" record per line
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            # int() ignores the trailing newline left in the offset field
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # fixed: this handle was previously never closed (resource leak)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        # NOTE(review): bool(c) is True for ANY non-empty
                        # field, including b'0'; presumably the writer only
                        # emits b'' or b'1' here -- confirm against the
                        # journal writer before relying on this.
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now