transaction: add functionality to have multiple validators...
Pulkit Goyal
r45031:36f08ae8 default
@@ -1,727 +1,736 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from . import (
     error,
     pycompat,
     util,
 )
 from .utils import stringutil
 
 version = 2
 
 # These are the file generators that should only be executed after the
 # finalizers are done, since they rely on the output of the finalizers (like
 # the changelog having been written).
 postfinalizegenerators = {b'bookmarks', b'dirstate'}
 
 GEN_GROUP_ALL = b'all'
 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
 GEN_GROUP_POST_FINALIZE = b'postfinalize'
 
 
 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
             raise error.Abort(
                 _(
                     b'cannot use transaction when it is already committed/aborted'
                 )
             )
         return func(self, *args, **kwds)
 
     return _active
 
 
 def _playback(
     journal,
     report,
     opener,
     vfsmap,
     entries,
     backupentries,
     unlink=True,
     checkambigfiles=None,
 ):
     for f, o, _ignore in entries:
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
                 fp = opener(f, b'a', checkambig=checkambig)
                 if fp.tell() < o:
                     raise error.Abort(
                         _(
                             b"attempted to truncate %s to %d bytes, but it was "
                             b"already %d bytes\n"
                         )
                         % (f, o, fp.tell())
                     )
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError) as inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
     backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
             if f and b:
                 filepath = vfs.join(f)
                 backuppath = vfs.join(b)
                 checkambig = checkambigfiles and (f, l) in checkambigfiles
                 try:
                     util.copyfile(backuppath, filepath, checkambig=checkambig)
                     backupfiles.append(b)
                 except IOError:
                     report(_(b"failed to recover %s\n") % f)
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except (IOError, OSError) as inst:
                     if inst.errno != errno.ENOENT:
                         raise
         except (IOError, OSError, error.Abort):
             if not c:
                 raise
 
     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
         for f in backupfiles:
             if opener.exists(f):
                 opener.unlink(f)
     except (IOError, OSError, error.Abort):
         # only pure backup files remain, it is safe to ignore any error
         pass
 
 
 class transaction(util.transactional):
     def __init__(
         self,
         report,
         opener,
         vfsmap,
         journalname,
         undoname=None,
         after=None,
         createmode=None,
         validator=None,
         releasefn=None,
         checkambigfiles=None,
         name='<unnamed>',
     ):
         """Begin a new transaction
 
         Begins a new transaction that allows rolling back writes in the event of
         an exception.
 
         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `releasefn`: called after releasing (with transaction and result)
 
         `checkambigfiles` is a set of (path, vfs-location) tuples,
         which determine whether file stat ambiguity should be avoided
         for corresponding files.
152 """
152 """
153 self._count = 1
153 self._count = 1
154 self._usages = 1
154 self._usages = 1
155 self._report = report
155 self._report = report
156 # a vfs to the store content
156 # a vfs to the store content
157 self._opener = opener
157 self._opener = opener
158 # a map to access file in various {location -> vfs}
158 # a map to access file in various {location -> vfs}
159 vfsmap = vfsmap.copy()
159 vfsmap = vfsmap.copy()
160 vfsmap[b''] = opener # set default value
160 vfsmap[b''] = opener # set default value
161 self._vfsmap = vfsmap
161 self._vfsmap = vfsmap
162 self._after = after
162 self._after = after
163 self._entries = []
163 self._entries = []
164 self._map = {}
164 self._map = {}
165 self._journal = journalname
165 self._journal = journalname
166 self._undoname = undoname
166 self._undoname = undoname
167 self._queue = []
167 self._queue = []
-        # A callback to validate transaction content before closing it.
-        # should raise exception is anything is wrong.
-        # target user is repository hooks.
-        if validator is None:
-            validator = lambda tr: None
-        self._validator = validator
         # A callback to do something just after releasing transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
         self._releasefn = releasefn
 
         self._checkambigfiles = set()
         if checkambigfiles:
             self._checkambigfiles.update(checkambigfiles)
 
         self._names = [name]
 
         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
         self.changes = {}
 
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
         self._file = opener.open(self._journal, b"w")
 
         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
         # - if 'path' is empty, this is a temporary transaction file
         # - if 'location' is not empty, the path is outside main opener reach.
         #   use 'location' value as a key in a vfsmap to find the right 'vfs'
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)
 
         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)
 
         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has ever been written
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
+        # holds callback to call when validating the transaction
+        # should raise exception if anything is wrong
+        self._validatecallback = {}
+        if validator is not None:
+            self._validatecallback[b'001-userhooks'] = validator
         # hold callback for post transaction close
         self._postclosecallback = {}
         # holds callbacks to call during abort
         self._abortcallback = {}
 
     def __repr__(self):
         name = '/'.join(self._names)
         return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
         )
 
     def __del__(self):
         if self._journal:
             self._abort()
 
     @active
     def startgroup(self):
         """delay registration of file entry
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])
 
     @active
     def endgroup(self):
         """apply delayed registration of file entry.
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o, data in q:
             self._addentry(f, o, data)
 
     @active
     def add(self, file, offset, data=None):
         """record the state of an append-only file before update"""
         if file in self._map or file in self._backupmap:
             return
         if self._queue:
             self._queue[-1].append((file, offset, data))
             return
 
         self._addentry(file, offset, data)
 
     def _addentry(self, file, offset, data):
264 """add a append-only entry to memory and on-disk state"""
263 """add a append-only entry to memory and on-disk state"""
         if file in self._map or file in self._backupmap:
             return
         self._entries.append((file, offset, data))
         self._map[file] = len(self._entries) - 1
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()
 
     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
         if file in self._map or file in self._backupmap:
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''
 
         self._addbackupentry((location, file, backupfile, False))
 
     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()
 
     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file
 
         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))
 
     @active
     def addfilegenerator(
         self, genid, filenames, genfunc, order=0, location=b''
     ):
323 """add a function to generates some files at transaction commit
322 """add a function to generates some files at transaction commit
324
323
325 The `genfunc` argument is a function capable of generating proper
324 The `genfunc` argument is a function capable of generating proper
326 content of each entry in the `filename` tuple.
325 content of each entry in the `filename` tuple.
327
326
328 At transaction close time, `genfunc` will be called with one file
327 At transaction close time, `genfunc` will be called with one file
329 object argument per entries in `filenames`.
328 object argument per entries in `filenames`.
330
329
331 The transaction itself is responsible for the backup, creation and
330 The transaction itself is responsible for the backup, creation and
332 final write of such file.
331 final write of such file.
333
332
334 The `genid` argument is used to ensure the same set of file is only
333 The `genid` argument is used to ensure the same set of file is only
335 generated once. Call to `addfilegenerator` for a `genid` already
334 generated once. Call to `addfilegenerator` for a `genid` already
336 present will overwrite the old entry.
335 present will overwrite the old entry.
337
336
338 The `order` argument may be used to control the order in which multiple
337 The `order` argument may be used to control the order in which multiple
339 generator will be executed.
338 generator will be executed.
340
339
341 The `location` arguments may be used to indicate the files are located
340 The `location` arguments may be used to indicate the files are located
342 outside of the the standard directory for transaction. It should match
341 outside of the the standard directory for transaction. It should match
343 one of the key of the `transaction.vfsmap` dictionary.
342 one of the key of the `transaction.vfsmap` dictionary.
344 """
343 """
345 # For now, we are unable to do proper backup and restore of custom vfs
344 # For now, we are unable to do proper backup and restore of custom vfs
346 # but for bookmarks that are handled outside this mechanism.
345 # but for bookmarks that are handled outside this mechanism.
347 self._filegenerators[genid] = (order, filenames, genfunc, location)
346 self._filegenerators[genid] = (order, filenames, genfunc, location)
348
347
349 @active
348 @active
350 def removefilegenerator(self, genid):
349 def removefilegenerator(self, genid):
351 """reverse of addfilegenerator, remove a file generator function"""
350 """reverse of addfilegenerator, remove a file generator function"""
352 if genid in self._filegenerators:
351 if genid in self._filegenerators:
353 del self._filegenerators[genid]
352 del self._filegenerators[genid]
354
353
355 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
354 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
356 # write files registered for generation
355 # write files registered for generation
357 any = False
356 any = False
358
357
359 if group == GEN_GROUP_ALL:
358 if group == GEN_GROUP_ALL:
360 skip_post = skip_pre = False
359 skip_post = skip_pre = False
361 else:
360 else:
362 skip_pre = group == GEN_GROUP_POST_FINALIZE
361 skip_pre = group == GEN_GROUP_POST_FINALIZE
363 skip_post = group == GEN_GROUP_PRE_FINALIZE
362 skip_post = group == GEN_GROUP_PRE_FINALIZE
364
363
365 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
364 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
366 any = True
365 any = True
367 order, filenames, genfunc, location = entry
366 order, filenames, genfunc, location = entry
368
367
369 # for generation at closing, check if it's before or after finalize
368 # for generation at closing, check if it's before or after finalize
370 is_post = id in postfinalizegenerators
369 is_post = id in postfinalizegenerators
371 if skip_post and is_post:
370 if skip_post and is_post:
372 continue
371 continue
373 elif skip_pre and not is_post:
372 elif skip_pre and not is_post:
374 continue
373 continue
375
374
376 vfs = self._vfsmap[location]
375 vfs = self._vfsmap[location]
377 files = []
376 files = []
378 try:
377 try:
379 for name in filenames:
378 for name in filenames:
380 name += suffix
379 name += suffix
381 if suffix:
380 if suffix:
382 self.registertmp(name, location=location)
381 self.registertmp(name, location=location)
383 checkambig = False
382 checkambig = False
384 else:
383 else:
385 self.addbackup(name, location=location)
384 self.addbackup(name, location=location)
386 checkambig = (name, location) in self._checkambigfiles
385 checkambig = (name, location) in self._checkambigfiles
387 files.append(
386 files.append(
388 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
387 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
389 )
388 )
390 genfunc(*files)
389 genfunc(*files)
391 for f in files:
390 for f in files:
392 f.close()
391 f.close()
393 # skip discard() loop since we're sure no open file remains
392 # skip discard() loop since we're sure no open file remains
394 del files[:]
393 del files[:]
395 finally:
394 finally:
396 for f in files:
395 for f in files:
397 f.discard()
396 f.discard()
398 return any
397 return any
399
398
400 @active
399 @active
401 def find(self, file):
400 def find(self, file):
402 if file in self._map:
401 if file in self._map:
403 return self._entries[self._map[file]]
402 return self._entries[self._map[file]]
404 if file in self._backupmap:
403 if file in self._backupmap:
405 return self._backupentries[self._backupmap[file]]
404 return self._backupentries[self._backupmap[file]]
406 return None
405 return None
407
406
408 @active
407 @active
409 def replace(self, file, offset, data=None):
408 def replace(self, file, offset, data=None):
410 '''
409 '''
411 replace can only replace already committed entries
410 replace can only replace already committed entries
412 that are not pending in the queue
411 that are not pending in the queue
413 '''
412 '''
414
413
415 if file not in self._map:
414 if file not in self._map:
416 raise KeyError(file)
415 raise KeyError(file)
417 index = self._map[file]
416 index = self._map[file]
418 self._entries[index] = (file, offset, data)
417 self._entries[index] = (file, offset, data)
419 self._file.write(b"%s\0%d\n" % (file, offset))
418 self._file.write(b"%s\0%d\n" % (file, offset))
420 self._file.flush()
419 self._file.flush()
421
420
422 @active
421 @active
423 def nest(self, name='<unnamed>'):
422 def nest(self, name='<unnamed>'):
424 self._count += 1
423 self._count += 1
425 self._usages += 1
424 self._usages += 1
426 self._names.append(name)
425 self._names.append(name)
427 return self
426 return self
428
427
429 def release(self):
428 def release(self):
430 if self._count > 0:
429 if self._count > 0:
431 self._usages -= 1
430 self._usages -= 1
432 if self._names:
431 if self._names:
433 self._names.pop()
432 self._names.pop()
434 # if the transaction scopes are left without being closed, fail
433 # if the transaction scopes are left without being closed, fail
435 if self._count > 0 and self._usages == 0:
434 if self._count > 0 and self._usages == 0:
436 self._abort()
435 self._abort()
437
436
438 def running(self):
437 def running(self):
439 return self._count > 0
438 return self._count > 0
440
439
441 def addpending(self, category, callback):
440 def addpending(self, category, callback):
442 """add a callback to be called when the transaction is pending
441 """add a callback to be called when the transaction is pending
443
442
444 The transaction will be given as callback's first argument.
443 The transaction will be given as callback's first argument.
445
444
446 Category is a unique identifier to allow overwriting an old callback
445 Category is a unique identifier to allow overwriting an old callback
447 with a newer callback.
446 with a newer callback.
448 """
447 """
449 self._pendingcallback[category] = callback
448 self._pendingcallback[category] = callback
450
449
451 @active
450 @active
452 def writepending(self):
451 def writepending(self):
453 '''write pending file to temporary version
452 '''write pending file to temporary version
454
453
455 This is used to allow hooks to view a transaction before commit'''
454 This is used to allow hooks to view a transaction before commit'''
456 categories = sorted(self._pendingcallback)
455 categories = sorted(self._pendingcallback)
457 for cat in categories:
456 for cat in categories:
458 # remove callback since the data will have been flushed
457 # remove callback since the data will have been flushed
459 any = self._pendingcallback.pop(cat)(self)
458 any = self._pendingcallback.pop(cat)(self)
460 self._anypending = self._anypending or any
459 self._anypending = self._anypending or any
461 self._anypending |= self._generatefiles(suffix=b'.pending')
460 self._anypending |= self._generatefiles(suffix=b'.pending')
462 return self._anypending
461 return self._anypending
463
462
464 @active
463 @active
465 def hasfinalize(self, category):
464 def hasfinalize(self, category):
466 """check is a callback already exist for a category
465 """check is a callback already exist for a category
467 """
466 """
468 return category in self._finalizecallback
467 return category in self._finalizecallback
469
468
470 @active
469 @active
471 def addfinalize(self, category, callback):
470 def addfinalize(self, category, callback):
472 """add a callback to be called when the transaction is closed
471 """add a callback to be called when the transaction is closed
473
472
474 The transaction will be given as callback's first argument.
473 The transaction will be given as callback's first argument.
475
474
476 Category is a unique identifier to allow overwriting old callbacks with
475 Category is a unique identifier to allow overwriting old callbacks with
477 newer callbacks.
476 newer callbacks.
478 """
477 """
479 self._finalizecallback[category] = callback
478 self._finalizecallback[category] = callback
480
479
481 @active
480 @active
482 def addpostclose(self, category, callback):
481 def addpostclose(self, category, callback):
483 """add or replace a callback to be called after the transaction closed
482 """add or replace a callback to be called after the transaction closed
484
483
485 The transaction will be given as callback's first argument.
484 The transaction will be given as callback's first argument.
486
485
487 Category is a unique identifier to allow overwriting an old callback
486 Category is a unique identifier to allow overwriting an old callback
488 with a newer callback.
487 with a newer callback.
489 """
488 """
490 self._postclosecallback[category] = callback
489 self._postclosecallback[category] = callback
491
490
492 @active
491 @active
493 def getpostclose(self, category):
492 def getpostclose(self, category):
494 """return a postclose callback added before, or None"""
493 """return a postclose callback added before, or None"""
495 return self._postclosecallback.get(category, None)
494 return self._postclosecallback.get(category, None)
496
495
497 @active
496 @active
498 def addabort(self, category, callback):
497 def addabort(self, category, callback):
499 """add a callback to be called when the transaction is aborted.
498 """add a callback to be called when the transaction is aborted.
500
499
501 The transaction will be given as the first argument to the callback.
500 The transaction will be given as the first argument to the callback.
502
501
503 Category is a unique identifier to allow overwriting an old callback
502 Category is a unique identifier to allow overwriting an old callback
504 with a newer callback.
503 with a newer callback.
505 """
504 """
506 self._abortcallback[category] = callback
505 self._abortcallback[category] = callback
507
506
508 @active
507 @active
+    def addvalidator(self, category, callback):
+        """ adds a callback to be called when validating the transaction.
+
+        The transaction will be given as the first argument to the callback.
+
+        callback should raise exception to abort the transaction """
+        self._validatecallback[category] = callback
+
+    @active
     def close(self):
         '''commit the transaction'''
         if self._count == 1:
-            self._validator(self)  # will raise exception if needed
-            self._validator = None  # Help prevent cycles.
+            for category in sorted(self._validatecallback):
+                self._validatecallback[category](self)
+            self._validatecallback = None  # Help prevent cycles.
             self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
                 categories = sorted(callbacks)
                 for cat in categories:
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
 
         self._count -= 1
         if self._count != 0:
             return
         self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._entries = []
         self._writeundo()
         if self._after:
             self._after()
             self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
560 b"couldn't remove %s: unknown cache location"
569 b"couldn't remove %s: unknown cache location"
561 b"%s\n" % (b, l)
570 b"%s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._backupentries = []
         self._journal = None
 
         self._releasefn(self, True)  # notify success of closing transaction
         self._releasefn = None  # Help prevent cycles.
 
         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
         # Prevent double usage and help clear cycles.
         self._postclosecallback = None
 
     @active
     def abort(self):
         '''abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)'''
         self._abort()
 
     def _writeundo(self):
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return
         undobackupfile = self._opener.open(
             b"%s.backupfiles" % self._undoname, b'w'
         )
         undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
                 u = b''
             else:
                 if l not in self._vfsmap and c:
                     self._report(
611 b"couldn't remove %s: unknown cache location"
620 b"couldn't remove %s: unknown cache location"
612 b"%s\n" % (b, l)
621 b"%s\n" % (b, l)
                     )
                     continue
                 vfs = self._vfsmap[l]
                 base, name = vfs.split(b)
                 assert name.startswith(self._journal), name
                 uname = name.replace(self._journal, self._undoname, 1)
                 u = vfs.reljoin(base, uname)
             util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()
 
     def _abort(self):
         self._count = 0
         self._usages = 0
         self._file.close()
         self._backupsfile.close()
 
         try:
             if not self._entries and not self._backupentries:
                 if self._backupjournal:
                     self._opener.unlink(self._backupjournal)
                 if self._journal:
                     self._opener.unlink(self._journal)
                 return
 
             self._report(_(b"transaction abort!\n"))
 
             try:
                 for cat in sorted(self._abortcallback):
                     self._abortcallback[cat](self)
                 # Prevent double usage and help clear cycles.
                 self._abortcallback = None
                 _playback(
                     self._journal,
                     self._report,
                     self._opener,
                     self._vfsmap,
                     self._entries,
                     self._backupentries,
                     False,
                     checkambigfiles=self._checkambigfiles,
                 )
                 self._report(_(b"rollback completed\n"))
             except BaseException as exc:
                 self._report(_(b"rollback failed - please run hg recover\n"))
                 self._report(
                     _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                 )
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.
 
 
 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
     """Rolls back the transaction contained in the given file
 
     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.
 
     * `file`: a file containing a list of entries, specifying where
     to truncate each file. The file should contain a list of
     file\0offset pairs, delimited by newlines. The corresponding
     '*.backupfiles' file should contain a list of file\0backupfile
     pairs, delimited by \0.
 
     `checkambigfiles` is a set of (path, vfs-location) tuples,
     which determine whether file stat ambiguity should be avoided
     when restoring corresponding files.
682 """
691 """
683 entries = []
692 entries = []
684 backupentries = []
693 backupentries = []
685
694
686 fp = opener.open(file)
695 fp = opener.open(file)
687 lines = fp.readlines()
696 lines = fp.readlines()
688 fp.close()
697 fp.close()
689 for l in lines:
698 for l in lines:
690 try:
699 try:
691 f, o = l.split(b'\0')
700 f, o = l.split(b'\0')
692 entries.append((f, int(o), None))
701 entries.append((f, int(o), None))
693 except ValueError:
702 except ValueError:
694 report(
703 report(
695 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
704 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
696 )
705 )
697
706
698 backupjournal = b"%s.backupfiles" % file
707 backupjournal = b"%s.backupfiles" % file
699 if opener.exists(backupjournal):
708 if opener.exists(backupjournal):
700 fp = opener.open(backupjournal)
709 fp = opener.open(backupjournal)
701 lines = fp.readlines()
710 lines = fp.readlines()
702 if lines:
711 if lines:
703 ver = lines[0][:-1]
712 ver = lines[0][:-1]
704 if ver == (b'%d' % version):
713 if ver == (b'%d' % version):
705 for line in lines[1:]:
714 for line in lines[1:]:
706 if line:
715 if line:
707 # Shave off the trailing newline
716 # Shave off the trailing newline
708 line = line[:-1]
717 line = line[:-1]
709 l, f, b, c = line.split(b'\0')
718 l, f, b, c = line.split(b'\0')
710 backupentries.append((l, f, b, bool(c)))
719 backupentries.append((l, f, b, bool(c)))
711 else:
720 else:
712 report(
721 report(
713 _(
722 _(
714 b"journal was created by a different version of "
723 b"journal was created by a different version of "
715 b"Mercurial\n"
724 b"Mercurial\n"
716 )
725 )
717 )
726 )
718
727
719 _playback(
728 _playback(
720 file,
729 file,
721 report,
730 report,
722 opener,
731 opener,
723 vfsmap,
732 vfsmap,
724 entries,
733 entries,
725 backupentries,
734 backupentries,
726 checkambigfiles=checkambigfiles,
735 checkambigfiles=checkambigfiles,
727 )
736 )
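
The net effect of the change: instead of a single `_validator` attribute, the transaction now keeps a `_validatecallback` dict, seeds it with the constructor's `validator` argument under the b'001-userhooks' key, and `close()` runs every registered callback in sorted(category) order before finalization. The following is a minimal, self-contained sketch of that scheme; the `minitransaction` class, the `refuse` callback, and the category names are illustrative stand-ins, not Mercurial API.

# A stand-in mirroring the new multi-validator mechanism; not Mercurial code.
class minitransaction(object):
    def __init__(self, validator=None):
        # holds callbacks to call when validating the transaction
        self._validatecallback = {}
        if validator is not None:
            # the legacy single validator keeps working under a fixed category
            self._validatecallback[b'001-userhooks'] = validator

    def addvalidator(self, category, callback):
        # callback receives the transaction; it should raise to abort
        self._validatecallback[category] = callback

    def close(self):
        # validators run in sorted(category) order, as in transaction.close()
        for category in sorted(self._validatecallback):
            self._validatecallback[category](self)
        self._validatecallback = None  # Help prevent cycles.


def refuse(tr):
    raise RuntimeError('validator vetoed the transaction')


tr = minitransaction()
tr.addvalidator(b'100-example', refuse)
try:
    tr.close()
except RuntimeError as exc:
    print(exc)  # validator vetoed the transaction

Because categories are sorted bytestrings, the b'001-' prefix keeps the constructor-supplied hook validator running before later registrations such as b'100-...'.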