##// END OF EJS Templates
transaction: use ProgrammingError for when a committed transaction is used...
Martin von Zweigbergk -
r46330:5df1655e default
parent child Browse files
Show More
@@ -1,736 +1,734 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import stringutil
24 from .utils import stringutil
25
25
26 version = 2
26 version = 2
27
27
28 # These are the file generators that should only be executed after the
28 # These are the file generators that should only be executed after the
29 # finalizers are done, since they rely on the output of the finalizers (like
29 # finalizers are done, since they rely on the output of the finalizers (like
30 # the changelog having been written).
30 # the changelog having been written).
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
32
32
33 GEN_GROUP_ALL = b'all'
33 GEN_GROUP_ALL = b'all'
34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
36
36
37
37
def active(func):
    """Decorator: forbid calling *func* on a finished transaction.

    Once the transaction has been committed or aborted (its count has
    dropped to zero), any further use is a programming mistake, so the
    wrapper raises ``error.ProgrammingError`` instead of running *func*.
    """

    def _active(self, *args, **kwds):
        if not self._count:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active
49
47
50
48
def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    """Replay a journal to undo an interrupted transaction.

    * `entries`: (file, offset, data) triples; each file is truncated back
      to `offset`, or unlinked when the offset is 0 and `unlink` is true.
    * `backupentries`: (location, file, backupfile, cache) tuples; backup
      copies are restored over the modified files, then removed.
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity should be avoided while truncating/restoring.

    Raises error.Abort if a file is shorter than the recorded offset
    (the journal no longer matches on-disk state).
    """
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    # the file is shorter than the journal expects: truncating
                    # would not restore the recorded state, so bail out
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # a missing file is fine: it was never (re)created
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                # restore the backup copy over the modified file
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_(b"failed to recover %s\n") % f)
            else:
                # temporary file (empty 'f') or backup of a file that did
                # not exist (empty 'b'): just remove whichever is set
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            # cache entries (c set) are best-effort only
            if not c:
                raise

    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass
123
121
124
122
125 class transaction(util.transactional):
123 class transaction(util.transactional):
126 def __init__(
124 def __init__(
127 self,
125 self,
128 report,
126 report,
129 opener,
127 opener,
130 vfsmap,
128 vfsmap,
131 journalname,
129 journalname,
132 undoname=None,
130 undoname=None,
133 after=None,
131 after=None,
134 createmode=None,
132 createmode=None,
135 validator=None,
133 validator=None,
136 releasefn=None,
134 releasefn=None,
137 checkambigfiles=None,
135 checkambigfiles=None,
138 name='<unnamed>',
136 name='<unnamed>',
139 ):
137 ):
140 """Begin a new transaction
138 """Begin a new transaction
141
139
142 Begins a new transaction that allows rolling back writes in the event of
140 Begins a new transaction that allows rolling back writes in the event of
143 an exception.
141 an exception.
144
142
145 * `after`: called after the transaction has been committed
143 * `after`: called after the transaction has been committed
146 * `createmode`: the mode of the journal file that will be created
144 * `createmode`: the mode of the journal file that will be created
147 * `releasefn`: called after releasing (with transaction and result)
145 * `releasefn`: called after releasing (with transaction and result)
148
146
149 `checkambigfiles` is a set of (path, vfs-location) tuples,
147 `checkambigfiles` is a set of (path, vfs-location) tuples,
150 which determine whether file stat ambiguity should be avoided
148 which determine whether file stat ambiguity should be avoided
151 for corresponded files.
149 for corresponded files.
152 """
150 """
153 self._count = 1
151 self._count = 1
154 self._usages = 1
152 self._usages = 1
155 self._report = report
153 self._report = report
156 # a vfs to the store content
154 # a vfs to the store content
157 self._opener = opener
155 self._opener = opener
158 # a map to access file in various {location -> vfs}
156 # a map to access file in various {location -> vfs}
159 vfsmap = vfsmap.copy()
157 vfsmap = vfsmap.copy()
160 vfsmap[b''] = opener # set default value
158 vfsmap[b''] = opener # set default value
161 self._vfsmap = vfsmap
159 self._vfsmap = vfsmap
162 self._after = after
160 self._after = after
163 self._entries = []
161 self._entries = []
164 self._map = {}
162 self._map = {}
165 self._journal = journalname
163 self._journal = journalname
166 self._undoname = undoname
164 self._undoname = undoname
167 self._queue = []
165 self._queue = []
168 # A callback to do something just after releasing transaction.
166 # A callback to do something just after releasing transaction.
169 if releasefn is None:
167 if releasefn is None:
170 releasefn = lambda tr, success: None
168 releasefn = lambda tr, success: None
171 self._releasefn = releasefn
169 self._releasefn = releasefn
172
170
173 self._checkambigfiles = set()
171 self._checkambigfiles = set()
174 if checkambigfiles:
172 if checkambigfiles:
175 self._checkambigfiles.update(checkambigfiles)
173 self._checkambigfiles.update(checkambigfiles)
176
174
177 self._names = [name]
175 self._names = [name]
178
176
179 # A dict dedicated to precisely tracking the changes introduced in the
177 # A dict dedicated to precisely tracking the changes introduced in the
180 # transaction.
178 # transaction.
181 self.changes = {}
179 self.changes = {}
182
180
183 # a dict of arguments to be passed to hooks
181 # a dict of arguments to be passed to hooks
184 self.hookargs = {}
182 self.hookargs = {}
185 self._file = opener.open(self._journal, b"w")
183 self._file = opener.open(self._journal, b"w")
186
184
187 # a list of ('location', 'path', 'backuppath', cache) entries.
185 # a list of ('location', 'path', 'backuppath', cache) entries.
188 # - if 'backuppath' is empty, no file existed at backup time
186 # - if 'backuppath' is empty, no file existed at backup time
189 # - if 'path' is empty, this is a temporary transaction file
187 # - if 'path' is empty, this is a temporary transaction file
190 # - if 'location' is not empty, the path is outside main opener reach.
188 # - if 'location' is not empty, the path is outside main opener reach.
191 # use 'location' value as a key in a vfsmap to find the right 'vfs'
189 # use 'location' value as a key in a vfsmap to find the right 'vfs'
192 # (cache is currently unused)
190 # (cache is currently unused)
193 self._backupentries = []
191 self._backupentries = []
194 self._backupmap = {}
192 self._backupmap = {}
195 self._backupjournal = b"%s.backupfiles" % self._journal
193 self._backupjournal = b"%s.backupfiles" % self._journal
196 self._backupsfile = opener.open(self._backupjournal, b'w')
194 self._backupsfile = opener.open(self._backupjournal, b'w')
197 self._backupsfile.write(b'%d\n' % version)
195 self._backupsfile.write(b'%d\n' % version)
198
196
199 if createmode is not None:
197 if createmode is not None:
200 opener.chmod(self._journal, createmode & 0o666)
198 opener.chmod(self._journal, createmode & 0o666)
201 opener.chmod(self._backupjournal, createmode & 0o666)
199 opener.chmod(self._backupjournal, createmode & 0o666)
202
200
203 # hold file generations to be performed on commit
201 # hold file generations to be performed on commit
204 self._filegenerators = {}
202 self._filegenerators = {}
205 # hold callback to write pending data for hooks
203 # hold callback to write pending data for hooks
206 self._pendingcallback = {}
204 self._pendingcallback = {}
207 # True is any pending data have been written ever
205 # True is any pending data have been written ever
208 self._anypending = False
206 self._anypending = False
209 # holds callback to call when writing the transaction
207 # holds callback to call when writing the transaction
210 self._finalizecallback = {}
208 self._finalizecallback = {}
211 # holds callback to call when validating the transaction
209 # holds callback to call when validating the transaction
212 # should raise exception if anything is wrong
210 # should raise exception if anything is wrong
213 self._validatecallback = {}
211 self._validatecallback = {}
214 if validator is not None:
212 if validator is not None:
215 self._validatecallback[b'001-userhooks'] = validator
213 self._validatecallback[b'001-userhooks'] = validator
216 # hold callback for post transaction close
214 # hold callback for post transaction close
217 self._postclosecallback = {}
215 self._postclosecallback = {}
218 # holds callbacks to call during abort
216 # holds callbacks to call during abort
219 self._abortcallback = {}
217 self._abortcallback = {}
220
218
221 def __repr__(self):
219 def __repr__(self):
222 name = '/'.join(self._names)
220 name = '/'.join(self._names)
223 return '<transaction name=%s, count=%d, usages=%d>' % (
221 return '<transaction name=%s, count=%d, usages=%d>' % (
224 name,
222 name,
225 self._count,
223 self._count,
226 self._usages,
224 self._usages,
227 )
225 )
228
226
229 def __del__(self):
227 def __del__(self):
230 if self._journal:
228 if self._journal:
231 self._abort()
229 self._abort()
232
230
233 @active
231 @active
234 def startgroup(self):
232 def startgroup(self):
235 """delay registration of file entry
233 """delay registration of file entry
236
234
237 This is used by strip to delay vision of strip offset. The transaction
235 This is used by strip to delay vision of strip offset. The transaction
238 sees either none or all of the strip actions to be done."""
236 sees either none or all of the strip actions to be done."""
239 self._queue.append([])
237 self._queue.append([])
240
238
241 @active
239 @active
242 def endgroup(self):
240 def endgroup(self):
243 """apply delayed registration of file entry.
241 """apply delayed registration of file entry.
244
242
245 This is used by strip to delay vision of strip offset. The transaction
243 This is used by strip to delay vision of strip offset. The transaction
246 sees either none or all of the strip actions to be done."""
244 sees either none or all of the strip actions to be done."""
247 q = self._queue.pop()
245 q = self._queue.pop()
248 for f, o, data in q:
246 for f, o, data in q:
249 self._addentry(f, o, data)
247 self._addentry(f, o, data)
250
248
251 @active
249 @active
252 def add(self, file, offset, data=None):
250 def add(self, file, offset, data=None):
253 """record the state of an append-only file before update"""
251 """record the state of an append-only file before update"""
254 if file in self._map or file in self._backupmap:
252 if file in self._map or file in self._backupmap:
255 return
253 return
256 if self._queue:
254 if self._queue:
257 self._queue[-1].append((file, offset, data))
255 self._queue[-1].append((file, offset, data))
258 return
256 return
259
257
260 self._addentry(file, offset, data)
258 self._addentry(file, offset, data)
261
259
262 def _addentry(self, file, offset, data):
260 def _addentry(self, file, offset, data):
263 """add a append-only entry to memory and on-disk state"""
261 """add a append-only entry to memory and on-disk state"""
264 if file in self._map or file in self._backupmap:
262 if file in self._map or file in self._backupmap:
265 return
263 return
266 self._entries.append((file, offset, data))
264 self._entries.append((file, offset, data))
267 self._map[file] = len(self._entries) - 1
265 self._map[file] = len(self._entries) - 1
268 # add enough data to the journal to do the truncate
266 # add enough data to the journal to do the truncate
269 self._file.write(b"%s\0%d\n" % (file, offset))
267 self._file.write(b"%s\0%d\n" % (file, offset))
270 self._file.flush()
268 self._file.flush()
271
269
272 @active
270 @active
273 def addbackup(self, file, hardlink=True, location=b''):
271 def addbackup(self, file, hardlink=True, location=b''):
274 """Adds a backup of the file to the transaction
272 """Adds a backup of the file to the transaction
275
273
276 Calling addbackup() creates a hardlink backup of the specified file
274 Calling addbackup() creates a hardlink backup of the specified file
277 that is used to recover the file in the event of the transaction
275 that is used to recover the file in the event of the transaction
278 aborting.
276 aborting.
279
277
280 * `file`: the file path, relative to .hg/store
278 * `file`: the file path, relative to .hg/store
281 * `hardlink`: use a hardlink to quickly create the backup
279 * `hardlink`: use a hardlink to quickly create the backup
282 """
280 """
283 if self._queue:
281 if self._queue:
284 msg = b'cannot use transaction.addbackup inside "group"'
282 msg = b'cannot use transaction.addbackup inside "group"'
285 raise error.ProgrammingError(msg)
283 raise error.ProgrammingError(msg)
286
284
287 if file in self._map or file in self._backupmap:
285 if file in self._map or file in self._backupmap:
288 return
286 return
289 vfs = self._vfsmap[location]
287 vfs = self._vfsmap[location]
290 dirname, filename = vfs.split(file)
288 dirname, filename = vfs.split(file)
291 backupfilename = b"%s.backup.%s" % (self._journal, filename)
289 backupfilename = b"%s.backup.%s" % (self._journal, filename)
292 backupfile = vfs.reljoin(dirname, backupfilename)
290 backupfile = vfs.reljoin(dirname, backupfilename)
293 if vfs.exists(file):
291 if vfs.exists(file):
294 filepath = vfs.join(file)
292 filepath = vfs.join(file)
295 backuppath = vfs.join(backupfile)
293 backuppath = vfs.join(backupfile)
296 util.copyfile(filepath, backuppath, hardlink=hardlink)
294 util.copyfile(filepath, backuppath, hardlink=hardlink)
297 else:
295 else:
298 backupfile = b''
296 backupfile = b''
299
297
300 self._addbackupentry((location, file, backupfile, False))
298 self._addbackupentry((location, file, backupfile, False))
301
299
302 def _addbackupentry(self, entry):
300 def _addbackupentry(self, entry):
303 """register a new backup entry and write it to disk"""
301 """register a new backup entry and write it to disk"""
304 self._backupentries.append(entry)
302 self._backupentries.append(entry)
305 self._backupmap[entry[1]] = len(self._backupentries) - 1
303 self._backupmap[entry[1]] = len(self._backupentries) - 1
306 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
304 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
307 self._backupsfile.flush()
305 self._backupsfile.flush()
308
306
309 @active
307 @active
310 def registertmp(self, tmpfile, location=b''):
308 def registertmp(self, tmpfile, location=b''):
311 """register a temporary transaction file
309 """register a temporary transaction file
312
310
313 Such files will be deleted when the transaction exits (on both
311 Such files will be deleted when the transaction exits (on both
314 failure and success).
312 failure and success).
315 """
313 """
316 self._addbackupentry((location, b'', tmpfile, False))
314 self._addbackupentry((location, b'', tmpfile, False))
317
315
318 @active
316 @active
319 def addfilegenerator(
317 def addfilegenerator(
320 self, genid, filenames, genfunc, order=0, location=b''
318 self, genid, filenames, genfunc, order=0, location=b''
321 ):
319 ):
322 """add a function to generates some files at transaction commit
320 """add a function to generates some files at transaction commit
323
321
324 The `genfunc` argument is a function capable of generating proper
322 The `genfunc` argument is a function capable of generating proper
325 content of each entry in the `filename` tuple.
323 content of each entry in the `filename` tuple.
326
324
327 At transaction close time, `genfunc` will be called with one file
325 At transaction close time, `genfunc` will be called with one file
328 object argument per entries in `filenames`.
326 object argument per entries in `filenames`.
329
327
330 The transaction itself is responsible for the backup, creation and
328 The transaction itself is responsible for the backup, creation and
331 final write of such file.
329 final write of such file.
332
330
333 The `genid` argument is used to ensure the same set of file is only
331 The `genid` argument is used to ensure the same set of file is only
334 generated once. Call to `addfilegenerator` for a `genid` already
332 generated once. Call to `addfilegenerator` for a `genid` already
335 present will overwrite the old entry.
333 present will overwrite the old entry.
336
334
337 The `order` argument may be used to control the order in which multiple
335 The `order` argument may be used to control the order in which multiple
338 generator will be executed.
336 generator will be executed.
339
337
340 The `location` arguments may be used to indicate the files are located
338 The `location` arguments may be used to indicate the files are located
341 outside of the the standard directory for transaction. It should match
339 outside of the the standard directory for transaction. It should match
342 one of the key of the `transaction.vfsmap` dictionary.
340 one of the key of the `transaction.vfsmap` dictionary.
343 """
341 """
344 # For now, we are unable to do proper backup and restore of custom vfs
342 # For now, we are unable to do proper backup and restore of custom vfs
345 # but for bookmarks that are handled outside this mechanism.
343 # but for bookmarks that are handled outside this mechanism.
346 self._filegenerators[genid] = (order, filenames, genfunc, location)
344 self._filegenerators[genid] = (order, filenames, genfunc, location)
347
345
348 @active
346 @active
349 def removefilegenerator(self, genid):
347 def removefilegenerator(self, genid):
350 """reverse of addfilegenerator, remove a file generator function"""
348 """reverse of addfilegenerator, remove a file generator function"""
351 if genid in self._filegenerators:
349 if genid in self._filegenerators:
352 del self._filegenerators[genid]
350 del self._filegenerators[genid]
353
351
354 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
352 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
355 # write files registered for generation
353 # write files registered for generation
356 any = False
354 any = False
357
355
358 if group == GEN_GROUP_ALL:
356 if group == GEN_GROUP_ALL:
359 skip_post = skip_pre = False
357 skip_post = skip_pre = False
360 else:
358 else:
361 skip_pre = group == GEN_GROUP_POST_FINALIZE
359 skip_pre = group == GEN_GROUP_POST_FINALIZE
362 skip_post = group == GEN_GROUP_PRE_FINALIZE
360 skip_post = group == GEN_GROUP_PRE_FINALIZE
363
361
364 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
362 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
365 any = True
363 any = True
366 order, filenames, genfunc, location = entry
364 order, filenames, genfunc, location = entry
367
365
368 # for generation at closing, check if it's before or after finalize
366 # for generation at closing, check if it's before or after finalize
369 is_post = id in postfinalizegenerators
367 is_post = id in postfinalizegenerators
370 if skip_post and is_post:
368 if skip_post and is_post:
371 continue
369 continue
372 elif skip_pre and not is_post:
370 elif skip_pre and not is_post:
373 continue
371 continue
374
372
375 vfs = self._vfsmap[location]
373 vfs = self._vfsmap[location]
376 files = []
374 files = []
377 try:
375 try:
378 for name in filenames:
376 for name in filenames:
379 name += suffix
377 name += suffix
380 if suffix:
378 if suffix:
381 self.registertmp(name, location=location)
379 self.registertmp(name, location=location)
382 checkambig = False
380 checkambig = False
383 else:
381 else:
384 self.addbackup(name, location=location)
382 self.addbackup(name, location=location)
385 checkambig = (name, location) in self._checkambigfiles
383 checkambig = (name, location) in self._checkambigfiles
386 files.append(
384 files.append(
387 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
385 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
388 )
386 )
389 genfunc(*files)
387 genfunc(*files)
390 for f in files:
388 for f in files:
391 f.close()
389 f.close()
392 # skip discard() loop since we're sure no open file remains
390 # skip discard() loop since we're sure no open file remains
393 del files[:]
391 del files[:]
394 finally:
392 finally:
395 for f in files:
393 for f in files:
396 f.discard()
394 f.discard()
397 return any
395 return any
398
396
399 @active
397 @active
400 def find(self, file):
398 def find(self, file):
401 if file in self._map:
399 if file in self._map:
402 return self._entries[self._map[file]]
400 return self._entries[self._map[file]]
403 if file in self._backupmap:
401 if file in self._backupmap:
404 return self._backupentries[self._backupmap[file]]
402 return self._backupentries[self._backupmap[file]]
405 return None
403 return None
406
404
407 @active
405 @active
408 def replace(self, file, offset, data=None):
406 def replace(self, file, offset, data=None):
409 '''
407 '''
410 replace can only replace already committed entries
408 replace can only replace already committed entries
411 that are not pending in the queue
409 that are not pending in the queue
412 '''
410 '''
413
411
414 if file not in self._map:
412 if file not in self._map:
415 raise KeyError(file)
413 raise KeyError(file)
416 index = self._map[file]
414 index = self._map[file]
417 self._entries[index] = (file, offset, data)
415 self._entries[index] = (file, offset, data)
418 self._file.write(b"%s\0%d\n" % (file, offset))
416 self._file.write(b"%s\0%d\n" % (file, offset))
419 self._file.flush()
417 self._file.flush()
420
418
421 @active
419 @active
422 def nest(self, name='<unnamed>'):
420 def nest(self, name='<unnamed>'):
423 self._count += 1
421 self._count += 1
424 self._usages += 1
422 self._usages += 1
425 self._names.append(name)
423 self._names.append(name)
426 return self
424 return self
427
425
428 def release(self):
426 def release(self):
429 if self._count > 0:
427 if self._count > 0:
430 self._usages -= 1
428 self._usages -= 1
431 if self._names:
429 if self._names:
432 self._names.pop()
430 self._names.pop()
433 # if the transaction scopes are left without being closed, fail
431 # if the transaction scopes are left without being closed, fail
434 if self._count > 0 and self._usages == 0:
432 if self._count > 0 and self._usages == 0:
435 self._abort()
433 self._abort()
436
434
437 def running(self):
435 def running(self):
438 return self._count > 0
436 return self._count > 0
439
437
440 def addpending(self, category, callback):
438 def addpending(self, category, callback):
441 """add a callback to be called when the transaction is pending
439 """add a callback to be called when the transaction is pending
442
440
443 The transaction will be given as callback's first argument.
441 The transaction will be given as callback's first argument.
444
442
445 Category is a unique identifier to allow overwriting an old callback
443 Category is a unique identifier to allow overwriting an old callback
446 with a newer callback.
444 with a newer callback.
447 """
445 """
448 self._pendingcallback[category] = callback
446 self._pendingcallback[category] = callback
449
447
450 @active
448 @active
451 def writepending(self):
449 def writepending(self):
452 '''write pending file to temporary version
450 '''write pending file to temporary version
453
451
454 This is used to allow hooks to view a transaction before commit'''
452 This is used to allow hooks to view a transaction before commit'''
455 categories = sorted(self._pendingcallback)
453 categories = sorted(self._pendingcallback)
456 for cat in categories:
454 for cat in categories:
457 # remove callback since the data will have been flushed
455 # remove callback since the data will have been flushed
458 any = self._pendingcallback.pop(cat)(self)
456 any = self._pendingcallback.pop(cat)(self)
459 self._anypending = self._anypending or any
457 self._anypending = self._anypending or any
460 self._anypending |= self._generatefiles(suffix=b'.pending')
458 self._anypending |= self._generatefiles(suffix=b'.pending')
461 return self._anypending
459 return self._anypending
462
460
463 @active
461 @active
464 def hasfinalize(self, category):
462 def hasfinalize(self, category):
465 """check is a callback already exist for a category
463 """check is a callback already exist for a category
466 """
464 """
467 return category in self._finalizecallback
465 return category in self._finalizecallback
468
466
469 @active
467 @active
470 def addfinalize(self, category, callback):
468 def addfinalize(self, category, callback):
471 """add a callback to be called when the transaction is closed
469 """add a callback to be called when the transaction is closed
472
470
473 The transaction will be given as callback's first argument.
471 The transaction will be given as callback's first argument.
474
472
475 Category is a unique identifier to allow overwriting old callbacks with
473 Category is a unique identifier to allow overwriting old callbacks with
476 newer callbacks.
474 newer callbacks.
477 """
475 """
478 self._finalizecallback[category] = callback
476 self._finalizecallback[category] = callback
479
477
480 @active
478 @active
481 def addpostclose(self, category, callback):
479 def addpostclose(self, category, callback):
482 """add or replace a callback to be called after the transaction closed
480 """add or replace a callback to be called after the transaction closed
483
481
484 The transaction will be given as callback's first argument.
482 The transaction will be given as callback's first argument.
485
483
486 Category is a unique identifier to allow overwriting an old callback
484 Category is a unique identifier to allow overwriting an old callback
487 with a newer callback.
485 with a newer callback.
488 """
486 """
489 self._postclosecallback[category] = callback
487 self._postclosecallback[category] = callback
490
488
491 @active
489 @active
492 def getpostclose(self, category):
490 def getpostclose(self, category):
493 """return a postclose callback added before, or None"""
491 """return a postclose callback added before, or None"""
494 return self._postclosecallback.get(category, None)
492 return self._postclosecallback.get(category, None)
495
493
496 @active
494 @active
497 def addabort(self, category, callback):
495 def addabort(self, category, callback):
498 """add a callback to be called when the transaction is aborted.
496 """add a callback to be called when the transaction is aborted.
499
497
500 The transaction will be given as the first argument to the callback.
498 The transaction will be given as the first argument to the callback.
501
499
502 Category is a unique identifier to allow overwriting an old callback
500 Category is a unique identifier to allow overwriting an old callback
503 with a newer callback.
501 with a newer callback.
504 """
502 """
505 self._abortcallback[category] = callback
503 self._abortcallback[category] = callback
506
504
507 @active
505 @active
508 def addvalidator(self, category, callback):
506 def addvalidator(self, category, callback):
509 """ adds a callback to be called when validating the transaction.
507 """ adds a callback to be called when validating the transaction.
510
508
511 The transaction will be given as the first argument to the callback.
509 The transaction will be given as the first argument to the callback.
512
510
513 callback should raise exception if to abort transaction """
511 callback should raise exception if to abort transaction """
514 self._validatecallback[category] = callback
512 self._validatecallback[category] = callback
515
513
    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # Outermost close() only: validate, generate files and run
            # finalizers.  Nested closes just decrement the count below.
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None  # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            # A finalizer may register further finalizers, so keep draining
            # the dict until no new callbacks appear.
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            # Still nested inside an outer transaction scope; the real
            # cleanup happens when the outermost close() runs.
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        # Each backup entry is (location, file, backup-path, cache-flag);
        # a true cache-flag means removal failures are tolerated (see the
        # `if not c: raise` below).
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            # Entries without an original file (`not f`) are temporary
            # files that are no longer needed once the transaction commits.
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._entries = []
        # Preserve backup files for a later `hg rollback` before deleting
        # the journal files themselves.
        self._writeundo()
        if self._after:
            self._after()
            self._after = None  # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # Second pass: remove the remaining backup files now that the undo
        # data has been written.
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None  # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
597 @active
595 @active
598 def abort(self):
596 def abort(self):
599 '''abort the transaction (generally called on error, or when the
597 '''abort the transaction (generally called on error, or when the
600 transaction is not explicitly committed before going out of
598 transaction is not explicitly committed before going out of
601 scope)'''
599 scope)'''
602 self._abort()
600 self._abort()
603
601
    def _writeundo(self):
        """write transaction data for possible future undo call"""
        # Without an undo name there is nowhere to record undo data.
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        # The undo backup file uses the same format (and version) as the
        # transaction's own backup journal.
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                # No backup file for this entry; record an empty path.
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location"
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                # Rename journal-prefixed backups to undo-prefixed names so
                # they survive journal cleanup and can be replayed later.
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()
    def _abort(self):
        # Mark the transaction as fully consumed before doing anything
        # else, so re-entrant use is rejected by the @active decorator.
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            # Nothing was written: just remove the (empty) journal files.
            if not self._entries and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
                    self._entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                # BaseException on purpose: even KeyboardInterrupt during
                # playback should leave a "run hg recover" hint behind.
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False)  # notify failure of transaction
            self._releasefn = None  # Help prevent cycles.
674
672
675
673
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # Parse the journal: one "file\0offset" line per truncation entry.
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o), None))
        except ValueError:
            # Malformed lines are reported and skipped; playback proceeds
            # with whatever entries could be parsed.
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            # First line is the backup-journal format version.
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        # NOTE(review): c is the bytes b'0' or b'1' as
                        # written by the journal, so bool(c) is True for
                        # both values — presumably bool(int(c)) was meant;
                        # confirm before relying on the False case.
                        backupentries.append((l, f, b, bool(c)))
            else:
                # Backup journal from another Mercurial version: skip the
                # backup entries and fall back to plain truncation.
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now