##// END OF EJS Templates
transaction: clarify a conditional about version check...
marmoute -
r48213:e7ad2490 default
parent child Browse files
Show More
@@ -1,764 +1,764 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
9 # Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import stringutil
24 from .utils import stringutil
25
25
# on-disk format version of the backup-journal file
version = 2

# File generators that must only run after the finalizers have executed,
# because they consume the finalizers' output (e.g. the freshly written
# changelog).
postfinalizegenerators = {b'bookmarks', b'dirstate'}

# selectors for _generatefiles(): run everything, or only the generators
# scheduled before/after the finalization step
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'
36
36
37
37
def active(func):
    """Decorator refusing to run *func* on a finished transaction.

    Once ``_count`` drops to zero the transaction has been committed or
    aborted, and calling any state-changing method on it is a programming
    error.
    """

    def wrapper(self, *args, **kwargs):
        if not self._count:
            msg = b'cannot use transaction when it is already committed/aborted'
            raise error.ProgrammingError(msg)
        return func(self, *args, **kwargs)

    return wrapper
47
47
48
48
49 def _playback(
49 def _playback(
50 journal,
50 journal,
51 report,
51 report,
52 opener,
52 opener,
53 vfsmap,
53 vfsmap,
54 entries,
54 entries,
55 backupentries,
55 backupentries,
56 unlink=True,
56 unlink=True,
57 checkambigfiles=None,
57 checkambigfiles=None,
58 ):
58 ):
59 for f, o in sorted(dict(entries).items()):
59 for f, o in sorted(dict(entries).items()):
60 if o or not unlink:
60 if o or not unlink:
61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 try:
62 try:
63 fp = opener(f, b'a', checkambig=checkambig)
63 fp = opener(f, b'a', checkambig=checkambig)
64 if fp.tell() < o:
64 if fp.tell() < o:
65 raise error.Abort(
65 raise error.Abort(
66 _(
66 _(
67 b"attempted to truncate %s to %d bytes, but it was "
67 b"attempted to truncate %s to %d bytes, but it was "
68 b"already %d bytes\n"
68 b"already %d bytes\n"
69 )
69 )
70 % (f, o, fp.tell())
70 % (f, o, fp.tell())
71 )
71 )
72 fp.truncate(o)
72 fp.truncate(o)
73 fp.close()
73 fp.close()
74 except IOError:
74 except IOError:
75 report(_(b"failed to truncate %s\n") % f)
75 report(_(b"failed to truncate %s\n") % f)
76 raise
76 raise
77 else:
77 else:
78 try:
78 try:
79 opener.unlink(f)
79 opener.unlink(f)
80 except (IOError, OSError) as inst:
80 except (IOError, OSError) as inst:
81 if inst.errno != errno.ENOENT:
81 if inst.errno != errno.ENOENT:
82 raise
82 raise
83
83
84 backupfiles = []
84 backupfiles = []
85 for l, f, b, c in backupentries:
85 for l, f, b, c in backupentries:
86 if l not in vfsmap and c:
86 if l not in vfsmap and c:
87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 vfs = vfsmap[l]
88 vfs = vfsmap[l]
89 try:
89 try:
90 if f and b:
90 if f and b:
91 filepath = vfs.join(f)
91 filepath = vfs.join(f)
92 backuppath = vfs.join(b)
92 backuppath = vfs.join(b)
93 checkambig = checkambigfiles and (f, l) in checkambigfiles
93 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 try:
94 try:
95 util.copyfile(backuppath, filepath, checkambig=checkambig)
95 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 backupfiles.append(b)
96 backupfiles.append(b)
97 except IOError as exc:
97 except IOError as exc:
98 e_msg = stringutil.forcebytestr(exc)
98 e_msg = stringutil.forcebytestr(exc)
99 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
99 report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
100 else:
100 else:
101 target = f or b
101 target = f or b
102 try:
102 try:
103 vfs.unlink(target)
103 vfs.unlink(target)
104 except (IOError, OSError) as inst:
104 except (IOError, OSError) as inst:
105 if inst.errno != errno.ENOENT:
105 if inst.errno != errno.ENOENT:
106 raise
106 raise
107 except (IOError, OSError, error.Abort):
107 except (IOError, OSError, error.Abort):
108 if not c:
108 if not c:
109 raise
109 raise
110
110
111 backuppath = b"%s.backupfiles" % journal
111 backuppath = b"%s.backupfiles" % journal
112 if opener.exists(backuppath):
112 if opener.exists(backuppath):
113 opener.unlink(backuppath)
113 opener.unlink(backuppath)
114 opener.unlink(journal)
114 opener.unlink(journal)
115 try:
115 try:
116 for f in backupfiles:
116 for f in backupfiles:
117 if opener.exists(f):
117 if opener.exists(f):
118 opener.unlink(f)
118 opener.unlink(f)
119 except (IOError, OSError, error.Abort):
119 except (IOError, OSError, error.Abort):
120 # only pure backup file remains, it is sage to ignore any error
120 # only pure backup file remains, it is sage to ignore any error
121 pass
121 pass
122
122
123
123
124 class transaction(util.transactional):
124 class transaction(util.transactional):
125 def __init__(
125 def __init__(
126 self,
126 self,
127 report,
127 report,
128 opener,
128 opener,
129 vfsmap,
129 vfsmap,
130 journalname,
130 journalname,
131 undoname=None,
131 undoname=None,
132 after=None,
132 after=None,
133 createmode=None,
133 createmode=None,
134 validator=None,
134 validator=None,
135 releasefn=None,
135 releasefn=None,
136 checkambigfiles=None,
136 checkambigfiles=None,
137 name='<unnamed>',
137 name='<unnamed>',
138 ):
138 ):
139 """Begin a new transaction
139 """Begin a new transaction
140
140
141 Begins a new transaction that allows rolling back writes in the event of
141 Begins a new transaction that allows rolling back writes in the event of
142 an exception.
142 an exception.
143
143
144 * `after`: called after the transaction has been committed
144 * `after`: called after the transaction has been committed
145 * `createmode`: the mode of the journal file that will be created
145 * `createmode`: the mode of the journal file that will be created
146 * `releasefn`: called after releasing (with transaction and result)
146 * `releasefn`: called after releasing (with transaction and result)
147
147
148 `checkambigfiles` is a set of (path, vfs-location) tuples,
148 `checkambigfiles` is a set of (path, vfs-location) tuples,
149 which determine whether file stat ambiguity should be avoided
149 which determine whether file stat ambiguity should be avoided
150 for corresponded files.
150 for corresponded files.
151 """
151 """
152 self._count = 1
152 self._count = 1
153 self._usages = 1
153 self._usages = 1
154 self._report = report
154 self._report = report
155 # a vfs to the store content
155 # a vfs to the store content
156 self._opener = opener
156 self._opener = opener
157 # a map to access file in various {location -> vfs}
157 # a map to access file in various {location -> vfs}
158 vfsmap = vfsmap.copy()
158 vfsmap = vfsmap.copy()
159 vfsmap[b''] = opener # set default value
159 vfsmap[b''] = opener # set default value
160 self._vfsmap = vfsmap
160 self._vfsmap = vfsmap
161 self._after = after
161 self._after = after
162 self._offsetmap = {}
162 self._offsetmap = {}
163 self._newfiles = set()
163 self._newfiles = set()
164 self._journal = journalname
164 self._journal = journalname
165 self._undoname = undoname
165 self._undoname = undoname
166 self._queue = []
166 self._queue = []
167 # A callback to do something just after releasing transaction.
167 # A callback to do something just after releasing transaction.
168 if releasefn is None:
168 if releasefn is None:
169 releasefn = lambda tr, success: None
169 releasefn = lambda tr, success: None
170 self._releasefn = releasefn
170 self._releasefn = releasefn
171
171
172 self._checkambigfiles = set()
172 self._checkambigfiles = set()
173 if checkambigfiles:
173 if checkambigfiles:
174 self._checkambigfiles.update(checkambigfiles)
174 self._checkambigfiles.update(checkambigfiles)
175
175
176 self._names = [name]
176 self._names = [name]
177
177
178 # A dict dedicated to precisely tracking the changes introduced in the
178 # A dict dedicated to precisely tracking the changes introduced in the
179 # transaction.
179 # transaction.
180 self.changes = {}
180 self.changes = {}
181
181
182 # a dict of arguments to be passed to hooks
182 # a dict of arguments to be passed to hooks
183 self.hookargs = {}
183 self.hookargs = {}
184 self._file = opener.open(self._journal, b"w+")
184 self._file = opener.open(self._journal, b"w+")
185
185
186 # a list of ('location', 'path', 'backuppath', cache) entries.
186 # a list of ('location', 'path', 'backuppath', cache) entries.
187 # - if 'backuppath' is empty, no file existed at backup time
187 # - if 'backuppath' is empty, no file existed at backup time
188 # - if 'path' is empty, this is a temporary transaction file
188 # - if 'path' is empty, this is a temporary transaction file
189 # - if 'location' is not empty, the path is outside main opener reach.
189 # - if 'location' is not empty, the path is outside main opener reach.
190 # use 'location' value as a key in a vfsmap to find the right 'vfs'
190 # use 'location' value as a key in a vfsmap to find the right 'vfs'
191 # (cache is currently unused)
191 # (cache is currently unused)
192 self._backupentries = []
192 self._backupentries = []
193 self._backupmap = {}
193 self._backupmap = {}
194 self._backupjournal = b"%s.backupfiles" % self._journal
194 self._backupjournal = b"%s.backupfiles" % self._journal
195 self._backupsfile = opener.open(self._backupjournal, b'w')
195 self._backupsfile = opener.open(self._backupjournal, b'w')
196 self._backupsfile.write(b'%d\n' % version)
196 self._backupsfile.write(b'%d\n' % version)
197
197
198 if createmode is not None:
198 if createmode is not None:
199 opener.chmod(self._journal, createmode & 0o666)
199 opener.chmod(self._journal, createmode & 0o666)
200 opener.chmod(self._backupjournal, createmode & 0o666)
200 opener.chmod(self._backupjournal, createmode & 0o666)
201
201
202 # hold file generations to be performed on commit
202 # hold file generations to be performed on commit
203 self._filegenerators = {}
203 self._filegenerators = {}
204 # hold callback to write pending data for hooks
204 # hold callback to write pending data for hooks
205 self._pendingcallback = {}
205 self._pendingcallback = {}
206 # True is any pending data have been written ever
206 # True is any pending data have been written ever
207 self._anypending = False
207 self._anypending = False
208 # holds callback to call when writing the transaction
208 # holds callback to call when writing the transaction
209 self._finalizecallback = {}
209 self._finalizecallback = {}
210 # holds callback to call when validating the transaction
210 # holds callback to call when validating the transaction
211 # should raise exception if anything is wrong
211 # should raise exception if anything is wrong
212 self._validatecallback = {}
212 self._validatecallback = {}
213 if validator is not None:
213 if validator is not None:
214 self._validatecallback[b'001-userhooks'] = validator
214 self._validatecallback[b'001-userhooks'] = validator
215 # hold callback for post transaction close
215 # hold callback for post transaction close
216 self._postclosecallback = {}
216 self._postclosecallback = {}
217 # holds callbacks to call during abort
217 # holds callbacks to call during abort
218 self._abortcallback = {}
218 self._abortcallback = {}
219
219
220 def __repr__(self):
220 def __repr__(self):
221 name = '/'.join(self._names)
221 name = '/'.join(self._names)
222 return '<transaction name=%s, count=%d, usages=%d>' % (
222 return '<transaction name=%s, count=%d, usages=%d>' % (
223 name,
223 name,
224 self._count,
224 self._count,
225 self._usages,
225 self._usages,
226 )
226 )
227
227
228 def __del__(self):
228 def __del__(self):
229 if self._journal:
229 if self._journal:
230 self._abort()
230 self._abort()
231
231
232 @active
232 @active
233 def startgroup(self):
233 def startgroup(self):
234 """delay registration of file entry
234 """delay registration of file entry
235
235
236 This is used by strip to delay vision of strip offset. The transaction
236 This is used by strip to delay vision of strip offset. The transaction
237 sees either none or all of the strip actions to be done."""
237 sees either none or all of the strip actions to be done."""
238 self._queue.append([])
238 self._queue.append([])
239
239
240 @active
240 @active
241 def endgroup(self):
241 def endgroup(self):
242 """apply delayed registration of file entry.
242 """apply delayed registration of file entry.
243
243
244 This is used by strip to delay vision of strip offset. The transaction
244 This is used by strip to delay vision of strip offset. The transaction
245 sees either none or all of the strip actions to be done."""
245 sees either none or all of the strip actions to be done."""
246 q = self._queue.pop()
246 q = self._queue.pop()
247 for f, o in q:
247 for f, o in q:
248 self._addentry(f, o)
248 self._addentry(f, o)
249
249
250 @active
250 @active
251 def add(self, file, offset):
251 def add(self, file, offset):
252 """record the state of an append-only file before update"""
252 """record the state of an append-only file before update"""
253 if (
253 if (
254 file in self._newfiles
254 file in self._newfiles
255 or file in self._offsetmap
255 or file in self._offsetmap
256 or file in self._backupmap
256 or file in self._backupmap
257 ):
257 ):
258 return
258 return
259 if self._queue:
259 if self._queue:
260 self._queue[-1].append((file, offset))
260 self._queue[-1].append((file, offset))
261 return
261 return
262
262
263 self._addentry(file, offset)
263 self._addentry(file, offset)
264
264
265 def _addentry(self, file, offset):
265 def _addentry(self, file, offset):
266 """add a append-only entry to memory and on-disk state"""
266 """add a append-only entry to memory and on-disk state"""
267 if (
267 if (
268 file in self._newfiles
268 file in self._newfiles
269 or file in self._offsetmap
269 or file in self._offsetmap
270 or file in self._backupmap
270 or file in self._backupmap
271 ):
271 ):
272 return
272 return
273 if offset:
273 if offset:
274 self._offsetmap[file] = offset
274 self._offsetmap[file] = offset
275 else:
275 else:
276 self._newfiles.add(file)
276 self._newfiles.add(file)
277 # add enough data to the journal to do the truncate
277 # add enough data to the journal to do the truncate
278 self._file.write(b"%s\0%d\n" % (file, offset))
278 self._file.write(b"%s\0%d\n" % (file, offset))
279 self._file.flush()
279 self._file.flush()
280
280
281 @active
281 @active
282 def addbackup(self, file, hardlink=True, location=b''):
282 def addbackup(self, file, hardlink=True, location=b''):
283 """Adds a backup of the file to the transaction
283 """Adds a backup of the file to the transaction
284
284
285 Calling addbackup() creates a hardlink backup of the specified file
285 Calling addbackup() creates a hardlink backup of the specified file
286 that is used to recover the file in the event of the transaction
286 that is used to recover the file in the event of the transaction
287 aborting.
287 aborting.
288
288
289 * `file`: the file path, relative to .hg/store
289 * `file`: the file path, relative to .hg/store
290 * `hardlink`: use a hardlink to quickly create the backup
290 * `hardlink`: use a hardlink to quickly create the backup
291 """
291 """
292 if self._queue:
292 if self._queue:
293 msg = b'cannot use transaction.addbackup inside "group"'
293 msg = b'cannot use transaction.addbackup inside "group"'
294 raise error.ProgrammingError(msg)
294 raise error.ProgrammingError(msg)
295
295
296 if (
296 if (
297 file in self._newfiles
297 file in self._newfiles
298 or file in self._offsetmap
298 or file in self._offsetmap
299 or file in self._backupmap
299 or file in self._backupmap
300 ):
300 ):
301 return
301 return
302 vfs = self._vfsmap[location]
302 vfs = self._vfsmap[location]
303 dirname, filename = vfs.split(file)
303 dirname, filename = vfs.split(file)
304 backupfilename = b"%s.backup.%s" % (self._journal, filename)
304 backupfilename = b"%s.backup.%s" % (self._journal, filename)
305 backupfile = vfs.reljoin(dirname, backupfilename)
305 backupfile = vfs.reljoin(dirname, backupfilename)
306 if vfs.exists(file):
306 if vfs.exists(file):
307 filepath = vfs.join(file)
307 filepath = vfs.join(file)
308 backuppath = vfs.join(backupfile)
308 backuppath = vfs.join(backupfile)
309 util.copyfile(filepath, backuppath, hardlink=hardlink)
309 util.copyfile(filepath, backuppath, hardlink=hardlink)
310 else:
310 else:
311 backupfile = b''
311 backupfile = b''
312
312
313 self._addbackupentry((location, file, backupfile, False))
313 self._addbackupentry((location, file, backupfile, False))
314
314
315 def _addbackupentry(self, entry):
315 def _addbackupentry(self, entry):
316 """register a new backup entry and write it to disk"""
316 """register a new backup entry and write it to disk"""
317 self._backupentries.append(entry)
317 self._backupentries.append(entry)
318 self._backupmap[entry[1]] = len(self._backupentries) - 1
318 self._backupmap[entry[1]] = len(self._backupentries) - 1
319 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
319 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
320 self._backupsfile.flush()
320 self._backupsfile.flush()
321
321
322 @active
322 @active
323 def registertmp(self, tmpfile, location=b''):
323 def registertmp(self, tmpfile, location=b''):
324 """register a temporary transaction file
324 """register a temporary transaction file
325
325
326 Such files will be deleted when the transaction exits (on both
326 Such files will be deleted when the transaction exits (on both
327 failure and success).
327 failure and success).
328 """
328 """
329 self._addbackupentry((location, b'', tmpfile, False))
329 self._addbackupentry((location, b'', tmpfile, False))
330
330
331 @active
331 @active
332 def addfilegenerator(
332 def addfilegenerator(
333 self, genid, filenames, genfunc, order=0, location=b''
333 self, genid, filenames, genfunc, order=0, location=b''
334 ):
334 ):
335 """add a function to generates some files at transaction commit
335 """add a function to generates some files at transaction commit
336
336
337 The `genfunc` argument is a function capable of generating proper
337 The `genfunc` argument is a function capable of generating proper
338 content of each entry in the `filename` tuple.
338 content of each entry in the `filename` tuple.
339
339
340 At transaction close time, `genfunc` will be called with one file
340 At transaction close time, `genfunc` will be called with one file
341 object argument per entries in `filenames`.
341 object argument per entries in `filenames`.
342
342
343 The transaction itself is responsible for the backup, creation and
343 The transaction itself is responsible for the backup, creation and
344 final write of such file.
344 final write of such file.
345
345
346 The `genid` argument is used to ensure the same set of file is only
346 The `genid` argument is used to ensure the same set of file is only
347 generated once. Call to `addfilegenerator` for a `genid` already
347 generated once. Call to `addfilegenerator` for a `genid` already
348 present will overwrite the old entry.
348 present will overwrite the old entry.
349
349
350 The `order` argument may be used to control the order in which multiple
350 The `order` argument may be used to control the order in which multiple
351 generator will be executed.
351 generator will be executed.
352
352
353 The `location` arguments may be used to indicate the files are located
353 The `location` arguments may be used to indicate the files are located
354 outside of the the standard directory for transaction. It should match
354 outside of the the standard directory for transaction. It should match
355 one of the key of the `transaction.vfsmap` dictionary.
355 one of the key of the `transaction.vfsmap` dictionary.
356 """
356 """
357 # For now, we are unable to do proper backup and restore of custom vfs
357 # For now, we are unable to do proper backup and restore of custom vfs
358 # but for bookmarks that are handled outside this mechanism.
358 # but for bookmarks that are handled outside this mechanism.
359 self._filegenerators[genid] = (order, filenames, genfunc, location)
359 self._filegenerators[genid] = (order, filenames, genfunc, location)
360
360
361 @active
361 @active
362 def removefilegenerator(self, genid):
362 def removefilegenerator(self, genid):
363 """reverse of addfilegenerator, remove a file generator function"""
363 """reverse of addfilegenerator, remove a file generator function"""
364 if genid in self._filegenerators:
364 if genid in self._filegenerators:
365 del self._filegenerators[genid]
365 del self._filegenerators[genid]
366
366
367 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
367 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
368 # write files registered for generation
368 # write files registered for generation
369 any = False
369 any = False
370
370
371 if group == GEN_GROUP_ALL:
371 if group == GEN_GROUP_ALL:
372 skip_post = skip_pre = False
372 skip_post = skip_pre = False
373 else:
373 else:
374 skip_pre = group == GEN_GROUP_POST_FINALIZE
374 skip_pre = group == GEN_GROUP_POST_FINALIZE
375 skip_post = group == GEN_GROUP_PRE_FINALIZE
375 skip_post = group == GEN_GROUP_PRE_FINALIZE
376
376
377 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
377 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
378 any = True
378 any = True
379 order, filenames, genfunc, location = entry
379 order, filenames, genfunc, location = entry
380
380
381 # for generation at closing, check if it's before or after finalize
381 # for generation at closing, check if it's before or after finalize
382 is_post = id in postfinalizegenerators
382 is_post = id in postfinalizegenerators
383 if skip_post and is_post:
383 if skip_post and is_post:
384 continue
384 continue
385 elif skip_pre and not is_post:
385 elif skip_pre and not is_post:
386 continue
386 continue
387
387
388 vfs = self._vfsmap[location]
388 vfs = self._vfsmap[location]
389 files = []
389 files = []
390 try:
390 try:
391 for name in filenames:
391 for name in filenames:
392 name += suffix
392 name += suffix
393 if suffix:
393 if suffix:
394 self.registertmp(name, location=location)
394 self.registertmp(name, location=location)
395 checkambig = False
395 checkambig = False
396 else:
396 else:
397 self.addbackup(name, location=location)
397 self.addbackup(name, location=location)
398 checkambig = (name, location) in self._checkambigfiles
398 checkambig = (name, location) in self._checkambigfiles
399 files.append(
399 files.append(
400 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
400 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
401 )
401 )
402 genfunc(*files)
402 genfunc(*files)
403 for f in files:
403 for f in files:
404 f.close()
404 f.close()
405 # skip discard() loop since we're sure no open file remains
405 # skip discard() loop since we're sure no open file remains
406 del files[:]
406 del files[:]
407 finally:
407 finally:
408 for f in files:
408 for f in files:
409 f.discard()
409 f.discard()
410 return any
410 return any
411
411
412 @active
412 @active
413 def findoffset(self, file):
413 def findoffset(self, file):
414 if file in self._newfiles:
414 if file in self._newfiles:
415 return 0
415 return 0
416 return self._offsetmap.get(file)
416 return self._offsetmap.get(file)
417
417
418 @active
418 @active
419 def readjournal(self):
419 def readjournal(self):
420 self._file.seek(0)
420 self._file.seek(0)
421 entries = []
421 entries = []
422 for l in self._file.readlines():
422 for l in self._file.readlines():
423 file, troffset = l.split(b'\0')
423 file, troffset = l.split(b'\0')
424 entries.append((file, int(troffset)))
424 entries.append((file, int(troffset)))
425 return entries
425 return entries
426
426
427 @active
427 @active
428 def replace(self, file, offset):
428 def replace(self, file, offset):
429 """
429 """
430 replace can only replace already committed entries
430 replace can only replace already committed entries
431 that are not pending in the queue
431 that are not pending in the queue
432 """
432 """
433 if file in self._newfiles:
433 if file in self._newfiles:
434 if not offset:
434 if not offset:
435 return
435 return
436 self._newfiles.remove(file)
436 self._newfiles.remove(file)
437 self._offsetmap[file] = offset
437 self._offsetmap[file] = offset
438 elif file in self._offsetmap:
438 elif file in self._offsetmap:
439 if not offset:
439 if not offset:
440 del self._offsetmap[file]
440 del self._offsetmap[file]
441 self._newfiles.add(file)
441 self._newfiles.add(file)
442 else:
442 else:
443 self._offsetmap[file] = offset
443 self._offsetmap[file] = offset
444 else:
444 else:
445 raise KeyError(file)
445 raise KeyError(file)
446 self._file.write(b"%s\0%d\n" % (file, offset))
446 self._file.write(b"%s\0%d\n" % (file, offset))
447 self._file.flush()
447 self._file.flush()
448
448
449 @active
449 @active
450 def nest(self, name='<unnamed>'):
450 def nest(self, name='<unnamed>'):
451 self._count += 1
451 self._count += 1
452 self._usages += 1
452 self._usages += 1
453 self._names.append(name)
453 self._names.append(name)
454 return self
454 return self
455
455
456 def release(self):
456 def release(self):
457 if self._count > 0:
457 if self._count > 0:
458 self._usages -= 1
458 self._usages -= 1
459 if self._names:
459 if self._names:
460 self._names.pop()
460 self._names.pop()
461 # if the transaction scopes are left without being closed, fail
461 # if the transaction scopes are left without being closed, fail
462 if self._count > 0 and self._usages == 0:
462 if self._count > 0 and self._usages == 0:
463 self._abort()
463 self._abort()
464
464
465 def running(self):
465 def running(self):
466 return self._count > 0
466 return self._count > 0
467
467
468 def addpending(self, category, callback):
468 def addpending(self, category, callback):
469 """add a callback to be called when the transaction is pending
469 """add a callback to be called when the transaction is pending
470
470
471 The transaction will be given as callback's first argument.
471 The transaction will be given as callback's first argument.
472
472
473 Category is a unique identifier to allow overwriting an old callback
473 Category is a unique identifier to allow overwriting an old callback
474 with a newer callback.
474 with a newer callback.
475 """
475 """
476 self._pendingcallback[category] = callback
476 self._pendingcallback[category] = callback
477
477
478 @active
478 @active
479 def writepending(self):
479 def writepending(self):
480 """write pending file to temporary version
480 """write pending file to temporary version
481
481
482 This is used to allow hooks to view a transaction before commit"""
482 This is used to allow hooks to view a transaction before commit"""
483 categories = sorted(self._pendingcallback)
483 categories = sorted(self._pendingcallback)
484 for cat in categories:
484 for cat in categories:
485 # remove callback since the data will have been flushed
485 # remove callback since the data will have been flushed
486 any = self._pendingcallback.pop(cat)(self)
486 any = self._pendingcallback.pop(cat)(self)
487 self._anypending = self._anypending or any
487 self._anypending = self._anypending or any
488 self._anypending |= self._generatefiles(suffix=b'.pending')
488 self._anypending |= self._generatefiles(suffix=b'.pending')
489 return self._anypending
489 return self._anypending
490
490
491 @active
491 @active
492 def hasfinalize(self, category):
492 def hasfinalize(self, category):
493 """check is a callback already exist for a category"""
493 """check is a callback already exist for a category"""
494 return category in self._finalizecallback
494 return category in self._finalizecallback
495
495
496 @active
496 @active
497 def addfinalize(self, category, callback):
497 def addfinalize(self, category, callback):
498 """add a callback to be called when the transaction is closed
498 """add a callback to be called when the transaction is closed
499
499
500 The transaction will be given as callback's first argument.
500 The transaction will be given as callback's first argument.
501
501
502 Category is a unique identifier to allow overwriting old callbacks with
502 Category is a unique identifier to allow overwriting old callbacks with
503 newer callbacks.
503 newer callbacks.
504 """
504 """
505 self._finalizecallback[category] = callback
505 self._finalizecallback[category] = callback
506
506
507 @active
507 @active
508 def addpostclose(self, category, callback):
508 def addpostclose(self, category, callback):
509 """add or replace a callback to be called after the transaction closed
509 """add or replace a callback to be called after the transaction closed
510
510
511 The transaction will be given as callback's first argument.
511 The transaction will be given as callback's first argument.
512
512
513 Category is a unique identifier to allow overwriting an old callback
513 Category is a unique identifier to allow overwriting an old callback
514 with a newer callback.
514 with a newer callback.
515 """
515 """
516 self._postclosecallback[category] = callback
516 self._postclosecallback[category] = callback
517
517
518 @active
518 @active
519 def getpostclose(self, category):
519 def getpostclose(self, category):
520 """return a postclose callback added before, or None"""
520 """return a postclose callback added before, or None"""
521 return self._postclosecallback.get(category, None)
521 return self._postclosecallback.get(category, None)
522
522
@active
def addabort(self, category, callback):
    """Register (or overwrite) a callback run when the transaction aborts.

    The transaction is passed as the callback's first positional
    argument.  ``category`` is a unique key so a newer callback can
    replace an older one.
    """
    self._abortcallback[category] = callback
533
533
@active
def addvalidator(self, category, callback):
    """Register (or overwrite) a callback run while validating the
    transaction, just before it is closed.

    The transaction is passed as the callback's first positional
    argument.  The callback should raise an exception to abort the
    transaction.
    """
    self._validatecallback[category] = callback
542
542
@active
def close(self):
    '''commit the transaction'''
    if self._count == 1:
        # Run validators first; any of them may raise to veto the commit.
        for category in sorted(self._validatecallback):
            self._validatecallback[category](self)
        self._validatecallback = None  # Help prevent cycles.
        self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
        # Finalizers may register further finalizers while running, so
        # keep draining the queue until it is empty.
        while self._finalizecallback:
            callbacks = self._finalizecallback
            self._finalizecallback = {}
            for cat in sorted(callbacks):
                callbacks[cat](self)
        # Prevent double usage and help clear cycles.
        self._finalizecallback = None
        self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

    self._count -= 1
    if self._count != 0:
        # Nested usage: only the outermost close() finishes the job.
        return
    self._file.close()
    self._backupsfile.close()
    # cleanup temporary files
    for location, genfile, backupfile, cache in self._backupentries:
        if location not in self._vfsmap and cache:
            self._report(
                b"couldn't remove %s: unknown cache location %s\n"
                % (backupfile, location)
            )
            continue
        vfs = self._vfsmap[location]
        if not genfile and backupfile and vfs.exists(backupfile):
            try:
                vfs.unlink(backupfile)
            except (IOError, OSError, error.Abort) as inst:
                if not cache:
                    raise
                # Abort may be raise by read only opener
                self._report(
                    b"couldn't remove %s: %s\n" % (vfs.join(backupfile), inst)
                )
    self._offsetmap = {}
    self._newfiles = set()
    self._writeundo()
    if self._after:
        self._after()
        self._after = None  # Help prevent cycles.
    if self._opener.isfile(self._backupjournal):
        self._opener.unlink(self._backupjournal)
    if self._opener.isfile(self._journal):
        self._opener.unlink(self._journal)
    for location, _genfile, backupfile, cache in self._backupentries:
        if location not in self._vfsmap and cache:
            self._report(
                b"couldn't remove %s: unknown cache location"
                b"%s\n" % (backupfile, location)
            )
            continue
        vfs = self._vfsmap[location]
        if backupfile and vfs.exists(backupfile):
            try:
                vfs.unlink(backupfile)
            except (IOError, OSError, error.Abort) as inst:
                if not cache:
                    raise
                # Abort may be raise by read only opener
                self._report(
                    b"couldn't remove %s: %s\n" % (vfs.join(backupfile), inst)
                )
    self._backupentries = []
    self._journal = None

    self._releasefn(self, True)  # notify success of closing transaction
    self._releasefn = None  # Help prevent cycles.

    # run post close action
    for cat in sorted(self._postclosecallback):
        self._postclosecallback[cat](self)
    # Prevent double usage and help clear cycles.
    self._postclosecallback = None
624
624
@active
def abort(self):
    """Abort the transaction.

    Generally invoked on error, or when the transaction goes out of
    scope without having been explicitly committed.
    """
    self._abort()
631
631
def _writeundo(self):
    """write transaction data for possible future undo call"""
    if self._undoname is None:
        # No undo name configured: nothing to record.
        return
    undobackupfile = self._opener.open(
        b"%s.backupfiles" % self._undoname, b'w'
    )
    undobackupfile.write(b'%d\n' % version)
    for location, origfile, backupfile, cache in self._backupentries:
        if not origfile:  # temporary file
            continue
        if not backupfile:
            undopath = b''
        else:
            if location not in self._vfsmap and cache:
                self._report(
                    b"couldn't remove %s: unknown cache location"
                    b"%s\n" % (backupfile, location)
                )
                continue
            vfs = self._vfsmap[location]
            base, name = vfs.split(backupfile)
            assert name.startswith(self._journal), name
            # Hardlink each journal backup under the undo name so a
            # later `hg rollback` can restore from it.
            uname = name.replace(self._journal, self._undoname, 1)
            undopath = vfs.reljoin(base, uname)
            util.copyfile(
                vfs.join(backupfile), vfs.join(undopath), hardlink=True
            )
        undobackupfile.write(
            b"%s\0%s\0%s\0%d\n" % (location, origfile, undopath, cache)
        )
    undobackupfile.close()
660
660
def _abort(self):
    """Roll back the running transaction after a failure.

    Replays the journal, restoring backed-up files and truncating
    appended data, then notifies the release function of the failure.
    """
    journalentries = self.readjournal()
    self._count = 0
    self._usages = 0
    self._file.close()
    self._backupsfile.close()

    try:
        if not journalentries and not self._backupentries:
            # Nothing was written: just drop the (empty) journal files.
            if self._backupjournal:
                self._opener.unlink(self._backupjournal)
            if self._journal:
                self._opener.unlink(self._journal)
            return

        self._report(_(b"transaction abort!\n"))

        try:
            for cat in sorted(self._abortcallback):
                self._abortcallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._abortcallback = None
            _playback(
                self._journal,
                self._report,
                self._opener,
                self._vfsmap,
                journalentries,
                self._backupentries,
                False,
                checkambigfiles=self._checkambigfiles,
            )
            self._report(_(b"rollback completed\n"))
        except BaseException as exc:
            # Report but do not re-raise: the journal stays on disk for
            # a later `hg recover`.
            self._report(_(b"rollback failed - please run hg recover\n"))
            self._report(
                _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
            )
    finally:
        self._journal = None
        self._releasefn(self, False)  # notify failure of transaction
        self._releasefn = None  # Help prevent cycles.
703
703
704
704
# Warning emitted when a journal's leading version stamp does not match
# the `version` constant this module writes.
BAD_VERSION_MSG = _(
    b"journal was created by a different version of Mercurial\n"
)
708
708
709
709
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines.  The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    with opener.open(file) as fp:
        lines = fp.readlines()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            # A malformed line is reported but does not stop recovery of
            # the remaining, well-formed entries.
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        # Fix: close the backup journal handle too (the main journal
        # above already used a `with` block; this one leaked).
        with opener.open(backupjournal) as fp:
            lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver != (b'%d' % version):
                # Refuse to parse entries written by another format
                # version; recovery proceeds without backup entries.
                report(BAD_VERSION_MSG)
            else:
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )
General Comments 0
You need to be logged in to leave comments. Login now