##// END OF EJS Templates
transaction: detect an attempt to truncate-to-extend on playback, raise error...
Kyle Lippincott -
r43233:8502f76d default
parent child Browse files
Show More
@@ -1,649 +1,653 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import (
24 from .utils import (
25 stringutil,
25 stringutil,
26 )
26 )
27
27
28 version = 2
28 version = 2
29
29
30 # These are the file generators that should only be executed after the
30 # These are the file generators that should only be executed after the
31 # finalizers are done, since they rely on the output of the finalizers (like
31 # finalizers are done, since they rely on the output of the finalizers (like
32 # the changelog having been written).
32 # the changelog having been written).
33 postfinalizegenerators = {
33 postfinalizegenerators = {
34 'bookmarks',
34 'bookmarks',
35 'dirstate'
35 'dirstate'
36 }
36 }
37
37
38 gengroupall='all'
38 gengroupall='all'
39 gengroupprefinalize='prefinalize'
39 gengroupprefinalize='prefinalize'
40 gengrouppostfinalize='postfinalize'
40 gengrouppostfinalize='postfinalize'
41
41
42 def active(func):
42 def active(func):
43 def _active(self, *args, **kwds):
43 def _active(self, *args, **kwds):
44 if self._count == 0:
44 if self._count == 0:
45 raise error.Abort(_(
45 raise error.Abort(_(
46 'cannot use transaction when it is already committed/aborted'))
46 'cannot use transaction when it is already committed/aborted'))
47 return func(self, *args, **kwds)
47 return func(self, *args, **kwds)
48 return _active
48 return _active
49
49
50 def _playback(journal, report, opener, vfsmap, entries, backupentries,
50 def _playback(journal, report, opener, vfsmap, entries, backupentries,
51 unlink=True, checkambigfiles=None):
51 unlink=True, checkambigfiles=None):
52 for f, o, _ignore in entries:
52 for f, o, _ignore in entries:
53 if o or not unlink:
53 if o or not unlink:
54 checkambig = checkambigfiles and (f, '') in checkambigfiles
54 checkambig = checkambigfiles and (f, '') in checkambigfiles
55 try:
55 try:
56 fp = opener(f, 'a', checkambig=checkambig)
56 fp = opener(f, 'a', checkambig=checkambig)
57 if fp.tell() < o:
58 raise error.Abort(_(
59 "attempted to truncate %s to %d bytes, but it was "
60 "already %d bytes\n") % (f, o, fp.tell()))
57 fp.truncate(o)
61 fp.truncate(o)
58 fp.close()
62 fp.close()
59 except IOError:
63 except IOError:
60 report(_("failed to truncate %s\n") % f)
64 report(_("failed to truncate %s\n") % f)
61 raise
65 raise
62 else:
66 else:
63 try:
67 try:
64 opener.unlink(f)
68 opener.unlink(f)
65 except (IOError, OSError) as inst:
69 except (IOError, OSError) as inst:
66 if inst.errno != errno.ENOENT:
70 if inst.errno != errno.ENOENT:
67 raise
71 raise
68
72
69 backupfiles = []
73 backupfiles = []
70 for l, f, b, c in backupentries:
74 for l, f, b, c in backupentries:
71 if l not in vfsmap and c:
75 if l not in vfsmap and c:
72 report("couldn't handle %s: unknown cache location %s\n"
76 report("couldn't handle %s: unknown cache location %s\n"
73 % (b, l))
77 % (b, l))
74 vfs = vfsmap[l]
78 vfs = vfsmap[l]
75 try:
79 try:
76 if f and b:
80 if f and b:
77 filepath = vfs.join(f)
81 filepath = vfs.join(f)
78 backuppath = vfs.join(b)
82 backuppath = vfs.join(b)
79 checkambig = checkambigfiles and (f, l) in checkambigfiles
83 checkambig = checkambigfiles and (f, l) in checkambigfiles
80 try:
84 try:
81 util.copyfile(backuppath, filepath, checkambig=checkambig)
85 util.copyfile(backuppath, filepath, checkambig=checkambig)
82 backupfiles.append(b)
86 backupfiles.append(b)
83 except IOError:
87 except IOError:
84 report(_("failed to recover %s\n") % f)
88 report(_("failed to recover %s\n") % f)
85 else:
89 else:
86 target = f or b
90 target = f or b
87 try:
91 try:
88 vfs.unlink(target)
92 vfs.unlink(target)
89 except (IOError, OSError) as inst:
93 except (IOError, OSError) as inst:
90 if inst.errno != errno.ENOENT:
94 if inst.errno != errno.ENOENT:
91 raise
95 raise
92 except (IOError, OSError, error.Abort):
96 except (IOError, OSError, error.Abort):
93 if not c:
97 if not c:
94 raise
98 raise
95
99
96 backuppath = "%s.backupfiles" % journal
100 backuppath = "%s.backupfiles" % journal
97 if opener.exists(backuppath):
101 if opener.exists(backuppath):
98 opener.unlink(backuppath)
102 opener.unlink(backuppath)
99 opener.unlink(journal)
103 opener.unlink(journal)
100 try:
104 try:
101 for f in backupfiles:
105 for f in backupfiles:
102 if opener.exists(f):
106 if opener.exists(f):
103 opener.unlink(f)
107 opener.unlink(f)
104 except (IOError, OSError, error.Abort):
108 except (IOError, OSError, error.Abort):
105 # only pure backup file remains, it is sage to ignore any error
109 # only pure backup file remains, it is sage to ignore any error
106 pass
110 pass
107
111
108 class transaction(util.transactional):
112 class transaction(util.transactional):
109 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
113 def __init__(self, report, opener, vfsmap, journalname, undoname=None,
110 after=None, createmode=None, validator=None, releasefn=None,
114 after=None, createmode=None, validator=None, releasefn=None,
111 checkambigfiles=None, name=r'<unnamed>'):
115 checkambigfiles=None, name=r'<unnamed>'):
112 """Begin a new transaction
116 """Begin a new transaction
113
117
114 Begins a new transaction that allows rolling back writes in the event of
118 Begins a new transaction that allows rolling back writes in the event of
115 an exception.
119 an exception.
116
120
117 * `after`: called after the transaction has been committed
121 * `after`: called after the transaction has been committed
118 * `createmode`: the mode of the journal file that will be created
122 * `createmode`: the mode of the journal file that will be created
119 * `releasefn`: called after releasing (with transaction and result)
123 * `releasefn`: called after releasing (with transaction and result)
120
124
121 `checkambigfiles` is a set of (path, vfs-location) tuples,
125 `checkambigfiles` is a set of (path, vfs-location) tuples,
122 which determine whether file stat ambiguity should be avoided
126 which determine whether file stat ambiguity should be avoided
123 for corresponded files.
127 for corresponded files.
124 """
128 """
125 self._count = 1
129 self._count = 1
126 self._usages = 1
130 self._usages = 1
127 self._report = report
131 self._report = report
128 # a vfs to the store content
132 # a vfs to the store content
129 self._opener = opener
133 self._opener = opener
130 # a map to access file in various {location -> vfs}
134 # a map to access file in various {location -> vfs}
131 vfsmap = vfsmap.copy()
135 vfsmap = vfsmap.copy()
132 vfsmap[''] = opener # set default value
136 vfsmap[''] = opener # set default value
133 self._vfsmap = vfsmap
137 self._vfsmap = vfsmap
134 self._after = after
138 self._after = after
135 self._entries = []
139 self._entries = []
136 self._map = {}
140 self._map = {}
137 self._journal = journalname
141 self._journal = journalname
138 self._undoname = undoname
142 self._undoname = undoname
139 self._queue = []
143 self._queue = []
140 # A callback to validate transaction content before closing it.
144 # A callback to validate transaction content before closing it.
141 # should raise exception is anything is wrong.
145 # should raise exception is anything is wrong.
142 # target user is repository hooks.
146 # target user is repository hooks.
143 if validator is None:
147 if validator is None:
144 validator = lambda tr: None
148 validator = lambda tr: None
145 self._validator = validator
149 self._validator = validator
146 # A callback to do something just after releasing transaction.
150 # A callback to do something just after releasing transaction.
147 if releasefn is None:
151 if releasefn is None:
148 releasefn = lambda tr, success: None
152 releasefn = lambda tr, success: None
149 self._releasefn = releasefn
153 self._releasefn = releasefn
150
154
151 self._checkambigfiles = set()
155 self._checkambigfiles = set()
152 if checkambigfiles:
156 if checkambigfiles:
153 self._checkambigfiles.update(checkambigfiles)
157 self._checkambigfiles.update(checkambigfiles)
154
158
155 self._names = [name]
159 self._names = [name]
156
160
157 # A dict dedicated to precisely tracking the changes introduced in the
161 # A dict dedicated to precisely tracking the changes introduced in the
158 # transaction.
162 # transaction.
159 self.changes = {}
163 self.changes = {}
160
164
161 # a dict of arguments to be passed to hooks
165 # a dict of arguments to be passed to hooks
162 self.hookargs = {}
166 self.hookargs = {}
163 self._file = opener.open(self._journal, "w")
167 self._file = opener.open(self._journal, "w")
164
168
165 # a list of ('location', 'path', 'backuppath', cache) entries.
169 # a list of ('location', 'path', 'backuppath', cache) entries.
166 # - if 'backuppath' is empty, no file existed at backup time
170 # - if 'backuppath' is empty, no file existed at backup time
167 # - if 'path' is empty, this is a temporary transaction file
171 # - if 'path' is empty, this is a temporary transaction file
168 # - if 'location' is not empty, the path is outside main opener reach.
172 # - if 'location' is not empty, the path is outside main opener reach.
169 # use 'location' value as a key in a vfsmap to find the right 'vfs'
173 # use 'location' value as a key in a vfsmap to find the right 'vfs'
170 # (cache is currently unused)
174 # (cache is currently unused)
171 self._backupentries = []
175 self._backupentries = []
172 self._backupmap = {}
176 self._backupmap = {}
173 self._backupjournal = "%s.backupfiles" % self._journal
177 self._backupjournal = "%s.backupfiles" % self._journal
174 self._backupsfile = opener.open(self._backupjournal, 'w')
178 self._backupsfile = opener.open(self._backupjournal, 'w')
175 self._backupsfile.write('%d\n' % version)
179 self._backupsfile.write('%d\n' % version)
176
180
177 if createmode is not None:
181 if createmode is not None:
178 opener.chmod(self._journal, createmode & 0o666)
182 opener.chmod(self._journal, createmode & 0o666)
179 opener.chmod(self._backupjournal, createmode & 0o666)
183 opener.chmod(self._backupjournal, createmode & 0o666)
180
184
181 # hold file generations to be performed on commit
185 # hold file generations to be performed on commit
182 self._filegenerators = {}
186 self._filegenerators = {}
183 # hold callback to write pending data for hooks
187 # hold callback to write pending data for hooks
184 self._pendingcallback = {}
188 self._pendingcallback = {}
185 # True is any pending data have been written ever
189 # True is any pending data have been written ever
186 self._anypending = False
190 self._anypending = False
187 # holds callback to call when writing the transaction
191 # holds callback to call when writing the transaction
188 self._finalizecallback = {}
192 self._finalizecallback = {}
189 # hold callback for post transaction close
193 # hold callback for post transaction close
190 self._postclosecallback = {}
194 self._postclosecallback = {}
191 # holds callbacks to call during abort
195 # holds callbacks to call during abort
192 self._abortcallback = {}
196 self._abortcallback = {}
193
197
194 def __repr__(self):
198 def __repr__(self):
195 name = r'/'.join(self._names)
199 name = r'/'.join(self._names)
196 return (r'<transaction name=%s, count=%d, usages=%d>' %
200 return (r'<transaction name=%s, count=%d, usages=%d>' %
197 (name, self._count, self._usages))
201 (name, self._count, self._usages))
198
202
199 def __del__(self):
203 def __del__(self):
200 if self._journal:
204 if self._journal:
201 self._abort()
205 self._abort()
202
206
203 @active
207 @active
204 def startgroup(self):
208 def startgroup(self):
205 """delay registration of file entry
209 """delay registration of file entry
206
210
207 This is used by strip to delay vision of strip offset. The transaction
211 This is used by strip to delay vision of strip offset. The transaction
208 sees either none or all of the strip actions to be done."""
212 sees either none or all of the strip actions to be done."""
209 self._queue.append([])
213 self._queue.append([])
210
214
211 @active
215 @active
212 def endgroup(self):
216 def endgroup(self):
213 """apply delayed registration of file entry.
217 """apply delayed registration of file entry.
214
218
215 This is used by strip to delay vision of strip offset. The transaction
219 This is used by strip to delay vision of strip offset. The transaction
216 sees either none or all of the strip actions to be done."""
220 sees either none or all of the strip actions to be done."""
217 q = self._queue.pop()
221 q = self._queue.pop()
218 for f, o, data in q:
222 for f, o, data in q:
219 self._addentry(f, o, data)
223 self._addentry(f, o, data)
220
224
221 @active
225 @active
222 def add(self, file, offset, data=None):
226 def add(self, file, offset, data=None):
223 """record the state of an append-only file before update"""
227 """record the state of an append-only file before update"""
224 if file in self._map or file in self._backupmap:
228 if file in self._map or file in self._backupmap:
225 return
229 return
226 if self._queue:
230 if self._queue:
227 self._queue[-1].append((file, offset, data))
231 self._queue[-1].append((file, offset, data))
228 return
232 return
229
233
230 self._addentry(file, offset, data)
234 self._addentry(file, offset, data)
231
235
232 def _addentry(self, file, offset, data):
236 def _addentry(self, file, offset, data):
233 """add a append-only entry to memory and on-disk state"""
237 """add a append-only entry to memory and on-disk state"""
234 if file in self._map or file in self._backupmap:
238 if file in self._map or file in self._backupmap:
235 return
239 return
236 self._entries.append((file, offset, data))
240 self._entries.append((file, offset, data))
237 self._map[file] = len(self._entries) - 1
241 self._map[file] = len(self._entries) - 1
238 # add enough data to the journal to do the truncate
242 # add enough data to the journal to do the truncate
239 self._file.write("%s\0%d\n" % (file, offset))
243 self._file.write("%s\0%d\n" % (file, offset))
240 self._file.flush()
244 self._file.flush()
241
245
242 @active
246 @active
243 def addbackup(self, file, hardlink=True, location=''):
247 def addbackup(self, file, hardlink=True, location=''):
244 """Adds a backup of the file to the transaction
248 """Adds a backup of the file to the transaction
245
249
246 Calling addbackup() creates a hardlink backup of the specified file
250 Calling addbackup() creates a hardlink backup of the specified file
247 that is used to recover the file in the event of the transaction
251 that is used to recover the file in the event of the transaction
248 aborting.
252 aborting.
249
253
250 * `file`: the file path, relative to .hg/store
254 * `file`: the file path, relative to .hg/store
251 * `hardlink`: use a hardlink to quickly create the backup
255 * `hardlink`: use a hardlink to quickly create the backup
252 """
256 """
253 if self._queue:
257 if self._queue:
254 msg = 'cannot use transaction.addbackup inside "group"'
258 msg = 'cannot use transaction.addbackup inside "group"'
255 raise error.ProgrammingError(msg)
259 raise error.ProgrammingError(msg)
256
260
257 if file in self._map or file in self._backupmap:
261 if file in self._map or file in self._backupmap:
258 return
262 return
259 vfs = self._vfsmap[location]
263 vfs = self._vfsmap[location]
260 dirname, filename = vfs.split(file)
264 dirname, filename = vfs.split(file)
261 backupfilename = "%s.backup.%s" % (self._journal, filename)
265 backupfilename = "%s.backup.%s" % (self._journal, filename)
262 backupfile = vfs.reljoin(dirname, backupfilename)
266 backupfile = vfs.reljoin(dirname, backupfilename)
263 if vfs.exists(file):
267 if vfs.exists(file):
264 filepath = vfs.join(file)
268 filepath = vfs.join(file)
265 backuppath = vfs.join(backupfile)
269 backuppath = vfs.join(backupfile)
266 util.copyfile(filepath, backuppath, hardlink=hardlink)
270 util.copyfile(filepath, backuppath, hardlink=hardlink)
267 else:
271 else:
268 backupfile = ''
272 backupfile = ''
269
273
270 self._addbackupentry((location, file, backupfile, False))
274 self._addbackupentry((location, file, backupfile, False))
271
275
272 def _addbackupentry(self, entry):
276 def _addbackupentry(self, entry):
273 """register a new backup entry and write it to disk"""
277 """register a new backup entry and write it to disk"""
274 self._backupentries.append(entry)
278 self._backupentries.append(entry)
275 self._backupmap[entry[1]] = len(self._backupentries) - 1
279 self._backupmap[entry[1]] = len(self._backupentries) - 1
276 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
280 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
277 self._backupsfile.flush()
281 self._backupsfile.flush()
278
282
279 @active
283 @active
280 def registertmp(self, tmpfile, location=''):
284 def registertmp(self, tmpfile, location=''):
281 """register a temporary transaction file
285 """register a temporary transaction file
282
286
283 Such files will be deleted when the transaction exits (on both
287 Such files will be deleted when the transaction exits (on both
284 failure and success).
288 failure and success).
285 """
289 """
286 self._addbackupentry((location, '', tmpfile, False))
290 self._addbackupentry((location, '', tmpfile, False))
287
291
288 @active
292 @active
289 def addfilegenerator(self, genid, filenames, genfunc, order=0,
293 def addfilegenerator(self, genid, filenames, genfunc, order=0,
290 location=''):
294 location=''):
291 """add a function to generates some files at transaction commit
295 """add a function to generates some files at transaction commit
292
296
293 The `genfunc` argument is a function capable of generating proper
297 The `genfunc` argument is a function capable of generating proper
294 content of each entry in the `filename` tuple.
298 content of each entry in the `filename` tuple.
295
299
296 At transaction close time, `genfunc` will be called with one file
300 At transaction close time, `genfunc` will be called with one file
297 object argument per entries in `filenames`.
301 object argument per entries in `filenames`.
298
302
299 The transaction itself is responsible for the backup, creation and
303 The transaction itself is responsible for the backup, creation and
300 final write of such file.
304 final write of such file.
301
305
302 The `genid` argument is used to ensure the same set of file is only
306 The `genid` argument is used to ensure the same set of file is only
303 generated once. Call to `addfilegenerator` for a `genid` already
307 generated once. Call to `addfilegenerator` for a `genid` already
304 present will overwrite the old entry.
308 present will overwrite the old entry.
305
309
306 The `order` argument may be used to control the order in which multiple
310 The `order` argument may be used to control the order in which multiple
307 generator will be executed.
311 generator will be executed.
308
312
309 The `location` arguments may be used to indicate the files are located
313 The `location` arguments may be used to indicate the files are located
310 outside of the the standard directory for transaction. It should match
314 outside of the the standard directory for transaction. It should match
311 one of the key of the `transaction.vfsmap` dictionary.
315 one of the key of the `transaction.vfsmap` dictionary.
312 """
316 """
313 # For now, we are unable to do proper backup and restore of custom vfs
317 # For now, we are unable to do proper backup and restore of custom vfs
314 # but for bookmarks that are handled outside this mechanism.
318 # but for bookmarks that are handled outside this mechanism.
315 self._filegenerators[genid] = (order, filenames, genfunc, location)
319 self._filegenerators[genid] = (order, filenames, genfunc, location)
316
320
317 @active
321 @active
318 def removefilegenerator(self, genid):
322 def removefilegenerator(self, genid):
319 """reverse of addfilegenerator, remove a file generator function"""
323 """reverse of addfilegenerator, remove a file generator function"""
320 if genid in self._filegenerators:
324 if genid in self._filegenerators:
321 del self._filegenerators[genid]
325 del self._filegenerators[genid]
322
326
323 def _generatefiles(self, suffix='', group=gengroupall):
327 def _generatefiles(self, suffix='', group=gengroupall):
324 # write files registered for generation
328 # write files registered for generation
325 any = False
329 any = False
326 for id, entry in sorted(self._filegenerators.iteritems()):
330 for id, entry in sorted(self._filegenerators.iteritems()):
327 any = True
331 any = True
328 order, filenames, genfunc, location = entry
332 order, filenames, genfunc, location = entry
329
333
330 # for generation at closing, check if it's before or after finalize
334 # for generation at closing, check if it's before or after finalize
331 postfinalize = group == gengrouppostfinalize
335 postfinalize = group == gengrouppostfinalize
332 if (group != gengroupall and
336 if (group != gengroupall and
333 (id in postfinalizegenerators) != (postfinalize)):
337 (id in postfinalizegenerators) != (postfinalize)):
334 continue
338 continue
335
339
336 vfs = self._vfsmap[location]
340 vfs = self._vfsmap[location]
337 files = []
341 files = []
338 try:
342 try:
339 for name in filenames:
343 for name in filenames:
340 name += suffix
344 name += suffix
341 if suffix:
345 if suffix:
342 self.registertmp(name, location=location)
346 self.registertmp(name, location=location)
343 checkambig = False
347 checkambig = False
344 else:
348 else:
345 self.addbackup(name, location=location)
349 self.addbackup(name, location=location)
346 checkambig = (name, location) in self._checkambigfiles
350 checkambig = (name, location) in self._checkambigfiles
347 files.append(vfs(name, 'w', atomictemp=True,
351 files.append(vfs(name, 'w', atomictemp=True,
348 checkambig=checkambig))
352 checkambig=checkambig))
349 genfunc(*files)
353 genfunc(*files)
350 for f in files:
354 for f in files:
351 f.close()
355 f.close()
352 # skip discard() loop since we're sure no open file remains
356 # skip discard() loop since we're sure no open file remains
353 del files[:]
357 del files[:]
354 finally:
358 finally:
355 for f in files:
359 for f in files:
356 f.discard()
360 f.discard()
357 return any
361 return any
358
362
359 @active
363 @active
360 def find(self, file):
364 def find(self, file):
361 if file in self._map:
365 if file in self._map:
362 return self._entries[self._map[file]]
366 return self._entries[self._map[file]]
363 if file in self._backupmap:
367 if file in self._backupmap:
364 return self._backupentries[self._backupmap[file]]
368 return self._backupentries[self._backupmap[file]]
365 return None
369 return None
366
370
367 @active
371 @active
368 def replace(self, file, offset, data=None):
372 def replace(self, file, offset, data=None):
369 '''
373 '''
370 replace can only replace already committed entries
374 replace can only replace already committed entries
371 that are not pending in the queue
375 that are not pending in the queue
372 '''
376 '''
373
377
374 if file not in self._map:
378 if file not in self._map:
375 raise KeyError(file)
379 raise KeyError(file)
376 index = self._map[file]
380 index = self._map[file]
377 self._entries[index] = (file, offset, data)
381 self._entries[index] = (file, offset, data)
378 self._file.write("%s\0%d\n" % (file, offset))
382 self._file.write("%s\0%d\n" % (file, offset))
379 self._file.flush()
383 self._file.flush()
380
384
381 @active
385 @active
382 def nest(self, name=r'<unnamed>'):
386 def nest(self, name=r'<unnamed>'):
383 self._count += 1
387 self._count += 1
384 self._usages += 1
388 self._usages += 1
385 self._names.append(name)
389 self._names.append(name)
386 return self
390 return self
387
391
388 def release(self):
392 def release(self):
389 if self._count > 0:
393 if self._count > 0:
390 self._usages -= 1
394 self._usages -= 1
391 if self._names:
395 if self._names:
392 self._names.pop()
396 self._names.pop()
393 # if the transaction scopes are left without being closed, fail
397 # if the transaction scopes are left without being closed, fail
394 if self._count > 0 and self._usages == 0:
398 if self._count > 0 and self._usages == 0:
395 self._abort()
399 self._abort()
396
400
397 def running(self):
401 def running(self):
398 return self._count > 0
402 return self._count > 0
399
403
400 def addpending(self, category, callback):
404 def addpending(self, category, callback):
401 """add a callback to be called when the transaction is pending
405 """add a callback to be called when the transaction is pending
402
406
403 The transaction will be given as callback's first argument.
407 The transaction will be given as callback's first argument.
404
408
405 Category is a unique identifier to allow overwriting an old callback
409 Category is a unique identifier to allow overwriting an old callback
406 with a newer callback.
410 with a newer callback.
407 """
411 """
408 self._pendingcallback[category] = callback
412 self._pendingcallback[category] = callback
409
413
410 @active
414 @active
411 def writepending(self):
415 def writepending(self):
412 '''write pending file to temporary version
416 '''write pending file to temporary version
413
417
414 This is used to allow hooks to view a transaction before commit'''
418 This is used to allow hooks to view a transaction before commit'''
415 categories = sorted(self._pendingcallback)
419 categories = sorted(self._pendingcallback)
416 for cat in categories:
420 for cat in categories:
417 # remove callback since the data will have been flushed
421 # remove callback since the data will have been flushed
418 any = self._pendingcallback.pop(cat)(self)
422 any = self._pendingcallback.pop(cat)(self)
419 self._anypending = self._anypending or any
423 self._anypending = self._anypending or any
420 self._anypending |= self._generatefiles(suffix='.pending')
424 self._anypending |= self._generatefiles(suffix='.pending')
421 return self._anypending
425 return self._anypending
422
426
423 @active
427 @active
424 def addfinalize(self, category, callback):
428 def addfinalize(self, category, callback):
425 """add a callback to be called when the transaction is closed
429 """add a callback to be called when the transaction is closed
426
430
427 The transaction will be given as callback's first argument.
431 The transaction will be given as callback's first argument.
428
432
429 Category is a unique identifier to allow overwriting old callbacks with
433 Category is a unique identifier to allow overwriting old callbacks with
430 newer callbacks.
434 newer callbacks.
431 """
435 """
432 self._finalizecallback[category] = callback
436 self._finalizecallback[category] = callback
433
437
434 @active
438 @active
435 def addpostclose(self, category, callback):
439 def addpostclose(self, category, callback):
436 """add or replace a callback to be called after the transaction closed
440 """add or replace a callback to be called after the transaction closed
437
441
438 The transaction will be given as callback's first argument.
442 The transaction will be given as callback's first argument.
439
443
440 Category is a unique identifier to allow overwriting an old callback
444 Category is a unique identifier to allow overwriting an old callback
441 with a newer callback.
445 with a newer callback.
442 """
446 """
443 self._postclosecallback[category] = callback
447 self._postclosecallback[category] = callback
444
448
445 @active
449 @active
446 def getpostclose(self, category):
450 def getpostclose(self, category):
447 """return a postclose callback added before, or None"""
451 """return a postclose callback added before, or None"""
448 return self._postclosecallback.get(category, None)
452 return self._postclosecallback.get(category, None)
449
453
450 @active
454 @active
451 def addabort(self, category, callback):
455 def addabort(self, category, callback):
452 """add a callback to be called when the transaction is aborted.
456 """add a callback to be called when the transaction is aborted.
453
457
454 The transaction will be given as the first argument to the callback.
458 The transaction will be given as the first argument to the callback.
455
459
456 Category is a unique identifier to allow overwriting an old callback
460 Category is a unique identifier to allow overwriting an old callback
457 with a newer callback.
461 with a newer callback.
458 """
462 """
459 self._abortcallback[category] = callback
463 self._abortcallback[category] = callback
460
464
    @active
    def close(self):
        '''commit the transaction

        On the outermost nesting level this runs the pre-finalize file
        generators, the finalize callbacks and the post-finalize
        generators, then removes the journal/backup files, writes the
        undo data, releases the transaction and fires the post-close
        callbacks.  Nested calls only decrement the usage count.
        '''
        if self._count == 1:
            self._validator(self) # will raise exception if needed
            self._validator = None # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            # finalizers run in sorted category order for determinism
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            # still nested inside an outer scope; the outermost close()
            # performs the actual commit work below
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location %s\n"
                             % (b, l))
                continue
            vfs = self._vfsmap[l]
            # an entry with no target file is a temporary file: remove it
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        # non-cache entries must be removed; re-raise
                        raise
                    # Abort may be raise by read only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._entries = []
        self._writeundo()
        if self._after:
            self._after()
            self._after = None # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        # second pass: drop the backup copies themselves
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report("couldn't remove %s: unknown cache location"
                             "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self._report("couldn't remove %s: %s\n"
                                 % (vfs.join(b), inst))
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True) # notify success of closing transaction
        self._releasefn = None # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
532
536
533 @active
537 @active
534 def abort(self):
538 def abort(self):
535 '''abort the transaction (generally called on error, or when the
539 '''abort the transaction (generally called on error, or when the
536 transaction is not explicitly committed before going out of
540 transaction is not explicitly committed before going out of
537 scope)'''
541 scope)'''
538 self._abort()
542 self._abort()
539
543
540 def _writeundo(self):
544 def _writeundo(self):
541 """write transaction data for possible future undo call"""
545 """write transaction data for possible future undo call"""
542 if self._undoname is None:
546 if self._undoname is None:
543 return
547 return
544 undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
548 undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
545 'w')
549 'w')
546 undobackupfile.write('%d\n' % version)
550 undobackupfile.write('%d\n' % version)
547 for l, f, b, c in self._backupentries:
551 for l, f, b, c in self._backupentries:
548 if not f: # temporary file
552 if not f: # temporary file
549 continue
553 continue
550 if not b:
554 if not b:
551 u = ''
555 u = ''
552 else:
556 else:
553 if l not in self._vfsmap and c:
557 if l not in self._vfsmap and c:
554 self._report("couldn't remove %s: unknown cache location"
558 self._report("couldn't remove %s: unknown cache location"
555 "%s\n" % (b, l))
559 "%s\n" % (b, l))
556 continue
560 continue
557 vfs = self._vfsmap[l]
561 vfs = self._vfsmap[l]
558 base, name = vfs.split(b)
562 base, name = vfs.split(b)
559 assert name.startswith(self._journal), name
563 assert name.startswith(self._journal), name
560 uname = name.replace(self._journal, self._undoname, 1)
564 uname = name.replace(self._journal, self._undoname, 1)
561 u = vfs.reljoin(base, uname)
565 u = vfs.reljoin(base, uname)
562 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
566 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
563 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
567 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
564 undobackupfile.close()
568 undobackupfile.close()
565
569
566
570
    def _abort(self):
        """roll the transaction back (shared implementation)

        Resets the usage counters, replays the journal via _playback()
        to undo partially written data, and always releases the
        transaction, even when the rollback itself fails.
        """
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            if not self._entries and not self._backupentries:
                # nothing was written: just discard the journal files
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self._journal, self._report, self._opener,
                          self._vfsmap, self._entries, self._backupentries,
                          False, checkambigfiles=self._checkambigfiles)
                self._report(_("rollback completed\n"))
            except BaseException as exc:
                # deliberately broad: a failed rollback must be reported,
                # not masked by a secondary exception; the user is told to
                # run 'hg recover'
                self._report(_("rollback failed - please run hg recover\n"))
                self._report(_("(failure reason: %s)\n")
                             % stringutil.forcebytestr(exc))
        finally:
            # release the transaction unconditionally so the lock is not
            # left held after a failed rollback
            self._journal = None
            self._releasefn(self, False) # notify failure of transaction
            self._releasefn = None # Help prevent cycles.
600
604
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            # int() tolerates the trailing newline left on 'o'
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # was previously left open and only closed by GC; close explicitly
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # 'c' is serialized as '0'/'1'; bool(c) was always
                        # True for these non-empty strings, which made every
                        # entry look error-tolerant on playback. Decode the
                        # flag explicitly instead.
                        backupentries.append((l, f, b, c != b'0'))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
General Comments 0
You need to be logged in to leave comments. Login now