##// END OF EJS Templates
transaction: do not overwrite atomic-temp files on error...
Yuya Nishihara -
r41140:3e2c0283 default
parent child Browse files
Show More
@@ -1,645 +1,649 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import (
24 from .utils import (
25 stringutil,
25 stringutil,
26 )
26 )
27
27
# On-disk format version of the journal/backup files.
version = 2

# File generators that must run only after the finalizers, because they
# consume the finalizers' output (e.g. the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

# Generator-group selectors accepted by _generatefiles().
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
41
41
def active(func):
    """Decorator: refuse to run *func* once the transaction is finished.

    ``self._count`` drops to zero after commit or abort; past that point
    any wrapped method raises instead of touching dead state.
    """
    def wrapper(self, *args, **kwds):
        if not self._count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return wrapper
49
49
50 def _playback(journal, report, opener, vfsmap, entries, backupentries,
50 def _playback(journal, report, opener, vfsmap, entries, backupentries,
51 unlink=True, checkambigfiles=None):
51 unlink=True, checkambigfiles=None):
52 for f, o, _ignore in entries:
52 for f, o, _ignore in entries:
53 if o or not unlink:
53 if o or not unlink:
54 checkambig = checkambigfiles and (f, '') in checkambigfiles
54 checkambig = checkambigfiles and (f, '') in checkambigfiles
55 try:
55 try:
56 fp = opener(f, 'a', checkambig=checkambig)
56 fp = opener(f, 'a', checkambig=checkambig)
57 fp.truncate(o)
57 fp.truncate(o)
58 fp.close()
58 fp.close()
59 except IOError:
59 except IOError:
60 report(_("failed to truncate %s\n") % f)
60 report(_("failed to truncate %s\n") % f)
61 raise
61 raise
62 else:
62 else:
63 try:
63 try:
64 opener.unlink(f)
64 opener.unlink(f)
65 except (IOError, OSError) as inst:
65 except (IOError, OSError) as inst:
66 if inst.errno != errno.ENOENT:
66 if inst.errno != errno.ENOENT:
67 raise
67 raise
68
68
69 backupfiles = []
69 backupfiles = []
70 for l, f, b, c in backupentries:
70 for l, f, b, c in backupentries:
71 if l not in vfsmap and c:
71 if l not in vfsmap and c:
72 report("couldn't handle %s: unknown cache location %s\n"
72 report("couldn't handle %s: unknown cache location %s\n"
73 % (b, l))
73 % (b, l))
74 vfs = vfsmap[l]
74 vfs = vfsmap[l]
75 try:
75 try:
76 if f and b:
76 if f and b:
77 filepath = vfs.join(f)
77 filepath = vfs.join(f)
78 backuppath = vfs.join(b)
78 backuppath = vfs.join(b)
79 checkambig = checkambigfiles and (f, l) in checkambigfiles
79 checkambig = checkambigfiles and (f, l) in checkambigfiles
80 try:
80 try:
81 util.copyfile(backuppath, filepath, checkambig=checkambig)
81 util.copyfile(backuppath, filepath, checkambig=checkambig)
82 backupfiles.append(b)
82 backupfiles.append(b)
83 except IOError:
83 except IOError:
84 report(_("failed to recover %s\n") % f)
84 report(_("failed to recover %s\n") % f)
85 else:
85 else:
86 target = f or b
86 target = f or b
87 try:
87 try:
88 vfs.unlink(target)
88 vfs.unlink(target)
89 except (IOError, OSError) as inst:
89 except (IOError, OSError) as inst:
90 if inst.errno != errno.ENOENT:
90 if inst.errno != errno.ENOENT:
91 raise
91 raise
92 except (IOError, OSError, error.Abort) as inst:
92 except (IOError, OSError, error.Abort) as inst:
93 if not c:
93 if not c:
94 raise
94 raise
95
95
96 backuppath = "%s.backupfiles" % journal
96 backuppath = "%s.backupfiles" % journal
97 if opener.exists(backuppath):
97 if opener.exists(backuppath):
98 opener.unlink(backuppath)
98 opener.unlink(backuppath)
99 opener.unlink(journal)
99 opener.unlink(journal)
100 try:
100 try:
101 for f in backupfiles:
101 for f in backupfiles:
102 if opener.exists(f):
102 if opener.exists(f):
103 opener.unlink(f)
103 opener.unlink(f)
104 except (IOError, OSError, error.Abort) as inst:
104 except (IOError, OSError, error.Abort) as inst:
105 # only pure backup file remains, it is sage to ignore any error
105 # only pure backup file remains, it is sage to ignore any error
106 pass
106 pass
107
107
class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None, name=r'<unnamed>'):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._entries = []
        self._map = {}
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise an exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        # journal file recording (file, offset) pairs for rollback truncation
        self._file = opener.open(self._journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
193
193
194 def __repr__(self):
194 def __repr__(self):
195 name = r'/'.join(self._names)
195 name = r'/'.join(self._names)
196 return (r'<transaction name=%s, count=%d, usages=%d>' %
196 return (r'<transaction name=%s, count=%d, usages=%d>' %
197 (name, self._count, self._usages))
197 (name, self._count, self._usages))
198
198
    def __del__(self):
        # _journal is cleared once the transaction is committed; if it is
        # still set here the transaction was never closed, so roll back.
        if self._journal:
            self._abort()
202
202
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        # push a fresh queue; add() will accumulate into it until endgroup()
        self._queue.append([])
210
210
211 @active
211 @active
212 def endgroup(self):
212 def endgroup(self):
213 """apply delayed registration of file entry.
213 """apply delayed registration of file entry.
214
214
215 This is used by strip to delay vision of strip offset. The transaction
215 This is used by strip to delay vision of strip offset. The transaction
216 sees either none or all of the strip actions to be done."""
216 sees either none or all of the strip actions to be done."""
217 q = self._queue.pop()
217 q = self._queue.pop()
218 for f, o, data in q:
218 for f, o, data in q:
219 self._addentry(f, o, data)
219 self._addentry(f, o, data)
220
220
221 @active
221 @active
222 def add(self, file, offset, data=None):
222 def add(self, file, offset, data=None):
223 """record the state of an append-only file before update"""
223 """record the state of an append-only file before update"""
224 if file in self._map or file in self._backupmap:
224 if file in self._map or file in self._backupmap:
225 return
225 return
226 if self._queue:
226 if self._queue:
227 self._queue[-1].append((file, offset, data))
227 self._queue[-1].append((file, offset, data))
228 return
228 return
229
229
230 self._addentry(file, offset, data)
230 self._addentry(file, offset, data)
231
231
    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self._map or file in self._backupmap:
            return
        self._entries.append((file, offset, data))
        self._map[file] = len(self._entries) - 1
        # add enough data to the journal to do the truncate; flush so the
        # entry is durable before the file itself is modified
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()
241
241
    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise error.ProgrammingError(msg)

        if file in self._map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        # backup lives next to the original, named after the journal
        backupfilename = "%s.backup.%s" % (self._journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "file did not exist before"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))
271
271
    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk

        `entry` is a (location, file, backuppath, cache) tuple.
        """
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        # flush so the backup record is durable before any data change
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()
278
278
    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'file' field marks the entry as a temporary file
        self._addbackupentry((location, '', tmpfile, False))
287
287
    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)
316
316
317 @active
317 @active
318 def removefilegenerator(self, genid):
318 def removefilegenerator(self, genid):
319 """reverse of addfilegenerator, remove a file generator function"""
319 """reverse of addfilegenerator, remove a file generator function"""
320 if genid in self._filegenerators:
320 if genid in self._filegenerators:
321 del self._filegenerators[genid]
321 del self._filegenerators[genid]
322
322
    def _generatefiles(self, suffix='', group=gengroupall):
        # write files registered for generation
        any = False
        for id, entry in sorted(self._filegenerators.iteritems()):
            any = True
            order, filenames, genfunc, location = entry

            # for generation at closing, check if it's before or after finalize
            postfinalize = group == gengrouppostfinalize
            if (group != gengroupall and
                (id in postfinalizegenerators) != (postfinalize)):
                continue

            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # pending write: temporary file, removed on exit
                        self.registertmp(name, location=location)
                        checkambig = False
                    else:
                        self.addbackup(name, location=location)
                        checkambig = (name, location) in self._checkambigfiles
                    files.append(vfs(name, 'w', atomictemp=True,
                                     checkambig=checkambig))
                genfunc(*files)
                # on success, commit each atomic-temp file into place
                for f in files:
                    f.close()
                # skip discard() loop since we're sure no open file remains
                del files[:]
            finally:
                # on error, discard() drops the atomic-temp files instead of
                # overwriting the originals
                for f in files:
                    f.discard()
        return any
354
358
355 @active
359 @active
356 def find(self, file):
360 def find(self, file):
357 if file in self._map:
361 if file in self._map:
358 return self._entries[self._map[file]]
362 return self._entries[self._map[file]]
359 if file in self._backupmap:
363 if file in self._backupmap:
360 return self._backupentries[self._backupmap[file]]
364 return self._backupentries[self._backupmap[file]]
361 return None
365 return None
362
366
    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self._map:
            raise KeyError(file)
        index = self._map[file]
        self._entries[index] = (file, offset, data)
        # re-journal the new offset; flush before any file modification
        self._file.write("%s\0%d\n" % (file, offset))
        self._file.flush()
376
380
377 @active
381 @active
378 def nest(self, name=r'<unnamed>'):
382 def nest(self, name=r'<unnamed>'):
379 self._count += 1
383 self._count += 1
380 self._usages += 1
384 self._usages += 1
381 self._names.append(name)
385 self._names.append(name)
382 return self
386 return self
383
387
    def release(self):
        """Leave one transaction scope; abort if all scopes leaked."""
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()
392
396
    def running(self):
        """True while the transaction has not been committed or aborted."""
        return self._count > 0
395
399
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback
405
409
406 @active
410 @active
407 def writepending(self):
411 def writepending(self):
408 '''write pending file to temporary version
412 '''write pending file to temporary version
409
413
410 This is used to allow hooks to view a transaction before commit'''
414 This is used to allow hooks to view a transaction before commit'''
411 categories = sorted(self._pendingcallback)
415 categories = sorted(self._pendingcallback)
412 for cat in categories:
416 for cat in categories:
413 # remove callback since the data will have been flushed
417 # remove callback since the data will have been flushed
414 any = self._pendingcallback.pop(cat)(self)
418 any = self._pendingcallback.pop(cat)(self)
415 self._anypending = self._anypending or any
419 self._anypending = self._anypending or any
416 self._anypending |= self._generatefiles(suffix='.pending')
420 self._anypending |= self._generatefiles(suffix='.pending')
417 return self._anypending
421 return self._anypending
418
422
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
429
433
    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
440
444
441 @active
445 @active
442 def getpostclose(self, category):
446 def getpostclose(self, category):
443 """return a postclose callback added before, or None"""
447 """return a postclose callback added before, or None"""
444 return self._postclosecallback.get(category, None)
448 return self._postclosecallback.get(category, None)
445
449
    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback
456
460
457 @active
461 @active
458 def close(self):
462 def close(self):
459 '''commit the transaction'''
463 '''commit the transaction'''
460 if self._count == 1:
464 if self._count == 1:
461 self._validator(self) # will raise exception if needed
465 self._validator(self) # will raise exception if needed
462 self._validator = None # Help prevent cycles.
466 self._validator = None # Help prevent cycles.
463 self._generatefiles(group=gengroupprefinalize)
467 self._generatefiles(group=gengroupprefinalize)
464 categories = sorted(self._finalizecallback)
468 categories = sorted(self._finalizecallback)
465 for cat in categories:
469 for cat in categories:
466 self._finalizecallback[cat](self)
470 self._finalizecallback[cat](self)
467 # Prevent double usage and help clear cycles.
471 # Prevent double usage and help clear cycles.
468 self._finalizecallback = None
472 self._finalizecallback = None
469 self._generatefiles(group=gengrouppostfinalize)
473 self._generatefiles(group=gengrouppostfinalize)
470
474
471 self._count -= 1
475 self._count -= 1
472 if self._count != 0:
476 if self._count != 0:
473 return
477 return
474 self._file.close()
478 self._file.close()
475 self._backupsfile.close()
479 self._backupsfile.close()
476 # cleanup temporary files
480 # cleanup temporary files
477 for l, f, b, c in self._backupentries:
481 for l, f, b, c in self._backupentries:
478 if l not in self._vfsmap and c:
482 if l not in self._vfsmap and c:
479 self._report("couldn't remove %s: unknown cache location %s\n"
483 self._report("couldn't remove %s: unknown cache location %s\n"
480 % (b, l))
484 % (b, l))
481 continue
485 continue
482 vfs = self._vfsmap[l]
486 vfs = self._vfsmap[l]
483 if not f and b and vfs.exists(b):
487 if not f and b and vfs.exists(b):
484 try:
488 try:
485 vfs.unlink(b)
489 vfs.unlink(b)
486 except (IOError, OSError, error.Abort) as inst:
490 except (IOError, OSError, error.Abort) as inst:
487 if not c:
491 if not c:
488 raise
492 raise
489 # Abort may be raise by read only opener
493 # Abort may be raise by read only opener
490 self._report("couldn't remove %s: %s\n"
494 self._report("couldn't remove %s: %s\n"
491 % (vfs.join(b), inst))
495 % (vfs.join(b), inst))
492 self._entries = []
496 self._entries = []
493 self._writeundo()
497 self._writeundo()
494 if self._after:
498 if self._after:
495 self._after()
499 self._after()
496 self._after = None # Help prevent cycles.
500 self._after = None # Help prevent cycles.
497 if self._opener.isfile(self._backupjournal):
501 if self._opener.isfile(self._backupjournal):
498 self._opener.unlink(self._backupjournal)
502 self._opener.unlink(self._backupjournal)
499 if self._opener.isfile(self._journal):
503 if self._opener.isfile(self._journal):
500 self._opener.unlink(self._journal)
504 self._opener.unlink(self._journal)
501 for l, _f, b, c in self._backupentries:
505 for l, _f, b, c in self._backupentries:
502 if l not in self._vfsmap and c:
506 if l not in self._vfsmap and c:
503 self._report("couldn't remove %s: unknown cache location"
507 self._report("couldn't remove %s: unknown cache location"
504 "%s\n" % (b, l))
508 "%s\n" % (b, l))
505 continue
509 continue
506 vfs = self._vfsmap[l]
510 vfs = self._vfsmap[l]
507 if b and vfs.exists(b):
511 if b and vfs.exists(b):
508 try:
512 try:
509 vfs.unlink(b)
513 vfs.unlink(b)
510 except (IOError, OSError, error.Abort) as inst:
514 except (IOError, OSError, error.Abort) as inst:
511 if not c:
515 if not c:
512 raise
516 raise
513 # Abort may be raise by read only opener
517 # Abort may be raise by read only opener
514 self._report("couldn't remove %s: %s\n"
518 self._report("couldn't remove %s: %s\n"
515 % (vfs.join(b), inst))
519 % (vfs.join(b), inst))
516 self._backupentries = []
520 self._backupentries = []
517 self._journal = None
521 self._journal = None
518
522
519 self._releasefn(self, True) # notify success of closing transaction
523 self._releasefn(self, True) # notify success of closing transaction
520 self._releasefn = None # Help prevent cycles.
524 self._releasefn = None # Help prevent cycles.
521
525
522 # run post close action
526 # run post close action
523 categories = sorted(self._postclosecallback)
527 categories = sorted(self._postclosecallback)
524 for cat in categories:
528 for cat in categories:
525 self._postclosecallback[cat](self)
529 self._postclosecallback[cat](self)
526 # Prevent double usage and help clear cycles.
530 # Prevent double usage and help clear cycles.
527 self._postclosecallback = None
531 self._postclosecallback = None
528
532
529 @active
533 @active
530 def abort(self):
534 def abort(self):
531 '''abort the transaction (generally called on error, or when the
535 '''abort the transaction (generally called on error, or when the
532 transaction is not explicitly committed before going out of
536 transaction is not explicitly committed before going out of
533 scope)'''
537 scope)'''
534 self._abort()
538 self._abort()
535
539
536 def _writeundo(self):
540 def _writeundo(self):
537 """write transaction data for possible future undo call"""
541 """write transaction data for possible future undo call"""
538 if self._undoname is None:
542 if self._undoname is None:
539 return
543 return
540 undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
544 undobackupfile = self._opener.open("%s.backupfiles" % self._undoname,
541 'w')
545 'w')
542 undobackupfile.write('%d\n' % version)
546 undobackupfile.write('%d\n' % version)
543 for l, f, b, c in self._backupentries:
547 for l, f, b, c in self._backupentries:
544 if not f: # temporary file
548 if not f: # temporary file
545 continue
549 continue
546 if not b:
550 if not b:
547 u = ''
551 u = ''
548 else:
552 else:
549 if l not in self._vfsmap and c:
553 if l not in self._vfsmap and c:
550 self._report("couldn't remove %s: unknown cache location"
554 self._report("couldn't remove %s: unknown cache location"
551 "%s\n" % (b, l))
555 "%s\n" % (b, l))
552 continue
556 continue
553 vfs = self._vfsmap[l]
557 vfs = self._vfsmap[l]
554 base, name = vfs.split(b)
558 base, name = vfs.split(b)
555 assert name.startswith(self._journal), name
559 assert name.startswith(self._journal), name
556 uname = name.replace(self._journal, self._undoname, 1)
560 uname = name.replace(self._journal, self._undoname, 1)
557 u = vfs.reljoin(base, uname)
561 u = vfs.reljoin(base, uname)
558 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
562 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
559 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
563 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
560 undobackupfile.close()
564 undobackupfile.close()
561
565
562
566
563 def _abort(self):
567 def _abort(self):
564 self._count = 0
568 self._count = 0
565 self._usages = 0
569 self._usages = 0
566 self._file.close()
570 self._file.close()
567 self._backupsfile.close()
571 self._backupsfile.close()
568
572
569 try:
573 try:
570 if not self._entries and not self._backupentries:
574 if not self._entries and not self._backupentries:
571 if self._backupjournal:
575 if self._backupjournal:
572 self._opener.unlink(self._backupjournal)
576 self._opener.unlink(self._backupjournal)
573 if self._journal:
577 if self._journal:
574 self._opener.unlink(self._journal)
578 self._opener.unlink(self._journal)
575 return
579 return
576
580
577 self._report(_("transaction abort!\n"))
581 self._report(_("transaction abort!\n"))
578
582
579 try:
583 try:
580 for cat in sorted(self._abortcallback):
584 for cat in sorted(self._abortcallback):
581 self._abortcallback[cat](self)
585 self._abortcallback[cat](self)
582 # Prevent double usage and help clear cycles.
586 # Prevent double usage and help clear cycles.
583 self._abortcallback = None
587 self._abortcallback = None
584 _playback(self._journal, self._report, self._opener,
588 _playback(self._journal, self._report, self._opener,
585 self._vfsmap, self._entries, self._backupentries,
589 self._vfsmap, self._entries, self._backupentries,
586 False, checkambigfiles=self._checkambigfiles)
590 False, checkambigfiles=self._checkambigfiles)
587 self._report(_("rollback completed\n"))
591 self._report(_("rollback completed\n"))
588 except BaseException as exc:
592 except BaseException as exc:
589 self._report(_("rollback failed - please run hg recover\n"))
593 self._report(_("rollback failed - please run hg recover\n"))
590 self._report(_("(failure reason: %s)\n")
594 self._report(_("(failure reason: %s)\n")
591 % stringutil.forcebytestr(exc))
595 % stringutil.forcebytestr(exc))
592 finally:
596 finally:
593 self._journal = None
597 self._journal = None
594 self._releasefn(self, False) # notify failure of transaction
598 self._releasefn(self, False) # notify failure of transaction
595 self._releasefn = None # Help prevent cycles.
599 self._releasefn = None # Help prevent cycles.
596
600
597 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
601 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
598 """Rolls back the transaction contained in the given file
602 """Rolls back the transaction contained in the given file
599
603
600 Reads the entries in the specified file, and the corresponding
604 Reads the entries in the specified file, and the corresponding
601 '*.backupfiles' file, to recover from an incomplete transaction.
605 '*.backupfiles' file, to recover from an incomplete transaction.
602
606
603 * `file`: a file containing a list of entries, specifying where
607 * `file`: a file containing a list of entries, specifying where
604 to truncate each file. The file should contain a list of
608 to truncate each file. The file should contain a list of
605 file\0offset pairs, delimited by newlines. The corresponding
609 file\0offset pairs, delimited by newlines. The corresponding
606 '*.backupfiles' file should contain a list of file\0backupfile
610 '*.backupfiles' file should contain a list of file\0backupfile
607 pairs, delimited by \0.
611 pairs, delimited by \0.
608
612
609 `checkambigfiles` is a set of (path, vfs-location) tuples,
613 `checkambigfiles` is a set of (path, vfs-location) tuples,
610 which determine whether file stat ambiguity should be avoided at
614 which determine whether file stat ambiguity should be avoided at
611 restoring corresponded files.
615 restoring corresponded files.
612 """
616 """
613 entries = []
617 entries = []
614 backupentries = []
618 backupentries = []
615
619
616 fp = opener.open(file)
620 fp = opener.open(file)
617 lines = fp.readlines()
621 lines = fp.readlines()
618 fp.close()
622 fp.close()
619 for l in lines:
623 for l in lines:
620 try:
624 try:
621 f, o = l.split('\0')
625 f, o = l.split('\0')
622 entries.append((f, int(o), None))
626 entries.append((f, int(o), None))
623 except ValueError:
627 except ValueError:
624 report(
628 report(
625 _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
629 _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))
626
630
627 backupjournal = "%s.backupfiles" % file
631 backupjournal = "%s.backupfiles" % file
628 if opener.exists(backupjournal):
632 if opener.exists(backupjournal):
629 fp = opener.open(backupjournal)
633 fp = opener.open(backupjournal)
630 lines = fp.readlines()
634 lines = fp.readlines()
631 if lines:
635 if lines:
632 ver = lines[0][:-1]
636 ver = lines[0][:-1]
633 if ver == (b'%d' % version):
637 if ver == (b'%d' % version):
634 for line in lines[1:]:
638 for line in lines[1:]:
635 if line:
639 if line:
636 # Shave off the trailing newline
640 # Shave off the trailing newline
637 line = line[:-1]
641 line = line[:-1]
638 l, f, b, c = line.split('\0')
642 l, f, b, c = line.split('\0')
639 backupentries.append((l, f, b, bool(c)))
643 backupentries.append((l, f, b, bool(c)))
640 else:
644 else:
641 report(_("journal was created by a different version of "
645 report(_("journal was created by a different version of "
642 "Mercurial\n"))
646 "Mercurial\n"))
643
647
644 _playback(file, report, opener, vfsmap, entries, backupentries,
648 _playback(file, report, opener, vfsmap, entries, backupentries,
645 checkambigfiles=checkambigfiles)
649 checkambigfiles=checkambigfiles)
General Comments 0
You need to be logged in to leave comments. Login now