##// END OF EJS Templates
transaction: make validator a private attribute...
Gregory Szorc -
r39715:ebbba3ba default
parent child Browse files
Show More
@@ -1,640 +1,640 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24
24
# transaction format version written at the top of the backup journal
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate',
}

# group identifiers used to select which registered file generators run
# (see transaction._generatefiles)
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
38
38
def active(func):
    """Decorator refusing to run a method on a finished transaction.

    The wrapped method raises ``error.Abort`` once the transaction's
    ``_count`` has dropped to zero (i.e. after commit or abort).
    """
    def _active(self, *args, **kwds):
        if not self._count:
            msg = _(
                'cannot use transaction when it is already committed/aborted')
            raise error.Abort(msg)
        return func(self, *args, **kwds)
    return _active
46
46
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True, checkambigfiles=None):
    """Roll back journalled changes: truncate appended files, restore backups.

    * `journal`: path of the journal file, relative to `opener`
    * `report`: callable used to emit progress/error messages
    * `opener`: vfs for the store content
    * `vfsmap`: {location -> vfs} map used to resolve backup entries
    * `entries`: (file, offset, data) triples for append-only files
    * `backupentries`: (location, file, backupfile, cache) quadruples
    * `unlink`: when True, files recorded at offset 0 are removed entirely
    * `checkambigfiles`: set of (path, vfs-location) tuples for which file
      stat ambiguity must be avoided when rewriting
    """
    # first pass: truncate (or remove) append-only files back to their
    # recorded pre-transaction offsets
    for f, o, _ignore in entries:
        if o or not unlink:
            checkambig = checkambigfiles and (f, '') in checkambigfiles
            try:
                fp = opener(f, 'a', checkambig=checkambig)
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            # offset 0 and unlink requested: the file did not exist before
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # already gone is fine; anything else is a real failure
                if inst.errno != errno.ENOENT:
                    raise

    # second pass: restore files from their backup copies (or delete
    # temporary files that have no original to restore)
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        # NOTE(review): unlike transaction.close(), there is no `continue`
        # after the report above, so an unknown location still reaches the
        # vfsmap[l] lookup below and would raise KeyError -- confirm whether
        # that is intended
        vfs = vfsmap[l]
        try:
            if f and b:
                # both original and backup recorded: copy the backup back
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # only one path recorded: it is a temporary file to remove
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort) as inst:
            # cache entries (c truthy) are expendable: ignore their errors
            if not c:
                raise

    # final pass: remove the journals and the now-applied backup copies
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort) as inst:
        # only pure backup file remains, it is safe to ignore any error
        pass
104
104
105 class transaction(util.transactional):
105 class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None, name=r'<unnamed>'):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit progress/error messages
        * `journalname`: path (relative to `opener`) of the journal file
        * `undoname`: optional path the journal is renamed to for `undo`
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `validator`: called with the transaction before closing; should
          raise to veto the close (defaults to a no-op)
        * `releasefn`: called after releasing (with transaction and result)
        * `name`: label used in __repr__ for debugging

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        # nesting counters: _count tracks open scopes, _usages tracks
        # release() bookkeeping (see nest()/release())
        self._count = 1
        self._usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        # append-only file entries: list of (file, offset, data) plus an
        # index {file -> position in self.entries}
        self.entries = []
        self.map = {}
        self._journal = journalname
        self._undoname = undoname
        # stack of delayed entry groups (see startgroup()/endgroup())
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self._validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        # files for which stat ambiguity must be avoided when rewriting
        self.checkambigfiles = set()
        if checkambigfiles:
            self.checkambigfiles.update(checkambigfiles)

        # one name per nested scope, for __repr__
        self.names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
190
190
191 def __repr__(self):
191 def __repr__(self):
192 name = r'/'.join(self.names)
192 name = r'/'.join(self.names)
193 return (r'<transaction name=%s, count=%d, usages=%d>' %
193 return (r'<transaction name=%s, count=%d, usages=%d>' %
194 (name, self._count, self._usages))
194 (name, self._count, self._usages))
195
195
196 def __del__(self):
196 def __del__(self):
197 if self._journal:
197 if self._journal:
198 self._abort()
198 self._abort()
199
199
200 @active
200 @active
201 def startgroup(self):
201 def startgroup(self):
202 """delay registration of file entry
202 """delay registration of file entry
203
203
204 This is used by strip to delay vision of strip offset. The transaction
204 This is used by strip to delay vision of strip offset. The transaction
205 sees either none or all of the strip actions to be done."""
205 sees either none or all of the strip actions to be done."""
206 self._queue.append([])
206 self._queue.append([])
207
207
208 @active
208 @active
209 def endgroup(self):
209 def endgroup(self):
210 """apply delayed registration of file entry.
210 """apply delayed registration of file entry.
211
211
212 This is used by strip to delay vision of strip offset. The transaction
212 This is used by strip to delay vision of strip offset. The transaction
213 sees either none or all of the strip actions to be done."""
213 sees either none or all of the strip actions to be done."""
214 q = self._queue.pop()
214 q = self._queue.pop()
215 for f, o, data in q:
215 for f, o, data in q:
216 self._addentry(f, o, data)
216 self._addentry(f, o, data)
217
217
218 @active
218 @active
219 def add(self, file, offset, data=None):
219 def add(self, file, offset, data=None):
220 """record the state of an append-only file before update"""
220 """record the state of an append-only file before update"""
221 if file in self.map or file in self._backupmap:
221 if file in self.map or file in self._backupmap:
222 return
222 return
223 if self._queue:
223 if self._queue:
224 self._queue[-1].append((file, offset, data))
224 self._queue[-1].append((file, offset, data))
225 return
225 return
226
226
227 self._addentry(file, offset, data)
227 self._addentry(file, offset, data)
228
228
229 def _addentry(self, file, offset, data):
229 def _addentry(self, file, offset, data):
230 """add a append-only entry to memory and on-disk state"""
230 """add a append-only entry to memory and on-disk state"""
231 if file in self.map or file in self._backupmap:
231 if file in self.map or file in self._backupmap:
232 return
232 return
233 self.entries.append((file, offset, data))
233 self.entries.append((file, offset, data))
234 self.map[file] = len(self.entries) - 1
234 self.map[file] = len(self.entries) - 1
235 # add enough data to the journal to do the truncate
235 # add enough data to the journal to do the truncate
236 self._file.write("%s\0%d\n" % (file, offset))
236 self._file.write("%s\0%d\n" % (file, offset))
237 self._file.flush()
237 self._file.flush()
238
238
239 @active
239 @active
240 def addbackup(self, file, hardlink=True, location=''):
240 def addbackup(self, file, hardlink=True, location=''):
241 """Adds a backup of the file to the transaction
241 """Adds a backup of the file to the transaction
242
242
243 Calling addbackup() creates a hardlink backup of the specified file
243 Calling addbackup() creates a hardlink backup of the specified file
244 that is used to recover the file in the event of the transaction
244 that is used to recover the file in the event of the transaction
245 aborting.
245 aborting.
246
246
247 * `file`: the file path, relative to .hg/store
247 * `file`: the file path, relative to .hg/store
248 * `hardlink`: use a hardlink to quickly create the backup
248 * `hardlink`: use a hardlink to quickly create the backup
249 """
249 """
250 if self._queue:
250 if self._queue:
251 msg = 'cannot use transaction.addbackup inside "group"'
251 msg = 'cannot use transaction.addbackup inside "group"'
252 raise error.ProgrammingError(msg)
252 raise error.ProgrammingError(msg)
253
253
254 if file in self.map or file in self._backupmap:
254 if file in self.map or file in self._backupmap:
255 return
255 return
256 vfs = self._vfsmap[location]
256 vfs = self._vfsmap[location]
257 dirname, filename = vfs.split(file)
257 dirname, filename = vfs.split(file)
258 backupfilename = "%s.backup.%s" % (self._journal, filename)
258 backupfilename = "%s.backup.%s" % (self._journal, filename)
259 backupfile = vfs.reljoin(dirname, backupfilename)
259 backupfile = vfs.reljoin(dirname, backupfilename)
260 if vfs.exists(file):
260 if vfs.exists(file):
261 filepath = vfs.join(file)
261 filepath = vfs.join(file)
262 backuppath = vfs.join(backupfile)
262 backuppath = vfs.join(backupfile)
263 util.copyfile(filepath, backuppath, hardlink=hardlink)
263 util.copyfile(filepath, backuppath, hardlink=hardlink)
264 else:
264 else:
265 backupfile = ''
265 backupfile = ''
266
266
267 self._addbackupentry((location, file, backupfile, False))
267 self._addbackupentry((location, file, backupfile, False))
268
268
269 def _addbackupentry(self, entry):
269 def _addbackupentry(self, entry):
270 """register a new backup entry and write it to disk"""
270 """register a new backup entry and write it to disk"""
271 self._backupentries.append(entry)
271 self._backupentries.append(entry)
272 self._backupmap[entry[1]] = len(self._backupentries) - 1
272 self._backupmap[entry[1]] = len(self._backupentries) - 1
273 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
273 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
274 self._backupsfile.flush()
274 self._backupsfile.flush()
275
275
276 @active
276 @active
277 def registertmp(self, tmpfile, location=''):
277 def registertmp(self, tmpfile, location=''):
278 """register a temporary transaction file
278 """register a temporary transaction file
279
279
280 Such files will be deleted when the transaction exits (on both
280 Such files will be deleted when the transaction exits (on both
281 failure and success).
281 failure and success).
282 """
282 """
283 self._addbackupentry((location, '', tmpfile, False))
283 self._addbackupentry((location, '', tmpfile, False))
284
284
285 @active
285 @active
286 def addfilegenerator(self, genid, filenames, genfunc, order=0,
286 def addfilegenerator(self, genid, filenames, genfunc, order=0,
287 location=''):
287 location=''):
288 """add a function to generates some files at transaction commit
288 """add a function to generates some files at transaction commit
289
289
290 The `genfunc` argument is a function capable of generating proper
290 The `genfunc` argument is a function capable of generating proper
291 content of each entry in the `filename` tuple.
291 content of each entry in the `filename` tuple.
292
292
293 At transaction close time, `genfunc` will be called with one file
293 At transaction close time, `genfunc` will be called with one file
294 object argument per entries in `filenames`.
294 object argument per entries in `filenames`.
295
295
296 The transaction itself is responsible for the backup, creation and
296 The transaction itself is responsible for the backup, creation and
297 final write of such file.
297 final write of such file.
298
298
299 The `genid` argument is used to ensure the same set of file is only
299 The `genid` argument is used to ensure the same set of file is only
300 generated once. Call to `addfilegenerator` for a `genid` already
300 generated once. Call to `addfilegenerator` for a `genid` already
301 present will overwrite the old entry.
301 present will overwrite the old entry.
302
302
303 The `order` argument may be used to control the order in which multiple
303 The `order` argument may be used to control the order in which multiple
304 generator will be executed.
304 generator will be executed.
305
305
306 The `location` arguments may be used to indicate the files are located
306 The `location` arguments may be used to indicate the files are located
307 outside of the the standard directory for transaction. It should match
307 outside of the the standard directory for transaction. It should match
308 one of the key of the `transaction.vfsmap` dictionary.
308 one of the key of the `transaction.vfsmap` dictionary.
309 """
309 """
310 # For now, we are unable to do proper backup and restore of custom vfs
310 # For now, we are unable to do proper backup and restore of custom vfs
311 # but for bookmarks that are handled outside this mechanism.
311 # but for bookmarks that are handled outside this mechanism.
312 self._filegenerators[genid] = (order, filenames, genfunc, location)
312 self._filegenerators[genid] = (order, filenames, genfunc, location)
313
313
314 @active
314 @active
315 def removefilegenerator(self, genid):
315 def removefilegenerator(self, genid):
316 """reverse of addfilegenerator, remove a file generator function"""
316 """reverse of addfilegenerator, remove a file generator function"""
317 if genid in self._filegenerators:
317 if genid in self._filegenerators:
318 del self._filegenerators[genid]
318 del self._filegenerators[genid]
319
319
320 def _generatefiles(self, suffix='', group=gengroupall):
320 def _generatefiles(self, suffix='', group=gengroupall):
321 # write files registered for generation
321 # write files registered for generation
322 any = False
322 any = False
323 for id, entry in sorted(self._filegenerators.iteritems()):
323 for id, entry in sorted(self._filegenerators.iteritems()):
324 any = True
324 any = True
325 order, filenames, genfunc, location = entry
325 order, filenames, genfunc, location = entry
326
326
327 # for generation at closing, check if it's before or after finalize
327 # for generation at closing, check if it's before or after finalize
328 postfinalize = group == gengrouppostfinalize
328 postfinalize = group == gengrouppostfinalize
329 if (group != gengroupall and
329 if (group != gengroupall and
330 (id in postfinalizegenerators) != (postfinalize)):
330 (id in postfinalizegenerators) != (postfinalize)):
331 continue
331 continue
332
332
333 vfs = self._vfsmap[location]
333 vfs = self._vfsmap[location]
334 files = []
334 files = []
335 try:
335 try:
336 for name in filenames:
336 for name in filenames:
337 name += suffix
337 name += suffix
338 if suffix:
338 if suffix:
339 self.registertmp(name, location=location)
339 self.registertmp(name, location=location)
340 checkambig = False
340 checkambig = False
341 else:
341 else:
342 self.addbackup(name, location=location)
342 self.addbackup(name, location=location)
343 checkambig = (name, location) in self.checkambigfiles
343 checkambig = (name, location) in self.checkambigfiles
344 files.append(vfs(name, 'w', atomictemp=True,
344 files.append(vfs(name, 'w', atomictemp=True,
345 checkambig=checkambig))
345 checkambig=checkambig))
346 genfunc(*files)
346 genfunc(*files)
347 finally:
347 finally:
348 for f in files:
348 for f in files:
349 f.close()
349 f.close()
350 return any
350 return any
351
351
352 @active
352 @active
353 def find(self, file):
353 def find(self, file):
354 if file in self.map:
354 if file in self.map:
355 return self.entries[self.map[file]]
355 return self.entries[self.map[file]]
356 if file in self._backupmap:
356 if file in self._backupmap:
357 return self._backupentries[self._backupmap[file]]
357 return self._backupentries[self._backupmap[file]]
358 return None
358 return None
359
359
360 @active
360 @active
361 def replace(self, file, offset, data=None):
361 def replace(self, file, offset, data=None):
362 '''
362 '''
363 replace can only replace already committed entries
363 replace can only replace already committed entries
364 that are not pending in the queue
364 that are not pending in the queue
365 '''
365 '''
366
366
367 if file not in self.map:
367 if file not in self.map:
368 raise KeyError(file)
368 raise KeyError(file)
369 index = self.map[file]
369 index = self.map[file]
370 self.entries[index] = (file, offset, data)
370 self.entries[index] = (file, offset, data)
371 self._file.write("%s\0%d\n" % (file, offset))
371 self._file.write("%s\0%d\n" % (file, offset))
372 self._file.flush()
372 self._file.flush()
373
373
374 @active
374 @active
375 def nest(self, name=r'<unnamed>'):
375 def nest(self, name=r'<unnamed>'):
376 self._count += 1
376 self._count += 1
377 self._usages += 1
377 self._usages += 1
378 self.names.append(name)
378 self.names.append(name)
379 return self
379 return self
380
380
381 def release(self):
381 def release(self):
382 if self._count > 0:
382 if self._count > 0:
383 self._usages -= 1
383 self._usages -= 1
384 if self.names:
384 if self.names:
385 self.names.pop()
385 self.names.pop()
386 # if the transaction scopes are left without being closed, fail
386 # if the transaction scopes are left without being closed, fail
387 if self._count > 0 and self._usages == 0:
387 if self._count > 0 and self._usages == 0:
388 self._abort()
388 self._abort()
389
389
390 def running(self):
390 def running(self):
391 return self._count > 0
391 return self._count > 0
392
392
393 def addpending(self, category, callback):
393 def addpending(self, category, callback):
394 """add a callback to be called when the transaction is pending
394 """add a callback to be called when the transaction is pending
395
395
396 The transaction will be given as callback's first argument.
396 The transaction will be given as callback's first argument.
397
397
398 Category is a unique identifier to allow overwriting an old callback
398 Category is a unique identifier to allow overwriting an old callback
399 with a newer callback.
399 with a newer callback.
400 """
400 """
401 self._pendingcallback[category] = callback
401 self._pendingcallback[category] = callback
402
402
403 @active
403 @active
404 def writepending(self):
404 def writepending(self):
405 '''write pending file to temporary version
405 '''write pending file to temporary version
406
406
407 This is used to allow hooks to view a transaction before commit'''
407 This is used to allow hooks to view a transaction before commit'''
408 categories = sorted(self._pendingcallback)
408 categories = sorted(self._pendingcallback)
409 for cat in categories:
409 for cat in categories:
410 # remove callback since the data will have been flushed
410 # remove callback since the data will have been flushed
411 any = self._pendingcallback.pop(cat)(self)
411 any = self._pendingcallback.pop(cat)(self)
412 self._anypending = self._anypending or any
412 self._anypending = self._anypending or any
413 self._anypending |= self._generatefiles(suffix='.pending')
413 self._anypending |= self._generatefiles(suffix='.pending')
414 return self._anypending
414 return self._anypending
415
415
416 @active
416 @active
417 def addfinalize(self, category, callback):
417 def addfinalize(self, category, callback):
418 """add a callback to be called when the transaction is closed
418 """add a callback to be called when the transaction is closed
419
419
420 The transaction will be given as callback's first argument.
420 The transaction will be given as callback's first argument.
421
421
422 Category is a unique identifier to allow overwriting old callbacks with
422 Category is a unique identifier to allow overwriting old callbacks with
423 newer callbacks.
423 newer callbacks.
424 """
424 """
425 self._finalizecallback[category] = callback
425 self._finalizecallback[category] = callback
426
426
427 @active
427 @active
428 def addpostclose(self, category, callback):
428 def addpostclose(self, category, callback):
429 """add or replace a callback to be called after the transaction closed
429 """add or replace a callback to be called after the transaction closed
430
430
431 The transaction will be given as callback's first argument.
431 The transaction will be given as callback's first argument.
432
432
433 Category is a unique identifier to allow overwriting an old callback
433 Category is a unique identifier to allow overwriting an old callback
434 with a newer callback.
434 with a newer callback.
435 """
435 """
436 self._postclosecallback[category] = callback
436 self._postclosecallback[category] = callback
437
437
438 @active
438 @active
439 def getpostclose(self, category):
439 def getpostclose(self, category):
440 """return a postclose callback added before, or None"""
440 """return a postclose callback added before, or None"""
441 return self._postclosecallback.get(category, None)
441 return self._postclosecallback.get(category, None)
442
442
443 @active
443 @active
444 def addabort(self, category, callback):
444 def addabort(self, category, callback):
445 """add a callback to be called when the transaction is aborted.
445 """add a callback to be called when the transaction is aborted.
446
446
447 The transaction will be given as the first argument to the callback.
447 The transaction will be given as the first argument to the callback.
448
448
449 Category is a unique identifier to allow overwriting an old callback
449 Category is a unique identifier to allow overwriting an old callback
450 with a newer callback.
450 with a newer callback.
451 """
451 """
452 self._abortcallback[category] = callback
452 self._abortcallback[category] = callback
453
453
    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            # only the outermost scope runs validation and finalization
            self._validator(self)  # will raise exception if needed
            self._validator = None # Help prevent cycles.
            self._generatefiles(group=gengroupprefinalize)
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=gengrouppostfinalize)

        self._count -= 1
        if self._count != 0:
            # a nested scope remains open; the real commit happens later
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            # entries with an empty 'path' are temporary transaction files
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
            self.after = None # Help prevent cycles.
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self._journal):
            self.opener.unlink(self._journal)
        # remove the backup copies: the transaction succeeded, so they are
        # no longer needed
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        # a None journal marks the transaction as finished (see __del__)
        self._journal = None

        self._releasefn(self, True)  # notify success of closing transaction
        self._releasefn = None # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None
524 self._postclosecallback = None
525
525
526 @active
526 @active
527 def abort(self):
527 def abort(self):
528 '''abort the transaction (generally called on error, or when the
528 '''abort the transaction (generally called on error, or when the
529 transaction is not explicitly committed before going out of
529 transaction is not explicitly committed before going out of
530 scope)'''
530 scope)'''
531 self._abort()
531 self._abort()
532
532
533 def _writeundo(self):
533 def _writeundo(self):
534 """write transaction data for possible future undo call"""
534 """write transaction data for possible future undo call"""
535 if self._undoname is None:
535 if self._undoname is None:
536 return
536 return
537 undobackupfile = self.opener.open("%s.backupfiles" % self._undoname,
537 undobackupfile = self.opener.open("%s.backupfiles" % self._undoname,
538 'w')
538 'w')
539 undobackupfile.write('%d\n' % version)
539 undobackupfile.write('%d\n' % version)
540 for l, f, b, c in self._backupentries:
540 for l, f, b, c in self._backupentries:
541 if not f: # temporary file
541 if not f: # temporary file
542 continue
542 continue
543 if not b:
543 if not b:
544 u = ''
544 u = ''
545 else:
545 else:
546 if l not in self._vfsmap and c:
546 if l not in self._vfsmap and c:
547 self.report("couldn't remove %s: unknown cache location"
547 self.report("couldn't remove %s: unknown cache location"
548 "%s\n" % (b, l))
548 "%s\n" % (b, l))
549 continue
549 continue
550 vfs = self._vfsmap[l]
550 vfs = self._vfsmap[l]
551 base, name = vfs.split(b)
551 base, name = vfs.split(b)
552 assert name.startswith(self._journal), name
552 assert name.startswith(self._journal), name
553 uname = name.replace(self._journal, self._undoname, 1)
553 uname = name.replace(self._journal, self._undoname, 1)
554 u = vfs.reljoin(base, uname)
554 u = vfs.reljoin(base, uname)
555 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
555 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
556 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
556 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
557 undobackupfile.close()
557 undobackupfile.close()
558
558
559
559
    def _abort(self):
        # Roll back everything recorded in the journal and release the
        # transaction. Resets the usage counters first so the transaction
        # can no longer be treated as active.
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
            # Fast path: nothing was written, so there is nothing to play
            # back -- just remove the (empty) journal files and return.
            if not self.entries and not self._backupentries:
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self._journal:
                    self.opener.unlink(self._journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # Run registered abort callbacks in a deterministic
                # (sorted-category) order before replaying the journal.
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self._journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False,
                          checkambigfiles=self.checkambigfiles)
                self.report(_("rollback completed\n"))
            except BaseException:
                # The exception (even KeyboardInterrupt/SystemExit) is
                # swallowed here: failure is surfaced via the report message,
                # and the journal is left on disk so 'hg recover' can replay
                # it later.
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            # Always release the transaction, whether playback succeeded
            # or not.
            self._journal = None
            self._releasefn(self, False) # notify failure of transaction
            self._releasefn = None # Help prevent cycles.
591
591
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # First pass: the journal proper -- one "file\0offset" record per line.
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(
                _("couldn't read journal entry %r!\n") % pycompat.bytestr(l))

    # Second pass: the backup journal, if present. Its first line is a
    # format version; the remaining lines are
    # "location\0file\0backupfile\0cache" records.
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # Fix: this handle was previously never closed, leaking the open
        # backup-journal file (the journal handle above is closed; this one
        # was not).
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # NOTE(review): c is the textual '0'/'1' written by
                        # the backup journal; bool(c) is True for both since
                        # any non-empty string is truthy. bool(int(c)) may
                        # have been intended -- left unchanged to preserve
                        # existing recovery behavior.
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
General Comments 0
You need to be logged in to leave comments. Login now