##// END OF EJS Templates
transaction: fix hg version check when loading journal...
Augie Fackler -
r35850:aad39713 default
parent child Browse files
Show More
@@ -1,627 +1,627
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 util,
21 util,
22 )
22 )
23
23
# On-disk journal/backup-file format version.
version = 2

# These are the file generators that should only be executed after the
# finalizers are done, since they rely on the output of the finalizers (like
# the changelog having been written).
postfinalizegenerators = {
    'bookmarks',
    'dirstate'
}

# Generator group selectors for _generatefiles().
gengroupall = 'all'
gengroupprefinalize = 'prefinalize'
gengrouppostfinalize = 'postfinalize'
37
37
def active(func):
    """Decorator: only run the method while the transaction is live.

    Raises error.Abort when the wrapped method is invoked after the
    transaction has been committed or aborted (self.count == 0).
    """
    def _active(self, *args, **kwds):
        if self.count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
45
45
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
46 def _playback(journal, report, opener, vfsmap, entries, backupentries,
47 unlink=True, checkambigfiles=None):
47 unlink=True, checkambigfiles=None):
48 for f, o, _ignore in entries:
48 for f, o, _ignore in entries:
49 if o or not unlink:
49 if o or not unlink:
50 checkambig = checkambigfiles and (f, '') in checkambigfiles
50 checkambig = checkambigfiles and (f, '') in checkambigfiles
51 try:
51 try:
52 fp = opener(f, 'a', checkambig=checkambig)
52 fp = opener(f, 'a', checkambig=checkambig)
53 fp.truncate(o)
53 fp.truncate(o)
54 fp.close()
54 fp.close()
55 except IOError:
55 except IOError:
56 report(_("failed to truncate %s\n") % f)
56 report(_("failed to truncate %s\n") % f)
57 raise
57 raise
58 else:
58 else:
59 try:
59 try:
60 opener.unlink(f)
60 opener.unlink(f)
61 except (IOError, OSError) as inst:
61 except (IOError, OSError) as inst:
62 if inst.errno != errno.ENOENT:
62 if inst.errno != errno.ENOENT:
63 raise
63 raise
64
64
65 backupfiles = []
65 backupfiles = []
66 for l, f, b, c in backupentries:
66 for l, f, b, c in backupentries:
67 if l not in vfsmap and c:
67 if l not in vfsmap and c:
68 report("couldn't handle %s: unknown cache location %s\n"
68 report("couldn't handle %s: unknown cache location %s\n"
69 % (b, l))
69 % (b, l))
70 vfs = vfsmap[l]
70 vfs = vfsmap[l]
71 try:
71 try:
72 if f and b:
72 if f and b:
73 filepath = vfs.join(f)
73 filepath = vfs.join(f)
74 backuppath = vfs.join(b)
74 backuppath = vfs.join(b)
75 checkambig = checkambigfiles and (f, l) in checkambigfiles
75 checkambig = checkambigfiles and (f, l) in checkambigfiles
76 try:
76 try:
77 util.copyfile(backuppath, filepath, checkambig=checkambig)
77 util.copyfile(backuppath, filepath, checkambig=checkambig)
78 backupfiles.append(b)
78 backupfiles.append(b)
79 except IOError:
79 except IOError:
80 report(_("failed to recover %s\n") % f)
80 report(_("failed to recover %s\n") % f)
81 else:
81 else:
82 target = f or b
82 target = f or b
83 try:
83 try:
84 vfs.unlink(target)
84 vfs.unlink(target)
85 except (IOError, OSError) as inst:
85 except (IOError, OSError) as inst:
86 if inst.errno != errno.ENOENT:
86 if inst.errno != errno.ENOENT:
87 raise
87 raise
88 except (IOError, OSError, error.Abort) as inst:
88 except (IOError, OSError, error.Abort) as inst:
89 if not c:
89 if not c:
90 raise
90 raise
91
91
92 backuppath = "%s.backupfiles" % journal
92 backuppath = "%s.backupfiles" % journal
93 if opener.exists(backuppath):
93 if opener.exists(backuppath):
94 opener.unlink(backuppath)
94 opener.unlink(backuppath)
95 opener.unlink(journal)
95 opener.unlink(journal)
96 try:
96 try:
97 for f in backupfiles:
97 for f in backupfiles:
98 if opener.exists(f):
98 if opener.exists(f):
99 opener.unlink(f)
99 opener.unlink(f)
100 except (IOError, OSError, error.Abort) as inst:
100 except (IOError, OSError, error.Abort) as inst:
101 # only pure backup file remains, it is sage to ignore any error
101 # only pure backup file remains, it is sage to ignore any error
102 pass
102 pass
103
103
class transaction(util.transactional):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None,
                 checkambigfiles=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determine whether file stat ambiguity should be avoided
        for corresponded files.
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        self.checkambigfiles = set()
        if checkambigfiles:
            self.checkambigfiles.update(checkambigfiles)

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}
187
187
188 def __del__(self):
188 def __del__(self):
189 if self.journal:
189 if self.journal:
190 self._abort()
190 self._abort()
191
191
192 @active
192 @active
193 def startgroup(self):
193 def startgroup(self):
194 """delay registration of file entry
194 """delay registration of file entry
195
195
196 This is used by strip to delay vision of strip offset. The transaction
196 This is used by strip to delay vision of strip offset. The transaction
197 sees either none or all of the strip actions to be done."""
197 sees either none or all of the strip actions to be done."""
198 self._queue.append([])
198 self._queue.append([])
199
199
200 @active
200 @active
201 def endgroup(self):
201 def endgroup(self):
202 """apply delayed registration of file entry.
202 """apply delayed registration of file entry.
203
203
204 This is used by strip to delay vision of strip offset. The transaction
204 This is used by strip to delay vision of strip offset. The transaction
205 sees either none or all of the strip actions to be done."""
205 sees either none or all of the strip actions to be done."""
206 q = self._queue.pop()
206 q = self._queue.pop()
207 for f, o, data in q:
207 for f, o, data in q:
208 self._addentry(f, o, data)
208 self._addentry(f, o, data)
209
209
210 @active
210 @active
211 def add(self, file, offset, data=None):
211 def add(self, file, offset, data=None):
212 """record the state of an append-only file before update"""
212 """record the state of an append-only file before update"""
213 if file in self.map or file in self._backupmap:
213 if file in self.map or file in self._backupmap:
214 return
214 return
215 if self._queue:
215 if self._queue:
216 self._queue[-1].append((file, offset, data))
216 self._queue[-1].append((file, offset, data))
217 return
217 return
218
218
219 self._addentry(file, offset, data)
219 self._addentry(file, offset, data)
220
220
221 def _addentry(self, file, offset, data):
221 def _addentry(self, file, offset, data):
222 """add a append-only entry to memory and on-disk state"""
222 """add a append-only entry to memory and on-disk state"""
223 if file in self.map or file in self._backupmap:
223 if file in self.map or file in self._backupmap:
224 return
224 return
225 self.entries.append((file, offset, data))
225 self.entries.append((file, offset, data))
226 self.map[file] = len(self.entries) - 1
226 self.map[file] = len(self.entries) - 1
227 # add enough data to the journal to do the truncate
227 # add enough data to the journal to do the truncate
228 self.file.write("%s\0%d\n" % (file, offset))
228 self.file.write("%s\0%d\n" % (file, offset))
229 self.file.flush()
229 self.file.flush()
230
230
231 @active
231 @active
232 def addbackup(self, file, hardlink=True, location=''):
232 def addbackup(self, file, hardlink=True, location=''):
233 """Adds a backup of the file to the transaction
233 """Adds a backup of the file to the transaction
234
234
235 Calling addbackup() creates a hardlink backup of the specified file
235 Calling addbackup() creates a hardlink backup of the specified file
236 that is used to recover the file in the event of the transaction
236 that is used to recover the file in the event of the transaction
237 aborting.
237 aborting.
238
238
239 * `file`: the file path, relative to .hg/store
239 * `file`: the file path, relative to .hg/store
240 * `hardlink`: use a hardlink to quickly create the backup
240 * `hardlink`: use a hardlink to quickly create the backup
241 """
241 """
242 if self._queue:
242 if self._queue:
243 msg = 'cannot use transaction.addbackup inside "group"'
243 msg = 'cannot use transaction.addbackup inside "group"'
244 raise error.ProgrammingError(msg)
244 raise error.ProgrammingError(msg)
245
245
246 if file in self.map or file in self._backupmap:
246 if file in self.map or file in self._backupmap:
247 return
247 return
248 vfs = self._vfsmap[location]
248 vfs = self._vfsmap[location]
249 dirname, filename = vfs.split(file)
249 dirname, filename = vfs.split(file)
250 backupfilename = "%s.backup.%s" % (self.journal, filename)
250 backupfilename = "%s.backup.%s" % (self.journal, filename)
251 backupfile = vfs.reljoin(dirname, backupfilename)
251 backupfile = vfs.reljoin(dirname, backupfilename)
252 if vfs.exists(file):
252 if vfs.exists(file):
253 filepath = vfs.join(file)
253 filepath = vfs.join(file)
254 backuppath = vfs.join(backupfile)
254 backuppath = vfs.join(backupfile)
255 util.copyfile(filepath, backuppath, hardlink=hardlink)
255 util.copyfile(filepath, backuppath, hardlink=hardlink)
256 else:
256 else:
257 backupfile = ''
257 backupfile = ''
258
258
259 self._addbackupentry((location, file, backupfile, False))
259 self._addbackupentry((location, file, backupfile, False))
260
260
261 def _addbackupentry(self, entry):
261 def _addbackupentry(self, entry):
262 """register a new backup entry and write it to disk"""
262 """register a new backup entry and write it to disk"""
263 self._backupentries.append(entry)
263 self._backupentries.append(entry)
264 self._backupmap[entry[1]] = len(self._backupentries) - 1
264 self._backupmap[entry[1]] = len(self._backupentries) - 1
265 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
265 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
266 self._backupsfile.flush()
266 self._backupsfile.flush()
267
267
268 @active
268 @active
269 def registertmp(self, tmpfile, location=''):
269 def registertmp(self, tmpfile, location=''):
270 """register a temporary transaction file
270 """register a temporary transaction file
271
271
272 Such files will be deleted when the transaction exits (on both
272 Such files will be deleted when the transaction exits (on both
273 failure and success).
273 failure and success).
274 """
274 """
275 self._addbackupentry((location, '', tmpfile, False))
275 self._addbackupentry((location, '', tmpfile, False))
276
276
277 @active
277 @active
278 def addfilegenerator(self, genid, filenames, genfunc, order=0,
278 def addfilegenerator(self, genid, filenames, genfunc, order=0,
279 location=''):
279 location=''):
280 """add a function to generates some files at transaction commit
280 """add a function to generates some files at transaction commit
281
281
282 The `genfunc` argument is a function capable of generating proper
282 The `genfunc` argument is a function capable of generating proper
283 content of each entry in the `filename` tuple.
283 content of each entry in the `filename` tuple.
284
284
285 At transaction close time, `genfunc` will be called with one file
285 At transaction close time, `genfunc` will be called with one file
286 object argument per entries in `filenames`.
286 object argument per entries in `filenames`.
287
287
288 The transaction itself is responsible for the backup, creation and
288 The transaction itself is responsible for the backup, creation and
289 final write of such file.
289 final write of such file.
290
290
291 The `genid` argument is used to ensure the same set of file is only
291 The `genid` argument is used to ensure the same set of file is only
292 generated once. Call to `addfilegenerator` for a `genid` already
292 generated once. Call to `addfilegenerator` for a `genid` already
293 present will overwrite the old entry.
293 present will overwrite the old entry.
294
294
295 The `order` argument may be used to control the order in which multiple
295 The `order` argument may be used to control the order in which multiple
296 generator will be executed.
296 generator will be executed.
297
297
298 The `location` arguments may be used to indicate the files are located
298 The `location` arguments may be used to indicate the files are located
299 outside of the the standard directory for transaction. It should match
299 outside of the the standard directory for transaction. It should match
300 one of the key of the `transaction.vfsmap` dictionary.
300 one of the key of the `transaction.vfsmap` dictionary.
301 """
301 """
302 # For now, we are unable to do proper backup and restore of custom vfs
302 # For now, we are unable to do proper backup and restore of custom vfs
303 # but for bookmarks that are handled outside this mechanism.
303 # but for bookmarks that are handled outside this mechanism.
304 self._filegenerators[genid] = (order, filenames, genfunc, location)
304 self._filegenerators[genid] = (order, filenames, genfunc, location)
305
305
306 @active
306 @active
307 def removefilegenerator(self, genid):
307 def removefilegenerator(self, genid):
308 """reverse of addfilegenerator, remove a file generator function"""
308 """reverse of addfilegenerator, remove a file generator function"""
309 if genid in self._filegenerators:
309 if genid in self._filegenerators:
310 del self._filegenerators[genid]
310 del self._filegenerators[genid]
311
311
312 def _generatefiles(self, suffix='', group=gengroupall):
312 def _generatefiles(self, suffix='', group=gengroupall):
313 # write files registered for generation
313 # write files registered for generation
314 any = False
314 any = False
315 for id, entry in sorted(self._filegenerators.iteritems()):
315 for id, entry in sorted(self._filegenerators.iteritems()):
316 any = True
316 any = True
317 order, filenames, genfunc, location = entry
317 order, filenames, genfunc, location = entry
318
318
319 # for generation at closing, check if it's before or after finalize
319 # for generation at closing, check if it's before or after finalize
320 postfinalize = group == gengrouppostfinalize
320 postfinalize = group == gengrouppostfinalize
321 if (group != gengroupall and
321 if (group != gengroupall and
322 (id in postfinalizegenerators) != (postfinalize)):
322 (id in postfinalizegenerators) != (postfinalize)):
323 continue
323 continue
324
324
325 vfs = self._vfsmap[location]
325 vfs = self._vfsmap[location]
326 files = []
326 files = []
327 try:
327 try:
328 for name in filenames:
328 for name in filenames:
329 name += suffix
329 name += suffix
330 if suffix:
330 if suffix:
331 self.registertmp(name, location=location)
331 self.registertmp(name, location=location)
332 checkambig = False
332 checkambig = False
333 else:
333 else:
334 self.addbackup(name, location=location)
334 self.addbackup(name, location=location)
335 checkambig = (name, location) in self.checkambigfiles
335 checkambig = (name, location) in self.checkambigfiles
336 files.append(vfs(name, 'w', atomictemp=True,
336 files.append(vfs(name, 'w', atomictemp=True,
337 checkambig=checkambig))
337 checkambig=checkambig))
338 genfunc(*files)
338 genfunc(*files)
339 finally:
339 finally:
340 for f in files:
340 for f in files:
341 f.close()
341 f.close()
342 return any
342 return any
343
343
344 @active
344 @active
345 def find(self, file):
345 def find(self, file):
346 if file in self.map:
346 if file in self.map:
347 return self.entries[self.map[file]]
347 return self.entries[self.map[file]]
348 if file in self._backupmap:
348 if file in self._backupmap:
349 return self._backupentries[self._backupmap[file]]
349 return self._backupentries[self._backupmap[file]]
350 return None
350 return None
351
351
352 @active
352 @active
353 def replace(self, file, offset, data=None):
353 def replace(self, file, offset, data=None):
354 '''
354 '''
355 replace can only replace already committed entries
355 replace can only replace already committed entries
356 that are not pending in the queue
356 that are not pending in the queue
357 '''
357 '''
358
358
359 if file not in self.map:
359 if file not in self.map:
360 raise KeyError(file)
360 raise KeyError(file)
361 index = self.map[file]
361 index = self.map[file]
362 self.entries[index] = (file, offset, data)
362 self.entries[index] = (file, offset, data)
363 self.file.write("%s\0%d\n" % (file, offset))
363 self.file.write("%s\0%d\n" % (file, offset))
364 self.file.flush()
364 self.file.flush()
365
365
366 @active
366 @active
367 def nest(self):
367 def nest(self):
368 self.count += 1
368 self.count += 1
369 self.usages += 1
369 self.usages += 1
370 return self
370 return self
371
371
372 def release(self):
372 def release(self):
373 if self.count > 0:
373 if self.count > 0:
374 self.usages -= 1
374 self.usages -= 1
375 # if the transaction scopes are left without being closed, fail
375 # if the transaction scopes are left without being closed, fail
376 if self.count > 0 and self.usages == 0:
376 if self.count > 0 and self.usages == 0:
377 self._abort()
377 self._abort()
378
378
379 def running(self):
379 def running(self):
380 return self.count > 0
380 return self.count > 0
381
381
382 def addpending(self, category, callback):
382 def addpending(self, category, callback):
383 """add a callback to be called when the transaction is pending
383 """add a callback to be called when the transaction is pending
384
384
385 The transaction will be given as callback's first argument.
385 The transaction will be given as callback's first argument.
386
386
387 Category is a unique identifier to allow overwriting an old callback
387 Category is a unique identifier to allow overwriting an old callback
388 with a newer callback.
388 with a newer callback.
389 """
389 """
390 self._pendingcallback[category] = callback
390 self._pendingcallback[category] = callback
391
391
392 @active
392 @active
393 def writepending(self):
393 def writepending(self):
394 '''write pending file to temporary version
394 '''write pending file to temporary version
395
395
396 This is used to allow hooks to view a transaction before commit'''
396 This is used to allow hooks to view a transaction before commit'''
397 categories = sorted(self._pendingcallback)
397 categories = sorted(self._pendingcallback)
398 for cat in categories:
398 for cat in categories:
399 # remove callback since the data will have been flushed
399 # remove callback since the data will have been flushed
400 any = self._pendingcallback.pop(cat)(self)
400 any = self._pendingcallback.pop(cat)(self)
401 self._anypending = self._anypending or any
401 self._anypending = self._anypending or any
402 self._anypending |= self._generatefiles(suffix='.pending')
402 self._anypending |= self._generatefiles(suffix='.pending')
403 return self._anypending
403 return self._anypending
404
404
405 @active
405 @active
406 def addfinalize(self, category, callback):
406 def addfinalize(self, category, callback):
407 """add a callback to be called when the transaction is closed
407 """add a callback to be called when the transaction is closed
408
408
409 The transaction will be given as callback's first argument.
409 The transaction will be given as callback's first argument.
410
410
411 Category is a unique identifier to allow overwriting old callbacks with
411 Category is a unique identifier to allow overwriting old callbacks with
412 newer callbacks.
412 newer callbacks.
413 """
413 """
414 self._finalizecallback[category] = callback
414 self._finalizecallback[category] = callback
415
415
416 @active
416 @active
417 def addpostclose(self, category, callback):
417 def addpostclose(self, category, callback):
418 """add or replace a callback to be called after the transaction closed
418 """add or replace a callback to be called after the transaction closed
419
419
420 The transaction will be given as callback's first argument.
420 The transaction will be given as callback's first argument.
421
421
422 Category is a unique identifier to allow overwriting an old callback
422 Category is a unique identifier to allow overwriting an old callback
423 with a newer callback.
423 with a newer callback.
424 """
424 """
425 self._postclosecallback[category] = callback
425 self._postclosecallback[category] = callback
426
426
427 @active
427 @active
428 def getpostclose(self, category):
428 def getpostclose(self, category):
429 """return a postclose callback added before, or None"""
429 """return a postclose callback added before, or None"""
430 return self._postclosecallback.get(category, None)
430 return self._postclosecallback.get(category, None)
431
431
432 @active
432 @active
433 def addabort(self, category, callback):
433 def addabort(self, category, callback):
434 """add a callback to be called when the transaction is aborted.
434 """add a callback to be called when the transaction is aborted.
435
435
436 The transaction will be given as the first argument to the callback.
436 The transaction will be given as the first argument to the callback.
437
437
438 Category is a unique identifier to allow overwriting an old callback
438 Category is a unique identifier to allow overwriting an old callback
439 with a newer callback.
439 with a newer callback.
440 """
440 """
441 self._abortcallback[category] = callback
441 self._abortcallback[category] = callback
442
442
443 @active
443 @active
444 def close(self):
444 def close(self):
445 '''commit the transaction'''
445 '''commit the transaction'''
446 if self.count == 1:
446 if self.count == 1:
447 self.validator(self) # will raise exception if needed
447 self.validator(self) # will raise exception if needed
448 self.validator = None # Help prevent cycles.
448 self.validator = None # Help prevent cycles.
449 self._generatefiles(group=gengroupprefinalize)
449 self._generatefiles(group=gengroupprefinalize)
450 categories = sorted(self._finalizecallback)
450 categories = sorted(self._finalizecallback)
451 for cat in categories:
451 for cat in categories:
452 self._finalizecallback[cat](self)
452 self._finalizecallback[cat](self)
453 # Prevent double usage and help clear cycles.
453 # Prevent double usage and help clear cycles.
454 self._finalizecallback = None
454 self._finalizecallback = None
455 self._generatefiles(group=gengrouppostfinalize)
455 self._generatefiles(group=gengrouppostfinalize)
456
456
457 self.count -= 1
457 self.count -= 1
458 if self.count != 0:
458 if self.count != 0:
459 return
459 return
460 self.file.close()
460 self.file.close()
461 self._backupsfile.close()
461 self._backupsfile.close()
462 # cleanup temporary files
462 # cleanup temporary files
463 for l, f, b, c in self._backupentries:
463 for l, f, b, c in self._backupentries:
464 if l not in self._vfsmap and c:
464 if l not in self._vfsmap and c:
465 self.report("couldn't remove %s: unknown cache location %s\n"
465 self.report("couldn't remove %s: unknown cache location %s\n"
466 % (b, l))
466 % (b, l))
467 continue
467 continue
468 vfs = self._vfsmap[l]
468 vfs = self._vfsmap[l]
469 if not f and b and vfs.exists(b):
469 if not f and b and vfs.exists(b):
470 try:
470 try:
471 vfs.unlink(b)
471 vfs.unlink(b)
472 except (IOError, OSError, error.Abort) as inst:
472 except (IOError, OSError, error.Abort) as inst:
473 if not c:
473 if not c:
474 raise
474 raise
475 # Abort may be raise by read only opener
475 # Abort may be raise by read only opener
476 self.report("couldn't remove %s: %s\n"
476 self.report("couldn't remove %s: %s\n"
477 % (vfs.join(b), inst))
477 % (vfs.join(b), inst))
478 self.entries = []
478 self.entries = []
479 self._writeundo()
479 self._writeundo()
480 if self.after:
480 if self.after:
481 self.after()
481 self.after()
482 self.after = None # Help prevent cycles.
482 self.after = None # Help prevent cycles.
483 if self.opener.isfile(self._backupjournal):
483 if self.opener.isfile(self._backupjournal):
484 self.opener.unlink(self._backupjournal)
484 self.opener.unlink(self._backupjournal)
485 if self.opener.isfile(self.journal):
485 if self.opener.isfile(self.journal):
486 self.opener.unlink(self.journal)
486 self.opener.unlink(self.journal)
487 for l, _f, b, c in self._backupentries:
487 for l, _f, b, c in self._backupentries:
488 if l not in self._vfsmap and c:
488 if l not in self._vfsmap and c:
489 self.report("couldn't remove %s: unknown cache location"
489 self.report("couldn't remove %s: unknown cache location"
490 "%s\n" % (b, l))
490 "%s\n" % (b, l))
491 continue
491 continue
492 vfs = self._vfsmap[l]
492 vfs = self._vfsmap[l]
493 if b and vfs.exists(b):
493 if b and vfs.exists(b):
494 try:
494 try:
495 vfs.unlink(b)
495 vfs.unlink(b)
496 except (IOError, OSError, error.Abort) as inst:
496 except (IOError, OSError, error.Abort) as inst:
497 if not c:
497 if not c:
498 raise
498 raise
499 # Abort may be raise by read only opener
499 # Abort may be raise by read only opener
500 self.report("couldn't remove %s: %s\n"
500 self.report("couldn't remove %s: %s\n"
501 % (vfs.join(b), inst))
501 % (vfs.join(b), inst))
502 self._backupentries = []
502 self._backupentries = []
503 self.journal = None
503 self.journal = None
504
504
505 self.releasefn(self, True) # notify success of closing transaction
505 self.releasefn(self, True) # notify success of closing transaction
506 self.releasefn = None # Help prevent cycles.
506 self.releasefn = None # Help prevent cycles.
507
507
508 # run post close action
508 # run post close action
509 categories = sorted(self._postclosecallback)
509 categories = sorted(self._postclosecallback)
510 for cat in categories:
510 for cat in categories:
511 self._postclosecallback[cat](self)
511 self._postclosecallback[cat](self)
512 # Prevent double usage and help clear cycles.
512 # Prevent double usage and help clear cycles.
513 self._postclosecallback = None
513 self._postclosecallback = None
514
514
515 @active
515 @active
516 def abort(self):
516 def abort(self):
517 '''abort the transaction (generally called on error, or when the
517 '''abort the transaction (generally called on error, or when the
518 transaction is not explicitly committed before going out of
518 transaction is not explicitly committed before going out of
519 scope)'''
519 scope)'''
520 self._abort()
520 self._abort()
521
521
522 def _writeundo(self):
522 def _writeundo(self):
523 """write transaction data for possible future undo call"""
523 """write transaction data for possible future undo call"""
524 if self.undoname is None:
524 if self.undoname is None:
525 return
525 return
526 undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
526 undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
527 undobackupfile.write('%d\n' % version)
527 undobackupfile.write('%d\n' % version)
528 for l, f, b, c in self._backupentries:
528 for l, f, b, c in self._backupentries:
529 if not f: # temporary file
529 if not f: # temporary file
530 continue
530 continue
531 if not b:
531 if not b:
532 u = ''
532 u = ''
533 else:
533 else:
534 if l not in self._vfsmap and c:
534 if l not in self._vfsmap and c:
535 self.report("couldn't remove %s: unknown cache location"
535 self.report("couldn't remove %s: unknown cache location"
536 "%s\n" % (b, l))
536 "%s\n" % (b, l))
537 continue
537 continue
538 vfs = self._vfsmap[l]
538 vfs = self._vfsmap[l]
539 base, name = vfs.split(b)
539 base, name = vfs.split(b)
540 assert name.startswith(self.journal), name
540 assert name.startswith(self.journal), name
541 uname = name.replace(self.journal, self.undoname, 1)
541 uname = name.replace(self.journal, self.undoname, 1)
542 u = vfs.reljoin(base, uname)
542 u = vfs.reljoin(base, uname)
543 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
543 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
544 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
544 undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
545 undobackupfile.close()
545 undobackupfile.close()
546
546
547
547
    def _abort(self):
        """Abort the transaction: undo everything recorded in the journal.

        Closes the journal files, plays the journal back to restore the
        pre-transaction state, then removes the journal.  Always notifies
        ``releasefn`` of failure, even if the playback itself fails.
        """
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            # Nothing was written: just delete the (empty) journal files.
            if not self.entries and not self._backupentries:
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # Run registered abort callbacks before touching any files.
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False,
                          checkambigfiles=self.checkambigfiles)
                self.report(_("rollback completed\n"))
            except BaseException:
                # Deliberately swallow: the journal is left on disk so the
                # user can finish the rollback with 'hg recover'.
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction
            self.releasefn = None # Help prevent cycles.
579
579
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided at
    restoring corresponded files.
    """
    entries = []
    backupentries = []

    # Parse the journal: one "<file>\0<offset>" line per touched file.
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # Fix: close the backup journal handle (was leaked).
        fp.close()
        if lines:
            ver = lines[0][:-1]
            # Journal content is bytes; compare against a bytes rendering
            # of the version (str(version) never matches on Python 3).
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # NOTE(review): c is the raw '0'/'1' field, so
                        # bool(c) is True for both values -- presumably
                        # intentional upstream; left unchanged.
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                        "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries,
              checkambigfiles=checkambigfiles)
General Comments 0
You need to be logged in to leave comments. Login now