spelling: fix typo in transaction error messages
Matt Mackall, r26754:e7e1528c (default branch)
@@ -1,563 +1,563 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    util,
)

version = 2

def active(func):
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active

def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort) as inst:
            if not c:
                raise

    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort) as inst:
        # only pure backup file remains, it is sage to ignore any error
        pass

class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception is anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True is any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self, suffix=''):
        # write files registered for generation
        any = False
        for entry in sorted(self._filegenerators.values()):
            any = True
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return any

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self) # will raise exception if needed
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
-                self.report("couldn't remote %s: unknown cache location %s\n"
+                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
-                    self.report("couldn't remote %s: %s\n"
+                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if True:
            for l, _f, b, c in self._backupentries:
                if l not in self._vfsmap and c:
-                    self.report("couldn't remote %s: unknown cache location"
+                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                if b and vfs.exists(b):
                    try:
                        vfs.unlink(b)
                    except (IOError, OSError, error.Abort) as inst:
                        if not c:
                            raise
                        # Abort may be raise by read only opener
-                        self.report("couldn't remote %s: %s\n"
+                        self.report("couldn't remove %s: %s\n"
                                    % (vfs.join(b), inst))
            self._backupentries = []
            self.journal = None

        self.releasefn(self, True) # notify success of closing transaction

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f: # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
-                    self.report("couldn't remote %s: unknown cache location"
+                    self.report("couldn't remove %s: unknown cache location"
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
            util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()


    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                if self.journal:
                    self.opener.unlink(self.journal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False) # notify failure of transaction

def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
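
For context, the on-disk formats behind the messages touched by this change are simple: _addentry() appends "%s\0%d\n" (path, offset) records to the journal, and _addbackupentry() appends "%s\0%s\0%s\0%d\n" (location, path, backup path, cache flag) records to the *.backupfiles sidecar after a version line, which is what rollback() parses above. The snippet below is a minimal standalone sketch of a reader for those two formats; the helper names are hypothetical and it is not part of this changeset.

# Minimal sketch (hypothetical helpers, not part of this changeset): read a
# transaction journal and its "<journal>.backupfiles" companion, following
# the formats written above: "%s\0%d\n" per journal entry, and a version
# line followed by "%s\0%s\0%s\0%d\n" per backup entry.
import os

def read_journal(journalpath):
    """Return the (path, offset) pairs recorded in a journal file."""
    entries = []
    with open(journalpath, 'rb') as fp:
        for line in fp:
            name, offset = line.rstrip(b'\n').split(b'\0')
            entries.append((name.decode('utf-8'), int(offset)))
    return entries

def read_backupfiles(journalpath, expected_version=2):
    """Return (location, path, backuppath, cache) tuples, or [] if absent."""
    backupjournal = journalpath + '.backupfiles'
    if not os.path.exists(backupjournal):
        return []
    with open(backupjournal, 'rb') as fp:
        lines = fp.read().splitlines()
    if not lines or lines[0] != str(expected_version).encode('ascii'):
        # unknown or missing version header: skip the file, as rollback() does
        return []
    entries = []
    for line in lines[1:]:
        if line:
            l, f, b, c = line.split(b'\0')
            entries.append((l.decode('utf-8'), f.decode('utf-8'),
                            b.decode('utf-8'), bool(int(c))))
    return entries

A caller could, for instance, print read_journal('.hg/store/journal') to inspect which revlogs an interrupted transaction would truncate.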