##// END OF EJS Templates
transaction: add releasefn to notify the end of a transaction scope...
FUJIWARA Katsunori -
r26576:9e0aaac0 default
parent child Browse files
Show More
@@ -1,553 +1,562 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 util,
21 util,
22 )
22 )
23
23
24 version = 2
24 version = 2
25
25
def active(func):
    """Decorator: only allow `func` to run on a live transaction.

    The returned wrapper raises error.Abort if the transaction has
    already been committed or aborted (i.e. its count reached zero).
    """
    def wrapper(self, *args, **kwargs):
        if not self.count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwargs)
    return wrapper
33
33
34 def _playback(journal, report, opener, vfsmap, entries, backupentries,
34 def _playback(journal, report, opener, vfsmap, entries, backupentries,
35 unlink=True):
35 unlink=True):
36 for f, o, _ignore in entries:
36 for f, o, _ignore in entries:
37 if o or not unlink:
37 if o or not unlink:
38 try:
38 try:
39 fp = opener(f, 'a')
39 fp = opener(f, 'a')
40 fp.truncate(o)
40 fp.truncate(o)
41 fp.close()
41 fp.close()
42 except IOError:
42 except IOError:
43 report(_("failed to truncate %s\n") % f)
43 report(_("failed to truncate %s\n") % f)
44 raise
44 raise
45 else:
45 else:
46 try:
46 try:
47 opener.unlink(f)
47 opener.unlink(f)
48 except (IOError, OSError) as inst:
48 except (IOError, OSError) as inst:
49 if inst.errno != errno.ENOENT:
49 if inst.errno != errno.ENOENT:
50 raise
50 raise
51
51
52 backupfiles = []
52 backupfiles = []
53 for l, f, b, c in backupentries:
53 for l, f, b, c in backupentries:
54 if l not in vfsmap and c:
54 if l not in vfsmap and c:
55 report("couldn't handle %s: unknown cache location %s\n"
55 report("couldn't handle %s: unknown cache location %s\n"
56 % (b, l))
56 % (b, l))
57 vfs = vfsmap[l]
57 vfs = vfsmap[l]
58 try:
58 try:
59 if f and b:
59 if f and b:
60 filepath = vfs.join(f)
60 filepath = vfs.join(f)
61 backuppath = vfs.join(b)
61 backuppath = vfs.join(b)
62 try:
62 try:
63 util.copyfile(backuppath, filepath)
63 util.copyfile(backuppath, filepath)
64 backupfiles.append(b)
64 backupfiles.append(b)
65 except IOError:
65 except IOError:
66 report(_("failed to recover %s\n") % f)
66 report(_("failed to recover %s\n") % f)
67 else:
67 else:
68 target = f or b
68 target = f or b
69 try:
69 try:
70 vfs.unlink(target)
70 vfs.unlink(target)
71 except (IOError, OSError) as inst:
71 except (IOError, OSError) as inst:
72 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
73 raise
73 raise
74 except (IOError, OSError, util.Abort) as inst:
74 except (IOError, OSError, util.Abort) as inst:
75 if not c:
75 if not c:
76 raise
76 raise
77
77
78 opener.unlink(journal)
78 opener.unlink(journal)
79 backuppath = "%s.backupfiles" % journal
79 backuppath = "%s.backupfiles" % journal
80 if opener.exists(backuppath):
80 if opener.exists(backuppath):
81 opener.unlink(backuppath)
81 opener.unlink(backuppath)
82 try:
82 try:
83 for f in backupfiles:
83 for f in backupfiles:
84 if opener.exists(f):
84 if opener.exists(f):
85 opener.unlink(f)
85 opener.unlink(f)
86 except (IOError, OSError, util.Abort) as inst:
86 except (IOError, OSError, util.Abort) as inst:
87 # only pure backup file remains, it is sage to ignore any error
87 # only pure backup file remains, it is sage to ignore any error
88 pass
88 pass
89
89
class transaction(object):
    """Journal-based transaction over a set of append-only files.

    Records enough state (offsets and backup copies) to roll every
    registered file back to its pre-transaction state on abort.
    """
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None, releasefn=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self.releasefn = releasefn

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        # a still-set journal means the transaction was never closed
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "no file existed at backup time"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self, suffix=''):
        # write files registered for generation; returns True if any
        # generator ran
        anyfiles = False
        for entry in sorted(self._filegenerators.values()):
            anyfiles = True
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # pending variants are temporary, not backed up
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return anyfiles

    @active
    def find(self, file):
        """return the journal or backup entry for `file`, or None"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested transaction scope; returns self"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            anypending = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or anypending
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by read only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location "
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by read only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None

        self.releasefn(self, True)  # notify success of closing transaction

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self.report("couldn't remove %s: unknown cache location "
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        # roll everything back and mark the transaction as dead
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just remove the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
            self.releasefn(self, False)  # notify failure of transaction
510
519
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    # parse the main journal: one "file\0offset" record per line
    journalfp = opener.open(file)
    journallines = journalfp.readlines()
    journalfp.close()
    for entryline in journallines:
        try:
            name, offset = entryline.split('\0')
            entries.append((name, int(offset), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % entryline)

    # parse the companion backup journal, if one was written
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        backupfp = opener.open(backupjournal)
        backuplines = backupfp.readlines()
        backupfp.close()
        if backuplines:
            ver = backuplines[0][:-1]
            if ver != str(version):
                report(_("journal was created by a different version of "
                         "Mercurial\n"))
            else:
                for backupline in backuplines[1:]:
                    if not backupline:
                        continue
                    # Shave off the trailing newline
                    location, name, backupfile, cache = \
                        backupline[:-1].split('\0')
                    # NOTE(review): bool(cache) is True for any non-empty
                    # string, including '0' -- confirm this is intended
                    backupentries.append((location, name, backupfile,
                                          bool(cache)))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now