# Provenance: Mercurial changeset r25986 (default branch), Gregory Szorc —
# "transaction: use absolute_import". Recovered from a code-review diff page.
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    util,
)
17
23
# on-disk format version of the journal / '.backupfiles' companion file
version = 2
19
25
def active(func):
    """Decorator for transaction methods that require a live transaction.

    The wrapped method aborts with ``error.Abort`` when called after the
    transaction has been committed or aborted (``self.count == 0``);
    otherwise it forwards all arguments to ``func`` unchanged.
    """
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
27
33
28 def _playback(journal, report, opener, vfsmap, entries, backupentries,
34 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 unlink=True):
35 unlink=True):
30 for f, o, _ignore in entries:
36 for f, o, _ignore in entries:
31 if o or not unlink:
37 if o or not unlink:
32 try:
38 try:
33 fp = opener(f, 'a')
39 fp = opener(f, 'a')
34 fp.truncate(o)
40 fp.truncate(o)
35 fp.close()
41 fp.close()
36 except IOError:
42 except IOError:
37 report(_("failed to truncate %s\n") % f)
43 report(_("failed to truncate %s\n") % f)
38 raise
44 raise
39 else:
45 else:
40 try:
46 try:
41 opener.unlink(f)
47 opener.unlink(f)
42 except (IOError, OSError) as inst:
48 except (IOError, OSError) as inst:
43 if inst.errno != errno.ENOENT:
49 if inst.errno != errno.ENOENT:
44 raise
50 raise
45
51
46 backupfiles = []
52 backupfiles = []
47 for l, f, b, c in backupentries:
53 for l, f, b, c in backupentries:
48 if l not in vfsmap and c:
54 if l not in vfsmap and c:
49 report("couldn't handle %s: unknown cache location %s\n"
55 report("couldn't handle %s: unknown cache location %s\n"
50 % (b, l))
56 % (b, l))
51 vfs = vfsmap[l]
57 vfs = vfsmap[l]
52 try:
58 try:
53 if f and b:
59 if f and b:
54 filepath = vfs.join(f)
60 filepath = vfs.join(f)
55 backuppath = vfs.join(b)
61 backuppath = vfs.join(b)
56 try:
62 try:
57 util.copyfile(backuppath, filepath)
63 util.copyfile(backuppath, filepath)
58 backupfiles.append(b)
64 backupfiles.append(b)
59 except IOError:
65 except IOError:
60 report(_("failed to recover %s\n") % f)
66 report(_("failed to recover %s\n") % f)
61 else:
67 else:
62 target = f or b
68 target = f or b
63 try:
69 try:
64 vfs.unlink(target)
70 vfs.unlink(target)
65 except (IOError, OSError) as inst:
71 except (IOError, OSError) as inst:
66 if inst.errno != errno.ENOENT:
72 if inst.errno != errno.ENOENT:
67 raise
73 raise
68 except (IOError, OSError, util.Abort) as inst:
74 except (IOError, OSError, util.Abort) as inst:
69 if not c:
75 if not c:
70 raise
76 raise
71
77
72 opener.unlink(journal)
78 opener.unlink(journal)
73 backuppath = "%s.backupfiles" % journal
79 backuppath = "%s.backupfiles" % journal
74 if opener.exists(backuppath):
80 if opener.exists(backuppath):
75 opener.unlink(backuppath)
81 opener.unlink(backuppath)
76 try:
82 try:
77 for f in backupfiles:
83 for f in backupfiles:
78 if opener.exists(f):
84 if opener.exists(f):
79 opener.unlink(f)
85 opener.unlink(f)
80 except (IOError, OSError, util.Abort) as inst:
86 except (IOError, OSError, util.Abort) as inst:
81 # only pure backup file remains, it is sage to ignore any error
87 # only pure backup file remains, it is sage to ignore any error
82 pass
88 pass
83
89
class transaction(object):
    def __init__(self, report, opener, vfsmap, journalname, undoname=None,
                 after=None, createmode=None, validator=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journalname
        self.undoname = undoname
        self._queue = []
        # A callback to validate transaction content before closing it.
        # should raise exception if anything is wrong.
        # target user is repository hooks.
        if validator is None:
            validator = lambda tr: None
        self.validator = validator
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % self.journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __del__(self):
        # roll back anything left open if the transaction is garbage
        # collected without an explicit close()/release()
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        vfs = self._vfsmap[location]
        dirname, filename = vfs.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = vfs.reljoin(dirname, backupfilename)
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfile(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "no file existed at backup time"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self, suffix=''):
        # write files registered for generation
        anyfile = False
        for entry in sorted(self._filegenerators.values()):
            anyfile = True
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return anyfile

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            pending = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or pending
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self.validator(self)  # will raise exception if needed
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        self._writeundo()
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location "
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self.undoname is None:
            return
        undobackupfile = self.opener.open("%s.backupfiles" % self.undoname, 'w')
        undobackupfile.write('%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = ''
            else:
                if l not in self._vfsmap and c:
                    self.report("couldn't remove %s: unknown cache location "
                                "%s\n" % (b, l))
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self.journal), name
                uname = name.replace(self.journal, self.undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write("%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just drop the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except BaseException:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
503
509
504
510
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # malformed lines are reported but otherwise skipped
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
# (end of recovered source; review-page footer removed)