##// END OF EJS Templates
transaction: remove the 'onabort' mechanism...
Pierre-Yves David -
r23513:4c7ea2d9 default
parent child Browse files
Show More
@@ -1,504 +1,498 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import os
15 import os
16 import errno
16 import errno
17 import error, util
17 import error, util
18
18
# version of the on-disk journal/backup-file format; bump when the format
# written by transaction.__init__ / parsed by rollback() changes
version = 2
def active(func):
    """Decorator: allow *func* to run only while the transaction is live.

    Once the transaction has been committed or aborted (``count`` drops to
    zero) any further call raises ``error.Abort``.
    """
    def wrapper(self, *args, **kwargs):
        if self.count != 0:
            return func(self, *args, **kwargs)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return wrapper
28
28
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    """Replay a journal to undo an interrupted transaction.

    * `journal`: path of the journal file, relative to `opener`
    * `report`: callable used to emit warning/progress messages
    * `opener`: vfs for the store content
    * `vfsmap`: {location: vfs} map used to resolve backup entries
    * `entries`: (file, offset, data) truncation records
    * `backupentries`: (location, file, backupfile, cache) records;
      when `cache` is true the entry is best-effort and errors are ignored
    * `unlink`: when true, files recorded at offset 0 are removed instead
      of truncated
    """
    # first pass: truncate (or remove) append-only files back to their
    # recorded pre-transaction offsets
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file may already be gone; only that is acceptable
                if inst.errno != errno.ENOENT:
                    raise

    # second pass: restore full-file backups and delete temporary files
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
            # skip the entry: indexing vfsmap below would raise KeyError
            continue
        vfs = vfsmap[l]
        try:
            if f and b:
                # both paths recorded: copy the backup over the file
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # only one path recorded: it is a temporary file to delete
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, util.Abort) as inst:
            # cache entries are best-effort; anything else must propagate
            if not c:
                raise

    # the replay succeeded: drop the journal and its backup records
    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, util.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass
84
84
class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit warning/progress messages
        * `opener`: vfs for the store content
        * `vfsmap`: {location: vfs} map for files outside the store
        * `journal`: path of the journal file to create
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # hold callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a live journal means the transaction was never closed: roll back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        * `location`: key into the vfsmap for files outside the store
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        dirname, filename = os.path.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = os.path.join(dirname, backupfilename)
        vfs = self._vfsmap[location]
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # an empty backup path records "no file existed at backup time"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        # entry is (location, file, backupfile, cache); index on the file
        # path so add()/addbackup() dedup works (the original indexed on the
        # builtin 'file' type by mistake, so lookups never matched)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        # empty 'path' marks the entry as temporary (see close()/_playback)
        self._addbackupentry((location, '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate the files are
        located outside of the standard directory for transaction. It
        should match one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self, suffix=''):
        # write files registered for generation; returns True if any
        # generator ran
        wrote = False
        for entry in sorted(self._filegenerators.values()):
            wrote = True
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    name += suffix
                    if suffix:
                        # pending files are temporary: drop them on exit
                        self.registertmp(name, location=location)
                    else:
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
        return wrote

    @active
    def find(self, file):
        """return the journal or backup entry for `file`, or None"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested scope; close() must be called once per nest()"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            pending = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or pending
        self._anypending |= self._generatefiles(suffix='.pending')
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks
        with newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)

        self.count -= 1
        if self.count != 0:
            # still inside a nested scope: nothing to do yet
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location "
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        # remove the backup files themselves (the original loop bound
        # '_l, _f' but tested the stale 'l' from the loop above)
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location "
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        try:
            # nothing was written: just drop the (empty) journal files
            if not self.entries and not self._backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self._vfsmap, self.entries, self._backupentries,
                          False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
460
454
461
455
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    # parse the truncation journal
    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    # parse the backup journal, if one exists and matches our format version
    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        fp.close()  # was leaked in the original
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # the cache flag is written as '0'/'1' by
                        # _addbackupentry; bool('0') is True, so compare
                        # against '1' instead of using bool(c)
                        backupentries.append((l, f, b, c == '1'))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now