##// END OF EJS Templates
transaction: accept a 'location' argument for registertmp...
Pierre-Yves David -
r23354:918c7777 default
parent child Browse files
Show More
@@ -1,501 +1,501 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import os
15 import os
16 import errno
16 import errno
17 import error, util
17 import error, util
18
18
19 version = 2
19 version = 2
20
20
def active(func):
    """decorator restricting `func` to a live transaction

    When the transaction's usage count has dropped to zero (it was already
    committed or aborted), calling the wrapped method raises error.Abort
    instead of touching dead state.
    """
    def _active(self, *args, **kwds):
        if self.count:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
28
28
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
30 unlink=True):
30 unlink=True):
31 for f, o, _ignore in entries:
31 for f, o, _ignore in entries:
32 if o or not unlink:
32 if o or not unlink:
33 try:
33 try:
34 fp = opener(f, 'a')
34 fp = opener(f, 'a')
35 fp.truncate(o)
35 fp.truncate(o)
36 fp.close()
36 fp.close()
37 except IOError:
37 except IOError:
38 report(_("failed to truncate %s\n") % f)
38 report(_("failed to truncate %s\n") % f)
39 raise
39 raise
40 else:
40 else:
41 try:
41 try:
42 opener.unlink(f)
42 opener.unlink(f)
43 except (IOError, OSError), inst:
43 except (IOError, OSError), inst:
44 if inst.errno != errno.ENOENT:
44 if inst.errno != errno.ENOENT:
45 raise
45 raise
46
46
47 backupfiles = []
47 backupfiles = []
48 for l, f, b, c in backupentries:
48 for l, f, b, c in backupentries:
49 if l not in vfsmap and c:
49 if l not in vfsmap and c:
50 report("couldn't handle %s: unknown cache location %s\n"
50 report("couldn't handle %s: unknown cache location %s\n"
51 % (b, l))
51 % (b, l))
52 vfs = vfsmap[l]
52 vfs = vfsmap[l]
53 try:
53 try:
54 if f and b:
54 if f and b:
55 filepath = vfs.join(f)
55 filepath = vfs.join(f)
56 backuppath = vfs.join(b)
56 backuppath = vfs.join(b)
57 try:
57 try:
58 util.copyfile(backuppath, filepath)
58 util.copyfile(backuppath, filepath)
59 backupfiles.append(b)
59 backupfiles.append(b)
60 except IOError:
60 except IOError:
61 report(_("failed to recover %s\n") % f)
61 report(_("failed to recover %s\n") % f)
62 else:
62 else:
63 target = f or b
63 target = f or b
64 try:
64 try:
65 vfs.unlink(target)
65 vfs.unlink(target)
66 except (IOError, OSError), inst:
66 except (IOError, OSError), inst:
67 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
68 raise
68 raise
69 except (IOError, OSError, util.Abort), inst:
69 except (IOError, OSError, util.Abort), inst:
70 if not c:
70 if not c:
71 raise
71 raise
72
72
73 opener.unlink(journal)
73 opener.unlink(journal)
74 backuppath = "%s.backupfiles" % journal
74 backuppath = "%s.backupfiles" % journal
75 if opener.exists(backuppath):
75 if opener.exists(backuppath):
76 opener.unlink(backuppath)
76 opener.unlink(backuppath)
77 try:
77 try:
78 for f in backupfiles:
78 for f in backupfiles:
79 if opener.exists(f):
79 if opener.exists(f):
80 opener.unlink(f)
80 opener.unlink(f)
81 except (IOError, OSError, util.Abort), inst:
81 except (IOError, OSError, util.Abort), inst:
82 # only pure backup file remains, it is sage to ignore any error
82 # only pure backup file remains, it is sage to ignore any error
83 pass
83 pass
84
84
class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit progress/error messages
        * `opener`: vfs used for the main store content and journal files
        * `vfsmap`: maps a 'location' key to the vfs handling that location
        * `journal`: path of the journal file, relative to `opener`
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}
146
146
147 def __del__(self):
147 def __del__(self):
148 if self.journal:
148 if self.journal:
149 self._abort()
149 self._abort()
150
150
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done.

        Entries added while a group is open are buffered in self._queue and
        only applied by the matching endgroup() call.
        """
        self._queue.append([])
158
158
159 @active
159 @active
160 def endgroup(self):
160 def endgroup(self):
161 """apply delayed registration of file entry.
161 """apply delayed registration of file entry.
162
162
163 This is used by strip to delay vision of strip offset. The transaction
163 This is used by strip to delay vision of strip offset. The transaction
164 sees either none or all of the strip actions to be done."""
164 sees either none or all of the strip actions to be done."""
165 q = self._queue.pop()
165 q = self._queue.pop()
166 for f, o, data in q:
166 for f, o, data in q:
167 self._addentry(f, o, data)
167 self._addentry(f, o, data)
168
168
169 @active
169 @active
170 def add(self, file, offset, data=None):
170 def add(self, file, offset, data=None):
171 """record the state of an append-only file before update"""
171 """record the state of an append-only file before update"""
172 if file in self.map or file in self._backupmap:
172 if file in self.map or file in self._backupmap:
173 return
173 return
174 if self._queue:
174 if self._queue:
175 self._queue[-1].append((file, offset, data))
175 self._queue[-1].append((file, offset, data))
176 return
176 return
177
177
178 self._addentry(file, offset, data)
178 self._addentry(file, offset, data)
179
179
180 def _addentry(self, file, offset, data):
180 def _addentry(self, file, offset, data):
181 """add a append-only entry to memory and on-disk state"""
181 """add a append-only entry to memory and on-disk state"""
182 if file in self.map or file in self._backupmap:
182 if file in self.map or file in self._backupmap:
183 return
183 return
184 self.entries.append((file, offset, data))
184 self.entries.append((file, offset, data))
185 self.map[file] = len(self.entries) - 1
185 self.map[file] = len(self.entries) - 1
186 # add enough data to the journal to do the truncate
186 # add enough data to the journal to do the truncate
187 self.file.write("%s\0%d\n" % (file, offset))
187 self.file.write("%s\0%d\n" % (file, offset))
188 self.file.flush()
188 self.file.flush()
189
189
    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        * `location`: key into the transaction's vfsmap selecting the vfs
          that holds `file`; '' means the main store opener
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            # already journalled or backed up: nothing to do
            return
        dirname, filename = os.path.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        # NOTE(review): the backup name embeds self.journal (a path relative
        # to the main opener) even for non-default locations -- confirm this
        # yields a valid path in the other vfses
        backupfile = os.path.join(dirname, backupfilename)
        vfs = self._vfsmap[location]
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # no original file: an empty backup path tells playback to
            # delete the file instead of restoring it
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))
219
219
220 def _addbackupentry(self, entry):
220 def _addbackupentry(self, entry):
221 """register a new backup entry and write it to disk"""
221 """register a new backup entry and write it to disk"""
222 self._backupentries.append(entry)
222 self._backupentries.append(entry)
223 self._backupmap[file] = len(self._backupentries) - 1
223 self._backupmap[file] = len(self._backupentries) - 1
224 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
224 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
225 self._backupsfile.flush()
225 self._backupsfile.flush()
226
226
    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).

        * `tmpfile`: path of the file, relative to the vfs for `location`
        * `location`: key into the transaction's vfsmap selecting the vfs
          that holds the file; '' means the main store opener
        """
        # recorded as a backup entry with an empty 'path', which playback
        # and close() treat as "unlink unconditionally"
        self._addbackupentry((location, '', tmpfile, False))
235
235
    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the standard directory for transaction. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)
264
264
    def _generatefiles(self):
        """run the registered file generators, ordered by their 'order' key"""
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    # back the file up before the generator overwrites it
                    self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                # one open file object is passed per registered filename
                genfunc(*files)
            finally:
                for f in files:
                    f.close()
279
279
280 @active
280 @active
281 def find(self, file):
281 def find(self, file):
282 if file in self.map:
282 if file in self.map:
283 return self.entries[self.map[file]]
283 return self.entries[self.map[file]]
284 if file in self._backupmap:
284 if file in self._backupmap:
285 return self._backupentries[self._backupmap[file]]
285 return self._backupentries[self._backupmap[file]]
286 return None
286 return None
287
287
    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue

        Overwrites the recorded (offset, data) for `file` and appends the
        new truncation offset to the on-disk journal.  Raises KeyError when
        `file` was never added to the transaction.
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()
301
301
    @active
    def nest(self):
        """share this transaction with a nested caller

        Bumps both the activity and usage counters; the transaction only
        really commits once every participant has called close().
        """
        self.count += 1
        self.usages += 1
        return self
307
307
    def release(self):
        """drop one usage reference without closing the transaction

        If every usage has been released while the transaction is still
        active (count > 0), the transaction is aborted.
        """
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()
314
314
    def running(self):
        # True while at least one participant still holds the transaction open
        return self.count > 0
317
317
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.

        The callback is stored until writepending() pops and runs it.
        """
        self._pendingcallback[category] = callback
327
327
328 @active
328 @active
329 def writepending(self):
329 def writepending(self):
330 '''write pending file to temporary version
330 '''write pending file to temporary version
331
331
332 This is used to allow hooks to view a transaction before commit'''
332 This is used to allow hooks to view a transaction before commit'''
333 categories = sorted(self._pendingcallback)
333 categories = sorted(self._pendingcallback)
334 for cat in categories:
334 for cat in categories:
335 # remove callback since the data will have been flushed
335 # remove callback since the data will have been flushed
336 any = self._pendingcallback.pop(cat)(self)
336 any = self._pendingcallback.pop(cat)(self)
337 self._anypending = self._anypending or any
337 self._anypending = self._anypending or any
338 return self._anypending
338 return self._anypending
339
339
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.

        Finalize callbacks run inside close(), before files are cleaned up.
        """
        self._finalizecallback[category] = callback
350
350
    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.

        Post-close callbacks run at the very end of close(), after the
        journal has been removed.
        """
        self._postclosecallback[category] = callback
361
361
362 @active
362 @active
363 def close(self):
363 def close(self):
364 '''commit the transaction'''
364 '''commit the transaction'''
365 if self.count == 1:
365 if self.count == 1:
366 self._generatefiles()
366 self._generatefiles()
367 categories = sorted(self._finalizecallback)
367 categories = sorted(self._finalizecallback)
368 for cat in categories:
368 for cat in categories:
369 self._finalizecallback[cat](self)
369 self._finalizecallback[cat](self)
370 if self.onclose is not None:
370 if self.onclose is not None:
371 self.onclose()
371 self.onclose()
372
372
373 self.count -= 1
373 self.count -= 1
374 if self.count != 0:
374 if self.count != 0:
375 return
375 return
376 self.file.close()
376 self.file.close()
377 self._backupsfile.close()
377 self._backupsfile.close()
378 # cleanup temporary files
378 # cleanup temporary files
379 for l, f, b, c in self._backupentries:
379 for l, f, b, c in self._backupentries:
380 if l not in self._vfsmap and c:
380 if l not in self._vfsmap and c:
381 self.report("couldn't remote %s: unknown cache location %s\n"
381 self.report("couldn't remote %s: unknown cache location %s\n"
382 % (b, l))
382 % (b, l))
383 continue
383 continue
384 vfs = self._vfsmap[l]
384 vfs = self._vfsmap[l]
385 if not f and b and vfs.exists(b):
385 if not f and b and vfs.exists(b):
386 try:
386 try:
387 vfs.unlink(b)
387 vfs.unlink(b)
388 except (IOError, OSError, util.Abort), inst:
388 except (IOError, OSError, util.Abort), inst:
389 if not c:
389 if not c:
390 raise
390 raise
391 # Abort may be raise by read only opener
391 # Abort may be raise by read only opener
392 self.report("couldn't remote %s: %s\n"
392 self.report("couldn't remote %s: %s\n"
393 % (vfs.join(b), inst))
393 % (vfs.join(b), inst))
394 self.entries = []
394 self.entries = []
395 if self.after:
395 if self.after:
396 self.after()
396 self.after()
397 if self.opener.isfile(self.journal):
397 if self.opener.isfile(self.journal):
398 self.opener.unlink(self.journal)
398 self.opener.unlink(self.journal)
399 if self.opener.isfile(self._backupjournal):
399 if self.opener.isfile(self._backupjournal):
400 self.opener.unlink(self._backupjournal)
400 self.opener.unlink(self._backupjournal)
401 for _l, _f, b, c in self._backupentries:
401 for _l, _f, b, c in self._backupentries:
402 if l not in self._vfsmap and c:
402 if l not in self._vfsmap and c:
403 self.report("couldn't remote %s: unknown cache location"
403 self.report("couldn't remote %s: unknown cache location"
404 "%s\n" % (b, l))
404 "%s\n" % (b, l))
405 continue
405 continue
406 vfs = self._vfsmap[l]
406 vfs = self._vfsmap[l]
407 if b and vfs.exists(b):
407 if b and vfs.exists(b):
408 try:
408 try:
409 vfs.unlink(b)
409 vfs.unlink(b)
410 except (IOError, OSError, util.Abort), inst:
410 except (IOError, OSError, util.Abort), inst:
411 if not c:
411 if not c:
412 raise
412 raise
413 # Abort may be raise by read only opener
413 # Abort may be raise by read only opener
414 self.report("couldn't remote %s: %s\n"
414 self.report("couldn't remote %s: %s\n"
415 % (vfs.join(b), inst))
415 % (vfs.join(b), inst))
416 self._backupentries = []
416 self._backupentries = []
417 self.journal = None
417 self.journal = None
418 # run post close action
418 # run post close action
419 categories = sorted(self._postclosecallback)
419 categories = sorted(self._postclosecallback)
420 for cat in categories:
420 for cat in categories:
421 self._postclosecallback[cat](self)
421 self._postclosecallback[cat](self)
422
422
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)

        Delegates to _abort(), which replays the journal to undo writes.'''
        self._abort()
429
429
    def _abort(self):
        """shared abort implementation: undo everything recorded so far

        Resets the counters, runs the onabort hook, then replays the journal
        via _playback -- unless nothing was ever written, in which case the
        journal files are simply removed.
        """
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # the transaction never wrote anything: just drop the journals
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
457
457
458
458
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # NOTE(review): bool(c) is True for both "0" and "1"
                        # since c is a non-empty string here -- the cache
                        # flag written as '%d' does not round-trip; confirm
                        # whether lenient handling after rollback is intended
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now