transaction: set backupentries version to proper value...
Pierre-Yves David
r23313:99109857 default
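For context: the `version` constant changed below is written as the first line of the `<journal>.backupfiles` file by `transaction.__init__`, and `rollback()` replays the recorded backups only when that header matches the version it knows how to parse. A minimal sketch of that handshake (illustrative only; the helper names are made up):

# Illustrative sketch, not part of this changeset. The format comes from
# transaction.__init__ and rollback() below.
version = 2  # value after this changeset (was 0 before)

def writebackupheader(fp):
    # same format as transaction.__init__: first line is "%d\n" % version
    fp.write('%d\n' % version)

def backupjournalmatches(lines):
    # mirrors the check in rollback(): the first line, minus its newline,
    # must equal str(version) before the backup entries are trusted
    return bool(lines) and lines[0][:-1] == str(version)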
@@ -1,500 +1,500 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import errno
import error, util

-version = 0
+version = 2

def active(func):
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active

def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError), inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
            continue
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError), inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, util.Abort), inst:
            if not c:
                raise

    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, util.Abort), inst:
        # only pure backup files remain, it is safe to ignore any error
        pass

class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
180 """add a append-only entry to memory and on-disk state"""
180 """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            backupfile = ''

        self._addbackupentry(('', file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile):
        """register a temporary transaction file

        Such a file will be deleted when the transaction exits (on both failure
        and success).
231 """
231 """
232 self._addbackupentry(('', '', tmpfile, False))
232 self._addbackupentry(('', '', tmpfile, False))
233
233
234 @active
234 @active
235 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
235 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
236 """add a function to generates some files at transaction commit
236 """add a function to generates some files at transaction commit
237
237
238 The `genfunc` argument is a function capable of generating proper
238 The `genfunc` argument is a function capable of generating proper
239 content of each entry in the `filename` tuple.
239 content of each entry in the `filename` tuple.
240
240
241 At transaction close time, `genfunc` will be called with one file
241 At transaction close time, `genfunc` will be called with one file
242 object argument per entries in `filenames`.
242 object argument per entries in `filenames`.
243
243
244 The transaction itself is responsible for the backup, creation and
244 The transaction itself is responsible for the backup, creation and
245 final write of such file.
245 final write of such file.
246
246
247 The `genid` argument is used to ensure the same set of file is only
247 The `genid` argument is used to ensure the same set of file is only
248 generated once. Call to `addfilegenerator` for a `genid` already
248 generated once. Call to `addfilegenerator` for a `genid` already
249 present will overwrite the old entry.
249 present will overwrite the old entry.
250
250
251 The `order` argument may be used to control the order in which multiple
251 The `order` argument may be used to control the order in which multiple
252 generator will be executed.
252 generator will be executed.
253 """
253 """
254 # For now, we are unable to do proper backup and restore of custom vfs
254 # For now, we are unable to do proper backup and restore of custom vfs
255 # but for bookmarks that are handled outside this mechanism.
255 # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending files to temporary versions

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            if self.onclose is not None:
                self.onclose()

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
380 self.report("couldn't remote %s: unknown cache location %s\n"
380 self.report("couldn't remote %s: unknown cache location %s\n"
381 % (b, l))
381 % (b, l))
382 continue
382 continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort), inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            " %s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort), inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None


def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a version line followed by
    location\0file\0backupfile\0cache entries, delimited by newlines.
469 """
469 """
470 entries = []
470 entries = []
471 backupentries = []
471 backupentries = []
472
472
473 fp = opener.open(file)
473 fp = opener.open(file)
474 lines = fp.readlines()
474 lines = fp.readlines()
475 fp.close()
475 fp.close()
476 for l in lines:
476 for l in lines:
477 try:
477 try:
478 f, o = l.split('\0')
478 f, o = l.split('\0')
479 entries.append((f, int(o), None))
479 entries.append((f, int(o), None))
480 except ValueError:
480 except ValueError:
481 report(_("couldn't read journal entry %r!\n") % l)
481 report(_("couldn't read journal entry %r!\n") % l)
482
482
483 backupjournal = "%s.backupfiles" % file
483 backupjournal = "%s.backupfiles" % file
484 if opener.exists(backupjournal):
484 if opener.exists(backupjournal):
485 fp = opener.open(backupjournal)
485 fp = opener.open(backupjournal)
486 lines = fp.readlines()
486 lines = fp.readlines()
487 if lines:
487 if lines:
488 ver = lines[0][:-1]
488 ver = lines[0][:-1]
489 if ver == str(version):
489 if ver == str(version):
490 for line in lines[1:]:
490 for line in lines[1:]:
491 if line:
491 if line:
492 # Shave off the trailing newline
492 # Shave off the trailing newline
493 line = line[:-1]
493 line = line[:-1]
494 l, f, b, c = line.split('\0')
494 l, f, b, c = line.split('\0')
495 backupentries.append((l, f, b, bool(c)))
495 backupentries.append((l, f, b, bool(c)))
496 else:
496 else:
497 report(_("journal was created by a different version of "
497 report(_("journal was created by a different version of "
498 "Mercurial"))
498 "Mercurial"))
499
499
500 _playback(file, report, opener, vfsmap, entries, backupentries)
500 _playback(file, report, opener, vfsmap, entries, backupentries)
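The rollback code above reads two plain-text journals. As a rough illustration (the helper names here are made up; the formats come from _addentry, _addbackupentry and rollback() above), they could be parsed like this:

# Illustrative helpers, not part of the changeset.
#   <journal>              one "path\0offset\n" entry per append-only file
#   <journal>.backupfiles  a "<version>\n" header, then one
#                          "location\0path\0backuppath\0cache\n" entry per
#                          backed-up or temporary file

def parsejournal(data):
    """Return (path, offset) pairs from the main journal contents."""
    entries = []
    for line in data.splitlines():
        try:
            f, o = line.split('\0')
            entries.append((f, int(o)))
        except ValueError:
            pass  # rollback() reports such lines instead of dropping them
    return entries

def parsebackupjournal(data):
    """Return (version, entries); each entry is a raw 4-tuple of strings."""
    lines = data.splitlines()
    if not lines:
        return None, []
    entries = [tuple(line.split('\0')) for line in lines[1:] if line]
    return lines[0], entries

During recovery, _playback() first truncates each journalled file back to its saved offset and then restores the recorded backup copies, so a partially written transaction leaves the store as it was before the transaction started.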