##// END OF EJS Templates
transaction: drop special handling for phases and bookmarks generation...
Pierre-Yves David -
r23318:fc73293f default
parent child Browse files
Show More
@@ -1,505 +1,501 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import os
15 import os
16 import errno
16 import errno
17 import error, util
17 import error, util
18
18
19 version = 2
19 version = 2
20
20
def active(func):
    """Decorator: require the transaction to still be running.

    Raises error.Abort when the wrapped method is invoked after the
    transaction has already been committed or aborted (self.count == 0).
    """
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
28
28
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
30 unlink=True):
30 unlink=True):
31 for f, o, _ignore in entries:
31 for f, o, _ignore in entries:
32 if o or not unlink:
32 if o or not unlink:
33 try:
33 try:
34 fp = opener(f, 'a')
34 fp = opener(f, 'a')
35 fp.truncate(o)
35 fp.truncate(o)
36 fp.close()
36 fp.close()
37 except IOError:
37 except IOError:
38 report(_("failed to truncate %s\n") % f)
38 report(_("failed to truncate %s\n") % f)
39 raise
39 raise
40 else:
40 else:
41 try:
41 try:
42 opener.unlink(f)
42 opener.unlink(f)
43 except (IOError, OSError), inst:
43 except (IOError, OSError), inst:
44 if inst.errno != errno.ENOENT:
44 if inst.errno != errno.ENOENT:
45 raise
45 raise
46
46
47 backupfiles = []
47 backupfiles = []
48 for l, f, b, c in backupentries:
48 for l, f, b, c in backupentries:
49 if l not in vfsmap and c:
49 if l not in vfsmap and c:
50 report("couldn't handle %s: unknown cache location %s\n"
50 report("couldn't handle %s: unknown cache location %s\n"
51 % (b, l))
51 % (b, l))
52 vfs = vfsmap[l]
52 vfs = vfsmap[l]
53 try:
53 try:
54 if f and b:
54 if f and b:
55 filepath = vfs.join(f)
55 filepath = vfs.join(f)
56 backuppath = vfs.join(b)
56 backuppath = vfs.join(b)
57 try:
57 try:
58 util.copyfile(backuppath, filepath)
58 util.copyfile(backuppath, filepath)
59 backupfiles.append(b)
59 backupfiles.append(b)
60 except IOError:
60 except IOError:
61 report(_("failed to recover %s\n") % f)
61 report(_("failed to recover %s\n") % f)
62 else:
62 else:
63 target = f or b
63 target = f or b
64 try:
64 try:
65 vfs.unlink(target)
65 vfs.unlink(target)
66 except (IOError, OSError), inst:
66 except (IOError, OSError), inst:
67 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
68 raise
68 raise
69 except (IOError, OSError, util.Abort), inst:
69 except (IOError, OSError, util.Abort), inst:
70 if not c:
70 if not c:
71 raise
71 raise
72
72
73 opener.unlink(journal)
73 opener.unlink(journal)
74 backuppath = "%s.backupfiles" % journal
74 backuppath = "%s.backupfiles" % journal
75 if opener.exists(backuppath):
75 if opener.exists(backuppath):
76 opener.unlink(backuppath)
76 opener.unlink(backuppath)
77 try:
77 try:
78 for f in backupfiles:
78 for f in backupfiles:
79 if opener.exists(f):
79 if opener.exists(f):
80 opener.unlink(f)
80 opener.unlink(f)
81 except (IOError, OSError, util.Abort), inst:
81 except (IOError, OSError, util.Abort), inst:
82 # only pure backup file remains, it is sage to ignore any error
82 # only pure backup file remains, it is sage to ignore any error
83 pass
83 pass
84
84
class transaction(object):
    """Simple journaling transaction over a set of append-only files.

    Records enough on-disk state (a journal plus backup copies) to roll
    every registered file back to its pre-transaction state on abort.
    """

    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a live journal means close() never ran: roll everything back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entries

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entries.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        dirname, filename = os.path.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = os.path.join(dirname, backupfilename)
        vfs = self._vfsmap[location]
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "the file did not exist"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk

        `entry` is a (location, file, backuppath, cache) tuple.
        """
        self._backupentries.append(entry)
        # key on the entry's file path (entry[1]); the previous code
        # mistakenly indexed on the `file` builtin after this logic was
        # extracted from addbackup()
        self._backupmap[entry[1]] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry(('', '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transactions. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        # journaled truncate entries take priority over backup entries
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # nested scopes share this transaction object; close()/release()
        # only act once the outermost scope unwinds
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending files to temporary versions

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            if self.onclose is not None:
                self.onclose()

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files (entries with an empty 'path')
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        # remove the remaining backup copies now that the commit is durable;
        # the previous code unpacked `_l, _f` here but kept reading the
        # stale `l` left over from the loop above
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remove %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remove %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing to undo: just remove the (empty) journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self._vfsmap, self.entries, self._backupentries,
                          False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
462
458
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # malformed line: warn and keep going with the rest
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            # first line carries the journal format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now