##// END OF EJS Templates
addbackup: handle file in subdirectory...
Pierre-Yves David -
r23315:66275ecc default
parent child Browse files
Show More
@@ -1,500 +1,504 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import os
15 import errno
16 import errno
16 import error, util
17 import error, util
17
18
18 version = 2
19 version = 2
19
20
20 def active(func):
21 def active(func):
21 def _active(self, *args, **kwds):
22 def _active(self, *args, **kwds):
22 if self.count == 0:
23 if self.count == 0:
23 raise error.Abort(_(
24 raise error.Abort(_(
24 'cannot use transaction when it is already committed/aborted'))
25 'cannot use transaction when it is already committed/aborted'))
25 return func(self, *args, **kwds)
26 return func(self, *args, **kwds)
26 return _active
27 return _active
27
28
28 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 unlink=True):
30 unlink=True):
30 for f, o, _ignore in entries:
31 for f, o, _ignore in entries:
31 if o or not unlink:
32 if o or not unlink:
32 try:
33 try:
33 fp = opener(f, 'a')
34 fp = opener(f, 'a')
34 fp.truncate(o)
35 fp.truncate(o)
35 fp.close()
36 fp.close()
36 except IOError:
37 except IOError:
37 report(_("failed to truncate %s\n") % f)
38 report(_("failed to truncate %s\n") % f)
38 raise
39 raise
39 else:
40 else:
40 try:
41 try:
41 opener.unlink(f)
42 opener.unlink(f)
42 except (IOError, OSError), inst:
43 except (IOError, OSError), inst:
43 if inst.errno != errno.ENOENT:
44 if inst.errno != errno.ENOENT:
44 raise
45 raise
45
46
46 backupfiles = []
47 backupfiles = []
47 for l, f, b, c in backupentries:
48 for l, f, b, c in backupentries:
48 if l not in vfsmap and c:
49 if l not in vfsmap and c:
49 report("couldn't handle %s: unknown cache location %s\n"
50 report("couldn't handle %s: unknown cache location %s\n"
50 % (b, l))
51 % (b, l))
51 vfs = vfsmap[l]
52 vfs = vfsmap[l]
52 try:
53 try:
53 if f and b:
54 if f and b:
54 filepath = vfs.join(f)
55 filepath = vfs.join(f)
55 backuppath = vfs.join(b)
56 backuppath = vfs.join(b)
56 try:
57 try:
57 util.copyfile(backuppath, filepath)
58 util.copyfile(backuppath, filepath)
58 backupfiles.append(b)
59 backupfiles.append(b)
59 except IOError:
60 except IOError:
60 report(_("failed to recover %s\n") % f)
61 report(_("failed to recover %s\n") % f)
61 else:
62 else:
62 target = f or b
63 target = f or b
63 try:
64 try:
64 vfs.unlink(target)
65 vfs.unlink(target)
65 except (IOError, OSError), inst:
66 except (IOError, OSError), inst:
66 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
67 raise
68 raise
68 except (IOError, OSError, util.Abort), inst:
69 except (IOError, OSError, util.Abort), inst:
69 if not c:
70 if not c:
70 raise
71 raise
71
72
72 opener.unlink(journal)
73 opener.unlink(journal)
73 backuppath = "%s.backupfiles" % journal
74 backuppath = "%s.backupfiles" % journal
74 if opener.exists(backuppath):
75 if opener.exists(backuppath):
75 opener.unlink(backuppath)
76 opener.unlink(backuppath)
76 try:
77 try:
77 for f in backupfiles:
78 for f in backupfiles:
78 if opener.exists(f):
79 if opener.exists(f):
79 opener.unlink(f)
80 opener.unlink(f)
80 except (IOError, OSError, util.Abort), inst:
81 except (IOError, OSError, util.Abort), inst:
81 # only pure backup file remains, it is safe to ignore any error
82 # only pure backup file remains, it is safe to ignore any error
82 pass
83 pass
83
84
84 class transaction(object):
85 class transaction(object):
85 def __init__(self, report, opener, vfsmap, journal, after=None,
86 def __init__(self, report, opener, vfsmap, journal, after=None,
86 createmode=None, onclose=None, onabort=None):
87 createmode=None, onclose=None, onabort=None):
87 """Begin a new transaction
88 """Begin a new transaction
88
89
89 Begins a new transaction that allows rolling back writes in the event of
90 Begins a new transaction that allows rolling back writes in the event of
90 an exception.
91 an exception.
91
92
92 * `after`: called after the transaction has been committed
93 * `after`: called after the transaction has been committed
93 * `createmode`: the mode of the journal file that will be created
94 * `createmode`: the mode of the journal file that will be created
94 * `onclose`: called as the transaction is closing, but before it is
95 * `onclose`: called as the transaction is closing, but before it is
95 closed
96 closed
96 * `onabort`: called as the transaction is aborting, but before any files
97 * `onabort`: called as the transaction is aborting, but before any files
97 have been truncated
98 have been truncated
98 """
99 """
99 self.count = 1
100 self.count = 1
100 self.usages = 1
101 self.usages = 1
101 self.report = report
102 self.report = report
102 # a vfs to the store content
103 # a vfs to the store content
103 self.opener = opener
104 self.opener = opener
104 # a map to access file in various {location -> vfs}
105 # a map to access file in various {location -> vfs}
105 vfsmap = vfsmap.copy()
106 vfsmap = vfsmap.copy()
106 vfsmap[''] = opener # set default value
107 vfsmap[''] = opener # set default value
107 self._vfsmap = vfsmap
108 self._vfsmap = vfsmap
108 self.after = after
109 self.after = after
109 self.onclose = onclose
110 self.onclose = onclose
110 self.onabort = onabort
111 self.onabort = onabort
111 self.entries = []
112 self.entries = []
112 self.map = {}
113 self.map = {}
113 self.journal = journal
114 self.journal = journal
114 self._queue = []
115 self._queue = []
115 # a dict of arguments to be passed to hooks
116 # a dict of arguments to be passed to hooks
116 self.hookargs = {}
117 self.hookargs = {}
117 self.file = opener.open(self.journal, "w")
118 self.file = opener.open(self.journal, "w")
118
119
119 # a list of ('location', 'path', 'backuppath', cache) entries.
120 # a list of ('location', 'path', 'backuppath', cache) entries.
120 # - if 'backuppath' is empty, no file existed at backup time
121 # - if 'backuppath' is empty, no file existed at backup time
121 # - if 'path' is empty, this is a temporary transaction file
122 # - if 'path' is empty, this is a temporary transaction file
122 # - if 'location' is not empty, the path is outside main opener reach.
123 # - if 'location' is not empty, the path is outside main opener reach.
123 # use 'location' value as a key in a vfsmap to find the right 'vfs'
124 # use 'location' value as a key in a vfsmap to find the right 'vfs'
124 # (cache is currently unused)
125 # (cache is currently unused)
125 self._backupentries = []
126 self._backupentries = []
126 self._backupmap = {}
127 self._backupmap = {}
127 self._backupjournal = "%s.backupfiles" % journal
128 self._backupjournal = "%s.backupfiles" % journal
128 self._backupsfile = opener.open(self._backupjournal, 'w')
129 self._backupsfile = opener.open(self._backupjournal, 'w')
129 self._backupsfile.write('%d\n' % version)
130 self._backupsfile.write('%d\n' % version)
130
131
131 if createmode is not None:
132 if createmode is not None:
132 opener.chmod(self.journal, createmode & 0666)
133 opener.chmod(self.journal, createmode & 0666)
133 opener.chmod(self._backupjournal, createmode & 0666)
134 opener.chmod(self._backupjournal, createmode & 0666)
134
135
135 # hold file generations to be performed on commit
136 # hold file generations to be performed on commit
136 self._filegenerators = {}
137 self._filegenerators = {}
137 # hold callback to write pending data for hooks
138 # hold callback to write pending data for hooks
138 self._pendingcallback = {}
139 self._pendingcallback = {}
139 # True if any pending data have been written ever
140 # True if any pending data have been written ever
140 self._anypending = False
141 self._anypending = False
141 # holds callback to call when writing the transaction
142 # holds callback to call when writing the transaction
142 self._finalizecallback = {}
143 self._finalizecallback = {}
143 # hold callback for post transaction close
144 # hold callback for post transaction close
144 self._postclosecallback = {}
145 self._postclosecallback = {}
145
146
146 def __del__(self):
147 def __del__(self):
147 if self.journal:
148 if self.journal:
148 self._abort()
149 self._abort()
149
150
150 @active
151 @active
151 def startgroup(self):
152 def startgroup(self):
152 """delay registration of file entry
153 """delay registration of file entry
153
154
154 This is used by strip to delay vision of strip offset. The transaction
155 This is used by strip to delay vision of strip offset. The transaction
155 sees either none or all of the strip actions to be done."""
156 sees either none or all of the strip actions to be done."""
156 self._queue.append([])
157 self._queue.append([])
157
158
158 @active
159 @active
159 def endgroup(self):
160 def endgroup(self):
160 """apply delayed registration of file entry.
161 """apply delayed registration of file entry.
161
162
162 This is used by strip to delay vision of strip offset. The transaction
163 This is used by strip to delay vision of strip offset. The transaction
163 sees either none or all of the strip actions to be done."""
164 sees either none or all of the strip actions to be done."""
164 q = self._queue.pop()
165 q = self._queue.pop()
165 for f, o, data in q:
166 for f, o, data in q:
166 self._addentry(f, o, data)
167 self._addentry(f, o, data)
167
168
168 @active
169 @active
169 def add(self, file, offset, data=None):
170 def add(self, file, offset, data=None):
170 """record the state of an append-only file before update"""
171 """record the state of an append-only file before update"""
171 if file in self.map or file in self._backupmap:
172 if file in self.map or file in self._backupmap:
172 return
173 return
173 if self._queue:
174 if self._queue:
174 self._queue[-1].append((file, offset, data))
175 self._queue[-1].append((file, offset, data))
175 return
176 return
176
177
177 self._addentry(file, offset, data)
178 self._addentry(file, offset, data)
178
179
179 def _addentry(self, file, offset, data):
180 def _addentry(self, file, offset, data):
180 """add a append-only entry to memory and on-disk state"""
181 """add a append-only entry to memory and on-disk state"""
181 if file in self.map or file in self._backupmap:
182 if file in self.map or file in self._backupmap:
182 return
183 return
183 self.entries.append((file, offset, data))
184 self.entries.append((file, offset, data))
184 self.map[file] = len(self.entries) - 1
185 self.map[file] = len(self.entries) - 1
185 # add enough data to the journal to do the truncate
186 # add enough data to the journal to do the truncate
186 self.file.write("%s\0%d\n" % (file, offset))
187 self.file.write("%s\0%d\n" % (file, offset))
187 self.file.flush()
188 self.file.flush()
188
189
189 @active
190 @active
190 def addbackup(self, file, hardlink=True, vfs=None):
191 def addbackup(self, file, hardlink=True, vfs=None):
191 """Adds a backup of the file to the transaction
192 """Adds a backup of the file to the transaction
192
193
193 Calling addbackup() creates a hardlink backup of the specified file
194 Calling addbackup() creates a hardlink backup of the specified file
194 that is used to recover the file in the event of the transaction
195 that is used to recover the file in the event of the transaction
195 aborting.
196 aborting.
196
197
197 * `file`: the file path, relative to .hg/store
198 * `file`: the file path, relative to .hg/store
198 * `hardlink`: use a hardlink to quickly create the backup
199 * `hardlink`: use a hardlink to quickly create the backup
199 """
200 """
200 if self._queue:
201 if self._queue:
201 msg = 'cannot use transaction.addbackup inside "group"'
202 msg = 'cannot use transaction.addbackup inside "group"'
202 raise RuntimeError(msg)
203 raise RuntimeError(msg)
203
204
204 if file in self.map or file in self._backupmap:
205 if file in self.map or file in self._backupmap:
205 return
206 return
206 backupfile = "%s.backup.%s" % (self.journal, file)
207 dirname, filename = os.path.split(file)
208
209 backupfilename = "%s.backup.%s" % (self.journal, filename)
210 backupfile = os.path.join(dirname, backupfilename)
207 if vfs is None:
211 if vfs is None:
208 vfs = self.opener
212 vfs = self.opener
209 if vfs.exists(file):
213 if vfs.exists(file):
210 filepath = vfs.join(file)
214 filepath = vfs.join(file)
211 backuppath = vfs.join(backupfile)
215 backuppath = vfs.join(backupfile)
212 util.copyfiles(filepath, backuppath, hardlink=hardlink)
216 util.copyfiles(filepath, backuppath, hardlink=hardlink)
213 else:
217 else:
214 backupfile = ''
218 backupfile = ''
215
219
216 self._addbackupentry(('', file, backupfile, False))
220 self._addbackupentry(('', file, backupfile, False))
217
221
218 def _addbackupentry(self, entry):
222 def _addbackupentry(self, entry):
219 """register a new backup entry and write it to disk"""
223 """register a new backup entry and write it to disk"""
220 self._backupentries.append(entry)
224 self._backupentries.append(entry)
221 self._backupmap[file] = len(self._backupentries) - 1
225 self._backupmap[file] = len(self._backupentries) - 1
222 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
226 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
223 self._backupsfile.flush()
227 self._backupsfile.flush()
224
228
225 @active
229 @active
226 def registertmp(self, tmpfile):
230 def registertmp(self, tmpfile):
227 """register a temporary transaction file
231 """register a temporary transaction file
228
232
229 Such file will be delete when the transaction exit (on both failure and
233 Such file will be delete when the transaction exit (on both failure and
230 success).
234 success).
231 """
235 """
232 self._addbackupentry(('', '', tmpfile, False))
236 self._addbackupentry(('', '', tmpfile, False))
233
237
234 @active
238 @active
235 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
239 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
236 """add a function to generate some files at transaction commit
240 """add a function to generate some files at transaction commit
237
241
238 The `genfunc` argument is a function capable of generating proper
242 The `genfunc` argument is a function capable of generating proper
239 content of each entry in the `filename` tuple.
243 content of each entry in the `filename` tuple.
240
244
241 At transaction close time, `genfunc` will be called with one file
245 At transaction close time, `genfunc` will be called with one file
242 object argument per entries in `filenames`.
246 object argument per entries in `filenames`.
243
247
244 The transaction itself is responsible for the backup, creation and
248 The transaction itself is responsible for the backup, creation and
245 final write of such file.
249 final write of such file.
246
250
247 The `genid` argument is used to ensure the same set of file is only
251 The `genid` argument is used to ensure the same set of file is only
248 generated once. Call to `addfilegenerator` for a `genid` already
252 generated once. Call to `addfilegenerator` for a `genid` already
249 present will overwrite the old entry.
253 present will overwrite the old entry.
250
254
251 The `order` argument may be used to control the order in which multiple
255 The `order` argument may be used to control the order in which multiple
252 generator will be executed.
256 generator will be executed.
253 """
257 """
254 # For now, we are unable to do proper backup and restore of custom vfs
258 # For now, we are unable to do proper backup and restore of custom vfs
255 # but for bookmarks that are handled outside this mechanism.
259 # but for bookmarks that are handled outside this mechanism.
256 assert vfs is None or filenames == ('bookmarks',)
260 assert vfs is None or filenames == ('bookmarks',)
257 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
261 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
258
262
259 def _generatefiles(self):
263 def _generatefiles(self):
260 # write files registered for generation
264 # write files registered for generation
261 for entry in sorted(self._filegenerators.values()):
265 for entry in sorted(self._filegenerators.values()):
262 order, filenames, genfunc, vfs = entry
266 order, filenames, genfunc, vfs = entry
263 if vfs is None:
267 if vfs is None:
264 vfs = self.opener
268 vfs = self.opener
265 files = []
269 files = []
266 try:
270 try:
267 for name in filenames:
271 for name in filenames:
268 # Some files are already backed up when creating the
272 # Some files are already backed up when creating the
269 # localrepo. Until this is properly fixed we disable the
273 # localrepo. Until this is properly fixed we disable the
270 # backup for them.
274 # backup for them.
271 if name not in ('phaseroots', 'bookmarks'):
275 if name not in ('phaseroots', 'bookmarks'):
272 self.addbackup(name)
276 self.addbackup(name)
273 files.append(vfs(name, 'w', atomictemp=True))
277 files.append(vfs(name, 'w', atomictemp=True))
274 genfunc(*files)
278 genfunc(*files)
275 finally:
279 finally:
276 for f in files:
280 for f in files:
277 f.close()
281 f.close()
278
282
279 @active
283 @active
280 def find(self, file):
284 def find(self, file):
281 if file in self.map:
285 if file in self.map:
282 return self.entries[self.map[file]]
286 return self.entries[self.map[file]]
283 if file in self._backupmap:
287 if file in self._backupmap:
284 return self._backupentries[self._backupmap[file]]
288 return self._backupentries[self._backupmap[file]]
285 return None
289 return None
286
290
287 @active
291 @active
288 def replace(self, file, offset, data=None):
292 def replace(self, file, offset, data=None):
289 '''
293 '''
290 replace can only replace already committed entries
294 replace can only replace already committed entries
291 that are not pending in the queue
295 that are not pending in the queue
292 '''
296 '''
293
297
294 if file not in self.map:
298 if file not in self.map:
295 raise KeyError(file)
299 raise KeyError(file)
296 index = self.map[file]
300 index = self.map[file]
297 self.entries[index] = (file, offset, data)
301 self.entries[index] = (file, offset, data)
298 self.file.write("%s\0%d\n" % (file, offset))
302 self.file.write("%s\0%d\n" % (file, offset))
299 self.file.flush()
303 self.file.flush()
300
304
301 @active
305 @active
302 def nest(self):
306 def nest(self):
303 self.count += 1
307 self.count += 1
304 self.usages += 1
308 self.usages += 1
305 return self
309 return self
306
310
307 def release(self):
311 def release(self):
308 if self.count > 0:
312 if self.count > 0:
309 self.usages -= 1
313 self.usages -= 1
310 # if the transaction scopes are left without being closed, fail
314 # if the transaction scopes are left without being closed, fail
311 if self.count > 0 and self.usages == 0:
315 if self.count > 0 and self.usages == 0:
312 self._abort()
316 self._abort()
313
317
314 def running(self):
318 def running(self):
315 return self.count > 0
319 return self.count > 0
316
320
317 def addpending(self, category, callback):
321 def addpending(self, category, callback):
318 """add a callback to be called when the transaction is pending
322 """add a callback to be called when the transaction is pending
319
323
320 The transaction will be given as callback's first argument.
324 The transaction will be given as callback's first argument.
321
325
322 Category is a unique identifier to allow overwriting an old callback
326 Category is a unique identifier to allow overwriting an old callback
323 with a newer callback.
327 with a newer callback.
324 """
328 """
325 self._pendingcallback[category] = callback
329 self._pendingcallback[category] = callback
326
330
327 @active
331 @active
328 def writepending(self):
332 def writepending(self):
329 '''write pending file to temporary version
333 '''write pending file to temporary version
330
334
331 This is used to allow hooks to view a transaction before commit'''
335 This is used to allow hooks to view a transaction before commit'''
332 categories = sorted(self._pendingcallback)
336 categories = sorted(self._pendingcallback)
333 for cat in categories:
337 for cat in categories:
334 # remove callback since the data will have been flushed
338 # remove callback since the data will have been flushed
335 any = self._pendingcallback.pop(cat)(self)
339 any = self._pendingcallback.pop(cat)(self)
336 self._anypending = self._anypending or any
340 self._anypending = self._anypending or any
337 return self._anypending
341 return self._anypending
338
342
339 @active
343 @active
340 def addfinalize(self, category, callback):
344 def addfinalize(self, category, callback):
341 """add a callback to be called when the transaction is closed
345 """add a callback to be called when the transaction is closed
342
346
343 The transaction will be given as callback's first argument.
347 The transaction will be given as callback's first argument.
344
348
345 Category is a unique identifier to allow overwriting old callbacks with
349 Category is a unique identifier to allow overwriting old callbacks with
346 newer callbacks.
350 newer callbacks.
347 """
351 """
348 self._finalizecallback[category] = callback
352 self._finalizecallback[category] = callback
349
353
350 @active
354 @active
351 def addpostclose(self, category, callback):
355 def addpostclose(self, category, callback):
352 """add a callback to be called after the transaction is closed
356 """add a callback to be called after the transaction is closed
353
357
354 The transaction will be given as callback's first argument.
358 The transaction will be given as callback's first argument.
355
359
356 Category is a unique identifier to allow overwriting an old callback
360 Category is a unique identifier to allow overwriting an old callback
357 with a newer callback.
361 with a newer callback.
358 """
362 """
359 self._postclosecallback[category] = callback
363 self._postclosecallback[category] = callback
360
364
361 @active
365 @active
362 def close(self):
366 def close(self):
363 '''commit the transaction'''
367 '''commit the transaction'''
364 if self.count == 1:
368 if self.count == 1:
365 self._generatefiles()
369 self._generatefiles()
366 categories = sorted(self._finalizecallback)
370 categories = sorted(self._finalizecallback)
367 for cat in categories:
371 for cat in categories:
368 self._finalizecallback[cat](self)
372 self._finalizecallback[cat](self)
369 if self.onclose is not None:
373 if self.onclose is not None:
370 self.onclose()
374 self.onclose()
371
375
372 self.count -= 1
376 self.count -= 1
373 if self.count != 0:
377 if self.count != 0:
374 return
378 return
375 self.file.close()
379 self.file.close()
376 self._backupsfile.close()
380 self._backupsfile.close()
377 # cleanup temporary files
381 # cleanup temporary files
378 for l, f, b, c in self._backupentries:
382 for l, f, b, c in self._backupentries:
379 if l not in self._vfsmap and c:
383 if l not in self._vfsmap and c:
380 self.report("couldn't remote %s: unknown cache location %s\n"
384 self.report("couldn't remote %s: unknown cache location %s\n"
381 % (b, l))
385 % (b, l))
382 continue
386 continue
383 vfs = self._vfsmap[l]
387 vfs = self._vfsmap[l]
384 if not f and b and vfs.exists(b):
388 if not f and b and vfs.exists(b):
385 try:
389 try:
386 vfs.unlink(b)
390 vfs.unlink(b)
387 except (IOError, OSError, util.Abort), inst:
391 except (IOError, OSError, util.Abort), inst:
388 if not c:
392 if not c:
389 raise
393 raise
390 # Abort may be raised by read only opener
394 # Abort may be raised by read only opener
391 self.report("couldn't remote %s: %s\n"
395 self.report("couldn't remote %s: %s\n"
392 % (vfs.join(b), inst))
396 % (vfs.join(b), inst))
393 self.entries = []
397 self.entries = []
394 if self.after:
398 if self.after:
395 self.after()
399 self.after()
396 if self.opener.isfile(self.journal):
400 if self.opener.isfile(self.journal):
397 self.opener.unlink(self.journal)
401 self.opener.unlink(self.journal)
398 if self.opener.isfile(self._backupjournal):
402 if self.opener.isfile(self._backupjournal):
399 self.opener.unlink(self._backupjournal)
403 self.opener.unlink(self._backupjournal)
400 for _l, _f, b, c in self._backupentries:
404 for _l, _f, b, c in self._backupentries:
401 if l not in self._vfsmap and c:
405 if l not in self._vfsmap and c:
402 self.report("couldn't remote %s: unknown cache location"
406 self.report("couldn't remote %s: unknown cache location"
403 "%s\n" % (b, l))
407 "%s\n" % (b, l))
404 continue
408 continue
405 vfs = self._vfsmap[l]
409 vfs = self._vfsmap[l]
406 if b and vfs.exists(b):
410 if b and vfs.exists(b):
407 try:
411 try:
408 vfs.unlink(b)
412 vfs.unlink(b)
409 except (IOError, OSError, util.Abort), inst:
413 except (IOError, OSError, util.Abort), inst:
410 if not c:
414 if not c:
411 raise
415 raise
412 # Abort may be raised by read only opener
416 # Abort may be raised by read only opener
413 self.report("couldn't remote %s: %s\n"
417 self.report("couldn't remote %s: %s\n"
414 % (vfs.join(b), inst))
418 % (vfs.join(b), inst))
415 self._backupentries = []
419 self._backupentries = []
416 self.journal = None
420 self.journal = None
417 # run post close action
421 # run post close action
418 categories = sorted(self._postclosecallback)
422 categories = sorted(self._postclosecallback)
419 for cat in categories:
423 for cat in categories:
420 self._postclosecallback[cat](self)
424 self._postclosecallback[cat](self)
421
425
422 @active
426 @active
423 def abort(self):
427 def abort(self):
424 '''abort the transaction (generally called on error, or when the
428 '''abort the transaction (generally called on error, or when the
425 transaction is not explicitly committed before going out of
429 transaction is not explicitly committed before going out of
426 scope)'''
430 scope)'''
427 self._abort()
431 self._abort()
428
432
429 def _abort(self):
433 def _abort(self):
430 self.count = 0
434 self.count = 0
431 self.usages = 0
435 self.usages = 0
432 self.file.close()
436 self.file.close()
433 self._backupsfile.close()
437 self._backupsfile.close()
434
438
435 if self.onabort is not None:
439 if self.onabort is not None:
436 self.onabort()
440 self.onabort()
437
441
438 try:
442 try:
439 if not self.entries and not self._backupentries:
443 if not self.entries and not self._backupentries:
440 if self.journal:
444 if self.journal:
441 self.opener.unlink(self.journal)
445 self.opener.unlink(self.journal)
442 if self._backupjournal:
446 if self._backupjournal:
443 self.opener.unlink(self._backupjournal)
447 self.opener.unlink(self._backupjournal)
444 return
448 return
445
449
446 self.report(_("transaction abort!\n"))
450 self.report(_("transaction abort!\n"))
447
451
448 try:
452 try:
449 _playback(self.journal, self.report, self.opener, self._vfsmap,
453 _playback(self.journal, self.report, self.opener, self._vfsmap,
450 self.entries, self._backupentries, False)
454 self.entries, self._backupentries, False)
451 self.report(_("rollback completed\n"))
455 self.report(_("rollback completed\n"))
452 except Exception:
456 except Exception:
453 self.report(_("rollback failed - please run hg recover\n"))
457 self.report(_("rollback failed - please run hg recover\n"))
454 finally:
458 finally:
455 self.journal = None
459 self.journal = None
456
460
457
461
458 def rollback(opener, vfsmap, file, report):
462 def rollback(opener, vfsmap, file, report):
459 """Rolls back the transaction contained in the given file
463 """Rolls back the transaction contained in the given file
460
464
461 Reads the entries in the specified file, and the corresponding
465 Reads the entries in the specified file, and the corresponding
462 '*.backupfiles' file, to recover from an incomplete transaction.
466 '*.backupfiles' file, to recover from an incomplete transaction.
463
467
464 * `file`: a file containing a list of entries, specifying where
468 * `file`: a file containing a list of entries, specifying where
465 to truncate each file. The file should contain a list of
469 to truncate each file. The file should contain a list of
466 file\0offset pairs, delimited by newlines. The corresponding
470 file\0offset pairs, delimited by newlines. The corresponding
467 '*.backupfiles' file should contain a list of file\0backupfile
471 '*.backupfiles' file should contain a list of file\0backupfile
468 pairs, delimited by \0.
472 pairs, delimited by \0.
469 """
473 """
470 entries = []
474 entries = []
471 backupentries = []
475 backupentries = []
472
476
473 fp = opener.open(file)
477 fp = opener.open(file)
474 lines = fp.readlines()
478 lines = fp.readlines()
475 fp.close()
479 fp.close()
476 for l in lines:
480 for l in lines:
477 try:
481 try:
478 f, o = l.split('\0')
482 f, o = l.split('\0')
479 entries.append((f, int(o), None))
483 entries.append((f, int(o), None))
480 except ValueError:
484 except ValueError:
481 report(_("couldn't read journal entry %r!\n") % l)
485 report(_("couldn't read journal entry %r!\n") % l)
482
486
483 backupjournal = "%s.backupfiles" % file
487 backupjournal = "%s.backupfiles" % file
484 if opener.exists(backupjournal):
488 if opener.exists(backupjournal):
485 fp = opener.open(backupjournal)
489 fp = opener.open(backupjournal)
486 lines = fp.readlines()
490 lines = fp.readlines()
487 if lines:
491 if lines:
488 ver = lines[0][:-1]
492 ver = lines[0][:-1]
489 if ver == str(version):
493 if ver == str(version):
490 for line in lines[1:]:
494 for line in lines[1:]:
491 if line:
495 if line:
492 # Shave off the trailing newline
496 # Shave off the trailing newline
493 line = line[:-1]
497 line = line[:-1]
494 l, f, b, c = line.split('\0')
498 l, f, b, c = line.split('\0')
495 backupentries.append((l, f, b, bool(c)))
499 backupentries.append((l, f, b, bool(c)))
496 else:
500 else:
497 report(_("journal was created by a different version of "
501 report(_("journal was created by a different version of "
498 "Mercurial"))
502 "Mercurial"))
499
503
500 _playback(file, report, opener, vfsmap, entries, backupentries)
504 _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now