##// END OF EJS Templates
transaction: set backupentries version to proper value...
Pierre-Yves David -
r23313:99109857 default
parent child Browse files
Show More
@@ -1,500 +1,500 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
# Format version written as the first line of the '*.backupfiles' journal;
# rollback() refuses to parse backup entries written by a different version.
# (The scraped diff left both the old and new assignment; the committed
# value is 2.)
version = 2

def active(func):
    """Decorator restricting *func* to a live transaction.

    Raises error.Abort when the transaction has already been committed or
    aborted (i.e. its scope count dropped to zero).
    """
    def wrapper(self, *args, **kwds):
        if not self.count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return wrapper
27 27
28 28 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 29 unlink=True):
30 30 for f, o, _ignore in entries:
31 31 if o or not unlink:
32 32 try:
33 33 fp = opener(f, 'a')
34 34 fp.truncate(o)
35 35 fp.close()
36 36 except IOError:
37 37 report(_("failed to truncate %s\n") % f)
38 38 raise
39 39 else:
40 40 try:
41 41 opener.unlink(f)
42 42 except (IOError, OSError), inst:
43 43 if inst.errno != errno.ENOENT:
44 44 raise
45 45
46 46 backupfiles = []
47 47 for l, f, b, c in backupentries:
48 48 if l not in vfsmap and c:
49 49 report("couldn't handle %s: unknown cache location %s\n"
50 50 % (b, l))
51 51 vfs = vfsmap[l]
52 52 try:
53 53 if f and b:
54 54 filepath = vfs.join(f)
55 55 backuppath = vfs.join(b)
56 56 try:
57 57 util.copyfile(backuppath, filepath)
58 58 backupfiles.append(b)
59 59 except IOError:
60 60 report(_("failed to recover %s\n") % f)
61 61 else:
62 62 target = f or b
63 63 try:
64 64 vfs.unlink(target)
65 65 except (IOError, OSError), inst:
66 66 if inst.errno != errno.ENOENT:
67 67 raise
68 68 except (IOError, OSError, util.Abort), inst:
69 69 if not c:
70 70 raise
71 71
72 72 opener.unlink(journal)
73 73 backuppath = "%s.backupfiles" % journal
74 74 if opener.exists(backuppath):
75 75 opener.unlink(backuppath)
76 76 try:
77 77 for f in backupfiles:
78 78 if opener.exists(f):
79 79 opener.unlink(f)
80 80 except (IOError, OSError, util.Abort), inst:
81 81 # only pure backup file remains, it is sage to ignore any error
82 82 pass
83 83
class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to print warnings/errors to the user
        * `opener`: vfs rooted at the store; default target for all files
        * `vfsmap`: {location -> vfs} map for files outside the store
        * `journal`: name of the journal file, opened for writing here
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        # count/usages track nested scopes; see nest()/release()
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        # (file, offset, data) triples for append-only files to truncate
        self.entries = []
        # file -> index into self.entries, for duplicate detection
        self.map = {}
        self.journal = journal
        # entries delayed by startgroup()/endgroup()
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        # first line of the backup journal is the format version
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}
145 145
    def __del__(self):
        # self.journal is cleared on successful close()/abort(); if it is
        # still set when the object is collected, the transaction was never
        # resolved, so roll it back.
        if self.journal:
            self._abort()
149 149
    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        # push a fresh pending list; add() appends here until endgroup()
        self._queue.append([])
157 157
158 158 @active
159 159 def endgroup(self):
160 160 """apply delayed registration of file entry.
161 161
162 162 This is used by strip to delay vision of strip offset. The transaction
163 163 sees either none or all of the strip actions to be done."""
164 164 q = self._queue.pop()
165 165 for f, o, data in q:
166 166 self._addentry(f, o, data)
167 167
168 168 @active
169 169 def add(self, file, offset, data=None):
170 170 """record the state of an append-only file before update"""
171 171 if file in self.map or file in self._backupmap:
172 172 return
173 173 if self._queue:
174 174 self._queue[-1].append((file, offset, data))
175 175 return
176 176
177 177 self._addentry(file, offset, data)
178 178
179 179 def _addentry(self, file, offset, data):
180 180 """add a append-only entry to memory and on-disk state"""
181 181 if file in self.map or file in self._backupmap:
182 182 return
183 183 self.entries.append((file, offset, data))
184 184 self.map[file] = len(self.entries) - 1
185 185 # add enough data to the journal to do the truncate
186 186 self.file.write("%s\0%d\n" % (file, offset))
187 187 self.file.flush()
188 188
    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        * `vfs`: the vfs holding `file`; defaults to the store opener
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        # already journaled or backed up: nothing to do
        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            # NOTE(review): the backup is always written through self.opener,
            # even when `vfs` points elsewhere — confirm this is intended
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file did not exist at backup time; an empty backup name means
            # "remove the file on rollback"
            backupfile = ''

        self._addbackupentry(('', file, backupfile, False))
217 217
218 218 def _addbackupentry(self, entry):
219 219 """register a new backup entry and write it to disk"""
220 220 self._backupentries.append(entry)
221 221 self._backupmap[file] = len(self._backupentries) - 1
222 222 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
223 223 self._backupsfile.flush()
224 224
    @active
    def registertmp(self, tmpfile):
        """register a temporary transaction file

        Such file will be delete when the transaction exit (on both failure and
        success).
        """
        # an empty 'path' field marks the entry as a temporary file
        self._addbackupentry(('', '', tmpfile, False))
233 233
    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)
258 258
    def _generatefiles(self):
        """run the registered file generators (called from close())

        Generators run sorted by their stored tuple, i.e. by `order` first.
        """
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                # closing the atomictemp files commits them, even when
                # genfunc raised
                for f in files:
                    f.close()
278 278
279 279 @active
280 280 def find(self, file):
281 281 if file in self.map:
282 282 return self.entries[self.map[file]]
283 283 if file in self._backupmap:
284 284 return self._backupentries[self._backupmap[file]]
285 285 return None
286 286
287 287 @active
288 288 def replace(self, file, offset, data=None):
289 289 '''
290 290 replace can only replace already committed entries
291 291 that are not pending in the queue
292 292 '''
293 293
294 294 if file not in self.map:
295 295 raise KeyError(file)
296 296 index = self.map[file]
297 297 self.entries[index] = (file, offset, data)
298 298 self.file.write("%s\0%d\n" % (file, offset))
299 299 self.file.flush()
300 300
    @active
    def nest(self):
        # enter a nested scope: one more close() is now required before the
        # transaction actually commits
        self.count += 1
        self.usages += 1
        return self
306 306
    def release(self):
        # drop one usage of a still-running transaction
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()
313 313
    def running(self):
        # True while at least one transaction scope is still open
        return self.count > 0
316 316
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback
326 326
327 327 @active
328 328 def writepending(self):
329 329 '''write pending file to temporary version
330 330
331 331 This is used to allow hooks to view a transaction before commit'''
332 332 categories = sorted(self._pendingcallback)
333 333 for cat in categories:
334 334 # remove callback since the data will have been flushed
335 335 any = self._pendingcallback.pop(cat)(self)
336 336 self._anypending = self._anypending or any
337 337 return self._anypending
338 338
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback
349 349
    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback
360 360
361 361 @active
362 362 def close(self):
363 363 '''commit the transaction'''
364 364 if self.count == 1:
365 365 self._generatefiles()
366 366 categories = sorted(self._finalizecallback)
367 367 for cat in categories:
368 368 self._finalizecallback[cat](self)
369 369 if self.onclose is not None:
370 370 self.onclose()
371 371
372 372 self.count -= 1
373 373 if self.count != 0:
374 374 return
375 375 self.file.close()
376 376 self._backupsfile.close()
377 377 # cleanup temporary files
378 378 for l, f, b, c in self._backupentries:
379 379 if l not in self._vfsmap and c:
380 380 self.report("couldn't remote %s: unknown cache location %s\n"
381 381 % (b, l))
382 382 continue
383 383 vfs = self._vfsmap[l]
384 384 if not f and b and vfs.exists(b):
385 385 try:
386 386 vfs.unlink(b)
387 387 except (IOError, OSError, util.Abort), inst:
388 388 if not c:
389 389 raise
390 390 # Abort may be raise by read only opener
391 391 self.report("couldn't remote %s: %s\n"
392 392 % (vfs.join(b), inst))
393 393 self.entries = []
394 394 if self.after:
395 395 self.after()
396 396 if self.opener.isfile(self.journal):
397 397 self.opener.unlink(self.journal)
398 398 if self.opener.isfile(self._backupjournal):
399 399 self.opener.unlink(self._backupjournal)
400 400 for _l, _f, b, c in self._backupentries:
401 401 if l not in self._vfsmap and c:
402 402 self.report("couldn't remote %s: unknown cache location"
403 403 "%s\n" % (b, l))
404 404 continue
405 405 vfs = self._vfsmap[l]
406 406 if b and vfs.exists(b):
407 407 try:
408 408 vfs.unlink(b)
409 409 except (IOError, OSError, util.Abort), inst:
410 410 if not c:
411 411 raise
412 412 # Abort may be raise by read only opener
413 413 self.report("couldn't remote %s: %s\n"
414 414 % (vfs.join(b), inst))
415 415 self._backupentries = []
416 416 self.journal = None
417 417 # run post close action
418 418 categories = sorted(self._postclosecallback)
419 419 for cat in categories:
420 420 self._postclosecallback[cat](self)
421 421
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        # all the work, including journal playback, happens in _abort()
        self._abort()
428 428
    def _abort(self):
        """roll the transaction back and clean up journal files"""
        # mark the transaction dead first so @active methods refuse to run
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was recorded: just drop the (empty) journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # unlink=False: keep truncated-to-zero files around
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            # the journal is consumed either way; __del__ must not re-abort
            self.journal = None
456 456
457 457
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a version header followed by
    location\0file\0backupfile\0cache entries, one per line.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the backup journal too (previously leaked)
        fp.close()
        if lines:
            # first line is the format version (see module-level 'version')
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # the cache flag is serialized with '%d'; bool() on
                        # the raw string was truthy even for "0"
                        backupentries.append((l, f, b, bool(int(c))))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial\n"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now