transaction: drop special handling for phases and bookmarks generation...
Pierre-Yves David
r23318:fc73293f default
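In short, this change removes the branch in _generatefiles() that skipped addbackup() for the 'phaseroots' and 'bookmarks' generators; every file produced by a registered generator is now backed up by the transaction before it is rewritten. As a reading aid for the diff below, here is a hedged paraphrase of the simplified loop, written as a free-standing function; tr, vfsmap and filegenerators are stand-ins for the transaction attributes used in the patch, not names taken from it.

def generatefiles(tr, vfsmap, filegenerators):
    # Hedged sketch only: same shape as transaction._generatefiles() after
    # this change, written as a free function for readability.
    for order, filenames, genfunc, location in sorted(filegenerators.values()):
        vfs = vfsmap[location]
        files = []
        try:
            for name in filenames:
                # previously 'phaseroots' and 'bookmarks' were skipped here;
                # now every generated file is backed up before being rewritten
                tr.addbackup(name, location=location)
                files.append(vfs(name, 'w', atomictemp=True))
            genfunc(*files)
        finally:
            for f in files:
                f.close()

The practical effect is that aborting a transaction now restores phaseroots and bookmarks through the same backup/rollback path as any other generated file, instead of relying on backups made elsewhere.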
@@ -1,505 +1,501 @@
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import os
16 16 import errno
17 17 import error, util
18 18
19 19 version = 2
20 20
21 21 def active(func):
22 22 def _active(self, *args, **kwds):
23 23 if self.count == 0:
24 24 raise error.Abort(_(
25 25 'cannot use transaction when it is already committed/aborted'))
26 26 return func(self, *args, **kwds)
27 27 return _active
28 28
29 29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
30 30 unlink=True):
31 31 for f, o, _ignore in entries:
32 32 if o or not unlink:
33 33 try:
34 34 fp = opener(f, 'a')
35 35 fp.truncate(o)
36 36 fp.close()
37 37 except IOError:
38 38 report(_("failed to truncate %s\n") % f)
39 39 raise
40 40 else:
41 41 try:
42 42 opener.unlink(f)
43 43 except (IOError, OSError), inst:
44 44 if inst.errno != errno.ENOENT:
45 45 raise
46 46
47 47 backupfiles = []
48 48 for l, f, b, c in backupentries:
49 49 if l not in vfsmap and c:
50 50 report("couldn't handle %s: unknown cache location %s\n"
51 51 % (b, l))
52 52 vfs = vfsmap[l]
53 53 try:
54 54 if f and b:
55 55 filepath = vfs.join(f)
56 56 backuppath = vfs.join(b)
57 57 try:
58 58 util.copyfile(backuppath, filepath)
59 59 backupfiles.append(b)
60 60 except IOError:
61 61 report(_("failed to recover %s\n") % f)
62 62 else:
63 63 target = f or b
64 64 try:
65 65 vfs.unlink(target)
66 66 except (IOError, OSError), inst:
67 67 if inst.errno != errno.ENOENT:
68 68 raise
69 69 except (IOError, OSError, util.Abort), inst:
70 70 if not c:
71 71 raise
72 72
73 73 opener.unlink(journal)
74 74 backuppath = "%s.backupfiles" % journal
75 75 if opener.exists(backuppath):
76 76 opener.unlink(backuppath)
77 77 try:
78 78 for f in backupfiles:
79 79 if opener.exists(f):
80 80 opener.unlink(f)
81 81 except (IOError, OSError, util.Abort), inst:
82 82 # only pure backup files remain; it is safe to ignore any error
83 83 pass
84 84
85 85 class transaction(object):
86 86 def __init__(self, report, opener, vfsmap, journal, after=None,
87 87 createmode=None, onclose=None, onabort=None):
88 88 """Begin a new transaction
89 89
90 90 Begins a new transaction that allows rolling back writes in the event of
91 91 an exception.
92 92
93 93 * `after`: called after the transaction has been committed
94 94 * `createmode`: the mode of the journal file that will be created
95 95 * `onclose`: called as the transaction is closing, but before it is
96 96 closed
97 97 * `onabort`: called as the transaction is aborting, but before any files
98 98 have been truncated
99 99 """
100 100 self.count = 1
101 101 self.usages = 1
102 102 self.report = report
103 103 # a vfs to the store content
104 104 self.opener = opener
105 105 # a map to access files in various locations: {location -> vfs}
106 106 vfsmap = vfsmap.copy()
107 107 vfsmap[''] = opener # set default value
108 108 self._vfsmap = vfsmap
109 109 self.after = after
110 110 self.onclose = onclose
111 111 self.onabort = onabort
112 112 self.entries = []
113 113 self.map = {}
114 114 self.journal = journal
115 115 self._queue = []
116 116 # a dict of arguments to be passed to hooks
117 117 self.hookargs = {}
118 118 self.file = opener.open(self.journal, "w")
119 119
120 120 # a list of ('location', 'path', 'backuppath', cache) entries.
121 121 # - if 'backuppath' is empty, no file existed at backup time
122 122 # - if 'path' is empty, this is a temporary transaction file
123 123 # - if 'location' is not empty, the path is outside main opener reach.
124 124 # use 'location' value as a key in a vfsmap to find the right 'vfs'
125 125 # (cache is currently unused)
126 126 self._backupentries = []
127 127 self._backupmap = {}
128 128 self._backupjournal = "%s.backupfiles" % journal
129 129 self._backupsfile = opener.open(self._backupjournal, 'w')
130 130 self._backupsfile.write('%d\n' % version)
131 131
132 132 if createmode is not None:
133 133 opener.chmod(self.journal, createmode & 0666)
134 134 opener.chmod(self._backupjournal, createmode & 0666)
135 135
136 136 # hold file generations to be performed on commit
137 137 self._filegenerators = {}
138 138 # hold callbacks to write pending data for hooks
139 139 self._pendingcallback = {}
140 140 # True if any pending data has ever been written
141 141 self._anypending = False
142 142 # holds callback to call when writing the transaction
143 143 self._finalizecallback = {}
144 144 # hold callbacks for post transaction close
145 145 self._postclosecallback = {}
146 146
147 147 def __del__(self):
148 148 if self.journal:
149 149 self._abort()
150 150
151 151 @active
152 152 def startgroup(self):
153 153 """delay registration of file entry
154 154
155 155 This is used by strip to delay vision of strip offset. The transaction
156 156 sees either none or all of the strip actions to be done."""
157 157 self._queue.append([])
158 158
159 159 @active
160 160 def endgroup(self):
161 161 """apply delayed registration of file entry.
162 162
163 163 This is used by strip to delay vision of strip offset. The transaction
164 164 sees either none or all of the strip actions to be done."""
165 165 q = self._queue.pop()
166 166 for f, o, data in q:
167 167 self._addentry(f, o, data)
168 168
169 169 @active
170 170 def add(self, file, offset, data=None):
171 171 """record the state of an append-only file before update"""
172 172 if file in self.map or file in self._backupmap:
173 173 return
174 174 if self._queue:
175 175 self._queue[-1].append((file, offset, data))
176 176 return
177 177
178 178 self._addentry(file, offset, data)
179 179
180 180 def _addentry(self, file, offset, data):
181 181 """add a append-only entry to memory and on-disk state"""
182 182 if file in self.map or file in self._backupmap:
183 183 return
184 184 self.entries.append((file, offset, data))
185 185 self.map[file] = len(self.entries) - 1
186 186 # add enough data to the journal to do the truncate
187 187 self.file.write("%s\0%d\n" % (file, offset))
188 188 self.file.flush()
189 189
190 190 @active
191 191 def addbackup(self, file, hardlink=True, location=''):
192 192 """Adds a backup of the file to the transaction
193 193
194 194 Calling addbackup() creates a hardlink backup of the specified file
195 195 that is used to recover the file in the event of the transaction
196 196 aborting.
197 197
198 198 * `file`: the file path, relative to .hg/store
199 199 * `hardlink`: use a hardlink to quickly create the backup
200 200 """
201 201 if self._queue:
202 202 msg = 'cannot use transaction.addbackup inside "group"'
203 203 raise RuntimeError(msg)
204 204
205 205 if file in self.map or file in self._backupmap:
206 206 return
207 207 dirname, filename = os.path.split(file)
208 208 backupfilename = "%s.backup.%s" % (self.journal, filename)
209 209 backupfile = os.path.join(dirname, backupfilename)
210 210 vfs = self._vfsmap[location]
211 211 if vfs.exists(file):
212 212 filepath = vfs.join(file)
213 213 backuppath = vfs.join(backupfile)
214 214 util.copyfiles(filepath, backuppath, hardlink=hardlink)
215 215 else:
216 216 backupfile = ''
217 217
218 218 self._addbackupentry((location, file, backupfile, False))
219 219
220 220 def _addbackupentry(self, entry):
221 221 """register a new backup entry and write it to disk"""
222 222 self._backupentries.append(entry)
223 223 self._backupmap[entry[1]] = len(self._backupentries) - 1
224 224 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
225 225 self._backupsfile.flush()
226 226
227 227 @active
228 228 def registertmp(self, tmpfile):
229 229 """register a temporary transaction file
230 230
231 231 Such files will be deleted when the transaction exits (on both failure and
232 232 success).
233 233 """
234 234 self._addbackupentry(('', '', tmpfile, False))
235 235
236 236 @active
237 237 def addfilegenerator(self, genid, filenames, genfunc, order=0,
238 238 location=''):
239 239 """add a function to generates some files at transaction commit
240 240
241 241 The `genfunc` argument is a function capable of generating proper
242 242 content of each entry in the `filenames` tuple.
243 243
244 244 At transaction close time, `genfunc` will be called with one file
245 245 object argument per entry in `filenames`.
246 246
247 247 The transaction itself is responsible for the backup, creation and
248 248 final write of such files.
249 249
250 250 The `genid` argument is used to ensure the same set of files is only
251 251 generated once. A call to `addfilegenerator` for a `genid` already
252 252 present will overwrite the old entry.
253 253
254 254 The `order` argument may be used to control the order in which multiple
255 255 generators will be executed.
256 256
257 257 The `location` argument may be used to indicate that the files are
258 258 located outside of the standard transaction directory. It should match
259 259 one of the keys of the `transaction.vfsmap` dictionary.
260 260 """
261 261 # For now, we are unable to do proper backup and restore of custom vfs,
262 262 # except for bookmarks, which are handled outside this mechanism.
263 263 self._filegenerators[genid] = (order, filenames, genfunc, location)
264 264
265 265 def _generatefiles(self):
266 266 # write files registered for generation
267 267 for entry in sorted(self._filegenerators.values()):
268 268 order, filenames, genfunc, location = entry
269 269 vfs = self._vfsmap[location]
270 270 files = []
271 271 try:
272 272 for name in filenames:
273 # Some files are already backed up when creating the
274 # localrepo. Until this is properly fixed we disable the
275 # backup for them.
276 if name not in ('phaseroots', 'bookmarks'):
277 self.addbackup(name, location=location)
273 self.addbackup(name, location=location)
278 274 files.append(vfs(name, 'w', atomictemp=True))
279 275 genfunc(*files)
280 276 finally:
281 277 for f in files:
282 278 f.close()
283 279
284 280 @active
285 281 def find(self, file):
286 282 if file in self.map:
287 283 return self.entries[self.map[file]]
288 284 if file in self._backupmap:
289 285 return self._backupentries[self._backupmap[file]]
290 286 return None
291 287
292 288 @active
293 289 def replace(self, file, offset, data=None):
294 290 '''
295 291 replace can only replace already committed entries
296 292 that are not pending in the queue
297 293 '''
298 294
299 295 if file not in self.map:
300 296 raise KeyError(file)
301 297 index = self.map[file]
302 298 self.entries[index] = (file, offset, data)
303 299 self.file.write("%s\0%d\n" % (file, offset))
304 300 self.file.flush()
305 301
306 302 @active
307 303 def nest(self):
308 304 self.count += 1
309 305 self.usages += 1
310 306 return self
311 307
312 308 def release(self):
313 309 if self.count > 0:
314 310 self.usages -= 1
315 311 # if the transaction scopes are left without being closed, fail
316 312 if self.count > 0 and self.usages == 0:
317 313 self._abort()
318 314
319 315 def running(self):
320 316 return self.count > 0
321 317
322 318 def addpending(self, category, callback):
323 319 """add a callback to be called when the transaction is pending
324 320
325 321 The transaction will be given as callback's first argument.
326 322
327 323 Category is a unique identifier to allow overwriting an old callback
328 324 with a newer callback.
329 325 """
330 326 self._pendingcallback[category] = callback
331 327
332 328 @active
333 329 def writepending(self):
334 330 '''write pending file to temporary version
335 331
336 332 This is used to allow hooks to view a transaction before commit'''
337 333 categories = sorted(self._pendingcallback)
338 334 for cat in categories:
339 335 # remove callback since the data will have been flushed
340 336 any = self._pendingcallback.pop(cat)(self)
341 337 self._anypending = self._anypending or any
342 338 return self._anypending
343 339
344 340 @active
345 341 def addfinalize(self, category, callback):
346 342 """add a callback to be called when the transaction is closed
347 343
348 344 The transaction will be given as callback's first argument.
349 345
350 346 Category is a unique identifier to allow overwriting old callbacks with
351 347 newer callbacks.
352 348 """
353 349 self._finalizecallback[category] = callback
354 350
355 351 @active
356 352 def addpostclose(self, category, callback):
357 353 """add a callback to be called after the transaction is closed
358 354
359 355 The transaction will be given as callback's first argument.
360 356
361 357 Category is a unique identifier to allow overwriting an old callback
362 358 with a newer callback.
363 359 """
364 360 self._postclosecallback[category] = callback
365 361
366 362 @active
367 363 def close(self):
368 364 '''commit the transaction'''
369 365 if self.count == 1:
370 366 self._generatefiles()
371 367 categories = sorted(self._finalizecallback)
372 368 for cat in categories:
373 369 self._finalizecallback[cat](self)
374 370 if self.onclose is not None:
375 371 self.onclose()
376 372
377 373 self.count -= 1
378 374 if self.count != 0:
379 375 return
380 376 self.file.close()
381 377 self._backupsfile.close()
382 378 # cleanup temporary files
383 379 for l, f, b, c in self._backupentries:
384 380 if l not in self._vfsmap and c:
385 381 self.report("couldn't remove %s: unknown cache location %s\n"
386 382 % (b, l))
387 383 continue
388 384 vfs = self._vfsmap[l]
389 385 if not f and b and vfs.exists(b):
390 386 try:
391 387 vfs.unlink(b)
392 388 except (IOError, OSError, util.Abort), inst:
393 389 if not c:
394 390 raise
395 391 # Abort may be raised by a read-only opener
396 392 self.report("couldn't remove %s: %s\n"
397 393 % (vfs.join(b), inst))
398 394 self.entries = []
399 395 if self.after:
400 396 self.after()
401 397 if self.opener.isfile(self.journal):
402 398 self.opener.unlink(self.journal)
403 399 if self.opener.isfile(self._backupjournal):
404 400 self.opener.unlink(self._backupjournal)
405 401 for l, f, b, c in self._backupentries:
406 402 if l not in self._vfsmap and c:
407 403 self.report("couldn't remove %s: unknown cache location "
408 404 "%s\n" % (b, l))
409 405 continue
410 406 vfs = self._vfsmap[l]
411 407 if b and vfs.exists(b):
412 408 try:
413 409 vfs.unlink(b)
414 410 except (IOError, OSError, util.Abort), inst:
415 411 if not c:
416 412 raise
417 413 # Abort may be raised by a read-only opener
418 414 self.report("couldn't remove %s: %s\n"
419 415 % (vfs.join(b), inst))
420 416 self._backupentries = []
421 417 self.journal = None
422 418 # run post close action
423 419 categories = sorted(self._postclosecallback)
424 420 for cat in categories:
425 421 self._postclosecallback[cat](self)
426 422
427 423 @active
428 424 def abort(self):
429 425 '''abort the transaction (generally called on error, or when the
430 426 transaction is not explicitly committed before going out of
431 427 scope)'''
432 428 self._abort()
433 429
434 430 def _abort(self):
435 431 self.count = 0
436 432 self.usages = 0
437 433 self.file.close()
438 434 self._backupsfile.close()
439 435
440 436 if self.onabort is not None:
441 437 self.onabort()
442 438
443 439 try:
444 440 if not self.entries and not self._backupentries:
445 441 if self.journal:
446 442 self.opener.unlink(self.journal)
447 443 if self._backupjournal:
448 444 self.opener.unlink(self._backupjournal)
449 445 return
450 446
451 447 self.report(_("transaction abort!\n"))
452 448
453 449 try:
454 450 _playback(self.journal, self.report, self.opener, self._vfsmap,
455 451 self.entries, self._backupentries, False)
456 452 self.report(_("rollback completed\n"))
457 453 except Exception:
458 454 self.report(_("rollback failed - please run hg recover\n"))
459 455 finally:
460 456 self.journal = None
461 457
462 458
463 459 def rollback(opener, vfsmap, file, report):
464 460 """Rolls back the transaction contained in the given file
465 461
466 462 Reads the entries in the specified file, and the corresponding
467 463 '*.backupfiles' file, to recover from an incomplete transaction.
468 464
469 465 * `file`: a file containing a list of entries, specifying where
470 466 to truncate each file. The file should contain a list of
471 467 file\0offset pairs, delimited by newlines. The corresponding
472 468 '*.backupfiles' file should contain a list of file\0backupfile
473 469 pairs, delimited by \0.
474 470 """
475 471 entries = []
476 472 backupentries = []
477 473
478 474 fp = opener.open(file)
479 475 lines = fp.readlines()
480 476 fp.close()
481 477 for l in lines:
482 478 try:
483 479 f, o = l.split('\0')
484 480 entries.append((f, int(o), None))
485 481 except ValueError:
486 482 report(_("couldn't read journal entry %r!\n") % l)
487 483
488 484 backupjournal = "%s.backupfiles" % file
489 485 if opener.exists(backupjournal):
490 486 fp = opener.open(backupjournal)
491 487 lines = fp.readlines()
492 488 if lines:
493 489 ver = lines[0][:-1]
494 490 if ver == str(version):
495 491 for line in lines[1:]:
496 492 if line:
497 493 # Shave off the trailing newline
498 494 line = line[:-1]
499 495 l, f, b, c = line.split('\0')
500 496 backupentries.append((l, f, b, bool(c)))
501 497 else:
502 498 report(_("journal was created by a different version of "
503 499 "Mercurial"))
504 500
505 501 _playback(file, report, opener, vfsmap, entries, backupentries)
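As a companion note on the journal files that rollback() and _playback() above consume: the journal itself holds newline-separated file\0offset records, and the '<journal>.backupfiles' file starts with a version line followed by location\0file\0backupfile\0cache records. The snippet below is an illustrative, self-contained parse of sample data in those formats, mirroring what rollback() does; the file names are examples only and the code is not taken from Mercurial.

# Illustrative only: parse sample journal data the way rollback() above does
# (Python 2, matching the code in the diff).
journal_data = "00changelog.i\x0012345\n00manifest.i\x006789\n"
backup_data = "2\n\x00phaseroots\x00journal.backup.phaseroots\x000\n"

entries = []
for line in journal_data.splitlines():
    f, o = line.split('\x00')
    entries.append((f, int(o), None))

backupentries = []
lines = backup_data.splitlines()
if lines and lines[0] == str(2):      # '2' matches the module-level 'version'
    for line in lines[1:]:
        l, f, b, c = line.split('\x00')
        backupentries.append((l, f, b, bool(c)))

print entries        # [('00changelog.i', 12345, None), ('00manifest.i', 6789, None)]
print backupentries  # [('', 'phaseroots', 'journal.backup.phaseroots', True)]
                     # note: bool('0') is True; rollback() shares that quirk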