##// END OF EJS Templates
transaction: fix some docstring grammar
Matt Mackall -
r23355:7faa55c2 default
parent child Browse files
Show More
@@ -1,501 +1,501 b''
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import os
16 16 import errno
17 17 import error, util
18 18
# On-disk format version of the '*.backupfiles' journal; written as the
# first line of that file (see __init__) and checked on rollback().
version = 2
20 20
def active(func):
    """Decorator: only allow a method while the transaction is running.

    Raises error.Abort when the transaction has already been
    committed or aborted (self.count == 0).
    """
    def wrapper(self, *args, **kwargs):
        if not self.count:
            msg = 'cannot use transaction when it is already committed/aborted'
            raise error.Abort(_(msg))
        return func(self, *args, **kwargs)
    return wrapper
28 28
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    """Replay a transaction journal to undo an interrupted transaction.

    * `journal`: path of the journal file; removed once playback is done
    * `report`: callable used to emit progress and error messages
    * `opener`: vfs of the main store; used for truncation and cleanup
    * `vfsmap`: maps a 'location' key to the vfs handling that location
    * `entries`: (file, offset, ignored) triples; each file is truncated
      back to `offset`
    * `backupentries`: (location, file, backupfile, cache) tuples; backup
      copies are restored and temporary files removed
    * `unlink`: when True, files recorded with offset 0 (i.e. that did not
      exist before the transaction) are deleted instead of truncated
    """
    # First pass: restore append-only files to their pre-transaction length.
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            # offset 0 and unlink requested: the file is entirely new
            try:
                opener.unlink(f)
            except (IOError, OSError), inst:
                if inst.errno != errno.ENOENT:
                    raise

    # Second pass: restore backed-up files and remove temporary files.
    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        # NOTE(review): the report above is not followed by 'continue', so
        # the vfsmap[l] lookup below raises KeyError for an unknown
        # location — confirm this is intended.
        vfs = vfsmap[l]
        try:
            if f and b:
                # both paths present: restore the backup over the original
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # only one path recorded: a temporary file to remove
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError), inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, util.Abort), inst:
            # cache entries (c truthy) are best-effort; others re-raise
            if not c:
                raise

    # Finally remove the journal itself and the backup copies.
    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, util.Abort), inst:
        # only pure backup files remain, it is safe to ignore any error
        pass
84 84
class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit progress and error messages
        * `opener`: vfs of the main store; journals are created through it
        * `vfsmap`: maps a 'location' key to the vfs handling that location
        * `journal`: path of the journal file to create
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        # nesting depth; the transaction commits when it drops back to 0
        self.count = 1
        # user count; release() aborts when usages hits 0 with scopes open
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        # first line of the backup journal is the format version
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generators to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}
146 146
    def __del__(self):
        # A still-set journal means the transaction was never closed;
        # abort so on-disk state is rolled back rather than left dangling.
        if self.journal:
            self._abort()
150 150
151 151 @active
152 152 def startgroup(self):
153 153 """delay registration of file entry
154 154
155 155 This is used by strip to delay vision of strip offset. The transaction
156 156 sees either none or all of the strip actions to be done."""
157 157 self._queue.append([])
158 158
159 159 @active
160 160 def endgroup(self):
161 161 """apply delayed registration of file entry.
162 162
163 163 This is used by strip to delay vision of strip offset. The transaction
164 164 sees either none or all of the strip actions to be done."""
165 165 q = self._queue.pop()
166 166 for f, o, data in q:
167 167 self._addentry(f, o, data)
168 168
169 169 @active
170 170 def add(self, file, offset, data=None):
171 171 """record the state of an append-only file before update"""
172 172 if file in self.map or file in self._backupmap:
173 173 return
174 174 if self._queue:
175 175 self._queue[-1].append((file, offset, data))
176 176 return
177 177
178 178 self._addentry(file, offset, data)
179 179
180 180 def _addentry(self, file, offset, data):
181 181 """add a append-only entry to memory and on-disk state"""
182 182 if file in self.map or file in self._backupmap:
183 183 return
184 184 self.entries.append((file, offset, data))
185 185 self.map[file] = len(self.entries) - 1
186 186 # add enough data to the journal to do the truncate
187 187 self.file.write("%s\0%d\n" % (file, offset))
188 188 self.file.flush()
189 189
190 190 @active
191 191 def addbackup(self, file, hardlink=True, location=''):
192 192 """Adds a backup of the file to the transaction
193 193
194 194 Calling addbackup() creates a hardlink backup of the specified file
195 195 that is used to recover the file in the event of the transaction
196 196 aborting.
197 197
198 198 * `file`: the file path, relative to .hg/store
199 199 * `hardlink`: use a hardlink to quickly create the backup
200 200 """
201 201 if self._queue:
202 202 msg = 'cannot use transaction.addbackup inside "group"'
203 203 raise RuntimeError(msg)
204 204
205 205 if file in self.map or file in self._backupmap:
206 206 return
207 207 dirname, filename = os.path.split(file)
208 208 backupfilename = "%s.backup.%s" % (self.journal, filename)
209 209 backupfile = os.path.join(dirname, backupfilename)
210 210 vfs = self._vfsmap[location]
211 211 if vfs.exists(file):
212 212 filepath = vfs.join(file)
213 213 backuppath = vfs.join(backupfile)
214 214 util.copyfiles(filepath, backuppath, hardlink=hardlink)
215 215 else:
216 216 backupfile = ''
217 217
218 218 self._addbackupentry((location, file, backupfile, False))
219 219
220 220 def _addbackupentry(self, entry):
221 221 """register a new backup entry and write it to disk"""
222 222 self._backupentries.append(entry)
223 223 self._backupmap[file] = len(self._backupentries) - 1
224 224 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
225 225 self._backupsfile.flush()
226 226
    @active
    def registertmp(self, tmpfile, location=''):
        """register a temporary transaction file

        Such files will be deleted when the transaction exits (on both
        failure and success).  Registered as a backup entry with an empty
        'path' field, which playback treats as delete-only.
        """
        self._addbackupentry((location, '', tmpfile, False))
235 235
    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filenames` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transactions. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)
264 264
265 265 def _generatefiles(self):
266 266 # write files registered for generation
267 267 for entry in sorted(self._filegenerators.values()):
268 268 order, filenames, genfunc, location = entry
269 269 vfs = self._vfsmap[location]
270 270 files = []
271 271 try:
272 272 for name in filenames:
273 273 self.addbackup(name, location=location)
274 274 files.append(vfs(name, 'w', atomictemp=True))
275 275 genfunc(*files)
276 276 finally:
277 277 for f in files:
278 278 f.close()
279 279
280 280 @active
281 281 def find(self, file):
282 282 if file in self.map:
283 283 return self.entries[self.map[file]]
284 284 if file in self._backupmap:
285 285 return self._backupentries[self._backupmap[file]]
286 286 return None
287 287
288 288 @active
289 289 def replace(self, file, offset, data=None):
290 290 '''
291 291 replace can only replace already committed entries
292 292 that are not pending in the queue
293 293 '''
294 294
295 295 if file not in self.map:
296 296 raise KeyError(file)
297 297 index = self.map[file]
298 298 self.entries[index] = (file, offset, data)
299 299 self.file.write("%s\0%d\n" % (file, offset))
300 300 self.file.flush()
301 301
302 302 @active
303 303 def nest(self):
304 304 self.count += 1
305 305 self.usages += 1
306 306 return self
307 307
308 308 def release(self):
309 309 if self.count > 0:
310 310 self.usages -= 1
311 311 # if the transaction scopes are left without being closed, fail
312 312 if self.count > 0 and self.usages == 0:
313 313 self._abort()
314 314
    def running(self):
        """Return True while at least one transaction scope is open."""
        return self.count > 0
317 317
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.  Registered callbacks are consumed by
        writepending().
        """
        self._pendingcallback[category] = callback
327 327
328 328 @active
329 329 def writepending(self):
330 330 '''write pending file to temporary version
331 331
332 332 This is used to allow hooks to view a transaction before commit'''
333 333 categories = sorted(self._pendingcallback)
334 334 for cat in categories:
335 335 # remove callback since the data will have been flushed
336 336 any = self._pendingcallback.pop(cat)(self)
337 337 self._anypending = self._anypending or any
338 338 return self._anypending
339 339
    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.  Finalize callbacks run inside close(), before the
        journal files are removed.
        """
        self._finalizecallback[category] = callback
350 350
    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.  Post-close callbacks run at the very end of
        close(), after all cleanup.
        """
        self._postclosecallback[category] = callback
361 361
362 362 @active
363 363 def close(self):
364 364 '''commit the transaction'''
365 365 if self.count == 1:
366 366 self._generatefiles()
367 367 categories = sorted(self._finalizecallback)
368 368 for cat in categories:
369 369 self._finalizecallback[cat](self)
370 370 if self.onclose is not None:
371 371 self.onclose()
372 372
373 373 self.count -= 1
374 374 if self.count != 0:
375 375 return
376 376 self.file.close()
377 377 self._backupsfile.close()
378 378 # cleanup temporary files
379 379 for l, f, b, c in self._backupentries:
380 380 if l not in self._vfsmap and c:
381 381 self.report("couldn't remote %s: unknown cache location %s\n"
382 382 % (b, l))
383 383 continue
384 384 vfs = self._vfsmap[l]
385 385 if not f and b and vfs.exists(b):
386 386 try:
387 387 vfs.unlink(b)
388 388 except (IOError, OSError, util.Abort), inst:
389 389 if not c:
390 390 raise
391 391 # Abort may be raise by read only opener
392 392 self.report("couldn't remote %s: %s\n"
393 393 % (vfs.join(b), inst))
394 394 self.entries = []
395 395 if self.after:
396 396 self.after()
397 397 if self.opener.isfile(self.journal):
398 398 self.opener.unlink(self.journal)
399 399 if self.opener.isfile(self._backupjournal):
400 400 self.opener.unlink(self._backupjournal)
401 401 for _l, _f, b, c in self._backupentries:
402 402 if l not in self._vfsmap and c:
403 403 self.report("couldn't remote %s: unknown cache location"
404 404 "%s\n" % (b, l))
405 405 continue
406 406 vfs = self._vfsmap[l]
407 407 if b and vfs.exists(b):
408 408 try:
409 409 vfs.unlink(b)
410 410 except (IOError, OSError, util.Abort), inst:
411 411 if not c:
412 412 raise
413 413 # Abort may be raise by read only opener
414 414 self.report("couldn't remote %s: %s\n"
415 415 % (vfs.join(b), inst))
416 416 self._backupentries = []
417 417 self.journal = None
418 418 # run post close action
419 419 categories = sorted(self._postclosecallback)
420 420 for cat in categories:
421 421 self._postclosecallback[cat](self)
422 422
423 423 @active
424 424 def abort(self):
425 425 '''abort the transaction (generally called on error, or when the
426 426 transaction is not explicitly committed before going out of
427 427 scope)'''
428 428 self._abort()
429 429
    def _abort(self):
        """Roll back everything recorded in the journal and clean up.

        Marks the transaction dead (count/usages = 0), notifies onabort,
        then either removes the empty journals or replays them via
        _playback().
        """
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just remove the (empty) journals
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                # best-effort: leave recovery to 'hg recover'
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            # always drop the journal so __del__ does not re-abort
            self.journal = None
457 457
458 458
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the backup journal too; it was previously leaked, unlike
        # the main journal handle above
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        # NOTE(review): c is serialized with %d ("0"/"1"),
                        # so bool(c) is True even for "0" — the cache flag
                        # can never round-trip as False; confirm intended.
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now