##// END OF EJS Templates
transaction: change the on disk format for backupentries...
Pierre-Yves David -
r23309:7eb520f5 default
parent child Browse files
Show More
@@ -1,456 +1,457
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
18 version = 1
18 version = 0
19 19
20 20 def active(func):
21 21 def _active(self, *args, **kwds):
22 22 if self.count == 0:
23 23 raise error.Abort(_(
24 24 'cannot use transaction when it is already committed/aborted'))
25 25 return func(self, *args, **kwds)
26 26 return _active
27 27
28 28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 29 for f, o, _ignore in entries:
30 30 if o or not unlink:
31 31 try:
32 32 fp = opener(f, 'a')
33 33 fp.truncate(o)
34 34 fp.close()
35 35 except IOError:
36 36 report(_("failed to truncate %s\n") % f)
37 37 raise
38 38 else:
39 39 try:
40 40 opener.unlink(f)
41 41 except (IOError, OSError), inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44
45 45 backupfiles = []
46 for f, b in backupentries:
46 for l, f, b, c in backupentries:
47 47 if f and b:
48 48 filepath = opener.join(f)
49 49 backuppath = opener.join(b)
50 50 try:
51 51 util.copyfile(backuppath, filepath)
52 52 backupfiles.append(b)
53 53 except IOError:
54 54 report(_("failed to recover %s\n") % f)
55 55 raise
56 56 else:
57 57 target = f or b
58 58 try:
59 59 opener.unlink(target)
60 60 except (IOError, OSError), inst:
61 61 if inst.errno != errno.ENOENT:
62 62 raise
63 63
64 64 opener.unlink(journal)
65 65 backuppath = "%s.backupfiles" % journal
66 66 if opener.exists(backuppath):
67 67 opener.unlink(backuppath)
68 68 for f in backupfiles:
69 69 if opener.exists(f):
70 70 opener.unlink(f)
71 71
72 72 class transaction(object):
73 73 def __init__(self, report, opener, journal, after=None, createmode=None,
74 74 onclose=None, onabort=None):
75 75 """Begin a new transaction
76 76
77 77 Begins a new transaction that allows rolling back writes in the event of
78 78 an exception.
79 79
80 80 * `after`: called after the transaction has been committed
81 81 * `createmode`: the mode of the journal file that will be created
82 82 * `onclose`: called as the transaction is closing, but before it is
83 83 closed
84 84 * `onabort`: called as the transaction is aborting, but before any files
85 85 have been truncated
86 86 """
87 87 self.count = 1
88 88 self.usages = 1
89 89 self.report = report
90 90 self.opener = opener
91 91 self.after = after
92 92 self.onclose = onclose
93 93 self.onabort = onabort
94 94 self.entries = []
95 95 self.map = {}
96 96 self.journal = journal
97 97 self._queue = []
98 98 # a dict of arguments to be passed to hooks
99 99 self.hookargs = {}
100 100 self.file = opener.open(self.journal, "w")
101 101
102 # a list of ('path', 'backuppath') entries.
102 # a list of ('location', 'path', 'backuppath', cache) entries.
103 103 # if 'backuppath' is empty, no file existed at backup time
104 104 # if 'path' is empty, this is a temporary transaction file
105 # (location, and cache are current unused)
105 106 self._backupentries = []
106 107 self._backupmap = {}
107 108 self._backupjournal = "%s.backupfiles" % journal
108 109 self._backupsfile = opener.open(self._backupjournal, 'w')
109 110 self._backupsfile.write('%d\n' % version)
110 111
111 112 if createmode is not None:
112 113 opener.chmod(self.journal, createmode & 0666)
113 114 opener.chmod(self._backupjournal, createmode & 0666)
114 115
115 116 # hold file generations to be performed on commit
116 117 self._filegenerators = {}
117 118 # hold callback to write pending data for hooks
118 119 self._pendingcallback = {}
119 120 # True if any pending data has ever been written
120 121 self._anypending = False
121 122 # holds callback to call when writing the transaction
122 123 self._finalizecallback = {}
123 124 # hold callback for post transaction close
124 125 self._postclosecallback = {}
125 126
126 127 def __del__(self):
127 128 if self.journal:
128 129 self._abort()
129 130
130 131 @active
131 132 def startgroup(self):
132 133 """delay registration of file entry
133 134
134 135 This is used by strip to delay vision of strip offset. The transaction
135 136 sees either none or all of the strip actions to be done."""
136 137 self._queue.append([])
137 138
138 139 @active
139 140 def endgroup(self):
140 141 """apply delayed registration of file entry.
141 142
142 143 This is used by strip to delay vision of strip offset. The transaction
143 144 sees either none or all of the strip actions to be done."""
144 145 q = self._queue.pop()
145 146 for f, o, data in q:
146 147 self._addentry(f, o, data)
147 148
148 149 @active
149 150 def add(self, file, offset, data=None):
150 151 """record the state of an append-only file before update"""
151 152 if file in self.map or file in self._backupmap:
152 153 return
153 154 if self._queue:
154 155 self._queue[-1].append((file, offset, data))
155 156 return
156 157
157 158 self._addentry(file, offset, data)
158 159
159 160 def _addentry(self, file, offset, data):
160 161 """add an append-only entry to memory and on-disk state"""
161 162 if file in self.map or file in self._backupmap:
162 163 return
163 164 self.entries.append((file, offset, data))
164 165 self.map[file] = len(self.entries) - 1
165 166 # add enough data to the journal to do the truncate
166 167 self.file.write("%s\0%d\n" % (file, offset))
167 168 self.file.flush()
168 169
169 170 @active
170 171 def addbackup(self, file, hardlink=True, vfs=None):
171 172 """Adds a backup of the file to the transaction
172 173
173 174 Calling addbackup() creates a hardlink backup of the specified file
174 175 that is used to recover the file in the event of the transaction
175 176 aborting.
176 177
177 178 * `file`: the file path, relative to .hg/store
178 179 * `hardlink`: use a hardlink to quickly create the backup
179 180 """
180 181 if self._queue:
181 182 msg = 'cannot use transaction.addbackup inside "group"'
182 183 raise RuntimeError(msg)
183 184
184 185 if file in self.map or file in self._backupmap:
185 186 return
186 187 backupfile = "%s.backup.%s" % (self.journal, file)
187 188 if vfs is None:
188 189 vfs = self.opener
189 190 if vfs.exists(file):
190 191 filepath = vfs.join(file)
191 192 backuppath = self.opener.join(backupfile)
192 193 util.copyfiles(filepath, backuppath, hardlink=hardlink)
193 194 else:
194 195 backupfile = ''
195 196
196 self._addbackupentry((file, backupfile))
197 self._addbackupentry(('', file, backupfile, False))
197 198
198 199 def _addbackupentry(self, entry):
199 200 """register a new backup entry and write it to disk"""
200 201 self._backupentries.append(entry)
201 202 self._backupmap[file] = len(self._backupentries) - 1
202 self._backupsfile.write("%s\0%s\n" % entry)
203 self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
203 204 self._backupsfile.flush()
204 205
205 206 @active
206 207 def registertmp(self, tmpfile):
207 208 """register a temporary transaction file
208 209
209 210 Such a file will be deleted when the transaction exits (on both failure and
210 211 success).
211 212 """
212 self._addbackupentry(('', tmpfile))
213 self._addbackupentry(('', '', tmpfile, False))
213 214
214 215 @active
215 216 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
216 217 """add a function to generate some files at transaction commit
217 218
218 219 The `genfunc` argument is a function capable of generating proper
219 220 content of each entry in the `filenames` tuple.
220 221
221 222 At transaction close time, `genfunc` will be called with one file
222 223 object argument per entries in `filenames`.
223 224
224 225 The transaction itself is responsible for the backup, creation and
225 226 final write of such file.
226 227
227 228 The `genid` argument is used to ensure the same set of file is only
228 229 generated once. Call to `addfilegenerator` for a `genid` already
229 230 present will overwrite the old entry.
230 231
231 232 The `order` argument may be used to control the order in which multiple
232 233 generator will be executed.
233 234 """
234 235 # For now, we are unable to do proper backup and restore of custom vfs
235 236 # but for bookmarks that are handled outside this mechanism.
236 237 assert vfs is None or filenames == ('bookmarks',)
237 238 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
238 239
239 240 def _generatefiles(self):
240 241 # write files registered for generation
241 242 for entry in sorted(self._filegenerators.values()):
242 243 order, filenames, genfunc, vfs = entry
243 244 if vfs is None:
244 245 vfs = self.opener
245 246 files = []
246 247 try:
247 248 for name in filenames:
248 249 # Some files are already backed up when creating the
249 250 # localrepo. Until this is properly fixed we disable the
250 251 # backup for them.
251 252 if name not in ('phaseroots', 'bookmarks'):
252 253 self.addbackup(name)
253 254 files.append(vfs(name, 'w', atomictemp=True))
254 255 genfunc(*files)
255 256 finally:
256 257 for f in files:
257 258 f.close()
258 259
259 260 @active
260 261 def find(self, file):
261 262 if file in self.map:
262 263 return self.entries[self.map[file]]
263 264 if file in self._backupmap:
264 265 return self._backupentries[self._backupmap[file]]
265 266 return None
266 267
267 268 @active
268 269 def replace(self, file, offset, data=None):
269 270 '''
270 271 replace can only replace already committed entries
271 272 that are not pending in the queue
272 273 '''
273 274
274 275 if file not in self.map:
275 276 raise KeyError(file)
276 277 index = self.map[file]
277 278 self.entries[index] = (file, offset, data)
278 279 self.file.write("%s\0%d\n" % (file, offset))
279 280 self.file.flush()
280 281
281 282 @active
282 283 def nest(self):
283 284 self.count += 1
284 285 self.usages += 1
285 286 return self
286 287
287 288 def release(self):
288 289 if self.count > 0:
289 290 self.usages -= 1
290 291 # if the transaction scopes are left without being closed, fail
291 292 if self.count > 0 and self.usages == 0:
292 293 self._abort()
293 294
294 295 def running(self):
295 296 return self.count > 0
296 297
297 298 def addpending(self, category, callback):
298 299 """add a callback to be called when the transaction is pending
299 300
300 301 The transaction will be given as callback's first argument.
301 302
302 303 Category is a unique identifier to allow overwriting an old callback
303 304 with a newer callback.
304 305 """
305 306 self._pendingcallback[category] = callback
306 307
307 308 @active
308 309 def writepending(self):
309 310 '''write pending file to temporary version
310 311
311 312 This is used to allow hooks to view a transaction before commit'''
312 313 categories = sorted(self._pendingcallback)
313 314 for cat in categories:
314 315 # remove callback since the data will have been flushed
315 316 any = self._pendingcallback.pop(cat)(self)
316 317 self._anypending = self._anypending or any
317 318 return self._anypending
318 319
319 320 @active
320 321 def addfinalize(self, category, callback):
321 322 """add a callback to be called when the transaction is closed
322 323
323 324 The transaction will be given as callback's first argument.
324 325
325 326 Category is a unique identifier to allow overwriting old callbacks with
326 327 newer callbacks.
327 328 """
328 329 self._finalizecallback[category] = callback
329 330
330 331 @active
331 332 def addpostclose(self, category, callback):
332 333 """add a callback to be called after the transaction is closed
333 334
334 335 The transaction will be given as callback's first argument.
335 336
336 337 Category is a unique identifier to allow overwriting an old callback
337 338 with a newer callback.
338 339 """
339 340 self._postclosecallback[category] = callback
340 341
341 342 @active
342 343 def close(self):
343 344 '''commit the transaction'''
344 345 if self.count == 1:
345 346 self._generatefiles()
346 347 categories = sorted(self._finalizecallback)
347 348 for cat in categories:
348 349 self._finalizecallback[cat](self)
349 350 if self.onclose is not None:
350 351 self.onclose()
351 352
352 353 self.count -= 1
353 354 if self.count != 0:
354 355 return
355 356 self.file.close()
356 357 self._backupsfile.close()
357 358 # cleanup temporary files
358 for f, b in self._backupentries:
359 for _l, f, b, _c in self._backupentries:
359 360 if not f and b and self.opener.exists(b):
360 361 self.opener.unlink(b)
361 362 self.entries = []
362 363 if self.after:
363 364 self.after()
364 365 if self.opener.isfile(self.journal):
365 366 self.opener.unlink(self.journal)
366 367 if self.opener.isfile(self._backupjournal):
367 368 self.opener.unlink(self._backupjournal)
368 for _f, b in self._backupentries:
369 for _l, _f, b, _c in self._backupentries:
369 370 if b and self.opener.exists(b):
370 371 self.opener.unlink(b)
371 372 self._backupentries = []
372 373 self.journal = None
373 374 # run post close action
374 375 categories = sorted(self._postclosecallback)
375 376 for cat in categories:
376 377 self._postclosecallback[cat](self)
377 378
378 379 @active
379 380 def abort(self):
380 381 '''abort the transaction (generally called on error, or when the
381 382 transaction is not explicitly committed before going out of
382 383 scope)'''
383 384 self._abort()
384 385
385 386 def _abort(self):
386 387 self.count = 0
387 388 self.usages = 0
388 389 self.file.close()
389 390 self._backupsfile.close()
390 391
391 392 if self.onabort is not None:
392 393 self.onabort()
393 394
394 395 try:
395 396 if not self.entries and not self._backupentries:
396 397 if self.journal:
397 398 self.opener.unlink(self.journal)
398 399 if self._backupjournal:
399 400 self.opener.unlink(self._backupjournal)
400 401 return
401 402
402 403 self.report(_("transaction abort!\n"))
403 404
404 405 try:
405 406 _playback(self.journal, self.report, self.opener,
406 407 self.entries, self._backupentries, False)
407 408 self.report(_("rollback completed\n"))
408 409 except Exception:
409 410 self.report(_("rollback failed - please run hg recover\n"))
410 411 finally:
411 412 self.journal = None
412 413
413 414
414 415 def rollback(opener, file, report):
415 416 """Rolls back the transaction contained in the given file
416 417
417 418 Reads the entries in the specified file, and the corresponding
418 419 '*.backupfiles' file, to recover from an incomplete transaction.
419 420
420 421 * `file`: a file containing a list of entries, specifying where
421 422 to truncate each file. The file should contain a list of
422 423 file\0offset pairs, delimited by newlines. The corresponding
423 424 '*.backupfiles' file should contain a list of file\0backupfile
424 425 pairs, delimited by \0.
425 426 """
426 427 entries = []
427 428 backupentries = []
428 429
429 430 fp = opener.open(file)
430 431 lines = fp.readlines()
431 432 fp.close()
432 433 for l in lines:
433 434 try:
434 435 f, o = l.split('\0')
435 436 entries.append((f, int(o), None))
436 437 except ValueError:
437 438 report(_("couldn't read journal entry %r!\n") % l)
438 439
439 440 backupjournal = "%s.backupfiles" % file
440 441 if opener.exists(backupjournal):
441 442 fp = opener.open(backupjournal)
442 443 lines = fp.readlines()
443 444 if lines:
444 445 ver = lines[0][:-1]
445 446 if ver == str(version):
446 447 for line in lines[1:]:
447 448 if line:
448 449 # Shave off the trailing newline
449 450 line = line[:-1]
450 f, b = line.split('\0')
451 backupentries.append((f, b))
451 l, f, b, c = line.split('\0')
452 backupentries.append((l, f, b, bool(c)))
452 453 else:
453 report(_("journal was created by a newer version of "
454 report(_("journal was created by a different version of "
454 455 "Mercurial"))
455 456
456 457 _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now