transaction: document `tr.add`
Pierre-Yves David
r23252:70809438 default
@@ -1,420 +1,421
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
18 18 version = 1
19 19
20 20 def active(func):
21 21 def _active(self, *args, **kwds):
22 22 if self.count == 0:
23 23 raise error.Abort(_(
24 24 'cannot use transaction when it is already committed/aborted'))
25 25 return func(self, *args, **kwds)
26 26 return _active
27 27
28 28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 29 for f, o, _ignore in entries:
30 30 if o or not unlink:
31 31 try:
32 32 fp = opener(f, 'a')
33 33 fp.truncate(o)
34 34 fp.close()
35 35 except IOError:
36 36 report(_("failed to truncate %s\n") % f)
37 37 raise
38 38 else:
39 39 try:
40 40 opener.unlink(f)
41 41 except (IOError, OSError), inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44
45 45 backupfiles = []
46 46 for f, b in backupentries:
47 47 filepath = opener.join(f)
48 48 backuppath = opener.join(b)
49 49 try:
50 50 util.copyfile(backuppath, filepath)
51 51 backupfiles.append(b)
52 52 except IOError:
53 53 report(_("failed to recover %s\n") % f)
54 54 raise
55 55
56 56 opener.unlink(journal)
57 57 backuppath = "%s.backupfiles" % journal
58 58 if opener.exists(backuppath):
59 59 opener.unlink(backuppath)
60 60 for f in backupfiles:
61 61 opener.unlink(f)
62 62
63 63 class transaction(object):
64 64 def __init__(self, report, opener, journal, after=None, createmode=None,
65 65 onclose=None, onabort=None):
66 66 """Begin a new transaction
67 67
68 68 Begins a new transaction that allows rolling back writes in the event of
69 69 an exception.
70 70
71 71 * `after`: called after the transaction has been committed
72 72 * `createmode`: the mode of the journal file that will be created
73 73 * `onclose`: called as the transaction is closing, but before it is
74 74 closed
75 75 * `onabort`: called as the transaction is aborting, but before any files
76 76 have been truncated
77 77 """
78 78 self.count = 1
79 79 self.usages = 1
80 80 self.report = report
81 81 self.opener = opener
82 82 self.after = after
83 83 self.onclose = onclose
84 84 self.onabort = onabort
85 85 self.entries = []
86 86 self.map = {}
87 87 # a list of ('path', 'backuppath') entries.
88 88 self._backupentries = []
89 89 self._backupmap = {}
90 90 self.journal = journal
91 91 self._queue = []
92 92 # a dict of arguments to be passed to hooks
93 93 self.hookargs = {}
94 94
95 95 self._backupjournal = "%s.backupfiles" % journal
96 96 self.file = opener.open(self.journal, "w")
97 97 self._backupsfile = opener.open(self._backupjournal, 'w')
98 98 self._backupsfile.write('%d\n' % version)
99 99 if createmode is not None:
100 100 opener.chmod(self.journal, createmode & 0666)
101 101 opener.chmod(self._backupjournal, createmode & 0666)
102 102
103 103 # holds file generators to be run on commit
104 104 self._filegenerators = {}
105 105 # holds callbacks to write pending data for hooks
106 106 self._pendingcallback = {}
107 107 # True if any pending data has ever been written
108 108 self._anypending = False
109 109 # holds callbacks to call when writing the transaction
110 110 self._finalizecallback = {}
111 111 # holds callbacks for post transaction close
112 112 self._postclosecallback = {}
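
# A minimal lifecycle sketch (hypothetical caller; `ui`, `repo.svfs`, `offset`
# and 'somefile' are assumed names): create the transaction, journal files
# before touching them, then close() to commit or abort() to roll back.
#
#   tr = transaction(ui.warn, repo.svfs, 'journal')
#   try:
#       tr.add('somefile', offset)    # record pre-write length
#       # ... append data to 'somefile' ...
#       tr.close()                    # commit; journal files are removed
#   except:
#       tr.abort()                    # truncate 'somefile' back to offset
#       raise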
113 113
114 114 def __del__(self):
115 115 if self.journal:
116 116 self._abort()
117 117
118 118 @active
119 119 def startgroup(self):
120 120 """delay registration of file entry
121 121
122 122 This is used by strip to delay vision of strip offset. The transaction
123 123 sees either none or all of the strip actions to be done."""
124 124 self._queue.append([])
125 125
126 126 @active
127 127 def endgroup(self):
128 128 """apply delayed registration of file entry.
129 129
130 130 This is used by strip to delay vision of strip offset. The transaction
131 131 sees either none or all of the strip actions to be done."""
132 132 q = self._queue.pop()
133 133 self.entries.extend(q)
134 134
135 135 offsets = []
136 136 for f, o, _data in q:
137 137 offsets.append((f, o))
138 138
139 139 d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
140 140 self.file.write(d)
141 141 self.file.flush()
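
# Group sketch (assumed `tr` and `striptargets` names): entries queued between
# startgroup() and endgroup() are written to the journal in a single flush, so
# the journal records either none or all of them.
#
#   tr.startgroup()
#   for f, offset in striptargets:    # hypothetical list of (file, offset)
#       tr.add(f, offset)             # queued, not yet written to the journal
#   tr.endgroup()                     # queued entries flushed together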
142 142
143 143 @active
144 144 def add(self, file, offset, data=None):
145 """record the state of an append-only file before update"""
145 146 if file in self.map or file in self._backupmap:
146 147 return
147 148 if self._queue:
148 149 self._queue[-1].append((file, offset, data))
149 150 return
150 151
151 152 self.entries.append((file, offset, data))
152 153 self.map[file] = len(self.entries) - 1
153 154 # add enough data to the journal to do the truncate
154 155 self.file.write("%s\0%d\n" % (file, offset))
155 156 self.file.flush()
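
# Usage sketch for add() (assumed `tr` and `opener` names): journal the current
# length of an append-only file before appending to it.
#
#   import os
#   offset = os.stat(opener.join('00changelog.i')).st_size
#   tr.add('00changelog.i', offset)
#   # ... append new data; abort() truncates the file back to `offset`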
156 157
157 158 @active
158 159 def addbackup(self, file, hardlink=True, vfs=None):
159 160 """Adds a backup of the file to the transaction
160 161
161 162 Calling addbackup() creates a hardlink backup of the specified file
162 163 that is used to recover the file in the event of the transaction
163 164 aborting.
164 165
165 166 * `file`: the file path, relative to .hg/store
166 167 * `hardlink`: use a hardlink to quickly create the backup
167 168 """
168 169 if self._queue:
169 170 msg = 'cannot use transaction.addbackup inside "group"'
170 171 raise RuntimeError(msg)
171 172
172 173 if file in self.map or file in self._backupmap:
173 174 return
174 175 backupfile = "%s.backup.%s" % (self.journal, file)
175 176 if vfs is None:
176 177 vfs = self.opener
177 178 if vfs.exists(file):
178 179 filepath = vfs.join(file)
179 180 backuppath = self.opener.join(backupfile)
180 181 util.copyfiles(filepath, backuppath, hardlink=hardlink)
181 182 else:
182 183 self.add(file, 0)
183 184 return
184 185
185 186 self._backupentries.append((file, backupfile))
186 187 self._backupmap[file] = len(self._backupentries) - 1
187 188 self._backupsfile.write("%s\0%s\n" % (file, backupfile))
188 189 self._backupsfile.flush()
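
# Sketch for addbackup() (file name assumed): files that are rewritten in place
# rather than appended to need a full copy to be restorable.
#
#   tr.addbackup('fncache')   # hardlink/copy kept next to the journal
#   # ... rewrite the file; abort() copies the backup back over it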
189 190
190 191 @active
191 192 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
192 193 """add a function to generates some files at transaction commit
193 194
194 195 The `genfunc` argument is a function capable of generating proper
195 196 content for each entry in the `filenames` tuple.
196 197
197 198 At transaction close time, `genfunc` will be called with one file
198 199 object argument per entry in `filenames`.
199 200
200 201 The transaction itself is responsible for the backup, creation and
201 202 final write of such files.
202 203
203 204 The `genid` argument is used to ensure the same set of files is only
204 205 generated once. A call to `addfilegenerator` for a `genid` already
205 206 present will overwrite the old entry.
206 207
207 208 The `order` argument may be used to control the order in which multiple
208 209 generators will be executed.
209 210 """
210 211 # For now, we are unable to do proper backup and restore of files on a
211 212 # custom vfs, except for bookmarks, which are handled outside this mechanism.
212 213 assert vfs is None or filenames == ('bookmarks',)
213 214 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
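
# Generator sketch (hypothetical 'flags' file and `flagdata` variable): at
# close() the transaction backs the file up, opens it atomically and hands the
# file object to the generator function.
#
#   def writeflags(fp):                  # one argument per name in filenames
#       fp.write(flagdata)
#   tr.addfilegenerator('flags', ('flags',), writeflags, order=1)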
214 215
215 216 def _generatefiles(self):
216 217 # write files registered for generation
217 218 for entry in sorted(self._filegenerators.values()):
218 219 order, filenames, genfunc, vfs = entry
219 220 if vfs is None:
220 221 vfs = self.opener
221 222 files = []
222 223 try:
223 224 for name in filenames:
224 225 # Some files are already backed up when creating the
225 226 # localrepo. Until this is properly fixed we disable the
226 227 # backup for them.
227 228 if name not in ('phaseroots', 'bookmarks'):
228 229 self.addbackup(name)
229 230 files.append(vfs(name, 'w', atomictemp=True))
230 231 genfunc(*files)
231 232 finally:
232 233 for f in files:
233 234 f.close()
234 235
235 236 @active
236 237 def find(self, file):
237 238 if file in self.map:
238 239 return self.entries[self.map[file]]
239 240 if file in self._backupmap:
240 241 return self._backupentries[self._backupmap[file]]
241 242 return None
242 243
243 244 @active
244 245 def replace(self, file, offset, data=None):
245 246 '''
246 247 replace can only replace already committed entries
247 248 that are not pending in the queue
248 249 '''
249 250
250 251 if file not in self.map:
251 252 raise KeyError(file)
252 253 index = self.map[file]
253 254 self.entries[index] = (file, offset, data)
254 255 self.file.write("%s\0%d\n" % (file, offset))
255 256 self.file.flush()
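
# Sketch for replace() (assumed names): adjust the truncation offset of a file
# that was already registered with add(); unknown files raise KeyError.
#
#   tr.add('00manifest.i', 2048)
#   tr.replace('00manifest.i', 1024)   # an abort now truncates to 1024 bytes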
256 257
257 258 @active
258 259 def nest(self):
259 260 self.count += 1
260 261 self.usages += 1
261 262 return self
262 263
263 264 def release(self):
264 265 if self.count > 0:
265 266 self.usages -= 1
266 267 # if the transaction scopes are left without being closed, fail
267 268 if self.count > 0 and self.usages == 0:
268 269 self._abort()
269 270
270 271 def running(self):
271 272 return self.count > 0
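
# Nesting sketch (hypothetical outer/inner scopes): nest() returns the same
# object with a bumped count, so only the outermost close() actually commits.
#
#   outer = repo.transaction('push')   # assumed helper returning a transaction
#   inner = outer.nest()
#   inner.close()                      # count 2 -> 1, nothing written yet
#   outer.close()                      # count reaches 0, transaction commits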
272 273
273 274 def addpending(self, category, callback):
274 275 """add a callback to be called when the transaction is pending
275 276
276 277 Category is a unique identifier to allow overwriting an old callback
277 278 with a newer callback.
278 279 """
279 280 self._pendingcallback[category] = callback
280 281
281 282 @active
282 283 def writepending(self):
283 284 '''write pending files to temporary versions
284 285
285 286 This is used to allow hooks to view a transaction before commit'''
286 287 categories = sorted(self._pendingcallback)
287 288 for cat in categories:
288 289 # remove callback since the data will have been flushed
289 290 any = self._pendingcallback.pop(cat)()
290 291 self._anypending = self._anypending or any
291 292 return self._anypending
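
# Pending-data sketch (hypothetical 'changelog' category and callback): lets
# pre-commit hooks see data that is not part of the permanent store yet.
#
#   tr.addpending('changelog', cl.writepending)   # callback returns True/False
#   if tr.writepending():
#       pass  # e.g. run hooks against the temporary pending files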
292 293
293 294 @active
294 295 def addfinalize(self, category, callback):
295 296 """add a callback to be called when the transaction is closed
296 297
297 298 Category is a unique identifier to allow overwriting old callbacks with
298 299 newer callbacks.
299 300 """
300 301 self._finalizecallback[category] = callback
301 302
302 303 @active
303 304 def addpostclose(self, category, callback):
304 305 """add a callback to be called after the transaction is closed
305 306
306 307 Category is a unique identifier to allow overwriting an old callback
307 308 with a newer callback.
308 309 """
309 310 self._postclosecallback[category] = callback
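
# Callback sketch (hypothetical categories and methods): finalize callbacks run
# while the transaction is being written out, postclose callbacks run after the
# journal has been removed.
#
#   tr.addfinalize('flush-data', repo.flushdata)           # assumed method
#   tr.addpostclose('refresh-caches', repo.refreshcaches)  # assumed method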
310 311
311 312 @active
312 313 def close(self):
313 314 '''commit the transaction'''
314 315 if self.count == 1 and self.onclose is not None:
315 316 self._generatefiles()
316 317 categories = sorted(self._finalizecallback)
317 318 for cat in categories:
318 319 self._finalizecallback[cat]()
319 320 self.onclose()
320 321
321 322 self.count -= 1
322 323 if self.count != 0:
323 324 return
324 325 self.file.close()
325 326 self._backupsfile.close()
326 327 self.entries = []
327 328 if self.after:
328 329 self.after()
329 330 if self.opener.isfile(self.journal):
330 331 self.opener.unlink(self.journal)
331 332 if self.opener.isfile(self._backupjournal):
332 333 self.opener.unlink(self._backupjournal)
333 334 for _f, b in self._backupentries:
334 335 self.opener.unlink(b)
335 336 self._backupentries = []
336 337 self.journal = None
337 338 # run post close action
338 339 categories = sorted(self._postclosecallback)
339 340 for cat in categories:
340 341 self._postclosecallback[cat]()
341 342
342 343 @active
343 344 def abort(self):
344 345 '''abort the transaction (generally called on error, or when the
345 346 transaction is not explicitly committed before going out of
346 347 scope)'''
347 348 self._abort()
348 349
349 350 def _abort(self):
350 351 self.count = 0
351 352 self.usages = 0
352 353 self.file.close()
353 354 self._backupsfile.close()
354 355
355 356 if self.onabort is not None:
356 357 self.onabort()
357 358
358 359 try:
359 360 if not self.entries and not self._backupentries:
360 361 if self.journal:
361 362 self.opener.unlink(self.journal)
362 363 if self._backupjournal:
363 364 self.opener.unlink(self._backupjournal)
364 365 return
365 366
366 367 self.report(_("transaction abort!\n"))
367 368
368 369 try:
369 370 _playback(self.journal, self.report, self.opener,
370 371 self.entries, self._backupentries, False)
371 372 self.report(_("rollback completed\n"))
372 373 except Exception:
373 374 self.report(_("rollback failed - please run hg recover\n"))
374 375 finally:
375 376 self.journal = None
376 377
377 378
378 379 def rollback(opener, file, report):
379 380 """Rolls back the transaction contained in the given file
380 381
381 382 Reads the entries in the specified file, and the corresponding
382 383 '*.backupfiles' file, to recover from an incomplete transaction.
383 384
384 385 * `file`: a file containing a list of entries, specifying where
385 386 to truncate each file. The file should contain a list of
386 387 file\0offset pairs, delimited by newlines. The corresponding
387 388 '*.backupfiles' file should contain a list of file\0backupfile
388 389 pairs, delimited by \0.
389 390 """
390 391 entries = []
391 392 backupentries = []
392 393
393 394 fp = opener.open(file)
394 395 lines = fp.readlines()
395 396 fp.close()
396 397 for l in lines:
397 398 try:
398 399 f, o = l.split('\0')
399 400 entries.append((f, int(o), None))
400 401 except ValueError:
401 402 report(_("couldn't read journal entry %r!\n") % l)
402 403
403 404 backupjournal = "%s.backupfiles" % file
404 405 if opener.exists(backupjournal):
405 406 fp = opener.open(backupjournal)
406 407 lines = fp.readlines()
407 408 if lines:
408 409 ver = lines[0][:-1]
409 410 if ver == str(version):
410 411 for line in lines[1:]:
411 412 if line:
412 413 # Shave off the trailing newline
413 414 line = line[:-1]
414 415 f, b = line.split('\0')
415 416 backupentries.append((f, b))
416 417 else:
417 418 report(_("journal was created by a newer version of "
418 419 "Mercurial"))
419 420
420 421 _playback(file, report, opener, entries, backupentries)
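
# Recovery sketch (assumed `opener` and `ui` objects, roughly what 'hg recover'
# does): replay an interrupted transaction's journal to restore the store.
#
#   if opener.exists('journal'):
#       rollback(opener, 'journal', ui.warn)
#       # each journal line is '<file>\0<offset>\n'; files are truncated back
#       # and any entries in 'journal.backupfiles' are copied into place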