# Mercurial changeset r23249 (branch: default), by Pierre-Yves David:
# "transaction: mark backup-related attributes private"
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
13 13
# NOTE: these are Mercurial-internal (Python 2 implicit-relative) imports;
# `_` is the gettext translation helper, `util` provides copyfile helpers.
from i18n import _
import errno
import error, util

# On-disk format version of the '*.backupfiles' sidecar journal.
version = 1
19 19
def active(func):
    """Decorator: only allow `func` to run on a live transaction.

    Raises error.Abort if the transaction has already been committed or
    aborted (i.e. its reference count has dropped to zero).
    """
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
27 27
28 28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 29 for f, o, _ignore in entries:
30 30 if o or not unlink:
31 31 try:
32 32 fp = opener(f, 'a')
33 33 fp.truncate(o)
34 34 fp.close()
35 35 except IOError:
36 36 report(_("failed to truncate %s\n") % f)
37 37 raise
38 38 else:
39 39 try:
40 40 opener.unlink(f)
41 41 except (IOError, OSError), inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44
45 45 backupfiles = []
46 46 for f, b in backupentries:
47 47 filepath = opener.join(f)
48 48 backuppath = opener.join(b)
49 49 try:
50 50 util.copyfile(backuppath, filepath)
51 51 backupfiles.append(b)
52 52 except IOError:
53 53 report(_("failed to recover %s\n") % f)
54 54 raise
55 55
56 56 opener.unlink(journal)
57 57 backuppath = "%s.backupfiles" % journal
58 58 if opener.exists(backuppath):
59 59 opener.unlink(backuppath)
60 60 for f in backupfiles:
61 61 opener.unlink(f)
62 62
class transaction(object):
    """A journaled transaction over a set of repository files.

    Every write is recorded in a journal file (and, for whole-file backups,
    in a '*.backupfiles' sidecar) so that on error all touched files can be
    truncated or restored to their pre-transaction state.

    Backup-related attributes (_backupentries, _backupmap, _backupjournal,
    _backupsfile) are private implementation details.
    """

    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        self._backupentries = []
        self._backupmap = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            # 0o666 is the py2.6+/py3 spelling of the historical 0666 literal
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a journal still being set means the transaction was never
        # explicitly closed, so roll it back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        # open a nested group of journal entries, flushed by endgroup()
        self._queue.append(([], []))

    @active
    def endgroup(self):
        q = self._queue.pop()
        self.entries.extend(q[0])
        self._backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b in q[1]:
            backups.append((f, b))

        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
        self._backupsfile.write(d)
        self._backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        # record that `file` must be truncated back to `offset` on rollback
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: a truncate-to-zero entry suffices
            self.add(file, 0)
            return

        if self._queue:
            self._queue[-1][1].append((file, backupfile))
            return

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        # return the journal (or backup) entry recorded for `file`, if any
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # reference-counted nesting: the transaction only really closes or
        # aborts when the outermost scope releases it
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or any
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # inner scope of a nested transaction; outermost close finishes
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was written: just drop the (backup) journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
379 379
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            # first line carries the sidecar format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
# End of transaction.py (Mercurial changeset r23249).