##// END OF EJS Templates
transaction: handle missing file in backupentries (instead of using entries)...
Pierre-Yves David -
r23278:aa194327 default
parent child Browse files
Show More
@@ -1,420 +1,428
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
18 18 version = 1
19 19
def active(func):
    """Decorator guarding a transaction method against use after finish.

    The wrapped method runs only while the transaction is live
    (``self.count`` non-zero); otherwise an error.Abort is raised.
    """
    def _active(self, *args, **kwds):
        if not self.count:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
27 27
28 28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 29 for f, o, _ignore in entries:
30 30 if o or not unlink:
31 31 try:
32 32 fp = opener(f, 'a')
33 33 fp.truncate(o)
34 34 fp.close()
35 35 except IOError:
36 36 report(_("failed to truncate %s\n") % f)
37 37 raise
38 38 else:
39 39 try:
40 40 opener.unlink(f)
41 41 except (IOError, OSError), inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44
45 45 backupfiles = []
46 46 for f, b in backupentries:
47 filepath = opener.join(f)
48 backuppath = opener.join(b)
49 try:
50 util.copyfile(backuppath, filepath)
51 backupfiles.append(b)
52 except IOError:
53 report(_("failed to recover %s\n") % f)
54 raise
47 if b:
48 filepath = opener.join(f)
49 backuppath = opener.join(b)
50 try:
51 util.copyfile(backuppath, filepath)
52 backupfiles.append(b)
53 except IOError:
54 report(_("failed to recover %s\n") % f)
55 raise
56 else:
57 try:
58 opener.unlink(f)
59 except (IOError, OSError), inst:
60 if inst.errno != errno.ENOENT:
61 raise
55 62
56 63 opener.unlink(journal)
57 64 backuppath = "%s.backupfiles" % journal
58 65 if opener.exists(backuppath):
59 66 opener.unlink(backuppath)
60 67 for f in backupfiles:
61 68 opener.unlink(f)
62 69
class transaction(object):
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        # a list of ('path', 'backuppath') entries.
        # if 'backuppath' is empty, no file existed at backup time
        self._backupentries = []
        self._backupmap = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self._backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self._backupsfile = opener.open(self._backupjournal, 'w')
        # the backup journal starts with a format version marker
        self._backupsfile.write('%d\n' % version)
        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # hold callbacks for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # an empty backup path records that no file existed, so
            # rollback will remove the file instead of restoring content
            backupfile = ''

        self._backupentries.append((file, backupfile))
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\n" % (file, backupfile))
        self._backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, if any"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # nested scopes share the same journal; each nest() needs a
        # matching close()/release()
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            anypending = self._pendingcallback.pop(cat)()
            self._anypending = self._anypending or anypending
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat]()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self._backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        for _f, b in self._backupentries:
            # entries with an empty backup path had no on-disk backup file
            if b:
                self.opener.unlink(b)
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat]()

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
376 384
377 385
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by newlines.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the handle once read; it was previously leaked
        fp.close()
        if lines:
            # first line of the backup journal is a format version marker
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now