##// END OF EJS Templates
transaction: use 'location' instead of 'vfs' objects for file generation...
Pierre-Yves David -
r23317:197e17be default
parent child Browse files
Show More
@@ -1,442 +1,442
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex, bin
10 10 from mercurial import encoding, error, util, obsolete, lock as lockmod
11 11 import errno
12 12
class bmstore(dict):
    """Storage for bookmarks.

    This object should do all bookmark reads and writes, so that it's
    fairly simple to replace the storage underlying bookmarks without
    having to clone the logic surrounding bookmarks.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.

    This class does NOT handle the "current" bookmark state at this
    time.
    """

    def __init__(self, repo):
        # Populate the mapping from the on-disk '.hg/bookmarks' file.
        dict.__init__(self)
        self._repo = repo
        try:
            for line in repo.vfs('bookmarks'):
                line = line.strip()
                if not line:
                    continue
                if ' ' not in line:
                    # each line must be a '<hex node> <name>' pair
                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                 % line)
                    continue
                sha, refspec = line.split(' ', 1)
                refspec = encoding.tolocal(refspec)
                try:
                    self[refspec] = repo.changelog.lookup(sha)
                except LookupError:
                    # bookmark points at an unknown node: silently drop it
                    pass
        except IOError, inst:
            # a missing bookmarks file simply means "no bookmarks"
            if inst.errno != errno.ENOENT:
                raise

    def recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        # 'plain' keys into the transaction's vfsmap so the file is written
        # through the non-store vfs
        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                            location='plain')
        tr.hookargs['bookmark_moved'] = '1'

    def write(self):
        '''Write bookmarks

        Write the given bookmark => hash dictionary to the .hg/bookmarks file
        in a format equal to those of localtags.

        We also store a backup of the previous state in undo.bookmarks that
        can be copied back on rollback.
        '''
        repo = self._repo
        if repo._bookmarkcurrent not in self:
            # the active bookmark no longer exists: deactivate it
            unsetcurrent(repo)

        wlock = repo.wlock()
        try:

            # NOTE(review): 'file' shadows the Python 2 builtin of the same
            # name; harmless here but worth renaming eventually.
            file = repo.vfs('bookmarks', 'w', atomictemp=True)
            self._write(file)
            file.close()

            # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
            try:
                repo.svfs.utime('00changelog.i', None)
            except OSError:
                pass

        finally:
            wlock.release()

    def _write(self, fp):
        # serialize as '<hex node> <filesystem-encoded name>' lines
        for name, node in self.iteritems():
            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
def readcurrent(repo):
    '''Get the current bookmark

    If we use gittish branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    mark = None
    try:
        file = repo.opener('bookmarks.current')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # no bookmarks.current file: no bookmark is active
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is cheap
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in repo._bookmarks:
            # empty or stale reference: report no active bookmark
            mark = None
    finally:
        file.close()
    return mark
113 113
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current

    Raises AssertionError if 'mark' is not a known bookmark.
    '''
    if mark not in repo._bookmarks:
        raise AssertionError('bookmark %s does not exist!' % mark)

    current = repo._bookmarkcurrent
    if current == mark:
        # already active: avoid a pointless write
        return

    wlock = repo.wlock()
    try:
        file = repo.opener('bookmarks.current', 'w', atomictemp=True)
        file.write(encoding.fromlocal(mark))
        file.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
135 135
def unsetcurrent(repo):
    # Deactivate the active bookmark by removing .hg/bookmarks.current.
    wlock = repo.wlock()
    try:
        try:
            repo.vfs.unlink('bookmarks.current')
            repo._bookmarkcurrent = None
        except OSError, inst:
            # a missing file just means no bookmark was active
            if inst.errno != errno.ENOENT:
                raise
    finally:
        wlock.release()
147 147
def iscurrent(repo, mark=None, parents=None):
    '''Tell whether the current bookmark is also active

    That is, the bookmark recorded in .hg/bookmarks.current also points
    to a parent of the working directory.
    '''
    if not mark:
        mark = repo._bookmarkcurrent
    if not parents:
        parents = [ctx.node() for ctx in repo[None].parents()]
    bookmarks = repo._bookmarks
    if mark not in bookmarks:
        return False
    return bookmarks[mark] in parents
160 160
def updatecurrentbookmark(repo, oldnode, curbranch):
    '''Move the active bookmark from oldnode to the tip of curbranch.'''
    try:
        return update(repo, oldnode, repo.branchtip(curbranch))
    except error.RepoLookupError:
        if curbranch != "default":
            raise util.Abort(_("branch %s not found") % curbranch)
        # no default branch at all: fall back to the repository tip
        return update(repo, oldnode, repo.lookup("tip"))
169 169
def deletedivergent(repo, deletefrom, bm):
    '''Delete divergent versions of bm on nodes in deletefrom.

    Return True if at least one bookmark was deleted, False otherwise.'''
    marks = repo._bookmarks
    base = bm.split('@', 1)[0]
    removed = False
    for candidate in list(marks):
        if candidate.split('@', 1)[0] != base:
            continue
        # the bare '@' bookmark, or one with no '@' at all, cannot be
        # divergent by definition
        if candidate == '@' or '@' not in candidate:
            continue
        if candidate and candidate != bm and marks[candidate] in deletefrom:
            del marks[candidate]
            removed = True
    return removed
186 186
def calculateupdate(ui, repo, checkout):
    '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
    check out and where to move the active bookmark from, if needed.'''
    movemarkfrom = None
    if checkout is None:
        activemark = repo._bookmarkcurrent
        if iscurrent(repo):
            # active bookmark points at a working-dir parent: plan to move it
            movemarkfrom = repo['.'].node()
        elif activemark:
            ui.status(_("updating to active bookmark %s\n") % activemark)
            checkout = activemark
    return (checkout, movemarkfrom)
199 199
def update(repo, parents, node):
    '''Move the active bookmark from 'parents' onto 'node', deleting
    divergent bookmarks that become obsolete along the way.

    Return True if the bookmark store was modified (and written).'''
    deletefrom = parents
    marks = repo._bookmarks
    update = False
    cur = repo._bookmarkcurrent
    if not cur:
        # no active bookmark: nothing to move
        return False

    if marks[cur] in parents:
        new = repo[node]
        # all bookmarks sharing the active bookmark's base name
        divs = [repo[b] for b in marks
                if b.split('@', 1)[0] == cur.split('@', 1)[0]]
        anc = repo.changelog.ancestors([new.rev()])
        # only divergent marks on ancestors of the target (or the target
        # itself) are candidates for deletion
        deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
        if validdest(repo, repo[marks[cur]], new):
            marks[cur] = new.node()
            update = True

    if deletedivergent(repo, deletefrom, cur):
        update = True

    if update:
        marks.write()
    return update
224 224
def listbookmarks(repo):
    '''Return {name: hex node} for the repo's publishable bookmarks.'''
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    hasnode = repo.changelog.hasnode
    result = {}
    for name, node in marks.iteritems():
        # skip bookmarks pointing at unknown nodes
        if not hasnode(node):
            continue
        # don't expose local divergent bookmarks ('name@suffix')
        if '@' in name and not name.endswith('@'):
            continue
        result[name] = hex(node)
    return result
237 237
def pushbookmark(repo, key, old, new):
    '''Move bookmark 'key' from hex node 'old' to hex node 'new' under
    lock, recording the change in a transaction.

    Return True on success, False if the precondition ('key' currently
    at 'old') does not hold or 'new' is unknown.'''
    w = l = tr = None
    try:
        w = repo.wlock()
        l = repo.lock()
        tr = repo.transaction('bookmarks')
        marks = repo._bookmarks
        # missing bookmark hashes to hex('') so a non-existent mark can
        # still be created when 'old' is the empty string
        existing = hex(marks.get(key, ''))
        if existing != old and existing != new:
            return False
        if new == '':
            # empty 'new' means deletion
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        marks.recordchange(tr)
        tr.close()
        return True
    finally:
        # release() tolerates None for locks/tr never acquired
        lockmod.release(tr, l, w)
259 259
def compare(repo, srcmarks, dstmarks,
            srchex=None, dsthex=None, targets=None):
    '''Compare bookmarks between srcmarks and dstmarks

    This returns tuple "(addsrc, adddst, advsrc, advdst, diverge,
    differ, invalid, same)", each a list of bookmarks as follows:

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverge
    :differ: changed, but changeset referred on src is unknown on dst
    :invalid: unknown on both side
    :same: same on both side

    Each element of the result lists is a tuple "(bookmark name,
    changeset ID on source side, changeset ID on destination side)".
    Changeset IDs are 40-hex-digit strings or None.

    Changeset IDs of tuples in "addsrc", "adddst", "differ" or
    "invalid" list may be unknown for repo.

    This function expects that "srcmarks" and "dstmarks" return
    changeset ID in 40 hexadecimal digit string for specified
    bookmark. If not so (e.g. bmstore "repo._bookmarks" returning
    binary value), "srchex" or "dsthex" should be specified to convert
    into such form.

    If "targets" is specified, only bookmarks listed in it are
    examined.
    '''
    if not srchex:
        srchex = lambda x: x
    if not dsthex:
        dsthex = lambda x: x

    if targets:
        names = set(targets)
    else:
        names = set(srcmarks) | set(dstmarks)

    addsrc = []
    adddst = []
    advsrc = []
    advdst = []
    diverge = []
    differ = []
    invalid = []
    same = []

    for name in sorted(names):
        insrc = name in srcmarks
        indst = name in dstmarks
        if not insrc and not indst:
            invalid.append((name, None, None))
        elif not insrc:
            adddst.append((name, None, dsthex(dstmarks[name])))
        elif not indst:
            addsrc.append((name, srchex(srcmarks[name]), None))
        else:
            scid = srchex(srcmarks[name])
            dcid = dsthex(dstmarks[name])
            if scid == dcid:
                same.append((name, scid, dcid))
            elif scid in repo and dcid in repo:
                sctx = repo[scid]
                dctx = repo[dcid]
                # a fast-forwardable pair is an advance; otherwise divergence
                if sctx.rev() < dctx.rev():
                    bucket = advdst if validdest(repo, sctx, dctx) else diverge
                else:
                    bucket = advsrc if validdest(repo, dctx, sctx) else diverge
                bucket.append((name, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ.append((name, scid, dcid))

    return (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same)
346 346
347 347 def _diverge(ui, b, path, localmarks):
348 348 if b == '@':
349 349 b = ''
350 350 # find a unique @ suffix
351 351 for x in range(1, 100):
352 352 n = '%s@%d' % (b, x)
353 353 if n not in localmarks:
354 354 break
355 355 # try to use an @pathalias suffix
356 356 # if an @pathalias already exists, we overwrite (update) it
357 357 if path.startswith("file:"):
358 358 path = util.url(path).path
359 359 for p, u in ui.configitems("paths"):
360 360 if u.startswith("file:"):
361 361 u = util.url(u).path
362 362 if path == u:
363 363 n = '%s@%s' % (b, p)
364 364 return n
365 365
def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    '''Merge bookmark changes from 'remotemarks' into the local store.

    Bookmarks listed in 'explicit' are imported even when they diverge
    from or lag behind the local value; others get a derived
    '<name>@<suffix>' copy instead. 'trfunc' lazily provides the
    transaction used to record any change.'''
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = compare(repo, remotemarks, localmarks, dsthex=hex)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        # demote user-visible messages to debug output
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, bin(scid), status,
                            _("adding remote bookmark %s\n") % (b)))
    for b, scid, dcid in advsrc:
        changed.append((b, bin(scid), status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))
        else:
            # store the remote value under a derived divergent name
            db = _diverge(ui, b, path, localmarks)
            changed.append((db, bin(scid), warn,
                            _("divergent bookmark %s stored as %s\n")
                            % (b, db)))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))

    if changed:
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)
411 411
def diff(ui, dst, src):
    '''Print bookmarks present in src but missing in dst.

    Return 0 when at least one changed bookmark was found, 1 otherwise.'''
    ui.status(_("searching for changed bookmarks\n"))

    srcmarks = src.listkeys('bookmarks')
    dstmarks = dst.listkeys('bookmarks')

    changed = sorted(set(srcmarks) - set(dstmarks))
    for name in changed:
        # full node in debug mode, short (12-hex) form otherwise
        if ui.debugflag:
            node = srcmarks[name]
        else:
            node = srcmarks[name][:12]
        ui.write(" %-25s %s\n" % (name, node))

    if not changed:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
427 427
def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # nothing to update
        return False
    if not old:
        # old is nullrev, anything is valid
        # (new != nullrev has been excluded by the previous check)
        return True
    if repo.obsstore:
        # new must be in the obsolescence foreground of old
        return new.node() in obsolete.foreground(repo, [old.node()])
    # still an independent clause as it is lazier (and therefore faster)
    return old.descendant(new)
@@ -1,502 +1,505
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import os
16 16 import errno
17 17 import error, util
18 18
19 19 version = 2
20 20
def active(func):
    """Decorator refusing to run a transaction method once the
    transaction has been committed or aborted (count == 0)."""
    def _active(self, *args, **kwds):
        if self.count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
28 28
def _playback(journal, report, opener, vfsmap, entries, backupentries,
              unlink=True):
    # Replay a journal: truncate appended files back to their recorded
    # offsets and restore (or remove) backed-up files.
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            # offset 0 and unlink requested: the file did not exist
            # before the transaction, so drop it entirely
            try:
                opener.unlink(f)
            except (IOError, OSError), inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            # NOTE(review): execution falls through to vfsmap[l] below,
            # which will raise KeyError — a 'continue' looks intended here
            # (close() uses one in the equivalent loop); verify.
            report("couldn't handle %s: unknown cache location %s\n"
                   % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                # both paths recorded: restore the file from its backup
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                try:
                    util.copyfile(backuppath, filepath)
                    backupfiles.append(b)
                except IOError:
                    report(_("failed to recover %s\n") % f)
            else:
                # temporary or backup-only entry: just remove it
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError), inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, util.Abort), inst:
            # cache entries (c set) are best-effort; others must succeed
            if not c:
                raise

    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, util.Abort), inst:
        # only pure backup file remains, it is safe to ignore any error
        pass
84 84
class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access file in various {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callback to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data have been written ever
        self._anypending = False
        # holds callback to call when writing the transaction
        self._finalizecallback = {}
        # hold callback for post transaction close
        self._postclosecallback = {}

    def __del__(self):
        # a live journal means the transaction was never closed: roll back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entry

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entry.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add a append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        dirname, filename = os.path.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = os.path.join(dirname, backupfilename)
        vfs = self._vfsmap[location]
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # no pre-existing file: record an empty backup path
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        # NOTE(review): 'file' below is the Python 2 builtin, not the
        # backed-up path (entry[1]) — every entry is keyed on the same
        # object, so _backupmap lookups by path cannot match; verify.
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile):
        """register a temporary transaction file

        Such file will be delete when the transaction exit (on both failure and
        success).
        """
        self._addbackupentry(('', '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `location` arguments may be used to indicate the files are located
        outside of the the standard directory for transaction. It should match
        one of the key of the `transaction.vfsmap` dictionnary.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self):
        # write files registered for generation, ordered by their 'order' key
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

    @active
    def find(self, file):
        # return the journal or backup entry recorded for 'file', if any
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # open a nested scope; close() only commits once all scopes closed
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending file to temporary version

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            if self.onclose is not None:
                self.onclose()

        self.count -= 1
        if self.count != 0:
            # nested scopes remain open: nothing more to do yet
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files
        # NOTE(review): the "couldn't remote" messages below presumably mean
        # "couldn't remove" — confirm before changing user-visible strings.
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remote %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort), inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self.report("couldn't remote %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        # NOTE(review): this loop unpacks into _l/_f but the body tests 'l',
        # which still holds whatever value the loop above left behind —
        # '_l' (and '_f') were probably intended below; verify.
        for _l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remote %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort), inst:
                    if not c:
                        raise
                    # Abort may be raise by read only opener
                    self.report("couldn't remote %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        # Roll back everything recorded in the journal and backup entries.
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was recorded: just remove the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener, self._vfsmap,
                          self.entries, self._backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
458 461
459 462
460 463 def rollback(opener, vfsmap, file, report):
461 464 """Rolls back the transaction contained in the given file
462 465
463 466 Reads the entries in the specified file, and the corresponding
464 467 '*.backupfiles' file, to recover from an incomplete transaction.
465 468
466 469 * `file`: a file containing a list of entries, specifying where
467 470 to truncate each file. The file should contain a list of
468 471 file\0offset pairs, delimited by newlines. The corresponding
469 472 '*.backupfiles' file should contain a list of file\0backupfile
470 473 pairs, delimited by \0.
471 474 """
472 475 entries = []
473 476 backupentries = []
474 477
475 478 fp = opener.open(file)
476 479 lines = fp.readlines()
477 480 fp.close()
478 481 for l in lines:
479 482 try:
480 483 f, o = l.split('\0')
481 484 entries.append((f, int(o), None))
482 485 except ValueError:
483 486 report(_("couldn't read journal entry %r!\n") % l)
484 487
485 488 backupjournal = "%s.backupfiles" % file
486 489 if opener.exists(backupjournal):
487 490 fp = opener.open(backupjournal)
488 491 lines = fp.readlines()
489 492 if lines:
490 493 ver = lines[0][:-1]
491 494 if ver == str(version):
492 495 for line in lines[1:]:
493 496 if line:
494 497 # Shave off the trailing newline
495 498 line = line[:-1]
496 499 l, f, b, c = line.split('\0')
497 500 backupentries.append((l, f, b, bool(c)))
498 501 else:
499 502 report(_("journal was created by a different version of "
500 503 "Mercurial"))
501 504
502 505 _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now