branchcache: backout 0311a6abd38a
Matt Mackall
r16745:27b2e182 default
@@ -1,2410 +1,2364 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class storecache(filecache):
23 23 """filecache for files in the store"""
24 24 def join(self, obj, fname):
25 25 return obj.sjoin(fname)
26 26
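
# A minimal, self-contained sketch of the stat-based property cache that
# `filecache` and `storecache` implement (hypothetical class name, not
# Mercurial's actual code): the decorated method is recomputed only when
# the backing file's (mtime, size) signature changes; `storecache` merely
# joins the name under .hg/store instead of .hg/.
import os

class statcache(object):
    def __init__(self, fname):
        self.fname = fname
    def join(self, obj, fname):
        # filecache flavor: .hg/<fname>; a storecache variant would use sjoin
        return os.path.join(obj.path, fname)
    def __call__(self, func):
        fname, join, key = self.fname, self.join, func.__name__
        def getter(obj):
            try:
                st = os.stat(join(obj, fname))
                sig = (st.st_mtime, st.st_size)
            except OSError:
                sig = None
            cached = obj.__dict__.get(key)
            if cached is None or cached[0] != sig:
                obj.__dict__[key] = (sig, func(obj))
            return obj.__dict__[key][1]
        return property(getter)
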
27 27 class localrepository(repo.repository):
28 28 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 29 'known', 'getbundle'))
30 30 supportedformats = set(('revlogv1', 'generaldelta'))
31 31 supported = supportedformats | set(('store', 'fncache', 'shared',
32 32 'dotencode'))
33 33
34 34 def __init__(self, baseui, path=None, create=False):
35 35 repo.repository.__init__(self)
36 36 self.root = os.path.realpath(util.expandpath(path))
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 40 self.opener = scmutil.opener(self.path)
41 41 self.wopener = scmutil.opener(self.root)
42 42 self.baseui = baseui
43 43 self.ui = baseui.copy()
44 44 # A list of callbacks to shape the phase if no data were found.
45 45 # Callbacks are in the form: func(repo, roots) --> processed root.
46 46 # This list is to be filled by extensions during repo setup.
47 47 self._phasedefaults = []
48 48
49 49 try:
50 50 self.ui.readconfig(self.join("hgrc"), self.root)
51 51 extensions.loadall(self.ui)
52 52 except IOError:
53 53 pass
54 54
55 55 if not os.path.isdir(self.path):
56 56 if create:
57 57 if not os.path.exists(path):
58 58 util.makedirs(path)
59 59 util.makedir(self.path, notindexed=True)
60 60 requirements = ["revlogv1"]
61 61 if self.ui.configbool('format', 'usestore', True):
62 62 os.mkdir(os.path.join(self.path, "store"))
63 63 requirements.append("store")
64 64 if self.ui.configbool('format', 'usefncache', True):
65 65 requirements.append("fncache")
66 66 if self.ui.configbool('format', 'dotencode', True):
67 67 requirements.append('dotencode')
68 68 # create an invalid changelog
69 69 self.opener.append(
70 70 "00changelog.i",
71 71 '\0\0\0\2' # represents revlogv2
72 72 ' dummy changelog to prevent using the old repo layout'
73 73 )
74 74 if self.ui.configbool('format', 'generaldelta', False):
75 75 requirements.append("generaldelta")
76 76 requirements = set(requirements)
77 77 else:
78 78 raise error.RepoError(_("repository %s not found") % path)
79 79 elif create:
80 80 raise error.RepoError(_("repository %s already exists") % path)
81 81 else:
82 82 try:
83 83 requirements = scmutil.readrequires(self.opener, self.supported)
84 84 except IOError, inst:
85 85 if inst.errno != errno.ENOENT:
86 86 raise
87 87 requirements = set()
88 88
89 89 self.sharedpath = self.path
90 90 try:
91 91 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 92 if not os.path.exists(s):
93 93 raise error.RepoError(
94 94 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 95 self.sharedpath = s
96 96 except IOError, inst:
97 97 if inst.errno != errno.ENOENT:
98 98 raise
99 99
100 100 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 101 self.spath = self.store.path
102 102 self.sopener = self.store.opener
103 103 self.sjoin = self.store.join
104 104 self.opener.createmode = self.store.createmode
105 105 self._applyrequirements(requirements)
106 106 if create:
107 107 self._writerequirements()
108 108
109 109
110 110 self._branchcache = None
111 111 self._branchcachetip = None
112 112 self.filterpats = {}
113 113 self._datafilters = {}
114 114 self._transref = self._lockref = self._wlockref = None
115 115
116 116 # A cache for various files under .hg/ that tracks file changes,
117 117 # (used by the filecache decorator)
118 118 #
119 119 # Maps a property name to its util.filecacheentry
120 120 self._filecache = {}
121 121
122 122 def _applyrequirements(self, requirements):
123 123 self.requirements = requirements
124 124 openerreqs = set(('revlogv1', 'generaldelta'))
125 125 self.sopener.options = dict((r, 1) for r in requirements
126 126 if r in openerreqs)
127 127
128 128 def _writerequirements(self):
129 129 reqfile = self.opener("requires", "w")
130 130 for r in self.requirements:
131 131 reqfile.write("%s\n" % r)
132 132 reqfile.close()
133 133
134 134 def _checknested(self, path):
135 135 """Determine if path is a legal nested repository."""
136 136 if not path.startswith(self.root):
137 137 return False
138 138 subpath = path[len(self.root) + 1:]
139 139 normsubpath = util.pconvert(subpath)
140 140
141 141 # XXX: Checking against the current working copy is wrong in
142 142 # the sense that it can reject things like
143 143 #
144 144 # $ hg cat -r 10 sub/x.txt
145 145 #
146 146 # if sub/ is no longer a subrepository in the working copy
147 147 # parent revision.
148 148 #
149 149 # However, it can of course also allow things that would have
150 150 # been rejected before, such as the above cat command if sub/
151 151 # is a subrepository now, but was a normal directory before.
152 152 # The old path auditor would have rejected by mistake since it
153 153 # panics when it sees sub/.hg/.
154 154 #
155 155 # All in all, checking against the working copy seems sensible
156 156 # since we want to prevent access to nested repositories on
157 157 # the filesystem *now*.
158 158 ctx = self[None]
159 159 parts = util.splitpath(subpath)
160 160 while parts:
161 161 prefix = '/'.join(parts)
162 162 if prefix in ctx.substate:
163 163 if prefix == normsubpath:
164 164 return True
165 165 else:
166 166 sub = ctx.sub(prefix)
167 167 return sub.checknested(subpath[len(prefix) + 1:])
168 168 else:
169 169 parts.pop()
170 170 return False
171 171
172 172 @filecache('bookmarks')
173 173 def _bookmarks(self):
174 174 return bookmarks.read(self)
175 175
176 176 @filecache('bookmarks.current')
177 177 def _bookmarkcurrent(self):
178 178 return bookmarks.readcurrent(self)
179 179
180 180 def _writebookmarks(self, marks):
181 181 bookmarks.write(self)
182 182
183 183 def bookmarkheads(self, bookmark):
184 184 name = bookmark.split('@', 1)[0]
185 185 heads = []
186 186 for mark, n in self._bookmarks.iteritems():
187 187 if mark.split('@', 1)[0] == name:
188 188 heads.append(n)
189 189 return heads
190 190
191 191 @storecache('phaseroots')
192 192 def _phasecache(self):
193 193 return phases.phasecache(self, self._phasedefaults)
194 194
195 195 @storecache('00changelog.i')
196 196 def changelog(self):
197 197 c = changelog.changelog(self.sopener)
198 198 if 'HG_PENDING' in os.environ:
199 199 p = os.environ['HG_PENDING']
200 200 if p.startswith(self.root):
201 201 c.readpending('00changelog.i.a')
202 202 return c
203 203
204 204 @storecache('00manifest.i')
205 205 def manifest(self):
206 206 return manifest.manifest(self.sopener)
207 207
208 208 @filecache('dirstate')
209 209 def dirstate(self):
210 210 warned = [0]
211 211 def validate(node):
212 212 try:
213 213 self.changelog.rev(node)
214 214 return node
215 215 except error.LookupError:
216 216 if not warned[0]:
217 217 warned[0] = True
218 218 self.ui.warn(_("warning: ignoring unknown"
219 219 " working parent %s!\n") % short(node))
220 220 return nullid
221 221
222 222 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
223 223
224 224 def __getitem__(self, changeid):
225 225 if changeid is None:
226 226 return context.workingctx(self)
227 227 return context.changectx(self, changeid)
228 228
229 229 def __contains__(self, changeid):
230 230 try:
231 231 return bool(self.lookup(changeid))
232 232 except error.RepoLookupError:
233 233 return False
234 234
235 235 def __nonzero__(self):
236 236 return True
237 237
238 238 def __len__(self):
239 239 return len(self.changelog)
240 240
241 241 def __iter__(self):
242 242 for i in xrange(len(self)):
243 243 yield i
244 244
245 245 def revs(self, expr, *args):
246 246 '''Return a list of revisions matching the given revset'''
247 247 expr = revset.formatspec(expr, *args)
248 248 m = revset.match(None, expr)
249 249 return [r for r in m(self, range(len(self)))]
250 250
251 251 def set(self, expr, *args):
252 252 '''
253 253 Yield a context for each matching revision, after doing arg
254 254 replacement via revset.formatspec
255 255 '''
256 256 for r in self.revs(expr, *args):
257 257 yield self[r]
258 258
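
# Usage sketch for revs()/set() (assumes `repo` is an open localrepository;
# the revset strings are illustrative): formatspec quotes each argument,
# so callers never hand-escape user input into the expression.
for ctx in repo.set('heads(branch(%s))', 'default'):
    print ctx.hex()
revs = repo.revs('%d::%d and not merge()', 0, 10)
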
259 259 def url(self):
260 260 return 'file:' + self.root
261 261
262 262 def hook(self, name, throw=False, **args):
263 263 return hook.hook(self.ui, self, name, throw, **args)
264 264
265 265 tag_disallowed = ':\r\n'
266 266
267 267 def _tag(self, names, node, message, local, user, date, extra={}):
268 268 if isinstance(names, str):
269 269 allchars = names
270 270 names = (names,)
271 271 else:
272 272 allchars = ''.join(names)
273 273 for c in self.tag_disallowed:
274 274 if c in allchars:
275 275 raise util.Abort(_('%r cannot be used in a tag name') % c)
276 276
277 277 branches = self.branchmap()
278 278 for name in names:
279 279 self.hook('pretag', throw=True, node=hex(node), tag=name,
280 280 local=local)
281 281 if name in branches:
282 282 self.ui.warn(_("warning: tag %s conflicts with existing"
283 283 " branch name\n") % name)
284 284
285 285 def writetags(fp, names, munge, prevtags):
286 286 fp.seek(0, 2)
287 287 if prevtags and prevtags[-1] != '\n':
288 288 fp.write('\n')
289 289 for name in names:
290 290 m = munge and munge(name) or name
291 291 if (self._tagscache.tagtypes and
292 292 name in self._tagscache.tagtypes):
293 293 old = self.tags().get(name, nullid)
294 294 fp.write('%s %s\n' % (hex(old), m))
295 295 fp.write('%s %s\n' % (hex(node), m))
296 296 fp.close()
297 297
298 298 prevtags = ''
299 299 if local:
300 300 try:
301 301 fp = self.opener('localtags', 'r+')
302 302 except IOError:
303 303 fp = self.opener('localtags', 'a')
304 304 else:
305 305 prevtags = fp.read()
306 306
307 307 # local tags are stored in the current charset
308 308 writetags(fp, names, None, prevtags)
309 309 for name in names:
310 310 self.hook('tag', node=hex(node), tag=name, local=local)
311 311 return
312 312
313 313 try:
314 314 fp = self.wfile('.hgtags', 'rb+')
315 315 except IOError, e:
316 316 if e.errno != errno.ENOENT:
317 317 raise
318 318 fp = self.wfile('.hgtags', 'ab')
319 319 else:
320 320 prevtags = fp.read()
321 321
322 322 # committed tags are stored in UTF-8
323 323 writetags(fp, names, encoding.fromlocal, prevtags)
324 324
325 325 fp.close()
326 326
327 327 self.invalidatecaches()
328 328
329 329 if '.hgtags' not in self.dirstate:
330 330 self[None].add(['.hgtags'])
331 331
332 332 m = matchmod.exact(self.root, '', ['.hgtags'])
333 333 tagnode = self.commit(message, user, date, extra=extra, match=m)
334 334
335 335 for name in names:
336 336 self.hook('tag', node=hex(node), tag=name, local=local)
337 337
338 338 return tagnode
339 339
340 340 def tag(self, names, node, message, local, user, date):
341 341 '''tag a revision with one or more symbolic names.
342 342
343 343 names is a list of strings or, when adding a single tag, names may be a
344 344 string.
345 345
346 346 if local is True, the tags are stored in a per-repository file.
347 347 otherwise, they are stored in the .hgtags file, and a new
348 348 changeset is committed with the change.
349 349
350 350 keyword arguments:
351 351
352 352 local: whether to store tags in non-version-controlled file
353 353 (default False)
354 354
355 355 message: commit message to use if committing
356 356
357 357 user: name of user to use if committing
358 358
359 359 date: date tuple to use if committing'''
360 360
361 361 if not local:
362 362 for x in self.status()[:5]:
363 363 if '.hgtags' in x:
364 364 raise util.Abort(_('working copy of .hgtags is changed '
365 365 '(please commit .hgtags manually)'))
366 366
367 367 self.tags() # instantiate the cache
368 368 self._tag(names, node, message, local, user, date)
369 369
370 370 @propertycache
371 371 def _tagscache(self):
372 372 '''Returns a tagscache object that contains various tags-related
373 373 caches.'''
374 374
375 375 # This simplifies its cache management by having one decorated
376 376 # function (this one) and the rest simply fetch things from it.
377 377 class tagscache(object):
378 378 def __init__(self):
379 379 # These two define the set of tags for this repository. tags
380 380 # maps tag name to node; tagtypes maps tag name to 'global' or
381 381 # 'local'. (Global tags are defined by .hgtags across all
382 382 # heads, and local tags are defined in .hg/localtags.)
383 383 # They constitute the in-memory cache of tags.
384 384 self.tags = self.tagtypes = None
385 385
386 386 self.nodetagscache = self.tagslist = None
387 387
388 388 cache = tagscache()
389 389 cache.tags, cache.tagtypes = self._findtags()
390 390
391 391 return cache
392 392
393 393 def tags(self):
394 394 '''return a mapping of tag to node'''
395 395 t = {}
396 396 for k, v in self._tagscache.tags.iteritems():
397 397 try:
398 398 # ignore tags to unknown nodes
399 399 self.changelog.rev(v)
400 400 t[k] = v
401 401 except (error.LookupError, ValueError):
402 402 pass
403 403 return t
404 404
405 405 def _findtags(self):
406 406 '''Do the hard work of finding tags. Return a pair of dicts
407 407 (tags, tagtypes) where tags maps tag name to node, and tagtypes
408 408 maps tag name to a string like \'global\' or \'local\'.
409 409 Subclasses or extensions are free to add their own tags, but
410 410 should be aware that the returned dicts will be retained for the
411 411 duration of the localrepo object.'''
412 412
413 413 # XXX what tagtype should subclasses/extensions use? Currently
414 414 # mq and bookmarks add tags, but do not set the tagtype at all.
415 415 # Should each extension invent its own tag type? Should there
416 416 # be one tagtype for all such "virtual" tags? Or is the status
417 417 # quo fine?
418 418
419 419 alltags = {} # map tag name to (node, hist)
420 420 tagtypes = {}
421 421
422 422 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
423 423 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
424 424
425 425 # Build the return dicts. Have to re-encode tag names because
426 426 # the tags module always uses UTF-8 (in order not to lose info
427 427 # writing to the cache), but the rest of Mercurial wants them in
428 428 # local encoding.
429 429 tags = {}
430 430 for (name, (node, hist)) in alltags.iteritems():
431 431 if node != nullid:
432 432 tags[encoding.tolocal(name)] = node
433 433 tags['tip'] = self.changelog.tip()
434 434 tagtypes = dict([(encoding.tolocal(name), value)
435 435 for (name, value) in tagtypes.iteritems()])
436 436 return (tags, tagtypes)
437 437
438 438 def tagtype(self, tagname):
439 439 '''
440 440 return the type of the given tag. result can be:
441 441
442 442 'local' : a local tag
443 443 'global' : a global tag
444 444 None : tag does not exist
445 445 '''
446 446
447 447 return self._tagscache.tagtypes.get(tagname)
448 448
449 449 def tagslist(self):
450 450 '''return a list of tags ordered by revision'''
451 451 if not self._tagscache.tagslist:
452 452 l = []
453 453 for t, n in self.tags().iteritems():
454 454 r = self.changelog.rev(n)
455 455 l.append((r, t, n))
456 456 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
457 457
458 458 return self._tagscache.tagslist
459 459
460 460 def nodetags(self, node):
461 461 '''return the tags associated with a node'''
462 462 if not self._tagscache.nodetagscache:
463 463 nodetagscache = {}
464 464 for t, n in self._tagscache.tags.iteritems():
465 465 nodetagscache.setdefault(n, []).append(t)
466 466 for tags in nodetagscache.itervalues():
467 467 tags.sort()
468 468 self._tagscache.nodetagscache = nodetagscache
469 469 return self._tagscache.nodetagscache.get(node, [])
470 470
471 471 def nodebookmarks(self, node):
472 472 marks = []
473 473 for bookmark, n in self._bookmarks.iteritems():
474 474 if n == node:
475 475 marks.append(bookmark)
476 476 return sorted(marks)
477 477
478 478 def _branchtags(self, partial, lrev):
479 479 # TODO: rename this function?
480 480 tiprev = len(self) - 1
481 481 if lrev != tiprev:
482 482 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
483 483 self._updatebranchcache(partial, ctxgen)
484 484 self._writebranchcache(partial, self.changelog.tip(), tiprev)
485 485
486 486 return partial
487 487
488 488 def updatebranchcache(self):
489 489 tip = self.changelog.tip()
490 490 if self._branchcache is not None and self._branchcachetip == tip:
491 491 return
492 492
493 493 oldtip = self._branchcachetip
494 494 self._branchcachetip = tip
495 495 if oldtip is None or oldtip not in self.changelog.nodemap:
496 496 partial, last, lrev = self._readbranchcache()
497 497 else:
498 498 lrev = self.changelog.rev(oldtip)
499 499 partial = self._branchcache
500 500
501 501 self._branchtags(partial, lrev)
502 502 # this private cache holds all heads (not just the branch tips)
503 503 self._branchcache = partial
504 504
505 505 def branchmap(self):
506 506 '''returns a dictionary {branch: [branchheads]}'''
507 507 self.updatebranchcache()
508 508 return self._branchcache
509 509
510 510 def _branchtip(self, heads):
511 511 '''return the tipmost branch head in heads'''
512 512 tip = heads[-1]
513 513 for h in reversed(heads):
514 514 if not self[h].closesbranch():
515 515 tip = h
516 516 break
517 517 return tip
518 518
519 519 def branchtip(self, branch):
520 520 '''return the tip node for a given branch'''
521 521 if branch not in self.branchmap():
522 522 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
523 523 return self._branchtip(self.branchmap()[branch])
524 524
525 525 def branchtags(self):
526 526 '''return a dict where branch names map to the tipmost head of
527 527 the branch, open heads come before closed'''
528 528 bt = {}
529 529 for bn, heads in self.branchmap().iteritems():
530 530 bt[bn] = self._branchtip(heads)
531 531 return bt
532 532
533 533 def _readbranchcache(self):
534 534 partial = {}
535 535 try:
536 536 f = self.opener("cache/branchheads")
537 537 lines = f.read().split('\n')
538 538 f.close()
539 539 except (IOError, OSError):
540 540 return {}, nullid, nullrev
541 541
542 542 try:
543 543 last, lrev = lines.pop(0).split(" ", 1)
544 544 last, lrev = bin(last), int(lrev)
545 545 if lrev >= len(self) or self[lrev].node() != last:
546 546 # invalidate the cache
547 547 raise ValueError('invalidating branch cache (tip differs)')
548 548 for l in lines:
549 549 if not l:
550 550 continue
551 551 node, label = l.split(" ", 1)
552 552 label = encoding.tolocal(label.strip())
553 if not node in self:
554 raise ValueError('invalidating branch cache because node '+
555 '%s does not exist' % node)
556 553 partial.setdefault(label, []).append(bin(node))
557 554 except KeyboardInterrupt:
558 555 raise
559 556 except Exception, inst:
560 557 if self.ui.debugflag:
561 558 self.ui.warn(str(inst), '\n')
562 559 partial, last, lrev = {}, nullid, nullrev
563 560 return partial, last, lrev
564 561
565 562 def _writebranchcache(self, branches, tip, tiprev):
566 563 try:
567 564 f = self.opener("cache/branchheads", "w", atomictemp=True)
568 565 f.write("%s %s\n" % (hex(tip), tiprev))
569 566 for label, nodes in branches.iteritems():
570 567 for node in nodes:
571 568 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
572 569 f.close()
573 570 except (IOError, OSError):
574 571 pass
575 572
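
# Self-contained sketch of the .hg/cache/branchheads format the two
# methods above agree on: the first line is "<tip-hex> <tip-rev>", and
# every following line is "<head-hex> <branch-name>".
def parsebranchheads(data):
    lines = data.split('\n')
    tiphex, tiprev = lines.pop(0).split(' ', 1)
    heads = {}
    for l in lines:
        if not l:
            continue
        node, label = l.split(' ', 1)
        heads.setdefault(label.strip(), []).append(node)
    return tiphex, int(tiprev), heads

sample = 'ab' * 20 + ' 7\n' + 'cd' * 20 + ' default\n'
assert parsebranchheads(sample)[1] == 7
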
576 573 def _updatebranchcache(self, partial, ctxgen):
577 """Given a branchhead cache, partial, that may have extra nodes or be
578 missing heads, and a generator of nodes that is at least a superset of
579 the missing heads, this function updates partial to be correct.
580 """
581 574 # collect new branch entries
582 575 newbranches = {}
583 576 for c in ctxgen:
584 577 newbranches.setdefault(c.branch(), []).append(c.node())
585 578 # if older branchheads are reachable from new ones, they aren't
586 579 # really branchheads. Note checking parents is insufficient:
587 580 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
588 581 for branch, newnodes in newbranches.iteritems():
589 582 bheads = partial.setdefault(branch, [])
590 583 bheads.extend(newnodes)
591 # Remove duplicates - nodes that are in newnodes and are already in
592 # bheads. This can happen if you strip a node and its parent was
593 # already a head (because they're on different branches).
594 bheads = set(bheads)
595
596 # Remove candidate heads that no longer are in the repo (e.g., as
597 # the result of a strip that just happened).
598 # avoid using 'bhead in self' here because that dives down into
599 # branchcache code somewhat recursively.
600 bheads = [bhead for bhead in bheads \
601 if self.changelog.hasnode(bhead)]
602 if len(bheads) > 1:
603 bheads = sorted(bheads, key=lambda x: self[x].rev())
604 # starting from tip means fewer passes over reachable
605 while newnodes:
606 latest = newnodes.pop()
607 if latest not in bheads:
608 continue
609 minbhnode = self[bheads[0]].node()
610 reachable = self.changelog.reachable(latest, minbhnode)
611 reachable.remove(latest)
612 if reachable:
613 bheads = [b for b in bheads if b not in reachable]
584 if len(bheads) <= 1:
585 continue
586 bheads = sorted(bheads, key=lambda x: self[x].rev())
587 # starting from tip means fewer passes over reachable
588 while newnodes:
589 latest = newnodes.pop()
590 if latest not in bheads:
591 continue
592 minbhnode = self[bheads[0]].node()
593 reachable = self.changelog.reachable(latest, minbhnode)
594 reachable.remove(latest)
595 if reachable:
596 bheads = [b for b in bheads if b not in reachable]
614 597 partial[branch] = bheads
615 598
616 # There may be branches that cease to exist when the last commit in the
617 # branch was stripped. This code filters them out. Note that the
618 # branch that ceased to exist may not be in newbranches because
619 # newbranches is the set of candidate heads, which when you strip the
620 # last commit in a branch will be the parent branch.
621 for branch in partial.keys():
622 nodes = [head for head in partial[branch] \
623 if self.changelog.hasnode(head)]
624 if len(nodes) < 1:
625 del partial[branch]
626
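
# Toy sketch of the pruning rule _updatebranchcache applies (both the
# backed-out version above and the restored one): a candidate head stops
# being a head once it is reachable from, i.e. is an ancestor of, another
# candidate. The parent map and integer nodes here are hypothetical.
def pruneheads(candidates, parents):
    heads = set(candidates)
    for n in candidates:
        stack = list(parents.get(n, ()))
        while stack:
            p = stack.pop()
            heads.discard(p)              # an ancestor cannot be a head
            stack.extend(parents.get(p, ()))
    return heads

# 1 (branch a) -> 2 (branch b) -> 3 (branch a): of candidates 1 and 3,
# only 3 survives, matching the note that checking parents alone is
# insufficient.
assert pruneheads([1, 3], {3: [2], 2: [1]}) == set([3])
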
627 599 def lookup(self, key):
628 600 return self[key].node()
629 601
630 602 def lookupbranch(self, key, remote=None):
631 603 repo = remote or self
632 604 if key in repo.branchmap():
633 605 return key
634 606
635 607 repo = (remote and remote.local()) and remote or self
636 608 return repo[key].branch()
637 609
638 610 def known(self, nodes):
639 611 nm = self.changelog.nodemap
640 612 pc = self._phasecache
641 613 result = []
642 614 for n in nodes:
643 615 r = nm.get(n)
644 616 resp = not (r is None or pc.phase(self, r) >= phases.secret)
645 617 result.append(resp)
646 618 return result
647 619
648 620 def local(self):
649 621 return self
650 622
651 623 def join(self, f):
652 624 return os.path.join(self.path, f)
653 625
654 626 def wjoin(self, f):
655 627 return os.path.join(self.root, f)
656 628
657 629 def file(self, f):
658 630 if f[0] == '/':
659 631 f = f[1:]
660 632 return filelog.filelog(self.sopener, f)
661 633
662 634 def changectx(self, changeid):
663 635 return self[changeid]
664 636
665 637 def parents(self, changeid=None):
666 638 '''get list of changectxs for parents of changeid'''
667 639 return self[changeid].parents()
668 640
669 641 def setparents(self, p1, p2=nullid):
670 642 copies = self.dirstate.setparents(p1, p2)
671 643 if copies:
672 644 # Adjust copy records, the dirstate cannot do it, it
673 645 # requires access to parents manifests. Preserve them
674 646 # only for entries added to first parent.
675 647 pctx = self[p1]
676 648 for f in copies:
677 649 if f not in pctx and copies[f] in pctx:
678 650 self.dirstate.copy(copies[f], f)
679 651
680 652 def filectx(self, path, changeid=None, fileid=None):
681 653 """changeid can be a changeset revision, node, or tag.
682 654 fileid can be a file revision or node."""
683 655 return context.filectx(self, path, changeid, fileid)
684 656
685 657 def getcwd(self):
686 658 return self.dirstate.getcwd()
687 659
688 660 def pathto(self, f, cwd=None):
689 661 return self.dirstate.pathto(f, cwd)
690 662
691 663 def wfile(self, f, mode='r'):
692 664 return self.wopener(f, mode)
693 665
694 666 def _link(self, f):
695 667 return os.path.islink(self.wjoin(f))
696 668
697 669 def _loadfilter(self, filter):
698 670 if filter not in self.filterpats:
699 671 l = []
700 672 for pat, cmd in self.ui.configitems(filter):
701 673 if cmd == '!':
702 674 continue
703 675 mf = matchmod.match(self.root, '', [pat])
704 676 fn = None
705 677 params = cmd
706 678 for name, filterfn in self._datafilters.iteritems():
707 679 if cmd.startswith(name):
708 680 fn = filterfn
709 681 params = cmd[len(name):].lstrip()
710 682 break
711 683 if not fn:
712 684 fn = lambda s, c, **kwargs: util.filter(s, c)
713 685 # Wrap old filters not supporting keyword arguments
714 686 if not inspect.getargspec(fn)[2]:
715 687 oldfn = fn
716 688 fn = lambda s, c, **kwargs: oldfn(s, c)
717 689 l.append((mf, fn, params))
718 690 self.filterpats[filter] = l
719 691 return self.filterpats[filter]
720 692
721 693 def _filter(self, filterpats, filename, data):
722 694 for mf, fn, cmd in filterpats:
723 695 if mf(filename):
724 696 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
725 697 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
726 698 break
727 699
728 700 return data
729 701
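
# Minimal sketch of the first-match dispatch in _filter, with plain
# callables standing in for Mercurial's matcher objects (the pattern and
# filter below are hypothetical stand-ins for [encode]/[decode] entries):
filterpats = [
    (lambda fn: fn.endswith('.txt'),
     lambda data, cmd: data.replace('\r\n', '\n'),
     'eol cleanup'),
]

def runfilters(pats, filename, data):
    for match, fn, cmd in pats:
        if match(filename):
            return fn(data, cmd)          # only the first match applies
    return data

assert runfilters(filterpats, 'a.txt', 'x\r\ny') == 'x\ny'
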
730 702 @propertycache
731 703 def _encodefilterpats(self):
732 704 return self._loadfilter('encode')
733 705
734 706 @propertycache
735 707 def _decodefilterpats(self):
736 708 return self._loadfilter('decode')
737 709
738 710 def adddatafilter(self, name, filter):
739 711 self._datafilters[name] = filter
740 712
741 713 def wread(self, filename):
742 714 if self._link(filename):
743 715 data = os.readlink(self.wjoin(filename))
744 716 else:
745 717 data = self.wopener.read(filename)
746 718 return self._filter(self._encodefilterpats, filename, data)
747 719
748 720 def wwrite(self, filename, data, flags):
749 721 data = self._filter(self._decodefilterpats, filename, data)
750 722 if 'l' in flags:
751 723 self.wopener.symlink(data, filename)
752 724 else:
753 725 self.wopener.write(filename, data)
754 726 if 'x' in flags:
755 727 util.setflags(self.wjoin(filename), False, True)
756 728
757 729 def wwritedata(self, filename, data):
758 730 return self._filter(self._decodefilterpats, filename, data)
759 731
760 732 def transaction(self, desc):
761 733 tr = self._transref and self._transref() or None
762 734 if tr and tr.running():
763 735 return tr.nest()
764 736
765 737 # abort here if the journal already exists
766 738 if os.path.exists(self.sjoin("journal")):
767 739 raise error.RepoError(
768 740 _("abandoned transaction found - run hg recover"))
769 741
770 742 self._writejournal(desc)
771 743 renames = [(x, undoname(x)) for x in self._journalfiles()]
772 744
773 745 tr = transaction.transaction(self.ui.warn, self.sopener,
774 746 self.sjoin("journal"),
775 747 aftertrans(renames),
776 748 self.store.createmode)
777 749 self._transref = weakref.ref(tr)
778 750 return tr
779 751
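
# The calling convention transaction() expects, mirroring commitctx()
# further down (assumes `repo` is an open localrepository): close()
# commits, while release() without a prior close() aborts and replays
# the journal.
tr = None
lock = repo.lock()
try:
    tr = repo.transaction('example')
    # ... write to revlogs through the transaction ...
    tr.close()
finally:
    if tr:
        tr.release()
    lock.release()
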
780 752 def _journalfiles(self):
781 753 return (self.sjoin('journal'), self.join('journal.dirstate'),
782 754 self.join('journal.branch'), self.join('journal.desc'),
783 755 self.join('journal.bookmarks'),
784 756 self.sjoin('journal.phaseroots'))
785 757
786 758 def undofiles(self):
787 759 return [undoname(x) for x in self._journalfiles()]
788 760
789 761 def _writejournal(self, desc):
790 762 self.opener.write("journal.dirstate",
791 763 self.opener.tryread("dirstate"))
792 764 self.opener.write("journal.branch",
793 765 encoding.fromlocal(self.dirstate.branch()))
794 766 self.opener.write("journal.desc",
795 767 "%d\n%s\n" % (len(self), desc))
796 768 self.opener.write("journal.bookmarks",
797 769 self.opener.tryread("bookmarks"))
798 770 self.sopener.write("journal.phaseroots",
799 771 self.sopener.tryread("phaseroots"))
800 772
801 773 def recover(self):
802 774 lock = self.lock()
803 775 try:
804 776 if os.path.exists(self.sjoin("journal")):
805 777 self.ui.status(_("rolling back interrupted transaction\n"))
806 778 transaction.rollback(self.sopener, self.sjoin("journal"),
807 779 self.ui.warn)
808 780 self.invalidate()
809 781 return True
810 782 else:
811 783 self.ui.warn(_("no interrupted transaction available\n"))
812 784 return False
813 785 finally:
814 786 lock.release()
815 787
816 788 def rollback(self, dryrun=False, force=False):
817 789 wlock = lock = None
818 790 try:
819 791 wlock = self.wlock()
820 792 lock = self.lock()
821 793 if os.path.exists(self.sjoin("undo")):
822 794 return self._rollback(dryrun, force)
823 795 else:
824 796 self.ui.warn(_("no rollback information available\n"))
825 797 return 1
826 798 finally:
827 799 release(lock, wlock)
828 800
829 801 def _rollback(self, dryrun, force):
830 802 ui = self.ui
831 803 try:
832 804 args = self.opener.read('undo.desc').splitlines()
833 805 (oldlen, desc, detail) = (int(args[0]), args[1], None)
834 806 if len(args) >= 3:
835 807 detail = args[2]
836 808 oldtip = oldlen - 1
837 809
838 810 if detail and ui.verbose:
839 811 msg = (_('repository tip rolled back to revision %s'
840 812 ' (undo %s: %s)\n')
841 813 % (oldtip, desc, detail))
842 814 else:
843 815 msg = (_('repository tip rolled back to revision %s'
844 816 ' (undo %s)\n')
845 817 % (oldtip, desc))
846 818 except IOError:
847 819 msg = _('rolling back unknown transaction\n')
848 820 desc = None
849 821
850 822 if not force and self['.'] != self['tip'] and desc == 'commit':
851 823 raise util.Abort(
852 824 _('rollback of last commit while not checked out '
853 825 'may lose data'), hint=_('use -f to force'))
854 826
855 827 ui.status(msg)
856 828 if dryrun:
857 829 return 0
858 830
859 831 parents = self.dirstate.parents()
860 832 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
861 833 if os.path.exists(self.join('undo.bookmarks')):
862 834 util.rename(self.join('undo.bookmarks'),
863 835 self.join('bookmarks'))
864 836 if os.path.exists(self.sjoin('undo.phaseroots')):
865 837 util.rename(self.sjoin('undo.phaseroots'),
866 838 self.sjoin('phaseroots'))
867 839 self.invalidate()
868 840
869 841 parentgone = (parents[0] not in self.changelog.nodemap or
870 842 parents[1] not in self.changelog.nodemap)
871 843 if parentgone:
872 844 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
873 845 try:
874 846 branch = self.opener.read('undo.branch')
875 847 self.dirstate.setbranch(branch)
876 848 except IOError:
877 849 ui.warn(_('named branch could not be reset: '
878 850 'current branch is still \'%s\'\n')
879 851 % self.dirstate.branch())
880 852
881 853 self.dirstate.invalidate()
882 854 parents = tuple([p.rev() for p in self.parents()])
883 855 if len(parents) > 1:
884 856 ui.status(_('working directory now based on '
885 857 'revisions %d and %d\n') % parents)
886 858 else:
887 859 ui.status(_('working directory now based on '
888 860 'revision %d\n') % parents)
889 # TODO: if we know which new heads may result from this rollback, pass
890 # them to destroy(), which will prevent the branchhead cache from being
891 # invalidated.
892 861 self.destroyed()
893 862 return 0
894 863
895 864 def invalidatecaches(self):
896 865 def delcache(name):
897 866 try:
898 867 delattr(self, name)
899 868 except AttributeError:
900 869 pass
901 870
902 871 delcache('_tagscache')
903 872
904 873 self._branchcache = None # in UTF-8
905 874 self._branchcachetip = None
906 875
907 876 def invalidatedirstate(self):
908 877 '''Invalidates the dirstate, causing the next call to dirstate
909 878 to check if it was modified since the last time it was read,
910 879 rereading it if it has.
911 880
912 881 This is different from dirstate.invalidate() in that it doesn't
913 882 always reread the dirstate. Use dirstate.invalidate() if you want to
914 883 explicitly read the dirstate again (i.e. restoring it to a previous
915 884 known good state).'''
916 885 if 'dirstate' in self.__dict__:
917 886 for k in self.dirstate._filecache:
918 887 try:
919 888 delattr(self.dirstate, k)
920 889 except AttributeError:
921 890 pass
922 891 delattr(self, 'dirstate')
923 892
924 893 def invalidate(self):
925 894 for k in self._filecache:
926 895 # dirstate is invalidated separately in invalidatedirstate()
927 896 if k == 'dirstate':
928 897 continue
929 898
930 899 try:
931 900 delattr(self, k)
932 901 except AttributeError:
933 902 pass
934 903 self.invalidatecaches()
935 904
936 905 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
937 906 try:
938 907 l = lock.lock(lockname, 0, releasefn, desc=desc)
939 908 except error.LockHeld, inst:
940 909 if not wait:
941 910 raise
942 911 self.ui.warn(_("waiting for lock on %s held by %r\n") %
943 912 (desc, inst.locker))
944 913 # default to 600 seconds timeout
945 914 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
946 915 releasefn, desc=desc)
947 916 if acquirefn:
948 917 acquirefn()
949 918 return l
950 919
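
# Lock-ordering sketch: when both locks are needed, take wlock (the
# working copy lock) before lock (the store lock), as commit() below
# does, and release them in reverse order (assumes an open `repo`).
wlock = lock = None
try:
    wlock = repo.wlock()
    lock = repo.lock()
    # ... modify both the working copy and the store ...
finally:
    release(lock, wlock)                  # from mercurial.lock, see rollback()
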
951 920 def _afterlock(self, callback):
952 921 """add a callback to the current repository lock.
953 922
954 923 The callback will be executed on lock release."""
955 924 l = self._lockref and self._lockref()
956 925 if l:
957 926 l.postrelease.append(callback)
958 927 else:
959 928 callback()
960 929
961 930 def lock(self, wait=True):
962 931 '''Lock the repository store (.hg/store) and return a weak reference
963 932 to the lock. Use this before modifying the store (e.g. committing or
964 933 stripping). If you are opening a transaction, get a lock as well.)'''
965 934 l = self._lockref and self._lockref()
966 935 if l is not None and l.held:
967 936 l.lock()
968 937 return l
969 938
970 939 def unlock():
971 940 self.store.write()
972 941 if '_phasecache' in vars(self):
973 942 self._phasecache.write()
974 943 for k, ce in self._filecache.items():
975 944 if k == 'dirstate':
976 945 continue
977 946 ce.refresh()
978 947
979 948 l = self._lock(self.sjoin("lock"), wait, unlock,
980 949 self.invalidate, _('repository %s') % self.origroot)
981 950 self._lockref = weakref.ref(l)
982 951 return l
983 952
984 953 def wlock(self, wait=True):
985 954 '''Lock the non-store parts of the repository (everything under
986 955 .hg except .hg/store) and return a weak reference to the lock.
987 956 Use this before modifying files in .hg.'''
988 957 l = self._wlockref and self._wlockref()
989 958 if l is not None and l.held:
990 959 l.lock()
991 960 return l
992 961
993 962 def unlock():
994 963 self.dirstate.write()
995 964 ce = self._filecache.get('dirstate')
996 965 if ce:
997 966 ce.refresh()
998 967
999 968 l = self._lock(self.join("wlock"), wait, unlock,
1000 969 self.invalidatedirstate, _('working directory of %s') %
1001 970 self.origroot)
1002 971 self._wlockref = weakref.ref(l)
1003 972 return l
1004 973
1005 974 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1006 975 """
1007 976 commit an individual file as part of a larger transaction
1008 977 """
1009 978
1010 979 fname = fctx.path()
1011 980 text = fctx.data()
1012 981 flog = self.file(fname)
1013 982 fparent1 = manifest1.get(fname, nullid)
1014 983 fparent2 = fparent2o = manifest2.get(fname, nullid)
1015 984
1016 985 meta = {}
1017 986 copy = fctx.renamed()
1018 987 if copy and copy[0] != fname:
1019 988 # Mark the new revision of this file as a copy of another
1020 989 # file. This copy data will effectively act as a parent
1021 990 # of this new revision. If this is a merge, the first
1022 991 # parent will be the nullid (meaning "look up the copy data")
1023 992 # and the second one will be the other parent. For example:
1024 993 #
1025 994 # 0 --- 1 --- 3 rev1 changes file foo
1026 995 # \ / rev2 renames foo to bar and changes it
1027 996 # \- 2 -/ rev3 should have bar with all changes and
1028 997 # should record that bar descends from
1029 998 # bar in rev2 and foo in rev1
1030 999 #
1031 1000 # this allows this merge to succeed:
1032 1001 #
1033 1002 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1034 1003 # \ / merging rev3 and rev4 should use bar@rev2
1035 1004 # \- 2 --- 4 as the merge base
1036 1005 #
1037 1006
1038 1007 cfname = copy[0]
1039 1008 crev = manifest1.get(cfname)
1040 1009 newfparent = fparent2
1041 1010
1042 1011 if manifest2: # branch merge
1043 1012 if fparent2 == nullid or crev is None: # copied on remote side
1044 1013 if cfname in manifest2:
1045 1014 crev = manifest2[cfname]
1046 1015 newfparent = fparent1
1047 1016
1048 1017 # find source in nearest ancestor if we've lost track
1049 1018 if not crev:
1050 1019 self.ui.debug(" %s: searching for copy revision for %s\n" %
1051 1020 (fname, cfname))
1052 1021 for ancestor in self[None].ancestors():
1053 1022 if cfname in ancestor:
1054 1023 crev = ancestor[cfname].filenode()
1055 1024 break
1056 1025
1057 1026 if crev:
1058 1027 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1059 1028 meta["copy"] = cfname
1060 1029 meta["copyrev"] = hex(crev)
1061 1030 fparent1, fparent2 = nullid, newfparent
1062 1031 else:
1063 1032 self.ui.warn(_("warning: can't find ancestor for '%s' "
1064 1033 "copied from '%s'!\n") % (fname, cfname))
1065 1034
1066 1035 elif fparent2 != nullid:
1067 1036 # is one parent an ancestor of the other?
1068 1037 fparentancestor = flog.ancestor(fparent1, fparent2)
1069 1038 if fparentancestor == fparent1:
1070 1039 fparent1, fparent2 = fparent2, nullid
1071 1040 elif fparentancestor == fparent2:
1072 1041 fparent2 = nullid
1073 1042
1074 1043 # is the file changed?
1075 1044 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1076 1045 changelist.append(fname)
1077 1046 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1078 1047
1079 1048 # are just the flags changed during merge?
1080 1049 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1081 1050 changelist.append(fname)
1082 1051
1083 1052 return fparent1
1084 1053
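
# Sketch of the rename metadata _filecommit hands to flog.add(): the hex
# value below is a hypothetical placeholder for the copy source's filelog
# node. Setting fparent1 to nullid is the "look up the copy data" signal
# described in the comment block above.
meta = {
    'copy': 'foo',                        # path the file was renamed from
    'copyrev': '0' * 40,                  # hex filelog node of that source
}
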
1085 1054 def commit(self, text="", user=None, date=None, match=None, force=False,
1086 1055 editor=False, extra={}):
1087 1056 """Add a new revision to current repository.
1088 1057
1089 1058 Revision information is gathered from the working directory,
1090 1059 match can be used to filter the committed files. If editor is
1091 1060 supplied, it is called to get a commit message.
1092 1061 """
1093 1062
1094 1063 def fail(f, msg):
1095 1064 raise util.Abort('%s: %s' % (f, msg))
1096 1065
1097 1066 if not match:
1098 1067 match = matchmod.always(self.root, '')
1099 1068
1100 1069 if not force:
1101 1070 vdirs = []
1102 1071 match.dir = vdirs.append
1103 1072 match.bad = fail
1104 1073
1105 1074 wlock = self.wlock()
1106 1075 try:
1107 1076 wctx = self[None]
1108 1077 merge = len(wctx.parents()) > 1
1109 1078
1110 1079 if (not force and merge and match and
1111 1080 (match.files() or match.anypats())):
1112 1081 raise util.Abort(_('cannot partially commit a merge '
1113 1082 '(do not specify files or patterns)'))
1114 1083
1115 1084 changes = self.status(match=match, clean=force)
1116 1085 if force:
1117 1086 changes[0].extend(changes[6]) # mq may commit unchanged files
1118 1087
1119 1088 # check subrepos
1120 1089 subs = []
1121 1090 commitsubs = set()
1122 1091 newstate = wctx.substate.copy()
1123 1092 # only manage subrepos and .hgsubstate if .hgsub is present
1124 1093 if '.hgsub' in wctx:
1125 1094 # we'll decide whether to track this ourselves, thanks
1126 1095 if '.hgsubstate' in changes[0]:
1127 1096 changes[0].remove('.hgsubstate')
1128 1097 if '.hgsubstate' in changes[2]:
1129 1098 changes[2].remove('.hgsubstate')
1130 1099
1131 1100 # compare current state to last committed state
1132 1101 # build new substate based on last committed state
1133 1102 oldstate = wctx.p1().substate
1134 1103 for s in sorted(newstate.keys()):
1135 1104 if not match(s):
1136 1105 # ignore working copy, use old state if present
1137 1106 if s in oldstate:
1138 1107 newstate[s] = oldstate[s]
1139 1108 continue
1140 1109 if not force:
1141 1110 raise util.Abort(
1142 1111 _("commit with new subrepo %s excluded") % s)
1143 1112 if wctx.sub(s).dirty(True):
1144 1113 if not self.ui.configbool('ui', 'commitsubrepos'):
1145 1114 raise util.Abort(
1146 1115 _("uncommitted changes in subrepo %s") % s,
1147 1116 hint=_("use --subrepos for recursive commit"))
1148 1117 subs.append(s)
1149 1118 commitsubs.add(s)
1150 1119 else:
1151 1120 bs = wctx.sub(s).basestate()
1152 1121 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1153 1122 if oldstate.get(s, (None, None, None))[1] != bs:
1154 1123 subs.append(s)
1155 1124
1156 1125 # check for removed subrepos
1157 1126 for p in wctx.parents():
1158 1127 r = [s for s in p.substate if s not in newstate]
1159 1128 subs += [s for s in r if match(s)]
1160 1129 if subs:
1161 1130 if (not match('.hgsub') and
1162 1131 '.hgsub' in (wctx.modified() + wctx.added())):
1163 1132 raise util.Abort(
1164 1133 _("can't commit subrepos without .hgsub"))
1165 1134 changes[0].insert(0, '.hgsubstate')
1166 1135
1167 1136 elif '.hgsub' in changes[2]:
1168 1137 # clean up .hgsubstate when .hgsub is removed
1169 1138 if ('.hgsubstate' in wctx and
1170 1139 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1171 1140 changes[2].insert(0, '.hgsubstate')
1172 1141
1173 1142 # make sure all explicit patterns are matched
1174 1143 if not force and match.files():
1175 1144 matched = set(changes[0] + changes[1] + changes[2])
1176 1145
1177 1146 for f in match.files():
1178 1147 if f == '.' or f in matched or f in wctx.substate:
1179 1148 continue
1180 1149 if f in changes[3]: # missing
1181 1150 fail(f, _('file not found!'))
1182 1151 if f in vdirs: # visited directory
1183 1152 d = f + '/'
1184 1153 for mf in matched:
1185 1154 if mf.startswith(d):
1186 1155 break
1187 1156 else:
1188 1157 fail(f, _("no match under directory!"))
1189 1158 elif f not in self.dirstate:
1190 1159 fail(f, _("file not tracked!"))
1191 1160
1192 1161 if (not force and not extra.get("close") and not merge
1193 1162 and not (changes[0] or changes[1] or changes[2])
1194 1163 and wctx.branch() == wctx.p1().branch()):
1195 1164 return None
1196 1165
1197 1166 if merge and changes[3]:
1198 1167 raise util.Abort(_("cannot commit merge with missing files"))
1199 1168
1200 1169 ms = mergemod.mergestate(self)
1201 1170 for f in changes[0]:
1202 1171 if f in ms and ms[f] == 'u':
1203 1172 raise util.Abort(_("unresolved merge conflicts "
1204 1173 "(see hg help resolve)"))
1205 1174
1206 1175 cctx = context.workingctx(self, text, user, date, extra, changes)
1207 1176 if editor:
1208 1177 cctx._text = editor(self, cctx, subs)
1209 1178 edited = (text != cctx._text)
1210 1179
1211 1180 # commit subs and write new state
1212 1181 if subs:
1213 1182 for s in sorted(commitsubs):
1214 1183 sub = wctx.sub(s)
1215 1184 self.ui.status(_('committing subrepository %s\n') %
1216 1185 subrepo.subrelpath(sub))
1217 1186 sr = sub.commit(cctx._text, user, date)
1218 1187 newstate[s] = (newstate[s][0], sr)
1219 1188 subrepo.writestate(self, newstate)
1220 1189
1221 1190 # Save commit message in case this transaction gets rolled back
1222 1191 # (e.g. by a pretxncommit hook). Leave the content alone on
1223 1192 # the assumption that the user will use the same editor again.
1224 1193 msgfn = self.savecommitmessage(cctx._text)
1225 1194
1226 1195 p1, p2 = self.dirstate.parents()
1227 1196 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1228 1197 try:
1229 1198 self.hook("precommit", throw=True, parent1=hookp1,
1230 1199 parent2=hookp2)
1231 1200 ret = self.commitctx(cctx, True)
1232 1201 except: # re-raises
1233 1202 if edited:
1234 1203 self.ui.write(
1235 1204 _('note: commit message saved in %s\n') % msgfn)
1236 1205 raise
1237 1206
1238 1207 # update bookmarks, dirstate and mergestate
1239 1208 bookmarks.update(self, [p1, p2], ret)
1240 1209 for f in changes[0] + changes[1]:
1241 1210 self.dirstate.normal(f)
1242 1211 for f in changes[2]:
1243 1212 self.dirstate.drop(f)
1244 1213 self.dirstate.setparents(ret)
1245 1214 ms.reset()
1246 1215 finally:
1247 1216 wlock.release()
1248 1217
1249 1218 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1250 1219 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1251 1220 self._afterlock(commithook)
1252 1221 return ret
1253 1222
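
# Usage sketch (assumes an open `repo` and mercurial.match as matchmod):
# commit only .hgtags, exactly the way _tag() above drives this method.
m = matchmod.exact(repo.root, '', ['.hgtags'])
node = repo.commit('update tags', user='example <user@example.com>',
                   match=m)
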
1254 1223 def commitctx(self, ctx, error=False):
1255 1224 """Add a new revision to current repository.
1256 1225 Revision information is passed via the context argument.
1257 1226 """
1258 1227
1259 1228 tr = lock = None
1260 1229 removed = list(ctx.removed())
1261 1230 p1, p2 = ctx.p1(), ctx.p2()
1262 1231 user = ctx.user()
1263 1232
1264 1233 lock = self.lock()
1265 1234 try:
1266 1235 tr = self.transaction("commit")
1267 1236 trp = weakref.proxy(tr)
1268 1237
1269 1238 if ctx.files():
1270 1239 m1 = p1.manifest().copy()
1271 1240 m2 = p2.manifest()
1272 1241
1273 1242 # check in files
1274 1243 new = {}
1275 1244 changed = []
1276 1245 linkrev = len(self)
1277 1246 for f in sorted(ctx.modified() + ctx.added()):
1278 1247 self.ui.note(f + "\n")
1279 1248 try:
1280 1249 fctx = ctx[f]
1281 1250 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1282 1251 changed)
1283 1252 m1.set(f, fctx.flags())
1284 1253 except OSError, inst:
1285 1254 self.ui.warn(_("trouble committing %s!\n") % f)
1286 1255 raise
1287 1256 except IOError, inst:
1288 1257 errcode = getattr(inst, 'errno', errno.ENOENT)
1289 1258 if error or errcode and errcode != errno.ENOENT:
1290 1259 self.ui.warn(_("trouble committing %s!\n") % f)
1291 1260 raise
1292 1261 else:
1293 1262 removed.append(f)
1294 1263
1295 1264 # update manifest
1296 1265 m1.update(new)
1297 1266 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1298 1267 drop = [f for f in removed if f in m1]
1299 1268 for f in drop:
1300 1269 del m1[f]
1301 1270 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1302 1271 p2.manifestnode(), (new, drop))
1303 1272 files = changed + removed
1304 1273 else:
1305 1274 mn = p1.manifestnode()
1306 1275 files = []
1307 1276
1308 1277 # update changelog
1309 1278 self.changelog.delayupdate()
1310 1279 n = self.changelog.add(mn, files, ctx.description(),
1311 1280 trp, p1.node(), p2.node(),
1312 1281 user, ctx.date(), ctx.extra().copy())
1313 1282 p = lambda: self.changelog.writepending() and self.root or ""
1314 1283 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1315 1284 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1316 1285 parent2=xp2, pending=p)
1317 1286 self.changelog.finalize(trp)
1318 1287 # set the new commit in its proper phase
1319 1288 targetphase = phases.newcommitphase(self.ui)
1320 1289 if targetphase:
1321 1290 # retracting the boundary does not alter parent changesets;
1322 1291 # if a parent has a higher phase, the resulting phase will
1323 1292 # be compliant anyway
1324 1293 #
1325 1294 # if minimal phase was 0 we don't need to retract anything
1326 1295 phases.retractboundary(self, targetphase, [n])
1327 1296 tr.close()
1328 1297 self.updatebranchcache()
1329 1298 return n
1330 1299 finally:
1331 1300 if tr:
1332 1301 tr.release()
1333 1302 lock.release()
1334 1303
1335 def destroyed(self, newheadrevs=None):
1304 def destroyed(self):
1336 1305 '''Inform the repository that nodes have been destroyed.
1337 1306 Intended for use by strip and rollback, so there's a common
1338 place for anything that has to be done after destroying history.
1339
1340 If you know the branchhead cache was up to date before nodes were
1341 removed and you also know the set of candidate new heads that may
1342 have resulted from the destruction, you can set newheadrevs. This
1343 enables the code to update the branchheads cache, rather than having
1344 future code decide it's invalid and regenerate it.
1345 '''
1346 if newheadrevs:
1347 tiprev = len(self) - 1
1348 ctxgen = (self[rev] for rev in newheadrevs)
1349 self._updatebranchcache(self._branchcache, ctxgen)
1350 self._writebranchcache(self._branchcache, self.changelog.tip(),
1351 tiprev)
1352 else:
1353 # No info to update the cache. If nodes were destroyed, the cache
1354 # is stale and this will be caught the next time it is read.
1355 pass
1307 place for anything that has to be done after destroying history.'''
1308 # XXX it might be nice if we could take the list of destroyed
1309 # nodes, but I don't see an easy way for rollback() to do that
1356 1310
1357 1311 # Ensure the persistent tag cache is updated. Doing it now
1358 1312 # means that the tag cache only has to worry about destroyed
1359 1313 # heads immediately after a strip/rollback. That in turn
1360 1314 # guarantees that "cachetip == currenttip" (comparing both rev
1361 1315 # and node) always means no nodes have been added or destroyed.
1362 1316
1363 1317 # XXX this is suboptimal when qrefresh'ing: we strip the current
1364 1318 # head, refresh the tag cache, then immediately add a new head.
1365 1319 # But I think doing it this way is necessary for the "instant
1366 1320 # tag cache retrieval" case to work.
1367 1321 self.invalidatecaches()
1368 1322
1369 1323 # Discard all cache entries to force reloading everything.
1370 1324 self._filecache.clear()
1371 1325
1372 1326 def walk(self, match, node=None):
1373 1327 '''
1374 1328 walk recursively through the directory tree or a given
1375 1329 changeset, finding all files matched by the match
1376 1330 function
1377 1331 '''
1378 1332 return self[node].walk(match)
1379 1333
1380 1334 def status(self, node1='.', node2=None, match=None,
1381 1335 ignored=False, clean=False, unknown=False,
1382 1336 listsubrepos=False):
1383 1337 """return status of files between two nodes or node and working
1384 1338 directory.
1385 1339
1386 1340 If node1 is None, use the first dirstate parent instead.
1387 1341 If node2 is None, compare node1 with working directory.
1388 1342 """
1389 1343
1390 1344 def mfmatches(ctx):
1391 1345 mf = ctx.manifest().copy()
1392 1346 if match.always():
1393 1347 return mf
1394 1348 for fn in mf.keys():
1395 1349 if not match(fn):
1396 1350 del mf[fn]
1397 1351 return mf
1398 1352
1399 1353 if isinstance(node1, context.changectx):
1400 1354 ctx1 = node1
1401 1355 else:
1402 1356 ctx1 = self[node1]
1403 1357 if isinstance(node2, context.changectx):
1404 1358 ctx2 = node2
1405 1359 else:
1406 1360 ctx2 = self[node2]
1407 1361
1408 1362 working = ctx2.rev() is None
1409 1363 parentworking = working and ctx1 == self['.']
1410 1364 match = match or matchmod.always(self.root, self.getcwd())
1411 1365 listignored, listclean, listunknown = ignored, clean, unknown
1412 1366
1413 1367 # load earliest manifest first for caching reasons
1414 1368 if not working and ctx2.rev() < ctx1.rev():
1415 1369 ctx2.manifest()
1416 1370
1417 1371 if not parentworking:
1418 1372 def bad(f, msg):
1419 1373 # 'f' may be a directory pattern from 'match.files()',
1420 1374 # so 'f not in ctx1' is not enough
1421 1375 if f not in ctx1 and f not in ctx1.dirs():
1422 1376 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1423 1377 match.bad = bad
1424 1378
1425 1379 if working: # we need to scan the working dir
1426 1380 subrepos = []
1427 1381 if '.hgsub' in self.dirstate:
1428 1382 subrepos = ctx2.substate.keys()
1429 1383 s = self.dirstate.status(match, subrepos, listignored,
1430 1384 listclean, listunknown)
1431 1385 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1432 1386
1433 1387 # check for any possibly clean files
1434 1388 if parentworking and cmp:
1435 1389 fixup = []
1436 1390 # do a full compare of any files that might have changed
1437 1391 for f in sorted(cmp):
1438 1392 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1439 1393 or ctx1[f].cmp(ctx2[f])):
1440 1394 modified.append(f)
1441 1395 else:
1442 1396 fixup.append(f)
1443 1397
1444 1398 # update dirstate for files that are actually clean
1445 1399 if fixup:
1446 1400 if listclean:
1447 1401 clean += fixup
1448 1402
1449 1403 try:
1450 1404 # updating the dirstate is optional
1451 1405 # so we don't wait on the lock
1452 1406 wlock = self.wlock(False)
1453 1407 try:
1454 1408 for f in fixup:
1455 1409 self.dirstate.normal(f)
1456 1410 finally:
1457 1411 wlock.release()
1458 1412 except error.LockError:
1459 1413 pass
1460 1414
1461 1415 if not parentworking:
1462 1416 mf1 = mfmatches(ctx1)
1463 1417 if working:
1464 1418 # we are comparing working dir against non-parent
1465 1419 # generate a pseudo-manifest for the working dir
1466 1420 mf2 = mfmatches(self['.'])
1467 1421 for f in cmp + modified + added:
1468 1422 mf2[f] = None
1469 1423 mf2.set(f, ctx2.flags(f))
1470 1424 for f in removed:
1471 1425 if f in mf2:
1472 1426 del mf2[f]
1473 1427 else:
1474 1428 # we are comparing two revisions
1475 1429 deleted, unknown, ignored = [], [], []
1476 1430 mf2 = mfmatches(ctx2)
1477 1431
1478 1432 modified, added, clean = [], [], []
1479 1433 withflags = mf1.withflags() | mf2.withflags()
1480 1434 for fn in mf2:
1481 1435 if fn in mf1:
1482 1436 if (fn not in deleted and
1483 1437 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1484 1438 (mf1[fn] != mf2[fn] and
1485 1439 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1486 1440 modified.append(fn)
1487 1441 elif listclean:
1488 1442 clean.append(fn)
1489 1443 del mf1[fn]
1490 1444 elif fn not in deleted:
1491 1445 added.append(fn)
1492 1446 removed = mf1.keys()
1493 1447
1494 1448 if working and modified and not self.dirstate._checklink:
1495 1449 # Symlink placeholders may get non-symlink-like contents
1496 1450 # via user error or dereferencing by NFS or Samba servers,
1497 1451 # so we filter out any placeholders that don't look like a
1498 1452 # symlink
1499 1453 sane = []
1500 1454 for f in modified:
1501 1455 if ctx2.flags(f) == 'l':
1502 1456 d = ctx2[f].data()
1503 1457 if len(d) >= 1024 or '\n' in d or util.binary(d):
1504 1458 self.ui.debug('ignoring suspect symlink placeholder'
1505 1459 ' "%s"\n' % f)
1506 1460 continue
1507 1461 sane.append(f)
1508 1462 modified = sane
1509 1463
1510 1464 r = modified, added, removed, deleted, unknown, ignored, clean
1511 1465
1512 1466 if listsubrepos:
1513 1467 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1514 1468 if working:
1515 1469 rev2 = None
1516 1470 else:
1517 1471 rev2 = ctx2.substate[subpath][1]
1518 1472 try:
1519 1473 submatch = matchmod.narrowmatcher(subpath, match)
1520 1474 s = sub.status(rev2, match=submatch, ignored=listignored,
1521 1475 clean=listclean, unknown=listunknown,
1522 1476 listsubrepos=True)
1523 1477 for rfiles, sfiles in zip(r, s):
1524 1478 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1525 1479 except error.LookupError:
1526 1480 self.ui.status(_("skipping missing subrepository: %s\n")
1527 1481 % subpath)
1528 1482
1529 1483 for l in r:
1530 1484 l.sort()
1531 1485 return r
1532 1486
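
# The seven lists come back in a fixed order; a typical unpacking
# (assumes an open `repo`; ignored/clean/unknown stay empty unless
# explicitly requested):
modified, added, removed, deleted, unknown, ignored, clean = \
    repo.status(ignored=True, clean=True, unknown=True)
for f in modified:
    print 'M %s' % f
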
1533 1487 def heads(self, start=None):
1534 1488 heads = self.changelog.heads(start)
1535 1489 # sort the output in rev descending order
1536 1490 return sorted(heads, key=self.changelog.rev, reverse=True)
1537 1491
1538 1492 def branchheads(self, branch=None, start=None, closed=False):
1539 1493 '''return a (possibly filtered) list of heads for the given branch
1540 1494
1541 1495 Heads are returned in topological order, from newest to oldest.
1542 1496 If branch is None, use the dirstate branch.
1543 1497 If start is not None, return only heads reachable from start.
1544 1498 If closed is True, return heads that are marked as closed as well.
1545 1499 '''
1546 1500 if branch is None:
1547 1501 branch = self[None].branch()
1548 1502 branches = self.branchmap()
1549 1503 if branch not in branches:
1550 1504 return []
1551 1505 # the cache returns heads ordered lowest to highest
1552 1506 bheads = list(reversed(branches[branch]))
1553 1507 if start is not None:
1554 1508 # filter out the heads that cannot be reached from startrev
1555 1509 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1556 1510 bheads = [h for h in bheads if h in fbheads]
1557 1511 if not closed:
1558 1512 bheads = [h for h in bheads if not self[h].closesbranch()]
1559 1513 return bheads
1560 1514
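As a hedged usage sketch of the branchheads() contract described in the docstring above (the helper name and loop are illustrative; branchheads and branchmap are the calls shown in this file):

    def describeheads(repo):
        bybranch = {}
        for branch in repo.branchmap():
            # include closed heads so every branch reports something;
            # results come back in topological order, newest first
            bybranch[branch] = repo.branchheads(branch, closed=True)
        return bybranch
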
1561 1515 def branches(self, nodes):
1562 1516 if not nodes:
1563 1517 nodes = [self.changelog.tip()]
1564 1518 b = []
1565 1519 for n in nodes:
1566 1520 t = n
1567 1521 while True:
1568 1522 p = self.changelog.parents(n)
1569 1523 if p[1] != nullid or p[0] == nullid:
1570 1524 b.append((t, n, p[0], p[1]))
1571 1525 break
1572 1526 n = p[0]
1573 1527 return b
1574 1528
1575 1529 def between(self, pairs):
1576 1530 r = []
1577 1531
1578 1532 for top, bottom in pairs:
1579 1533 n, l, i = top, [], 0
1580 1534 f = 1
1581 1535
1582 1536 while n != bottom and n != nullid:
1583 1537 p = self.changelog.parents(n)[0]
1584 1538 if i == f:
1585 1539 l.append(n)
1586 1540 f = f * 2
1587 1541 n = p
1588 1542 i += 1
1589 1543
1590 1544 r.append(l)
1591 1545
1592 1546 return r
1593 1547
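A hedged sketch of the sampling pattern between() implements: walking the first-parent chain from top towards bottom, it records ancestors at distances 1, 2, 4, 8, ..., which lets the discovery protocol binary-search for a common point. Modeled here on a plain list standing in for the chain:

    def sample(chain):            # chain[0] is top, chain[-1] is bottom
        l, f, i = [], 1, 0
        for n in chain[1:]:       # step towards bottom
            i += 1
            if i == f:            # record at exponentially growing distances
                l.append(n)
                f *= 2
        return l

    sample(list(range(10)))       # -> [1, 2, 4, 8]
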
1594 1548 def pull(self, remote, heads=None, force=False):
1595 1549 lock = self.lock()
1596 1550 try:
1597 1551 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1598 1552 force=force)
1599 1553 common, fetch, rheads = tmp
1600 1554 if not fetch:
1601 1555 self.ui.status(_("no changes found\n"))
1602 1556 added = []
1603 1557 result = 0
1604 1558 else:
1605 1559 if heads is None and list(common) == [nullid]:
1606 1560 self.ui.status(_("requesting all changes\n"))
1607 1561 elif heads is None and remote.capable('changegroupsubset'):
1608 1562 # issue1320, avoid a race if remote changed after discovery
1609 1563 heads = rheads
1610 1564
1611 1565 if remote.capable('getbundle'):
1612 1566 cg = remote.getbundle('pull', common=common,
1613 1567 heads=heads or rheads)
1614 1568 elif heads is None:
1615 1569 cg = remote.changegroup(fetch, 'pull')
1616 1570 elif not remote.capable('changegroupsubset'):
1617 1571 raise util.Abort(_("partial pull cannot be done because "
1618 1572 "other repository doesn't support "
1619 1573 "changegroupsubset."))
1620 1574 else:
1621 1575 cg = remote.changegroupsubset(fetch, heads, 'pull')
1622 1576 clstart = len(self.changelog)
1623 1577 result = self.addchangegroup(cg, 'pull', remote.url())
1624 1578 clend = len(self.changelog)
1625 1579 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1626 1580
1627 1581 # compute target subset
1628 1582 if heads is None:
1629 1583 # We pulled everything possible
1630 1584 # sync on everything common
1631 1585 subset = common + added
1632 1586 else:
1633 1587 # We pulled a specific subset
1634 1588 # sync on this subset
1635 1589 subset = heads
1636 1590
1637 1591 # Get remote phases data from remote
1638 1592 remotephases = remote.listkeys('phases')
1639 1593 publishing = bool(remotephases.get('publishing', False))
1640 1594 if remotephases and not publishing:
1641 1595 # remote is new and non-publishing
1642 1596 pheads, _dr = phases.analyzeremotephases(self, subset,
1643 1597 remotephases)
1644 1598 phases.advanceboundary(self, phases.public, pheads)
1645 1599 phases.advanceboundary(self, phases.draft, subset)
1646 1600 else:
1647 1601 # Remote is old or publishing; all common changesets
1648 1602 # should be seen as public
1649 1603 phases.advanceboundary(self, phases.public, subset)
1650 1604 finally:
1651 1605 lock.release()
1652 1606
1653 1607 return result
1654 1608
1655 1609 def checkpush(self, force, revs):
1656 1610 """Extensions can override this function if additional checks have
1657 1611 to be performed before pushing, or call it if they override push
1658 1612 command.
1659 1613 """
1660 1614 pass
1661 1615
1662 1616 def push(self, remote, force=False, revs=None, newbranch=False):
1663 1617 '''Push outgoing changesets (limited by revs) from the current
1664 1618 repository to remote. Return an integer:
1665 1619 - None means nothing to push
1666 1620 - 0 means HTTP error
1667 1621 - 1 means we pushed and remote head count is unchanged *or*
1668 1622 we have outgoing changesets but refused to push
1669 1623 - other values as described by addchangegroup()
1670 1624 '''
1671 1625 # there are two ways to push to remote repo:
1672 1626 #
1673 1627 # addchangegroup assumes local user can lock remote
1674 1628 # repo (local filesystem, old ssh servers).
1675 1629 #
1676 1630 # unbundle assumes local user cannot lock remote repo (new ssh
1677 1631 # servers, http servers).
1678 1632
1679 1633 # get local lock as we might write phase data
1680 1634 locallock = self.lock()
1681 1635 try:
1682 1636 self.checkpush(force, revs)
1683 1637 lock = None
1684 1638 unbundle = remote.capable('unbundle')
1685 1639 if not unbundle:
1686 1640 lock = remote.lock()
1687 1641 try:
1688 1642 # discovery
1689 1643 fci = discovery.findcommonincoming
1690 1644 commoninc = fci(self, remote, force=force)
1691 1645 common, inc, remoteheads = commoninc
1692 1646 fco = discovery.findcommonoutgoing
1693 1647 outgoing = fco(self, remote, onlyheads=revs,
1694 1648 commoninc=commoninc, force=force)
1695 1649
1696 1650
1697 1651 if not outgoing.missing:
1698 1652 # nothing to push
1699 1653 scmutil.nochangesfound(self.ui, outgoing.excluded)
1700 1654 ret = None
1701 1655 else:
1702 1656 # something to push
1703 1657 if not force:
1704 1658 discovery.checkheads(self, remote, outgoing,
1705 1659 remoteheads, newbranch,
1706 1660 bool(inc))
1707 1661
1708 1662 # create a changegroup from local
1709 1663 if revs is None and not outgoing.excluded:
1710 1664 # push everything,
1711 1665 # use the fast path, no race possible on push
1712 1666 cg = self._changegroup(outgoing.missing, 'push')
1713 1667 else:
1714 1668 cg = self.getlocalbundle('push', outgoing)
1715 1669
1716 1670 # apply changegroup to remote
1717 1671 if unbundle:
1718 1672 # local repo finds heads on server, finds out what
1719 1673 # revs it must push. once revs are transferred, if the
1720 1674 # server finds it has different heads (someone else won
1721 1675 # the commit/push race), it aborts.
1722 1676 if force:
1723 1677 remoteheads = ['force']
1724 1678 # ssh: return remote's addchangegroup()
1725 1679 # http: return remote's addchangegroup() or 0 for error
1726 1680 ret = remote.unbundle(cg, remoteheads, 'push')
1727 1681 else:
1728 1682 # we return an integer indicating remote head count
1729 1683 # change
1730 1684 ret = remote.addchangegroup(cg, 'push', self.url())
1731 1685
1732 1686 if ret:
1733 1687 # push succeeded, synchronize the target of the push
1734 1688 cheads = outgoing.missingheads
1735 1689 elif revs is None:
1736 1690 # All-out push failed. synchronize on all common heads
1737 1691 cheads = outgoing.commonheads
1738 1692 else:
1739 1693 # I want cheads = heads(::missingheads and ::commonheads)
1740 1694 # (missingheads is revs with secret changeset filtered out)
1741 1695 #
1742 1696 # This can be expressed as:
1743 1697 # cheads = ( (missingheads and ::commonheads)
1744 1698 #          + (commonheads and ::missingheads))
1745 1699 #
1746 1700 #
1747 1701 # while trying to push we already computed the following:
1748 1702 # common = (::commonheads)
1749 1703 # missing = ((commonheads::missingheads) - commonheads)
1750 1704 #
1751 1705 # We can pick:
1752 1706 # * missingheads part of common (::commonheads)
1753 1707 common = set(outgoing.common)
1754 1708 cheads = [node for node in revs if node in common]
1755 1709 # and
1756 1710 # * commonheads parents on missing
1757 1711 revset = self.set('%ln and parents(roots(%ln))',
1758 1712 outgoing.commonheads,
1759 1713 outgoing.missing)
1760 1714 cheads.extend(c.node() for c in revset)
1761 1715 # even when we don't push, exchanging phase data is useful
1762 1716 remotephases = remote.listkeys('phases')
1763 1717 if not remotephases: # old server or public only repo
1764 1718 phases.advanceboundary(self, phases.public, cheads)
1765 1719 # don't push any phase data as there is nothing to push
1766 1720 else:
1767 1721 ana = phases.analyzeremotephases(self, cheads, remotephases)
1768 1722 pheads, droots = ana
1769 1723 ### Apply remote phase on local
1770 1724 if remotephases.get('publishing', False):
1771 1725 phases.advanceboundary(self, phases.public, cheads)
1772 1726 else: # publish = False
1773 1727 phases.advanceboundary(self, phases.public, pheads)
1774 1728 phases.advanceboundary(self, phases.draft, cheads)
1775 1729 ### Apply local phase on remote
1776 1730
1777 1731 # Get the list of all revs that are draft on remote but public here.
1778 1732 # XXX Beware that the revset breaks if droots is not strictly
1779 1733 # XXX roots; we may want to ensure it is, but that is costly
1780 1734 outdated = self.set('heads((%ln::%ln) and public())',
1781 1735 droots, cheads)
1782 1736 for newremotehead in outdated:
1783 1737 r = remote.pushkey('phases',
1784 1738 newremotehead.hex(),
1785 1739 str(phases.draft),
1786 1740 str(phases.public))
1787 1741 if not r:
1788 1742 self.ui.warn(_('updating %s to public failed!\n')
1789 1743 % newremotehead)
1790 1744 finally:
1791 1745 if lock is not None:
1792 1746 lock.release()
1793 1747 finally:
1794 1748 locallock.release()
1795 1749
1796 1750 self.ui.debug("checking for updated bookmarks\n")
1797 1751 rb = remote.listkeys('bookmarks')
1798 1752 for k in rb.keys():
1799 1753 if k in self._bookmarks:
1800 1754 nr, nl = rb[k], hex(self._bookmarks[k])
1801 1755 if nr in self:
1802 1756 cr = self[nr]
1803 1757 cl = self[nl]
1804 1758 if cl in cr.descendants():
1805 1759 r = remote.pushkey('bookmarks', k, nr, nl)
1806 1760 if r:
1807 1761 self.ui.status(_("updating bookmark %s\n") % k)
1808 1762 else:
1809 1763 self.ui.warn(_('updating bookmark %s'
1810 1764 ' failed!\n') % k)
1811 1765
1812 1766 return ret
1813 1767
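A hedged sketch of consuming the return contract documented in push()'s docstring (the helper is illustrative, not part of this file):

    def reportpush(repo, remote):
        # interpret push()'s documented return values
        ret = repo.push(remote)
        if ret is None:
            return 'nothing to push'
        elif ret == 0:
            return 'HTTP error'
        elif ret == 1:
            return 'pushed, remote head count unchanged (or push refused)'
        return 'remote head delta, encoded as by addchangegroup(): %d' % ret
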
1814 1768 def changegroupinfo(self, nodes, source):
1815 1769 if self.ui.verbose or source == 'bundle':
1816 1770 self.ui.status(_("%d changesets found\n") % len(nodes))
1817 1771 if self.ui.debugflag:
1818 1772 self.ui.debug("list of changesets:\n")
1819 1773 for node in nodes:
1820 1774 self.ui.debug("%s\n" % hex(node))
1821 1775
1822 1776 def changegroupsubset(self, bases, heads, source):
1823 1777 """Compute a changegroup consisting of all the nodes that are
1824 1778 descendants of any of the bases and ancestors of any of the heads.
1825 1779 Return a chunkbuffer object whose read() method will return
1826 1780 successive changegroup chunks.
1827 1781
1828 1782 It is fairly complex as determining which filenodes and which
1829 1783 manifest nodes need to be included for the changeset to be complete
1830 1784 is non-trivial.
1831 1785
1832 1786 Another wrinkle is doing the reverse, figuring out which changeset in
1833 1787 the changegroup a particular filenode or manifestnode belongs to.
1834 1788 """
1835 1789 cl = self.changelog
1836 1790 if not bases:
1837 1791 bases = [nullid]
1838 1792 csets, bases, heads = cl.nodesbetween(bases, heads)
1839 1793 # We assume that all ancestors of bases are known
1840 1794 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1841 1795 return self._changegroupsubset(common, csets, heads, source)
1842 1796
1843 1797 def getlocalbundle(self, source, outgoing):
1844 1798 """Like getbundle, but taking a discovery.outgoing as an argument.
1845 1799
1846 1800 This is only implemented for local repos and reuses potentially
1847 1801 precomputed sets in outgoing."""
1848 1802 if not outgoing.missing:
1849 1803 return None
1850 1804 return self._changegroupsubset(outgoing.common,
1851 1805 outgoing.missing,
1852 1806 outgoing.missingheads,
1853 1807 source)
1854 1808
1855 1809 def getbundle(self, source, heads=None, common=None):
1856 1810 """Like changegroupsubset, but returns the set difference between the
1857 1811 ancestors of heads and the ancestors common.
1858 1812
1859 1813 If heads is None, use the local heads. If common is None, use [nullid].
1860 1814
1861 1815 The nodes in common might not all be known locally due to the way the
1862 1816 current discovery protocol works.
1863 1817 """
1864 1818 cl = self.changelog
1865 1819 if common:
1866 1820 nm = cl.nodemap
1867 1821 common = [n for n in common if n in nm]
1868 1822 else:
1869 1823 common = [nullid]
1870 1824 if not heads:
1871 1825 heads = cl.heads()
1872 1826 return self.getlocalbundle(source,
1873 1827 discovery.outgoing(cl, common, heads))
1874 1828
1875 1829 def _changegroupsubset(self, commonrevs, csets, heads, source):
1876 1830
1877 1831 cl = self.changelog
1878 1832 mf = self.manifest
1879 1833 mfs = {} # needed manifests
1880 1834 fnodes = {} # needed file nodes
1881 1835 changedfiles = set()
1882 1836 fstate = ['', {}]
1883 1837 count = [0, 0]
1884 1838
1885 1839 # can we go through the fast path?
1886 1840 heads.sort()
1887 1841 if heads == sorted(self.heads()):
1888 1842 return self._changegroup(csets, source)
1889 1843
1890 1844 # slow path
1891 1845 self.hook('preoutgoing', throw=True, source=source)
1892 1846 self.changegroupinfo(csets, source)
1893 1847
1894 1848 # filter any nodes that claim to be part of the known set
1895 1849 def prune(revlog, missing):
1896 1850 rr, rl = revlog.rev, revlog.linkrev
1897 1851 return [n for n in missing
1898 1852 if rl(rr(n)) not in commonrevs]
1899 1853
1900 1854 progress = self.ui.progress
1901 1855 _bundling = _('bundling')
1902 1856 _changesets = _('changesets')
1903 1857 _manifests = _('manifests')
1904 1858 _files = _('files')
1905 1859
1906 1860 def lookup(revlog, x):
1907 1861 if revlog == cl:
1908 1862 c = cl.read(x)
1909 1863 changedfiles.update(c[3])
1910 1864 mfs.setdefault(c[0], x)
1911 1865 count[0] += 1
1912 1866 progress(_bundling, count[0],
1913 1867 unit=_changesets, total=count[1])
1914 1868 return x
1915 1869 elif revlog == mf:
1916 1870 clnode = mfs[x]
1917 1871 mdata = mf.readfast(x)
1918 1872 for f, n in mdata.iteritems():
1919 1873 if f in changedfiles:
1920 1874 fnodes[f].setdefault(n, clnode)
1921 1875 count[0] += 1
1922 1876 progress(_bundling, count[0],
1923 1877 unit=_manifests, total=count[1])
1924 1878 return clnode
1925 1879 else:
1926 1880 progress(_bundling, count[0], item=fstate[0],
1927 1881 unit=_files, total=count[1])
1928 1882 return fstate[1][x]
1929 1883
1930 1884 bundler = changegroup.bundle10(lookup)
1931 1885 reorder = self.ui.config('bundle', 'reorder', 'auto')
1932 1886 if reorder == 'auto':
1933 1887 reorder = None
1934 1888 else:
1935 1889 reorder = util.parsebool(reorder)
1936 1890
1937 1891 def gengroup():
1938 1892 # Create a changenode group generator that will call our functions
1939 1893 # back to lookup the owning changenode and collect information.
1940 1894 count[:] = [0, len(csets)]
1941 1895 for chunk in cl.group(csets, bundler, reorder=reorder):
1942 1896 yield chunk
1943 1897 progress(_bundling, None)
1944 1898
1945 1899 # Create a generator for the manifestnodes that calls our lookup
1946 1900 # and data collection functions back.
1947 1901 for f in changedfiles:
1948 1902 fnodes[f] = {}
1949 1903 count[:] = [0, len(mfs)]
1950 1904 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1951 1905 yield chunk
1952 1906 progress(_bundling, None)
1953 1907
1954 1908 mfs.clear()
1955 1909
1956 1910 # Go through all our files in order sorted by name.
1957 1911 count[:] = [0, len(changedfiles)]
1958 1912 for fname in sorted(changedfiles):
1959 1913 filerevlog = self.file(fname)
1960 1914 if not len(filerevlog):
1961 1915 raise util.Abort(_("empty or missing revlog for %s")
1962 1916 % fname)
1963 1917 fstate[0] = fname
1964 1918 fstate[1] = fnodes.pop(fname, {})
1965 1919
1966 1920 nodelist = prune(filerevlog, fstate[1])
1967 1921 if nodelist:
1968 1922 count[0] += 1
1969 1923 yield bundler.fileheader(fname)
1970 1924 for chunk in filerevlog.group(nodelist, bundler, reorder):
1971 1925 yield chunk
1972 1926
1973 1927 # Signal that no more groups are left.
1974 1928 yield bundler.close()
1975 1929 progress(_bundling, None)
1976 1930
1977 1931 if csets:
1978 1932 self.hook('outgoing', node=hex(csets[0]), source=source)
1979 1933
1980 1934 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1981 1935
1982 1936 def changegroup(self, basenodes, source):
1983 1937 # to avoid a race we use changegroupsubset() (issue1320)
1984 1938 return self.changegroupsubset(basenodes, self.heads(), source)
1985 1939
1986 1940 def _changegroup(self, nodes, source):
1987 1941 """Compute the changegroup of all nodes that we have that a recipient
1988 1942 doesn't. Return a chunkbuffer object whose read() method will return
1989 1943 successive changegroup chunks.
1990 1944
1991 1945 This is much easier than the previous function as we can assume that
1992 1946 the recipient has any changenode we aren't sending them.
1993 1947
1994 1948 nodes is the set of nodes to send"""
1995 1949
1996 1950 cl = self.changelog
1997 1951 mf = self.manifest
1998 1952 mfs = {}
1999 1953 changedfiles = set()
2000 1954 fstate = ['']
2001 1955 count = [0, 0]
2002 1956
2003 1957 self.hook('preoutgoing', throw=True, source=source)
2004 1958 self.changegroupinfo(nodes, source)
2005 1959
2006 1960 revset = set([cl.rev(n) for n in nodes])
2007 1961
2008 1962 def gennodelst(log):
2009 1963 ln, llr = log.node, log.linkrev
2010 1964 return [ln(r) for r in log if llr(r) in revset]
2011 1965
2012 1966 progress = self.ui.progress
2013 1967 _bundling = _('bundling')
2014 1968 _changesets = _('changesets')
2015 1969 _manifests = _('manifests')
2016 1970 _files = _('files')
2017 1971
2018 1972 def lookup(revlog, x):
2019 1973 if revlog == cl:
2020 1974 c = cl.read(x)
2021 1975 changedfiles.update(c[3])
2022 1976 mfs.setdefault(c[0], x)
2023 1977 count[0] += 1
2024 1978 progress(_bundling, count[0],
2025 1979 unit=_changesets, total=count[1])
2026 1980 return x
2027 1981 elif revlog == mf:
2028 1982 count[0] += 1
2029 1983 progress(_bundling, count[0],
2030 1984 unit=_manifests, total=count[1])
2031 1985 return cl.node(revlog.linkrev(revlog.rev(x)))
2032 1986 else:
2033 1987 progress(_bundling, count[0], item=fstate[0],
2034 1988 total=count[1], unit=_files)
2035 1989 return cl.node(revlog.linkrev(revlog.rev(x)))
2036 1990
2037 1991 bundler = changegroup.bundle10(lookup)
2038 1992 reorder = self.ui.config('bundle', 'reorder', 'auto')
2039 1993 if reorder == 'auto':
2040 1994 reorder = None
2041 1995 else:
2042 1996 reorder = util.parsebool(reorder)
2043 1997
2044 1998 def gengroup():
2045 1999 '''yield a sequence of changegroup chunks (strings)'''
2046 2000 # construct a list of all changed files
2047 2001
2048 2002 count[:] = [0, len(nodes)]
2049 2003 for chunk in cl.group(nodes, bundler, reorder=reorder):
2050 2004 yield chunk
2051 2005 progress(_bundling, None)
2052 2006
2053 2007 count[:] = [0, len(mfs)]
2054 2008 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2055 2009 yield chunk
2056 2010 progress(_bundling, None)
2057 2011
2058 2012 count[:] = [0, len(changedfiles)]
2059 2013 for fname in sorted(changedfiles):
2060 2014 filerevlog = self.file(fname)
2061 2015 if not len(filerevlog):
2062 2016 raise util.Abort(_("empty or missing revlog for %s")
2063 2017 % fname)
2064 2018 fstate[0] = fname
2065 2019 nodelist = gennodelst(filerevlog)
2066 2020 if nodelist:
2067 2021 count[0] += 1
2068 2022 yield bundler.fileheader(fname)
2069 2023 for chunk in filerevlog.group(nodelist, bundler, reorder):
2070 2024 yield chunk
2071 2025 yield bundler.close()
2072 2026 progress(_bundling, None)
2073 2027
2074 2028 if nodes:
2075 2029 self.hook('outgoing', node=hex(nodes[0]), source=source)
2076 2030
2077 2031 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2078 2032
2079 2033 def addchangegroup(self, source, srctype, url, emptyok=False):
2080 2034 """Add the changegroup returned by source.read() to this repo.
2081 2035 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2082 2036 the URL of the repo where this changegroup is coming from.
2083 2037
2084 2038 Return an integer summarizing the change to this repo:
2085 2039 - nothing changed or no source: 0
2086 2040 - more heads than before: 1+added heads (2..n)
2087 2041 - fewer heads than before: -1-removed heads (-2..-n)
2088 2042 - number of heads stays the same: 1
2089 2043 """
2090 2044 def csmap(x):
2091 2045 self.ui.debug("add changeset %s\n" % short(x))
2092 2046 return len(cl)
2093 2047
2094 2048 def revmap(x):
2095 2049 return cl.rev(x)
2096 2050
2097 2051 if not source:
2098 2052 return 0
2099 2053
2100 2054 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2101 2055
2102 2056 changesets = files = revisions = 0
2103 2057 efiles = set()
2104 2058
2105 2059 # write changelog data to temp files so concurrent readers will not see
2106 2060 # an inconsistent view
2107 2061 cl = self.changelog
2108 2062 cl.delayupdate()
2109 2063 oldheads = cl.heads()
2110 2064
2111 2065 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2112 2066 try:
2113 2067 trp = weakref.proxy(tr)
2114 2068 # pull off the changeset group
2115 2069 self.ui.status(_("adding changesets\n"))
2116 2070 clstart = len(cl)
2117 2071 class prog(object):
2118 2072 step = _('changesets')
2119 2073 count = 1
2120 2074 ui = self.ui
2121 2075 total = None
2122 2076 def __call__(self):
2123 2077 self.ui.progress(self.step, self.count, unit=_('chunks'),
2124 2078 total=self.total)
2125 2079 self.count += 1
2126 2080 pr = prog()
2127 2081 source.callback = pr
2128 2082
2129 2083 source.changelogheader()
2130 2084 srccontent = cl.addgroup(source, csmap, trp)
2131 2085 if not (srccontent or emptyok):
2132 2086 raise util.Abort(_("received changelog group is empty"))
2133 2087 clend = len(cl)
2134 2088 changesets = clend - clstart
2135 2089 for c in xrange(clstart, clend):
2136 2090 efiles.update(self[c].files())
2137 2091 efiles = len(efiles)
2138 2092 self.ui.progress(_('changesets'), None)
2139 2093
2140 2094 # pull off the manifest group
2141 2095 self.ui.status(_("adding manifests\n"))
2142 2096 pr.step = _('manifests')
2143 2097 pr.count = 1
2144 2098 pr.total = changesets # manifests <= changesets
2145 2099 # no need to check for empty manifest group here:
2146 2100 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2147 2101 # no new manifest will be created and the manifest group will
2148 2102 # be empty during the pull
2149 2103 source.manifestheader()
2150 2104 self.manifest.addgroup(source, revmap, trp)
2151 2105 self.ui.progress(_('manifests'), None)
2152 2106
2153 2107 needfiles = {}
2154 2108 if self.ui.configbool('server', 'validate', default=False):
2155 2109 # validate incoming csets have their manifests
2156 2110 for cset in xrange(clstart, clend):
2157 2111 mfest = self.changelog.read(self.changelog.node(cset))[0]
2158 2112 mfest = self.manifest.readdelta(mfest)
2159 2113 # store file nodes we must see
2160 2114 for f, n in mfest.iteritems():
2161 2115 needfiles.setdefault(f, set()).add(n)
2162 2116
2163 2117 # process the files
2164 2118 self.ui.status(_("adding file changes\n"))
2165 2119 pr.step = _('files')
2166 2120 pr.count = 1
2167 2121 pr.total = efiles
2168 2122 source.callback = None
2169 2123
2170 2124 while True:
2171 2125 chunkdata = source.filelogheader()
2172 2126 if not chunkdata:
2173 2127 break
2174 2128 f = chunkdata["filename"]
2175 2129 self.ui.debug("adding %s revisions\n" % f)
2176 2130 pr()
2177 2131 fl = self.file(f)
2178 2132 o = len(fl)
2179 2133 if not fl.addgroup(source, revmap, trp):
2180 2134 raise util.Abort(_("received file revlog group is empty"))
2181 2135 revisions += len(fl) - o
2182 2136 files += 1
2183 2137 if f in needfiles:
2184 2138 needs = needfiles[f]
2185 2139 for new in xrange(o, len(fl)):
2186 2140 n = fl.node(new)
2187 2141 if n in needs:
2188 2142 needs.remove(n)
2189 2143 if not needs:
2190 2144 del needfiles[f]
2191 2145 self.ui.progress(_('files'), None)
2192 2146
2193 2147 for f, needs in needfiles.iteritems():
2194 2148 fl = self.file(f)
2195 2149 for n in needs:
2196 2150 try:
2197 2151 fl.rev(n)
2198 2152 except error.LookupError:
2199 2153 raise util.Abort(
2200 2154 _('missing file data for %s:%s - run hg verify') %
2201 2155 (f, hex(n)))
2202 2156
2203 2157 dh = 0
2204 2158 if oldheads:
2205 2159 heads = cl.heads()
2206 2160 dh = len(heads) - len(oldheads)
2207 2161 for h in heads:
2208 2162 if h not in oldheads and self[h].closesbranch():
2209 2163 dh -= 1
2210 2164 htext = ""
2211 2165 if dh:
2212 2166 htext = _(" (%+d heads)") % dh
2213 2167
2214 2168 self.ui.status(_("added %d changesets"
2215 2169 " with %d changes to %d files%s\n")
2216 2170 % (changesets, revisions, files, htext))
2217 2171
2218 2172 if changesets > 0:
2219 2173 p = lambda: cl.writepending() and self.root or ""
2220 2174 self.hook('pretxnchangegroup', throw=True,
2221 2175 node=hex(cl.node(clstart)), source=srctype,
2222 2176 url=url, pending=p)
2223 2177
2224 2178 added = [cl.node(r) for r in xrange(clstart, clend)]
2225 2179 publishing = self.ui.configbool('phases', 'publish', True)
2226 2180 if srctype == 'push':
2227 2181 # Old servers can not push the boundary themselves.
2228 2182 # New servers won't push the boundary if the changeset already
2229 2183 # existed locally as secret
2230 2184 #
2231 2185 # We should not use added here but the list of all changes in
2232 2186 # the bundle
2233 2187 if publishing:
2234 2188 phases.advanceboundary(self, phases.public, srccontent)
2235 2189 else:
2236 2190 phases.advanceboundary(self, phases.draft, srccontent)
2237 2191 phases.retractboundary(self, phases.draft, added)
2238 2192 elif srctype != 'strip':
2239 2193 # publishing only alters behavior during push
2240 2194 #
2241 2195 # strip should not touch boundary at all
2242 2196 phases.retractboundary(self, phases.draft, added)
2243 2197
2244 2198 # make changelog see real files again
2245 2199 cl.finalize(trp)
2246 2200
2247 2201 tr.close()
2248 2202
2249 2203 if changesets > 0:
2250 2204 def runhooks():
2251 2205 # forcefully update the on-disk branch cache
2252 2206 self.ui.debug("updating the branch cache\n")
2253 2207 self.updatebranchcache()
2254 2208 self.hook("changegroup", node=hex(cl.node(clstart)),
2255 2209 source=srctype, url=url)
2256 2210
2257 2211 for n in added:
2258 2212 self.hook("incoming", node=hex(n), source=srctype,
2259 2213 url=url)
2260 2214 self._afterlock(runhooks)
2261 2215
2262 2216 finally:
2263 2217 tr.release()
2264 2218 # never return 0 here:
2265 2219 if dh < 0:
2266 2220 return dh - 1
2267 2221 else:
2268 2222 return dh + 1
2269 2223
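The return encoding above avoids 0 (which callers reserve for "nothing changed") by shifting the head delta dh away from zero; a hedged worked example:

    def encode(dh):
        return dh - 1 if dh < 0 else dh + 1

    [encode(dh) for dh in (-2, -1, 0, 1, 2)]   # -> [-3, -2, 1, 2, 3]
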
2270 2224 def stream_in(self, remote, requirements):
2271 2225 lock = self.lock()
2272 2226 try:
2273 2227 fp = remote.stream_out()
2274 2228 l = fp.readline()
2275 2229 try:
2276 2230 resp = int(l)
2277 2231 except ValueError:
2278 2232 raise error.ResponseError(
2279 2233 _('Unexpected response from remote server:'), l)
2280 2234 if resp == 1:
2281 2235 raise util.Abort(_('operation forbidden by server'))
2282 2236 elif resp == 2:
2283 2237 raise util.Abort(_('locking the remote repository failed'))
2284 2238 elif resp != 0:
2285 2239 raise util.Abort(_('the server sent an unknown error code'))
2286 2240 self.ui.status(_('streaming all changes\n'))
2287 2241 l = fp.readline()
2288 2242 try:
2289 2243 total_files, total_bytes = map(int, l.split(' ', 1))
2290 2244 except (ValueError, TypeError):
2291 2245 raise error.ResponseError(
2292 2246 _('Unexpected response from remote server:'), l)
2293 2247 self.ui.status(_('%d files to transfer, %s of data\n') %
2294 2248 (total_files, util.bytecount(total_bytes)))
2295 2249 start = time.time()
2296 2250 for i in xrange(total_files):
2297 2251 # XXX doesn't support '\n' or '\r' in filenames
2298 2252 l = fp.readline()
2299 2253 try:
2300 2254 name, size = l.split('\0', 1)
2301 2255 size = int(size)
2302 2256 except (ValueError, TypeError):
2303 2257 raise error.ResponseError(
2304 2258 _('Unexpected response from remote server:'), l)
2305 2259 if self.ui.debugflag:
2306 2260 self.ui.debug('adding %s (%s)\n' %
2307 2261 (name, util.bytecount(size)))
2308 2262 # for backwards compat, name was partially encoded
2309 2263 ofp = self.sopener(store.decodedir(name), 'w')
2310 2264 for chunk in util.filechunkiter(fp, limit=size):
2311 2265 ofp.write(chunk)
2312 2266 ofp.close()
2313 2267 elapsed = time.time() - start
2314 2268 if elapsed <= 0:
2315 2269 elapsed = 0.001
2316 2270 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2317 2271 (util.bytecount(total_bytes), elapsed,
2318 2272 util.bytecount(total_bytes / elapsed)))
2319 2273
2320 2274 # new requirements = old non-format requirements +
2321 2275 # new format-related requirements
2322 2276 # from the streamed-in repository
2323 2277 requirements.update(set(self.requirements) - self.supportedformats)
2324 2278 self._applyrequirements(requirements)
2325 2279 self._writerequirements()
2326 2280
2327 2281 self.invalidate()
2328 2282 return len(self.heads()) + 1
2329 2283 finally:
2330 2284 lock.release()
2331 2285
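A hedged sketch of the stream_out handshake that stream_in() decodes above: an integer status line, then "<total_files> <total_bytes>", then per-file "<name>\0<size>" headers each followed by size bytes of raw data (readstreamheader is illustrative, not part of this file):

    def readstreamheader(fp):
        resp = int(fp.readline())   # 0 = ok, 1 = forbidden, 2 = lock failed
        if resp:
            raise IOError('server refused stream (code %d)' % resp)
        total_files, total_bytes = map(int, fp.readline().split(' ', 1))
        return total_files, total_bytes
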
2332 2286 def clone(self, remote, heads=[], stream=False):
2333 2287 '''clone remote repository.
2334 2288
2335 2289 keyword arguments:
2336 2290 heads: list of revs to clone (forces use of pull)
2337 2291 stream: use streaming clone if possible'''
2338 2292
2339 2293 # now, all clients that can request uncompressed clones can
2340 2294 # read repo formats supported by all servers that can serve
2341 2295 # them.
2342 2296
2343 2297 # if revlog format changes, client will have to check version
2344 2298 # and format flags on "stream" capability, and use
2345 2299 # uncompressed only if compatible.
2346 2300
2347 2301 if not stream:
2348 2302 # if the server explicitly prefers to stream (for fast LANs)
2349 2303 stream = remote.capable('stream-preferred')
2350 2304
2351 2305 if stream and not heads:
2352 2306 # 'stream' means remote revlog format is revlogv1 only
2353 2307 if remote.capable('stream'):
2354 2308 return self.stream_in(remote, set(('revlogv1',)))
2355 2309 # otherwise, 'streamreqs' contains the remote revlog format
2356 2310 streamreqs = remote.capable('streamreqs')
2357 2311 if streamreqs:
2358 2312 streamreqs = set(streamreqs.split(','))
2359 2313 # if we support it, stream in and adjust our requirements
2360 2314 if not streamreqs - self.supportedformats:
2361 2315 return self.stream_in(remote, streamreqs)
2362 2316 return self.pull(remote, heads)
2363 2317
2364 2318 def pushkey(self, namespace, key, old, new):
2365 2319 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2366 2320 old=old, new=new)
2367 2321 ret = pushkey.push(self, namespace, key, old, new)
2368 2322 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2369 2323 ret=ret)
2370 2324 return ret
2371 2325
2372 2326 def listkeys(self, namespace):
2373 2327 self.hook('prelistkeys', throw=True, namespace=namespace)
2374 2328 values = pushkey.list(self, namespace)
2375 2329 self.hook('listkeys', namespace=namespace, values=values)
2376 2330 return values
2377 2331
2378 2332 def debugwireargs(self, one, two, three=None, four=None, five=None):
2379 2333 '''used to test argument passing over the wire'''
2380 2334 return "%s %s %s %s %s" % (one, two, three, four, five)
2381 2335
2382 2336 def savecommitmessage(self, text):
2383 2337 fp = self.opener('last-message.txt', 'wb')
2384 2338 try:
2385 2339 fp.write(text)
2386 2340 finally:
2387 2341 fp.close()
2388 2342 return self.pathto(fp.name[len(self.root)+1:])
2389 2343
2390 2344 # used to avoid circular references so destructors work
2391 2345 def aftertrans(files):
2392 2346 renamefiles = [tuple(t) for t in files]
2393 2347 def a():
2394 2348 for src, dest in renamefiles:
2395 2349 try:
2396 2350 util.rename(src, dest)
2397 2351 except OSError: # journal file does not yet exist
2398 2352 pass
2399 2353 return a
2400 2354
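A hedged usage sketch: the transaction code can hold the returned closure without keeping a reference to the repository, which is the point of the comment above (paths illustrative):

    renamer = aftertrans([('journal', 'undo')])
    renamer()   # renames 'journal' -> 'undo'; a missing journal is ignored
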
2401 2355 def undoname(fn):
2402 2356 base, name = os.path.split(fn)
2403 2357 assert name.startswith('journal')
2404 2358 return os.path.join(base, name.replace('journal', 'undo', 1))
2405 2359
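For example (hypothetical path), undoname() maps a journal file to its undo counterpart:

    undoname('/repo/.hg/journal.dirstate')   # -> '/repo/.hg/undo.dirstate'
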
2406 2360 def instance(ui, path, create):
2407 2361 return localrepository(ui, util.urllocalpath(path), create)
2408 2362
2409 2363 def islocal(path):
2410 2364 return True
@@ -1,185 +1,172 b''
1 1 # repair.py - functions for repository repair for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 # Copyright 2007 Matt Mackall
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from mercurial import changegroup, bookmarks
10 10 from mercurial.node import short
11 11 from mercurial.i18n import _
12 12 import os
13 13 import errno
14 14
15 15 def _bundle(repo, bases, heads, node, suffix, compress=True):
16 16 """create a bundle with the specified revisions as a backup"""
17 17 cg = repo.changegroupsubset(bases, heads, 'strip')
18 18 backupdir = repo.join("strip-backup")
19 19 if not os.path.isdir(backupdir):
20 20 os.mkdir(backupdir)
21 21 name = os.path.join(backupdir, "%s-%s.hg" % (short(node), suffix))
22 22 if compress:
23 23 bundletype = "HG10BZ"
24 24 else:
25 25 bundletype = "HG10UN"
26 26 return changegroup.writebundle(cg, name, bundletype)
27 27
28 28 def _collectfiles(repo, striprev):
29 29 """find out the filelogs affected by the strip"""
30 30 files = set()
31 31
32 32 for x in xrange(striprev, len(repo)):
33 33 files.update(repo[x].files())
34 34
35 35 return sorted(files)
36 36
37 37 def _collectbrokencsets(repo, files, striprev):
38 38 """return the changesets which will be broken by the truncation"""
39 39 s = set()
40 40 def collectone(revlog):
41 41 linkgen = (revlog.linkrev(i) for i in revlog)
42 42 # find the truncation point of the revlog
43 43 for lrev in linkgen:
44 44 if lrev >= striprev:
45 45 break
46 46 # see if any revision after this point has a linkrev
47 47 # less than striprev (those will be broken by strip)
48 48 for lrev in linkgen:
49 49 if lrev < striprev:
50 50 s.add(lrev)
51 51
52 52 collectone(repo.manifest)
53 53 for fname in files:
54 54 collectone(repo.file(fname))
55 55
56 56 return s
57 57
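A hedged toy model of collectone() above: find the truncation point (the first linkrev >= striprev), then any later revision still linking below striprev belongs to a changeset the truncation would break:

    def brokenlinks(linkrevs, striprev):
        it = iter(linkrevs)
        for lrev in it:            # advance to the truncation point
            if lrev >= striprev:
                break
        return set(l for l in it if l < striprev)

    brokenlinks([0, 2, 5, 3, 6], 4)   # -> {3}: changeset 3 would be broken
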
58 58 def strip(ui, repo, nodelist, backup="all", topic='backup'):
59 # It simplifies the logic around updating the branchheads cache if we only
60 # have to consider the effect of the stripped revisions and not revisions
61 # missing because the cache is out-of-date.
62 repo.updatebranchcache()
63
64 59 cl = repo.changelog
65 60 # TODO handle undo of merge sets
66 61 if isinstance(nodelist, str):
67 62 nodelist = [nodelist]
68 63 striplist = [cl.rev(node) for node in nodelist]
69 64 striprev = min(striplist)
70 65
71 # Set of potential new heads resulting from the strip. The parents of any
72 # node removed could be a new head because the node to be removed could have
73 # been the only child of the parent.
74 # Do a list->set->list conversion to remove duplicates.
75 stringstriplist = [str(rev) for rev in striplist]
76 newheadrevs = set(repo.revs("parents(%lr::) - %lr::", stringstriplist,
77 stringstriplist))
78
79 66 keeppartialbundle = backup == 'strip'
80 67
81 68 # Some revisions with rev > striprev may not be descendants of striprev.
82 69 # We have to find these revisions and put them in a bundle, so that
83 70 # we can restore them after the truncations.
84 71 # To create the bundle we use repo.changegroupsubset which requires
85 72 # the list of heads and bases of the set of interesting revisions.
86 73 # (head = revision in the set that has no descendant in the set;
87 74 # base = revision in the set that has no ancestor in the set)
88 75 tostrip = set(striplist)
89 76 for rev in striplist:
90 77 for desc in cl.descendants(rev):
91 78 tostrip.add(desc)
92 79
93 80 files = _collectfiles(repo, striprev)
94 81 saverevs = _collectbrokencsets(repo, files, striprev)
95 82
96 83 # compute heads
97 84 saveheads = set(saverevs)
98 85 for r in xrange(striprev + 1, len(cl)):
99 86 if r not in tostrip:
100 87 saverevs.add(r)
101 88 saveheads.difference_update(cl.parentrevs(r))
102 89 saveheads.add(r)
103 90 saveheads = [cl.node(r) for r in saveheads]
104 91
105 92 # compute base nodes
106 93 if saverevs:
107 94 descendants = set(cl.descendants(*saverevs))
108 95 saverevs.difference_update(descendants)
109 96 savebases = [cl.node(r) for r in saverevs]
110 97 stripbases = [cl.node(r) for r in tostrip]
111 98
112 99 bm = repo._bookmarks
113 100 updatebm = []
114 101 for m in bm:
115 102 rev = repo[bm[m]].rev()
116 103 if rev in tostrip:
117 104 updatebm.append(m)
118 105
119 106 # create a changegroup for all the branches we need to keep
120 107 backupfile = None
121 108 if backup == "all":
122 109 backupfile = _bundle(repo, stripbases, cl.heads(), node, topic)
123 110 repo.ui.status(_("saved backup bundle to %s\n") % backupfile)
124 111 if saveheads or savebases:
125 112 # do not compress partial bundle if we remove it from disk later
126 113 chgrpfile = _bundle(repo, savebases, saveheads, node, 'temp',
127 114 compress=keeppartialbundle)
128 115
129 116 mfst = repo.manifest
130 117
131 118 tr = repo.transaction("strip")
132 119 offset = len(tr.entries)
133 120
134 121 try:
135 122 tr.startgroup()
136 123 cl.strip(striprev, tr)
137 124 mfst.strip(striprev, tr)
138 125 for fn in files:
139 126 repo.file(fn).strip(striprev, tr)
140 127 tr.endgroup()
141 128
142 129 try:
143 130 for i in xrange(offset, len(tr.entries)):
144 131 file, troffset, ignore = tr.entries[i]
145 132 repo.sopener(file, 'a').truncate(troffset)
146 133 tr.close()
147 134 except: # re-raises
148 135 tr.abort()
149 136 raise
150 137
151 138 if saveheads or savebases:
152 139 ui.note(_("adding branch\n"))
153 140 f = open(chgrpfile, "rb")
154 141 gen = changegroup.readbundle(f, chgrpfile)
155 142 if not repo.ui.verbose:
156 143 # silence internal shuffling chatter
157 144 repo.ui.pushbuffer()
158 145 repo.addchangegroup(gen, 'strip', 'bundle:' + chgrpfile, True)
159 146 if not repo.ui.verbose:
160 147 repo.ui.popbuffer()
161 148 f.close()
162 149 if not keeppartialbundle:
163 150 os.unlink(chgrpfile)
164 151
165 152 # remove undo files
166 153 for undofile in repo.undofiles():
167 154 try:
168 155 os.unlink(undofile)
169 156 except OSError, e:
170 157 if e.errno != errno.ENOENT:
171 158 ui.warn(_('error removing %s: %s\n') % (undofile, str(e)))
172 159
173 160 for m in updatebm:
174 161 bm[m] = repo['.'].node()
175 162 bookmarks.write(repo)
176 163 except: # re-raises
177 164 if backupfile:
178 165 ui.warn(_("strip failed, full bundle stored in '%s'\n")
179 166 % backupfile)
180 167 elif saveheads:
181 168 ui.warn(_("strip failed, partial bundle stored in '%s'\n")
182 169 % chgrpfile)
183 170 raise
184 171
185 repo.destroyed(newheadrevs)
172 repo.destroyed()
@@ -1,353 +1,352 b''
1 1 $ "$TESTDIR/hghave" execbit || exit 80
2 2
3 3 $ hg init
4 4
5 5 Setup:
6 6
7 7 $ echo a >> a
8 8 $ hg ci -Am 'base'
9 9 adding a
10 10
11 11 Refuse to amend public csets:
12 12
13 13 $ hg phase -r . -p
14 14 $ hg ci --amend
15 15 abort: cannot amend public changesets
16 16 [255]
17 17 $ hg phase -r . -f -d
18 18
19 19 $ echo a >> a
20 20 $ hg ci -Am 'base1'
21 21
22 22 Nothing to amend:
23 23
24 24 $ hg ci --amend
25 25 nothing changed
26 26 [1]
27 27
28 28 Amending changeset with changes in working dir:
29 29
30 30 $ echo a >> a
31 31 $ hg ci --amend -m 'amend base1'
32 32 saved backup bundle to $TESTTMP/.hg/strip-backup/489edb5b847d-amend-backup.hg
33 33 $ hg diff -c .
34 34 diff -r ad120869acf0 -r 9cd25b479c51 a
35 35 --- a/a Thu Jan 01 00:00:00 1970 +0000
36 36 +++ b/a Thu Jan 01 00:00:00 1970 +0000
37 37 @@ -1,1 +1,3 @@
38 38 a
39 39 +a
40 40 +a
41 41 $ hg log
42 42 changeset: 1:9cd25b479c51
43 43 tag: tip
44 44 user: test
45 45 date: Thu Jan 01 00:00:00 1970 +0000
46 46 summary: amend base1
47 47
48 48 changeset: 0:ad120869acf0
49 49 user: test
50 50 date: Thu Jan 01 00:00:00 1970 +0000
51 51 summary: base
52 52
53 53
54 54 Add new file:
55 55
56 56 $ echo b > b
57 57 $ hg ci --amend -Am 'amend base1 new file'
58 58 adding b
59 59 saved backup bundle to $TESTTMP/.hg/strip-backup/9cd25b479c51-amend-backup.hg
60 60
61 61 Remove file that was added in amended commit:
62 62
63 63 $ hg rm b
64 64 $ hg ci --amend -m 'amend base1 remove new file'
65 65 saved backup bundle to $TESTTMP/.hg/strip-backup/e2bb3ecffd2f-amend-backup.hg
66 66
67 67 $ hg cat b
68 68 b: no such file in rev 664a9b2d60cd
69 69 [1]
70 70
71 71 No changes, just a different message:
72 72
73 73 $ hg ci -v --amend -m 'no changes, new message'
74 74 amending changeset 664a9b2d60cd
75 75 copying changeset 664a9b2d60cd to ad120869acf0
76 76 a
77 77 stripping amended changeset 664a9b2d60cd
78 78 1 changesets found
79 79 saved backup bundle to $TESTTMP/.hg/strip-backup/664a9b2d60cd-amend-backup.hg
80 80 1 changesets found
81 81 adding branch
82 82 adding changesets
83 83 adding manifests
84 84 adding file changes
85 85 added 1 changesets with 1 changes to 1 files
86 86 committed changeset 1:ea6e356ff2ad
87 87 $ hg diff -c .
88 88 diff -r ad120869acf0 -r ea6e356ff2ad a
89 89 --- a/a Thu Jan 01 00:00:00 1970 +0000
90 90 +++ b/a Thu Jan 01 00:00:00 1970 +0000
91 91 @@ -1,1 +1,3 @@
92 92 a
93 93 +a
94 94 +a
95 95 $ hg log
96 96 changeset: 1:ea6e356ff2ad
97 97 tag: tip
98 98 user: test
99 99 date: Thu Jan 01 00:00:00 1970 +0000
100 100 summary: no changes, new message
101 101
102 102 changeset: 0:ad120869acf0
103 103 user: test
104 104 date: Thu Jan 01 00:00:00 1970 +0000
105 105 summary: base
106 106
107 107
108 108 Disable default date on commit so when -d isn't given, the old date is preserved:
109 109
110 110 $ echo '[defaults]' >> $HGRCPATH
111 111 $ echo 'commit=' >> $HGRCPATH
112 112
113 113 Test -u/-d:
114 114
115 115 $ hg ci --amend -u foo -d '1 0'
116 116 saved backup bundle to $TESTTMP/.hg/strip-backup/ea6e356ff2ad-amend-backup.hg
117 117 $ echo a >> a
118 118 $ hg ci --amend -u foo -d '1 0'
119 119 saved backup bundle to $TESTTMP/.hg/strip-backup/377b91ce8b56-amend-backup.hg
120 120 $ hg log -r .
121 121 changeset: 1:2c94e4a5756f
122 122 tag: tip
123 123 user: foo
124 124 date: Thu Jan 01 00:00:01 1970 +0000
125 125 summary: no changes, new message
126 126
127 127
128 128 Open editor with old commit message if a message isn't given otherwise:
129 129
130 130 $ cat > editor << '__EOF__'
131 131 > #!/bin/sh
132 132 > cat $1
133 133 > echo "another precious commit message" > "$1"
134 134 > __EOF__
135 135 $ chmod +x editor
136 136 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
137 137 amending changeset 2c94e4a5756f
138 138 copying changeset 2c94e4a5756f to ad120869acf0
139 139 no changes, new message
140 140
141 141
142 142 HG: Enter commit message. Lines beginning with 'HG:' are removed.
143 143 HG: Leave message empty to abort commit.
144 144 HG: --
145 145 HG: user: foo
146 146 HG: branch 'default'
147 147 HG: changed a
148 148 a
149 149 stripping amended changeset 2c94e4a5756f
150 150 1 changesets found
151 151 saved backup bundle to $TESTTMP/.hg/strip-backup/2c94e4a5756f-amend-backup.hg
152 152 1 changesets found
153 153 adding branch
154 154 adding changesets
155 155 adding manifests
156 156 adding file changes
157 157 added 1 changesets with 1 changes to 1 files
158 158 committed changeset 1:ffb49186f961
159 159
160 160 Same, but with changes in working dir (different code path):
161 161
162 162 $ echo a >> a
163 163 $ HGEDITOR="'`pwd`'"/editor hg commit --amend -v
164 164 amending changeset ffb49186f961
165 165 another precious commit message
166 166
167 167
168 168 HG: Enter commit message. Lines beginning with 'HG:' are removed.
169 169 HG: Leave message empty to abort commit.
170 170 HG: --
171 171 HG: user: foo
172 172 HG: branch 'default'
173 173 HG: changed a
174 174 a
175 175 copying changeset 27f3aacd3011 to ad120869acf0
176 176 a
177 177 stripping intermediate changeset 27f3aacd3011
178 178 stripping amended changeset ffb49186f961
179 179 2 changesets found
180 180 saved backup bundle to $TESTTMP/.hg/strip-backup/ffb49186f961-amend-backup.hg
181 181 1 changesets found
182 182 adding branch
183 183 adding changesets
184 184 adding manifests
185 185 adding file changes
186 186 added 1 changesets with 1 changes to 1 files
187 187 committed changeset 1:fb6cca43446f
188 188
189 189 $ rm editor
190 190 $ hg log -r .
191 191 changeset: 1:fb6cca43446f
192 192 tag: tip
193 193 user: foo
194 194 date: Thu Jan 01 00:00:01 1970 +0000
195 195 summary: another precious commit message
196 196
197 197
198 198 Moving bookmarks, preserve active bookmark:
199 199
200 200 $ hg book book1
201 201 $ hg book book2
202 202 $ hg ci --amend -m 'move bookmarks'
203 203 saved backup bundle to $TESTTMP/.hg/strip-backup/fb6cca43446f-amend-backup.hg
204 204 $ hg book
205 205 book1 1:0cf1c7a51bcf
206 206 * book2 1:0cf1c7a51bcf
207 207 $ echo a >> a
208 208 $ hg ci --amend -m 'move bookmarks'
209 209 saved backup bundle to $TESTTMP/.hg/strip-backup/0cf1c7a51bcf-amend-backup.hg
210 210 $ hg book
211 211 book1 1:7344472bd951
212 212 * book2 1:7344472bd951
213 213
214 214 $ echo '[defaults]' >> $HGRCPATH
215 215 $ echo "commit=-d '0 0'" >> $HGRCPATH
216 216
217 217 Moving branches:
218 218
219 219 $ hg branch foo
220 220 marked working directory as branch foo
221 221 (branches are permanent and global, did you want a bookmark?)
222 222 $ echo a >> a
223 223 $ hg ci -m 'branch foo'
224 224 $ hg branch default -f
225 225 marked working directory as branch default
226 226 (branches are permanent and global, did you want a bookmark?)
227 227 $ hg ci --amend -m 'back to default'
228 228 saved backup bundle to $TESTTMP/.hg/strip-backup/1661ca36a2db-amend-backup.hg
229 229 $ hg branches
230 230 default 2:f24ee5961967
231 231
232 232 Close branch:
233 233
234 234 $ hg up -q 0
235 235 $ echo b >> b
236 236 $ hg branch foo
237 237 marked working directory as branch foo
238 238 (branches are permanent and global, did you want a bookmark?)
239 239 $ hg ci -Am 'fork'
240 240 adding b
241 241 $ echo b >> b
242 242 $ hg ci -mb
243 243 $ hg ci --amend --close-branch -m 'closing branch foo'
244 244 saved backup bundle to $TESTTMP/.hg/strip-backup/c962248fa264-amend-backup.hg
245 245
246 246 Same thing, different code path:
247 247
248 248 $ echo b >> b
249 249 $ hg ci -m 'reopen branch'
250 created new head
251 250 reopening closed branch head 4
252 251 $ echo b >> b
253 252 $ hg ci --amend --close-branch
254 253 saved backup bundle to $TESTTMP/.hg/strip-backup/5e302dcc12b8-amend-backup.hg
255 254 $ hg branches
256 255 default 2:f24ee5961967
257 256
258 257 Refuse to amend merges:
259 258
260 259 $ hg up -q default
261 260 $ hg merge foo
262 261 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
263 262 (branch merge, don't forget to commit)
264 263 $ hg ci --amend
265 264 abort: cannot amend while merging
266 265 [255]
267 266 $ hg ci -m 'merge'
268 267 $ hg ci --amend
269 268 abort: cannot amend merge changesets
270 269 [255]
271 270
272 271 Follow copies/renames:
273 272
274 273 $ hg mv b c
275 274 $ hg ci -m 'b -> c'
276 275 $ hg mv c d
277 276 $ hg ci --amend -m 'b -> d'
278 277 saved backup bundle to $TESTTMP/.hg/strip-backup/9c207120aa98-amend-backup.hg
279 278 $ hg st --rev '.^' --copies d
280 279 A d
281 280 b
282 281 $ hg cp d e
283 282 $ hg ci -m 'e = d'
284 283 $ hg cp e f
285 284 $ hg ci --amend -m 'f = d'
286 285 saved backup bundle to $TESTTMP/.hg/strip-backup/fda2b3b27b22-amend-backup.hg
287 286 $ hg st --rev '.^' --copies f
288 287 A f
289 288 d
290 289
291 290 $ mv f f.orig
292 291 $ hg rm -A f
293 292 $ hg ci -m removef
294 293 $ hg cp a f
295 294 $ mv f.orig f
296 295 $ hg ci --amend -m replacef
297 296 saved backup bundle to $TESTTMP/.hg/strip-backup/20a7413547f9-amend-backup.hg
298 297 $ hg st --change . --copies
299 298 $ hg log -r . --template "{file_copies}\n"
300 299
301 300
302 301 Move added file (issue3410):
303 302
304 303 $ echo g >> g
305 304 $ hg ci -Am g
306 305 adding g
307 306 $ hg mv g h
308 307 $ hg ci --amend
309 308 saved backup bundle to $TESTTMP/.hg/strip-backup/5daa77a5d616-amend-backup.hg
310 309 $ hg st --change . --copies h
311 310 A h
312 311 $ hg log -r . --template "{file_copies}\n"
313 312
314 313
315 314 Can't rollback an amend:
316 315
317 316 $ hg rollback
318 317 no rollback information available
319 318 [1]
320 319
321 320 Preserve extra dict (issue3430):
322 321
323 322 $ hg branch a
324 323 marked working directory as branch a
325 324 (branches are permanent and global, did you want a bookmark?)
326 325 $ echo a >> a
327 326 $ hg ci -ma
328 327 $ hg ci --amend -m "a'"
329 328 saved backup bundle to $TESTTMP/.hg/strip-backup/167f8e3031df-amend-backup.hg
330 329 $ hg log -r . --template "{branch}\n"
331 330 a
332 331 $ hg ci --amend -m "a''"
333 332 saved backup bundle to $TESTTMP/.hg/strip-backup/ceac1a44c806-amend-backup.hg
334 333 $ hg log -r . --template "{branch}\n"
335 334 a
336 335
337 336 Also preserve other entries in the dict that are in the old commit,
338 337 first graft something so there's an additional entry:
339 338
340 339 $ hg up 0 -q
341 340 $ echo z > z
342 341 $ hg ci -Am 'fork'
343 342 adding z
344 343 created new head
345 344 $ hg up 11
346 345 5 files updated, 0 files merged, 1 files removed, 0 files unresolved
347 346 $ hg graft 12
348 347 grafting revision 12
349 348 $ hg ci --amend -m 'graft amend'
350 349 saved backup bundle to $TESTTMP/.hg/strip-backup/18a5124daf7a-amend-backup.hg
351 350 $ hg log -r . --debug | grep extra
352 351 extra: branch=a
353 352 extra: source=2647734878ef0236dda712fae9c1651cf694ea8a
@@ -1,125 +1,124 b''
1 1 $ branches=.hg/cache/branchheads
2 2 $ echo '[extensions]' >> $HGRCPATH
3 3 $ echo 'mq =' >> $HGRCPATH
4 4
5 5 $ show_branch_cache()
6 6 > {
7 7 > # force cache (re)generation
8 8 > hg log -r does-not-exist 2> /dev/null
9 9 > hg log -r tip --template 'tip: {rev}\n'
10 10 > if [ -f $branches ]; then
11 11 > sort $branches
12 12 > else
13 13 > echo No branch cache
14 14 > fi
15 15 > if [ "$1" = 1 ]; then
16 16 > for b in foo bar; do
17 17 > hg log -r $b --template "branch $b: "'{rev}\n'
18 18 > done
19 19 > fi
20 20 > }
21 21
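For reference, the .hg/cache/branchheads file that show_branch_cache dumps has one record per line: a "<tipnode> <tiprev>" line plus one "<headnode> <branchname>" line per branch head (the test sorts them together, so the tip line may not come first in the output). A hedged parsing sketch (parsebranchcache is illustrative only):

    def parsebranchcache(text):
        lines = text.splitlines()
        tipnode, tiprev = lines[0].split(' ', 1)   # unsorted file starts with tip
        heads = {}
        for l in lines[1:]:
            node, branch = l.split(' ', 1)
            heads.setdefault(branch, []).append(node)
        return tipnode, int(tiprev), heads
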
22 22 $ hg init a
23 23 $ cd a
24 24 $ hg qinit -c
25 25
26 26
27 27 mq patch on an empty repo
28 28
29 29 $ hg qnew -d '0 0' p1
30 30 $ show_branch_cache
31 31 tip: 0
32 32 No branch cache
33 33
34 34 $ echo > pfile
35 35 $ hg add pfile
36 36 $ hg qrefresh -m 'patch 1'
37 37 $ show_branch_cache
38 38 tip: 0
39 d986d5caac23a7d44a46efc0ddaf5eb9665844cf 0
40 d986d5caac23a7d44a46efc0ddaf5eb9665844cf default
39 No branch cache
41 40
42 41 some regular revisions
43 42
44 43 $ hg qpop
45 44 popping p1
46 45 patch queue now empty
47 46 $ echo foo > foo
48 47 $ hg add foo
49 48 $ echo foo > .hg/branch
50 49 $ hg ci -m 'branch foo'
51 50
52 51 $ echo bar > bar
53 52 $ hg add bar
54 53 $ echo bar > .hg/branch
55 54 $ hg ci -m 'branch bar'
56 55 $ show_branch_cache
57 56 tip: 1
58 57 c229711f16da3d7591f89b1b8d963b79bda22714 1
59 58 c229711f16da3d7591f89b1b8d963b79bda22714 bar
60 59 dc25e3827021582e979f600811852e36cbe57341 foo
61 60
62 61 add some mq patches
63 62
64 63 $ hg qpush
65 64 applying p1
66 65 now at: p1
67 66 $ show_branch_cache
68 67 tip: 2
69 68 c229711f16da3d7591f89b1b8d963b79bda22714 1
70 69 c229711f16da3d7591f89b1b8d963b79bda22714 bar
71 70 dc25e3827021582e979f600811852e36cbe57341 foo
72 71
73 72 $ hg qnew -d '0 0' p2
74 73 $ echo foo > .hg/branch
75 74 $ echo foo2 >> foo
76 75 $ hg qrefresh -m 'patch 2'
77 76 $ show_branch_cache 1
78 77 tip: 3
79 982611f6955f9c48d3365decea203217c945ef0d 2
80 982611f6955f9c48d3365decea203217c945ef0d bar
78 c229711f16da3d7591f89b1b8d963b79bda22714 1
79 c229711f16da3d7591f89b1b8d963b79bda22714 bar
81 80 dc25e3827021582e979f600811852e36cbe57341 foo
82 81 branch foo: 3
83 82 branch bar: 2
84 83
85 84 removing the cache
86 85
87 86 $ rm $branches
88 87 $ show_branch_cache 1
89 88 tip: 3
90 89 c229711f16da3d7591f89b1b8d963b79bda22714 1
91 90 c229711f16da3d7591f89b1b8d963b79bda22714 bar
92 91 dc25e3827021582e979f600811852e36cbe57341 foo
93 92 branch foo: 3
94 93 branch bar: 2
95 94
96 95 importing rev 1 (the cache now ends in one of the patches)
97 96
98 97 $ hg qimport -r 1 -n p0
99 98 $ show_branch_cache 1
100 99 tip: 3
101 100 c229711f16da3d7591f89b1b8d963b79bda22714 1
102 101 c229711f16da3d7591f89b1b8d963b79bda22714 bar
103 102 dc25e3827021582e979f600811852e36cbe57341 foo
104 103 branch foo: 3
105 104 branch bar: 2
106 105 $ hg log -r qbase --template 'qbase: {rev}\n'
107 106 qbase: 1
108 107
109 108 detect an invalid cache
110 109
111 110 $ hg qpop -a
112 111 popping p2
113 112 popping p1
114 113 popping p0
115 114 patch queue now empty
116 115 $ hg qpush -a
117 116 applying p0
118 117 applying p1
119 118 applying p2
120 119 now at: p2
121 120 $ show_branch_cache
122 121 tip: 3
123 122 dc25e3827021582e979f600811852e36cbe57341 0
124 123 dc25e3827021582e979f600811852e36cbe57341 foo
125 124