cache: group obscache and revsfiltercache invalidation in a single function...
Pierre-Yves David
r18105:312262eb default
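This changeset extracts the invalidation of the two "volatile" caches — the obsolescence-marker caches and the filtered-revision cache — out of invalidatecaches() into a single new helper, invalidatevolatilesets(), which invalidatecaches() then delegates to. A minimal sketch of the resulting structure, using only the names visible in the hunk below:

    def invalidatecaches(self):
        # heavyweight caches (tags, branch heads) are dropped here as before,
        # then the volatile sets are cleared through the new helper
        ...
        self.invalidatevolatilesets()

    def invalidatevolatilesets(self):
        # cheap caches that must be cleared on new changesets, phase changes,
        # new obsolescence markers, wdir parent moves or bookmark changes
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

Callers that only touch volatile state (phases, obsolescence markers, bookmarks) can now call the lighter helper without discarding the tag and branch caches.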
@@ -1,2703 +1,2705 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class repofilecache(filecache):
22 22 """All filecache usage on repo are done for logic that should be unfiltered
23 23 """
24 24
25 25 def __get__(self, repo, type=None):
26 26 return super(repofilecache, self).__get__(repo.unfiltered(), type)
27 27 def __set__(self, repo, value):
28 28 return super(repofilecache, self).__set__(repo.unfiltered(), value)
29 29 def __delete__(self, repo):
30 30 return super(repofilecache, self).__delete__(repo.unfiltered())
31 31
32 32 class storecache(repofilecache):
33 33 """filecache for files in the store"""
34 34 def join(self, obj, fname):
35 35 return obj.sjoin(fname)
36 36
37 37 class unfilteredpropertycache(propertycache):
38 38 """propertycache that apply to unfiltered repo only"""
39 39
40 40 def __get__(self, repo, type=None):
41 41 return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
42 42
43 43 class filteredpropertycache(propertycache):
44 44 """propertycache that must take filtering in account"""
45 45
46 46 def cachevalue(self, obj, value):
47 47 object.__setattr__(obj, self.name, value)
48 48
49 49
50 50 def hasunfilteredcache(repo, name):
51 51 """check if an repo and a unfilteredproperty cached value for <name>"""
52 52 return name in vars(repo.unfiltered())
53 53
54 54 def unfilteredmethod(orig):
55 55 """decorate method that always need to be run on unfiltered version"""
56 56 def wrapper(repo, *args, **kwargs):
57 57 return orig(repo.unfiltered(), *args, **kwargs)
58 58 return wrapper
59 59
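Taken together, repofilecache, unfilteredpropertycache, filteredpropertycache and unfilteredmethod route attribute access and method calls through repo.unfiltered(), so cached values live once on the unfiltered repository and are shared by every filtered view. A standalone sketch of the same redirection idiom — the class and attribute names below are illustrative only, not part of localrepo.py:

    class unfilteredwrapper(object):
        # non-data descriptor: compute once on obj.unfiltered(), then let the
        # cached value in the instance __dict__ win on later lookups
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, objtype=None):
            if obj is None:
                return self
            target = obj.unfiltered()
            value = self.func(target)
            target.__dict__[self.name] = value
            return value

    class fakerepo(object):
        def unfiltered(self):
            return self          # a filtered view would return its base repo here
        @unfilteredwrapper
        def expensive(self):
            print('computed once')
            return 42

    r = fakerepo()
    r.expensive   # computes and caches on the unfiltered object
    r.expensive   # served straight from __dict__, no recomputation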
60 60 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
61 61 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
62 62
63 63 class localpeer(peer.peerrepository):
64 64 '''peer for a local repo; reflects only the most recent API'''
65 65
66 66 def __init__(self, repo, caps=MODERNCAPS):
67 67 peer.peerrepository.__init__(self)
68 68 self._repo = repo
69 69 self.ui = repo.ui
70 70 self._caps = repo._restrictcapabilities(caps)
71 71 self.requirements = repo.requirements
72 72 self.supportedformats = repo.supportedformats
73 73
74 74 def close(self):
75 75 self._repo.close()
76 76
77 77 def _capabilities(self):
78 78 return self._caps
79 79
80 80 def local(self):
81 81 return self._repo
82 82
83 83 def canpush(self):
84 84 return True
85 85
86 86 def url(self):
87 87 return self._repo.url()
88 88
89 89 def lookup(self, key):
90 90 return self._repo.lookup(key)
91 91
92 92 def branchmap(self):
93 93 return discovery.visiblebranchmap(self._repo)
94 94
95 95 def heads(self):
96 96 return discovery.visibleheads(self._repo)
97 97
98 98 def known(self, nodes):
99 99 return self._repo.known(nodes)
100 100
101 101 def getbundle(self, source, heads=None, common=None):
102 102 return self._repo.getbundle(source, heads=heads, common=common)
103 103
104 104 # TODO We might want to move the next two calls into legacypeer and add
105 105 # unbundle instead.
106 106
107 107 def lock(self):
108 108 return self._repo.lock()
109 109
110 110 def addchangegroup(self, cg, source, url):
111 111 return self._repo.addchangegroup(cg, source, url)
112 112
113 113 def pushkey(self, namespace, key, old, new):
114 114 return self._repo.pushkey(namespace, key, old, new)
115 115
116 116 def listkeys(self, namespace):
117 117 return self._repo.listkeys(namespace)
118 118
119 119 def debugwireargs(self, one, two, three=None, four=None, five=None):
120 120 '''used to test argument passing over the wire'''
121 121 return "%s %s %s %s %s" % (one, two, three, four, five)
122 122
123 123 class locallegacypeer(localpeer):
124 124 '''peer extension which implements legacy methods too; used for tests with
125 125 restricted capabilities'''
126 126
127 127 def __init__(self, repo):
128 128 localpeer.__init__(self, repo, caps=LEGACYCAPS)
129 129
130 130 def branches(self, nodes):
131 131 return self._repo.branches(nodes)
132 132
133 133 def between(self, pairs):
134 134 return self._repo.between(pairs)
135 135
136 136 def changegroup(self, basenodes, source):
137 137 return self._repo.changegroup(basenodes, source)
138 138
139 139 def changegroupsubset(self, bases, heads, source):
140 140 return self._repo.changegroupsubset(bases, heads, source)
141 141
142 142 class localrepository(object):
143 143
144 144 supportedformats = set(('revlogv1', 'generaldelta'))
145 145 supported = supportedformats | set(('store', 'fncache', 'shared',
146 146 'dotencode'))
147 147 openerreqs = set(('revlogv1', 'generaldelta'))
148 148 requirements = ['revlogv1']
149 149
150 150 def _baserequirements(self, create):
151 151 return self.requirements[:]
152 152
153 153 def __init__(self, baseui, path=None, create=False):
154 154 self.wvfs = scmutil.vfs(path, expand=True)
155 155 self.wopener = self.wvfs
156 156 self.root = self.wvfs.base
157 157 self.path = self.wvfs.join(".hg")
158 158 self.origroot = path
159 159 self.auditor = scmutil.pathauditor(self.root, self._checknested)
160 160 self.vfs = scmutil.vfs(self.path)
161 161 self.opener = self.vfs
162 162 self.baseui = baseui
163 163 self.ui = baseui.copy()
164 164 # A list of callbacks to shape the phase if no data were found.
165 165 # Callbacks are in the form: func(repo, roots) --> processed root.
166 166 # This list is to be filled by extensions during repo setup
167 167 self._phasedefaults = []
168 168 try:
169 169 self.ui.readconfig(self.join("hgrc"), self.root)
170 170 extensions.loadall(self.ui)
171 171 except IOError:
172 172 pass
173 173
174 174 if not self.vfs.isdir():
175 175 if create:
176 176 if not self.wvfs.exists():
177 177 self.wvfs.makedirs()
178 178 self.vfs.makedir(notindexed=True)
179 179 requirements = self._baserequirements(create)
180 180 if self.ui.configbool('format', 'usestore', True):
181 181 self.vfs.mkdir("store")
182 182 requirements.append("store")
183 183 if self.ui.configbool('format', 'usefncache', True):
184 184 requirements.append("fncache")
185 185 if self.ui.configbool('format', 'dotencode', True):
186 186 requirements.append('dotencode')
187 187 # create an invalid changelog
188 188 self.vfs.append(
189 189 "00changelog.i",
190 190 '\0\0\0\2' # represents revlogv2
191 191 ' dummy changelog to prevent using the old repo layout'
192 192 )
193 193 if self.ui.configbool('format', 'generaldelta', False):
194 194 requirements.append("generaldelta")
195 195 requirements = set(requirements)
196 196 else:
197 197 raise error.RepoError(_("repository %s not found") % path)
198 198 elif create:
199 199 raise error.RepoError(_("repository %s already exists") % path)
200 200 else:
201 201 try:
202 202 requirements = scmutil.readrequires(self.vfs, self.supported)
203 203 except IOError, inst:
204 204 if inst.errno != errno.ENOENT:
205 205 raise
206 206 requirements = set()
207 207
208 208 self.sharedpath = self.path
209 209 try:
210 210 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
211 211 if not os.path.exists(s):
212 212 raise error.RepoError(
213 213 _('.hg/sharedpath points to nonexistent directory %s') % s)
214 214 self.sharedpath = s
215 215 except IOError, inst:
216 216 if inst.errno != errno.ENOENT:
217 217 raise
218 218
219 219 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
220 220 self.spath = self.store.path
221 221 self.svfs = self.store.vfs
222 222 self.sopener = self.svfs
223 223 self.sjoin = self.store.join
224 224 self.vfs.createmode = self.store.createmode
225 225 self._applyrequirements(requirements)
226 226 if create:
227 227 self._writerequirements()
228 228
229 229
230 230 self._branchcache = None
231 231 self._branchcachetip = None
232 232 self.filterpats = {}
233 233 self._datafilters = {}
234 234 self._transref = self._lockref = self._wlockref = None
235 235
236 236 # A cache for various files under .hg/ that tracks file changes,
237 237 # (used by the filecache decorator)
238 238 #
239 239 # Maps a property name to its util.filecacheentry
240 240 self._filecache = {}
241 241
242 242 # hold sets of revisions to be filtered
243 243 # should be cleared when something might have changed the filter value:
244 244 # - new changesets,
245 245 # - phase change,
246 246 # - new obsolescence marker,
247 247 # - working directory parent change,
248 248 # - bookmark changes
249 249 self.filteredrevcache = {}
250 250
251 251 def close(self):
252 252 pass
253 253
254 254 def _restrictcapabilities(self, caps):
255 255 return caps
256 256
257 257 def _applyrequirements(self, requirements):
258 258 self.requirements = requirements
259 259 self.sopener.options = dict((r, 1) for r in requirements
260 260 if r in self.openerreqs)
261 261
262 262 def _writerequirements(self):
263 263 reqfile = self.opener("requires", "w")
264 264 for r in self.requirements:
265 265 reqfile.write("%s\n" % r)
266 266 reqfile.close()
267 267
268 268 def _checknested(self, path):
269 269 """Determine if path is a legal nested repository."""
270 270 if not path.startswith(self.root):
271 271 return False
272 272 subpath = path[len(self.root) + 1:]
273 273 normsubpath = util.pconvert(subpath)
274 274
275 275 # XXX: Checking against the current working copy is wrong in
276 276 # the sense that it can reject things like
277 277 #
278 278 # $ hg cat -r 10 sub/x.txt
279 279 #
280 280 # if sub/ is no longer a subrepository in the working copy
281 281 # parent revision.
282 282 #
283 283 # However, it can of course also allow things that would have
284 284 # been rejected before, such as the above cat command if sub/
285 285 # is a subrepository now, but was a normal directory before.
286 286 # The old path auditor would have rejected by mistake since it
287 287 # panics when it sees sub/.hg/.
288 288 #
289 289 # All in all, checking against the working copy seems sensible
290 290 # since we want to prevent access to nested repositories on
291 291 # the filesystem *now*.
292 292 ctx = self[None]
293 293 parts = util.splitpath(subpath)
294 294 while parts:
295 295 prefix = '/'.join(parts)
296 296 if prefix in ctx.substate:
297 297 if prefix == normsubpath:
298 298 return True
299 299 else:
300 300 sub = ctx.sub(prefix)
301 301 return sub.checknested(subpath[len(prefix) + 1:])
302 302 else:
303 303 parts.pop()
304 304 return False
305 305
306 306 def peer(self):
307 307 return localpeer(self) # not cached to avoid reference cycle
308 308
309 309 def unfiltered(self):
310 310 """Return unfiltered version of the repository
311 311
312 312 Intended to be overwritten by a filtered repo."""
313 313 return self
314 314
315 315 def filtered(self, name):
316 316 """Return a filtered version of a repository"""
317 317 # build a new class with the mixin and the current class
318 318 # (possibly a subclass of the repo)
319 319 class proxycls(repoview.repoview, self.unfiltered().__class__):
320 320 pass
321 321 return proxycls(self, name)
322 322
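filtered() builds the proxy class on the fly so that methods added by extensions (which typically subclass localrepository) stay available on the filtered view. The same dynamic-mixin idiom in a self-contained form; the classes below are stand-ins, not the real repoview API:

    class viewmixin(object):
        # stand-in for repoview.repoview: remembers which filter it represents
        def __init__(self, repo, name):
            self.__dict__.update(repo.__dict__)
            self.filtername = name

    class baserepo(object):
        pass

    class extensionrepo(baserepo):
        # pretend an extension subclassed the repository and added a method
        def extensionmethod(self):
            return 'still reachable through the view'

    repo = extensionrepo()
    proxycls = type('proxycls', (viewmixin, repo.__class__), {})
    view = proxycls(repo, 'visible')
    assert view.extensionmethod() == 'still reachable through the view'
    assert view.filtername == 'visible'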
323 323 @repofilecache('bookmarks')
324 324 def _bookmarks(self):
325 325 return bookmarks.bmstore(self)
326 326
327 327 @repofilecache('bookmarks.current')
328 328 def _bookmarkcurrent(self):
329 329 return bookmarks.readcurrent(self)
330 330
331 331 def bookmarkheads(self, bookmark):
332 332 name = bookmark.split('@', 1)[0]
333 333 heads = []
334 334 for mark, n in self._bookmarks.iteritems():
335 335 if mark.split('@', 1)[0] == name:
336 336 heads.append(n)
337 337 return heads
338 338
339 339 @storecache('phaseroots')
340 340 def _phasecache(self):
341 341 return phases.phasecache(self, self._phasedefaults)
342 342
343 343 @storecache('obsstore')
344 344 def obsstore(self):
345 345 store = obsolete.obsstore(self.sopener)
346 346 if store and not obsolete._enabled:
347 347 # message is rare enough to not be translated
348 348 msg = 'obsolete feature not enabled but %i markers found!\n'
349 349 self.ui.warn(msg % len(list(store)))
350 350 return store
351 351
352 352 @unfilteredpropertycache
353 353 def hiddenrevs(self):
354 354 """hiddenrevs: revs that should be hidden by command and tools
355 355
356 356 This set is carried on the repo to ease initialization and lazy
357 357 loading; it'll probably move back to changelog for efficiency and
358 358 consistency reasons.
359 359
360 360 Note that hiddenrevs will need invalidation when
361 361 - a new changeset is added (possibly unstable above extinct)
362 362 - a new obsolescence marker is added (possibly a new extinct changeset)
363 363
364 364 hidden changesets cannot have non-hidden descendants
365 365 """
366 366 hidden = set()
367 367 if self.obsstore:
368 368 ### hide extinct changesets that are not accessible by any means
369 369 hiddenquery = 'extinct() - ::(. + bookmark())'
370 370 hidden.update(self.revs(hiddenquery))
371 371 return hidden
372 372
373 373 @storecache('00changelog.i')
374 374 def changelog(self):
375 375 c = changelog.changelog(self.sopener)
376 376 if 'HG_PENDING' in os.environ:
377 377 p = os.environ['HG_PENDING']
378 378 if p.startswith(self.root):
379 379 c.readpending('00changelog.i.a')
380 380 return c
381 381
382 382 @storecache('00manifest.i')
383 383 def manifest(self):
384 384 return manifest.manifest(self.sopener)
385 385
386 386 @repofilecache('dirstate')
387 387 def dirstate(self):
388 388 warned = [0]
389 389 def validate(node):
390 390 try:
391 391 self.changelog.rev(node)
392 392 return node
393 393 except error.LookupError:
394 394 if not warned[0]:
395 395 warned[0] = True
396 396 self.ui.warn(_("warning: ignoring unknown"
397 397 " working parent %s!\n") % short(node))
398 398 return nullid
399 399
400 400 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
401 401
402 402 def __getitem__(self, changeid):
403 403 if changeid is None:
404 404 return context.workingctx(self)
405 405 return context.changectx(self, changeid)
406 406
407 407 def __contains__(self, changeid):
408 408 try:
409 409 return bool(self.lookup(changeid))
410 410 except error.RepoLookupError:
411 411 return False
412 412
413 413 def __nonzero__(self):
414 414 return True
415 415
416 416 def __len__(self):
417 417 return len(self.changelog)
418 418
419 419 def __iter__(self):
420 420 return iter(self.changelog)
421 421
422 422 def revs(self, expr, *args):
423 423 '''Return a list of revisions matching the given revset'''
424 424 expr = revset.formatspec(expr, *args)
425 425 m = revset.match(None, expr)
426 426 return [r for r in m(self, list(self))]
427 427
428 428 def set(self, expr, *args):
429 429 '''
430 430 Yield a context for each matching revision, after doing arg
431 431 replacement via revset.formatspec
432 432 '''
433 433 for r in self.revs(expr, *args):
434 434 yield self[r]
435 435
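revs() and set() are the programmatic entry points for revsets; revset.formatspec() quotes the extra arguments, so callers pass raw values instead of escaping them into the expression by hand. A small usage sketch (the queries themselves are examples, not taken from this file):

    # revision numbers on the default branch, oldest to newest
    revnums = repo.revs('branch(%s)', 'default')

    # the same kind of query, but yielding changectx objects lazily
    for ctx in repo.set('branch(%s) and user(%s)', 'default', 'mpm'):
        print('%s %s' % (ctx.hex()[:12], ctx.description().splitlines()[0]))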
436 436 def url(self):
437 437 return 'file:' + self.root
438 438
439 439 def hook(self, name, throw=False, **args):
440 440 return hook.hook(self.ui, self, name, throw, **args)
441 441
442 442 @unfilteredmethod
443 443 def _tag(self, names, node, message, local, user, date, extra={}):
444 444 if isinstance(names, str):
445 445 names = (names,)
446 446
447 447 branches = self.branchmap()
448 448 for name in names:
449 449 self.hook('pretag', throw=True, node=hex(node), tag=name,
450 450 local=local)
451 451 if name in branches:
452 452 self.ui.warn(_("warning: tag %s conflicts with existing"
453 453 " branch name\n") % name)
454 454
455 455 def writetags(fp, names, munge, prevtags):
456 456 fp.seek(0, 2)
457 457 if prevtags and prevtags[-1] != '\n':
458 458 fp.write('\n')
459 459 for name in names:
460 460 m = munge and munge(name) or name
461 461 if (self._tagscache.tagtypes and
462 462 name in self._tagscache.tagtypes):
463 463 old = self.tags().get(name, nullid)
464 464 fp.write('%s %s\n' % (hex(old), m))
465 465 fp.write('%s %s\n' % (hex(node), m))
466 466 fp.close()
467 467
468 468 prevtags = ''
469 469 if local:
470 470 try:
471 471 fp = self.opener('localtags', 'r+')
472 472 except IOError:
473 473 fp = self.opener('localtags', 'a')
474 474 else:
475 475 prevtags = fp.read()
476 476
477 477 # local tags are stored in the current charset
478 478 writetags(fp, names, None, prevtags)
479 479 for name in names:
480 480 self.hook('tag', node=hex(node), tag=name, local=local)
481 481 return
482 482
483 483 try:
484 484 fp = self.wfile('.hgtags', 'rb+')
485 485 except IOError, e:
486 486 if e.errno != errno.ENOENT:
487 487 raise
488 488 fp = self.wfile('.hgtags', 'ab')
489 489 else:
490 490 prevtags = fp.read()
491 491
492 492 # committed tags are stored in UTF-8
493 493 writetags(fp, names, encoding.fromlocal, prevtags)
494 494
495 495 fp.close()
496 496
497 497 self.invalidatecaches()
498 498
499 499 if '.hgtags' not in self.dirstate:
500 500 self[None].add(['.hgtags'])
501 501
502 502 m = matchmod.exact(self.root, '', ['.hgtags'])
503 503 tagnode = self.commit(message, user, date, extra=extra, match=m)
504 504
505 505 for name in names:
506 506 self.hook('tag', node=hex(node), tag=name, local=local)
507 507
508 508 return tagnode
509 509
510 510 def tag(self, names, node, message, local, user, date):
511 511 '''tag a revision with one or more symbolic names.
512 512
513 513 names is a list of strings or, when adding a single tag, names may be a
514 514 string.
515 515
516 516 if local is True, the tags are stored in a per-repository file.
517 517 otherwise, they are stored in the .hgtags file, and a new
518 518 changeset is committed with the change.
519 519
520 520 keyword arguments:
521 521
522 522 local: whether to store tags in non-version-controlled file
523 523 (default False)
524 524
525 525 message: commit message to use if committing
526 526
527 527 user: name of user to use if committing
528 528
529 529 date: date tuple to use if committing'''
530 530
531 531 if not local:
532 532 for x in self.status()[:5]:
533 533 if '.hgtags' in x:
534 534 raise util.Abort(_('working copy of .hgtags is changed '
535 535 '(please commit .hgtags manually)'))
536 536
537 537 self.tags() # instantiate the cache
538 538 self._tag(names, node, message, local, user, date)
539 539
540 540 @filteredpropertycache
541 541 def _tagscache(self):
542 542 '''Returns a tagscache object that contains various tags related
543 543 caches.'''
544 544
545 545 # This simplifies its cache management by having one decorated
546 546 # function (this one) and the rest simply fetch things from it.
547 547 class tagscache(object):
548 548 def __init__(self):
549 549 # These two define the set of tags for this repository. tags
550 550 # maps tag name to node; tagtypes maps tag name to 'global' or
551 551 # 'local'. (Global tags are defined by .hgtags across all
552 552 # heads, and local tags are defined in .hg/localtags.)
553 553 # They constitute the in-memory cache of tags.
554 554 self.tags = self.tagtypes = None
555 555
556 556 self.nodetagscache = self.tagslist = None
557 557
558 558 cache = tagscache()
559 559 cache.tags, cache.tagtypes = self._findtags()
560 560
561 561 return cache
562 562
563 563 def tags(self):
564 564 '''return a mapping of tag to node'''
565 565 t = {}
566 566 if self.changelog.filteredrevs:
567 567 tags, tt = self._findtags()
568 568 else:
569 569 tags = self._tagscache.tags
570 570 for k, v in tags.iteritems():
571 571 try:
572 572 # ignore tags to unknown nodes
573 573 self.changelog.rev(v)
574 574 t[k] = v
575 575 except (error.LookupError, ValueError):
576 576 pass
577 577 return t
578 578
579 579 def _findtags(self):
580 580 '''Do the hard work of finding tags. Return a pair of dicts
581 581 (tags, tagtypes) where tags maps tag name to node, and tagtypes
582 582 maps tag name to a string like \'global\' or \'local\'.
583 583 Subclasses or extensions are free to add their own tags, but
584 584 should be aware that the returned dicts will be retained for the
585 585 duration of the localrepo object.'''
586 586
587 587 # XXX what tagtype should subclasses/extensions use? Currently
588 588 # mq and bookmarks add tags, but do not set the tagtype at all.
589 589 # Should each extension invent its own tag type? Should there
590 590 # be one tagtype for all such "virtual" tags? Or is the status
591 591 # quo fine?
592 592
593 593 alltags = {} # map tag name to (node, hist)
594 594 tagtypes = {}
595 595
596 596 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
597 597 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
598 598
599 599 # Build the return dicts. Have to re-encode tag names because
600 600 # the tags module always uses UTF-8 (in order not to lose info
601 601 # writing to the cache), but the rest of Mercurial wants them in
602 602 # local encoding.
603 603 tags = {}
604 604 for (name, (node, hist)) in alltags.iteritems():
605 605 if node != nullid:
606 606 tags[encoding.tolocal(name)] = node
607 607 tags['tip'] = self.changelog.tip()
608 608 tagtypes = dict([(encoding.tolocal(name), value)
609 609 for (name, value) in tagtypes.iteritems()])
610 610 return (tags, tagtypes)
611 611
612 612 def tagtype(self, tagname):
613 613 '''
614 614 return the type of the given tag. result can be:
615 615
616 616 'local' : a local tag
617 617 'global' : a global tag
618 618 None : tag does not exist
619 619 '''
620 620
621 621 return self._tagscache.tagtypes.get(tagname)
622 622
623 623 def tagslist(self):
624 624 '''return a list of tags ordered by revision'''
625 625 if not self._tagscache.tagslist:
626 626 l = []
627 627 for t, n in self.tags().iteritems():
628 628 r = self.changelog.rev(n)
629 629 l.append((r, t, n))
630 630 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
631 631
632 632 return self._tagscache.tagslist
633 633
634 634 def nodetags(self, node):
635 635 '''return the tags associated with a node'''
636 636 if not self._tagscache.nodetagscache:
637 637 nodetagscache = {}
638 638 for t, n in self._tagscache.tags.iteritems():
639 639 nodetagscache.setdefault(n, []).append(t)
640 640 for tags in nodetagscache.itervalues():
641 641 tags.sort()
642 642 self._tagscache.nodetagscache = nodetagscache
643 643 return self._tagscache.nodetagscache.get(node, [])
644 644
645 645 def nodebookmarks(self, node):
646 646 marks = []
647 647 for bookmark, n in self._bookmarks.iteritems():
648 648 if n == node:
649 649 marks.append(bookmark)
650 650 return sorted(marks)
651 651
652 652 def _branchtags(self, partial, lrev):
653 653 # TODO: rename this function?
654 654 tiprev = len(self) - 1
655 655 if lrev != tiprev:
656 656 ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
657 657 self._updatebranchcache(partial, ctxgen)
658 658 self._writebranchcache(partial, self.changelog.tip(), tiprev)
659 659
660 660 return partial
661 661
662 662 @unfilteredmethod # Until we get a smarter cache management
663 663 def updatebranchcache(self):
664 664 tip = self.changelog.tip()
665 665 if self._branchcache is not None and self._branchcachetip == tip:
666 666 return
667 667
668 668 oldtip = self._branchcachetip
669 669 self._branchcachetip = tip
670 670 if oldtip is None or oldtip not in self.changelog.nodemap:
671 671 partial, last, lrev = self._readbranchcache()
672 672 else:
673 673 lrev = self.changelog.rev(oldtip)
674 674 partial = self._branchcache
675 675
676 676 self._branchtags(partial, lrev)
677 677 # this private cache holds all heads (not just the branch tips)
678 678 self._branchcache = partial
679 679
680 680 def branchmap(self):
681 681 '''returns a dictionary {branch: [branchheads]}'''
682 682 if self.changelog.filteredrevs:
683 683 # some changesets are excluded, so we can't use the cache
684 684 branchmap = {}
685 685 self._updatebranchcache(branchmap, (self[r] for r in self))
686 686 return branchmap
687 687 else:
688 688 self.updatebranchcache()
689 689 return self._branchcache
690 690
691 691
692 692 def _branchtip(self, heads):
693 693 '''return the tipmost branch head in heads'''
694 694 tip = heads[-1]
695 695 for h in reversed(heads):
696 696 if not self[h].closesbranch():
697 697 tip = h
698 698 break
699 699 return tip
700 700
701 701 def branchtip(self, branch):
702 702 '''return the tip node for a given branch'''
703 703 if branch not in self.branchmap():
704 704 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
705 705 return self._branchtip(self.branchmap()[branch])
706 706
707 707 def branchtags(self):
708 708 '''return a dict where branch names map to the tipmost head of
709 709 the branch, open heads come before closed'''
710 710 bt = {}
711 711 for bn, heads in self.branchmap().iteritems():
712 712 bt[bn] = self._branchtip(heads)
713 713 return bt
714 714
715 715 @unfilteredmethod # Until we get a smarter cache management
716 716 def _readbranchcache(self):
717 717 partial = {}
718 718 try:
719 719 f = self.opener("cache/branchheads")
720 720 lines = f.read().split('\n')
721 721 f.close()
722 722 except (IOError, OSError):
723 723 return {}, nullid, nullrev
724 724
725 725 try:
726 726 last, lrev = lines.pop(0).split(" ", 1)
727 727 last, lrev = bin(last), int(lrev)
728 728 if lrev >= len(self) or self[lrev].node() != last:
729 729 # invalidate the cache
730 730 raise ValueError('invalidating branch cache (tip differs)')
731 731 for l in lines:
732 732 if not l:
733 733 continue
734 734 node, label = l.split(" ", 1)
735 735 label = encoding.tolocal(label.strip())
736 736 if not node in self:
737 737 raise ValueError('invalidating branch cache because node '+
738 738 '%s does not exist' % node)
739 739 partial.setdefault(label, []).append(bin(node))
740 740 except KeyboardInterrupt:
741 741 raise
742 742 except Exception, inst:
743 743 if self.ui.debugflag:
744 744 self.ui.warn(str(inst), '\n')
745 745 partial, last, lrev = {}, nullid, nullrev
746 746 return partial, last, lrev
747 747
748 748 @unfilteredmethod # Until we get a smarter cache management
749 749 def _writebranchcache(self, branches, tip, tiprev):
750 750 try:
751 751 f = self.opener("cache/branchheads", "w", atomictemp=True)
752 752 f.write("%s %s\n" % (hex(tip), tiprev))
753 753 for label, nodes in branches.iteritems():
754 754 for node in nodes:
755 755 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
756 756 f.close()
757 757 except (IOError, OSError):
758 758 pass
759 759
760 760 @unfilteredmethod # Until we get a smarter cache management
761 761 def _updatebranchcache(self, partial, ctxgen):
762 762 """Given a branchhead cache, partial, that may have extra nodes or be
763 763 missing heads, and a generator of nodes that are at least a superset of
764 764 heads missing, this function updates partial to be correct.
765 765 """
766 766 # collect new branch entries
767 767 newbranches = {}
768 768 for c in ctxgen:
769 769 newbranches.setdefault(c.branch(), []).append(c.node())
770 770 # if older branchheads are reachable from new ones, they aren't
771 771 # really branchheads. Note checking parents is insufficient:
772 772 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
773 773 for branch, newnodes in newbranches.iteritems():
774 774 bheads = partial.setdefault(branch, [])
775 775 # Remove candidate heads that no longer are in the repo (e.g., as
776 776 # the result of a strip that just happened). Avoid using 'node in
777 777 # self' here because that dives down into branchcache code somewhat
778 778 # recursively.
779 779 bheadrevs = [self.changelog.rev(node) for node in bheads
780 780 if self.changelog.hasnode(node)]
781 781 newheadrevs = [self.changelog.rev(node) for node in newnodes
782 782 if self.changelog.hasnode(node)]
783 783 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
784 784 # Remove duplicates - nodes that are in newheadrevs and are already
785 785 # in bheadrevs. This can happen if you strip a node whose parent
786 786 # was already a head (because they're on different branches).
787 787 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
788 788
789 789 # Starting from tip means fewer passes over reachable. If we know
790 790 # the new candidates are not ancestors of existing heads, we don't
791 791 # have to examine ancestors of existing heads
792 792 if ctxisnew:
793 793 iterrevs = sorted(newheadrevs)
794 794 else:
795 795 iterrevs = list(bheadrevs)
796 796
797 797 # This loop prunes out two kinds of heads - heads that are
798 798 # superseded by a head in newheadrevs, and newheadrevs that are not
799 799 # heads because an existing head is their descendant.
800 800 while iterrevs:
801 801 latest = iterrevs.pop()
802 802 if latest not in bheadrevs:
803 803 continue
804 804 ancestors = set(self.changelog.ancestors([latest],
805 805 bheadrevs[0]))
806 806 if ancestors:
807 807 bheadrevs = [b for b in bheadrevs if b not in ancestors]
808 808 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
809 809
810 810 # There may be branches that cease to exist when the last commit in the
811 811 # branch was stripped. This code filters them out. Note that the
812 812 # branch that ceased to exist may not be in newbranches because
813 813 # newbranches is the set of candidate heads, which when you strip the
814 814 # last commit in a branch will be the parent branch.
815 815 for branch in partial.keys():
816 816 nodes = [head for head in partial[branch]
817 817 if self.changelog.hasnode(head)]
818 818 if not nodes:
819 819 del partial[branch]
820 820
821 821 def lookup(self, key):
822 822 return self[key].node()
823 823
824 824 def lookupbranch(self, key, remote=None):
825 825 repo = remote or self
826 826 if key in repo.branchmap():
827 827 return key
828 828
829 829 repo = (remote and remote.local()) and remote or self
830 830 return repo[key].branch()
831 831
832 832 def known(self, nodes):
833 833 nm = self.changelog.nodemap
834 834 pc = self._phasecache
835 835 result = []
836 836 for n in nodes:
837 837 r = nm.get(n)
838 838 resp = not (r is None or pc.phase(self, r) >= phases.secret)
839 839 result.append(resp)
840 840 return result
841 841
842 842 def local(self):
843 843 return self
844 844
845 845 def cancopy(self):
846 846 return self.local() # so statichttprepo's override of local() works
847 847
848 848 def join(self, f):
849 849 return os.path.join(self.path, f)
850 850
851 851 def wjoin(self, f):
852 852 return os.path.join(self.root, f)
853 853
854 854 def file(self, f):
855 855 if f[0] == '/':
856 856 f = f[1:]
857 857 return filelog.filelog(self.sopener, f)
858 858
859 859 def changectx(self, changeid):
860 860 return self[changeid]
861 861
862 862 def parents(self, changeid=None):
863 863 '''get list of changectxs for parents of changeid'''
864 864 return self[changeid].parents()
865 865
866 866 def setparents(self, p1, p2=nullid):
867 867 copies = self.dirstate.setparents(p1, p2)
868 868 if copies:
869 869 # Adjust copy records, the dirstate cannot do it, it
870 870 # requires access to parents manifests. Preserve them
871 871 # only for entries added to first parent.
872 872 pctx = self[p1]
873 873 for f in copies:
874 874 if f not in pctx and copies[f] in pctx:
875 875 self.dirstate.copy(copies[f], f)
876 876
877 877 def filectx(self, path, changeid=None, fileid=None):
878 878 """changeid can be a changeset revision, node, or tag.
879 879 fileid can be a file revision or node."""
880 880 return context.filectx(self, path, changeid, fileid)
881 881
882 882 def getcwd(self):
883 883 return self.dirstate.getcwd()
884 884
885 885 def pathto(self, f, cwd=None):
886 886 return self.dirstate.pathto(f, cwd)
887 887
888 888 def wfile(self, f, mode='r'):
889 889 return self.wopener(f, mode)
890 890
891 891 def _link(self, f):
892 892 return os.path.islink(self.wjoin(f))
893 893
894 894 def _loadfilter(self, filter):
895 895 if filter not in self.filterpats:
896 896 l = []
897 897 for pat, cmd in self.ui.configitems(filter):
898 898 if cmd == '!':
899 899 continue
900 900 mf = matchmod.match(self.root, '', [pat])
901 901 fn = None
902 902 params = cmd
903 903 for name, filterfn in self._datafilters.iteritems():
904 904 if cmd.startswith(name):
905 905 fn = filterfn
906 906 params = cmd[len(name):].lstrip()
907 907 break
908 908 if not fn:
909 909 fn = lambda s, c, **kwargs: util.filter(s, c)
910 910 # Wrap old filters not supporting keyword arguments
911 911 if not inspect.getargspec(fn)[2]:
912 912 oldfn = fn
913 913 fn = lambda s, c, **kwargs: oldfn(s, c)
914 914 l.append((mf, fn, params))
915 915 self.filterpats[filter] = l
916 916 return self.filterpats[filter]
917 917
918 918 def _filter(self, filterpats, filename, data):
919 919 for mf, fn, cmd in filterpats:
920 920 if mf(filename):
921 921 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
922 922 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
923 923 break
924 924
925 925 return data
926 926
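_loadfilter() reads its patterns from configuration sections named after the filter ('encode' and 'decode' below), mapping a file pattern either to a data filter registered with adddatafilter() or to an external command, with '!' disabling a pattern. An illustrative hgrc fragment — the commands shown are examples only:

    [encode]
    # applied by wread(): filter working-directory data before it is stored
    **.txt = sed -e 's/\r$//'

    [decode]
    # applied by wwrite()/wwritedata(): filter stored data on the way out
    **.txt = sed -e 's/$/\r/'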
927 927 @unfilteredpropertycache
928 928 def _encodefilterpats(self):
929 929 return self._loadfilter('encode')
930 930
931 931 @unfilteredpropertycache
932 932 def _decodefilterpats(self):
933 933 return self._loadfilter('decode')
934 934
935 935 def adddatafilter(self, name, filter):
936 936 self._datafilters[name] = filter
937 937
938 938 def wread(self, filename):
939 939 if self._link(filename):
940 940 data = os.readlink(self.wjoin(filename))
941 941 else:
942 942 data = self.wopener.read(filename)
943 943 return self._filter(self._encodefilterpats, filename, data)
944 944
945 945 def wwrite(self, filename, data, flags):
946 946 data = self._filter(self._decodefilterpats, filename, data)
947 947 if 'l' in flags:
948 948 self.wopener.symlink(data, filename)
949 949 else:
950 950 self.wopener.write(filename, data)
951 951 if 'x' in flags:
952 952 util.setflags(self.wjoin(filename), False, True)
953 953
954 954 def wwritedata(self, filename, data):
955 955 return self._filter(self._decodefilterpats, filename, data)
956 956
957 957 def transaction(self, desc):
958 958 tr = self._transref and self._transref() or None
959 959 if tr and tr.running():
960 960 return tr.nest()
961 961
962 962 # abort here if the journal already exists
963 963 if os.path.exists(self.sjoin("journal")):
964 964 raise error.RepoError(
965 965 _("abandoned transaction found - run hg recover"))
966 966
967 967 self._writejournal(desc)
968 968 renames = [(x, undoname(x)) for x in self._journalfiles()]
969 969
970 970 tr = transaction.transaction(self.ui.warn, self.sopener,
971 971 self.sjoin("journal"),
972 972 aftertrans(renames),
973 973 self.store.createmode)
974 974 self._transref = weakref.ref(tr)
975 975 return tr
976 976
977 977 def _journalfiles(self):
978 978 return (self.sjoin('journal'), self.join('journal.dirstate'),
979 979 self.join('journal.branch'), self.join('journal.desc'),
980 980 self.join('journal.bookmarks'),
981 981 self.sjoin('journal.phaseroots'))
982 982
983 983 def undofiles(self):
984 984 return [undoname(x) for x in self._journalfiles()]
985 985
986 986 def _writejournal(self, desc):
987 987 self.opener.write("journal.dirstate",
988 988 self.opener.tryread("dirstate"))
989 989 self.opener.write("journal.branch",
990 990 encoding.fromlocal(self.dirstate.branch()))
991 991 self.opener.write("journal.desc",
992 992 "%d\n%s\n" % (len(self), desc))
993 993 self.opener.write("journal.bookmarks",
994 994 self.opener.tryread("bookmarks"))
995 995 self.sopener.write("journal.phaseroots",
996 996 self.sopener.tryread("phaseroots"))
997 997
998 998 def recover(self):
999 999 lock = self.lock()
1000 1000 try:
1001 1001 if os.path.exists(self.sjoin("journal")):
1002 1002 self.ui.status(_("rolling back interrupted transaction\n"))
1003 1003 transaction.rollback(self.sopener, self.sjoin("journal"),
1004 1004 self.ui.warn)
1005 1005 self.invalidate()
1006 1006 return True
1007 1007 else:
1008 1008 self.ui.warn(_("no interrupted transaction available\n"))
1009 1009 return False
1010 1010 finally:
1011 1011 lock.release()
1012 1012
1013 1013 def rollback(self, dryrun=False, force=False):
1014 1014 wlock = lock = None
1015 1015 try:
1016 1016 wlock = self.wlock()
1017 1017 lock = self.lock()
1018 1018 if os.path.exists(self.sjoin("undo")):
1019 1019 return self._rollback(dryrun, force)
1020 1020 else:
1021 1021 self.ui.warn(_("no rollback information available\n"))
1022 1022 return 1
1023 1023 finally:
1024 1024 release(lock, wlock)
1025 1025
1026 1026 @unfilteredmethod # Until we get smarter cache management
1027 1027 def _rollback(self, dryrun, force):
1028 1028 ui = self.ui
1029 1029 try:
1030 1030 args = self.opener.read('undo.desc').splitlines()
1031 1031 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1032 1032 if len(args) >= 3:
1033 1033 detail = args[2]
1034 1034 oldtip = oldlen - 1
1035 1035
1036 1036 if detail and ui.verbose:
1037 1037 msg = (_('repository tip rolled back to revision %s'
1038 1038 ' (undo %s: %s)\n')
1039 1039 % (oldtip, desc, detail))
1040 1040 else:
1041 1041 msg = (_('repository tip rolled back to revision %s'
1042 1042 ' (undo %s)\n')
1043 1043 % (oldtip, desc))
1044 1044 except IOError:
1045 1045 msg = _('rolling back unknown transaction\n')
1046 1046 desc = None
1047 1047
1048 1048 if not force and self['.'] != self['tip'] and desc == 'commit':
1049 1049 raise util.Abort(
1050 1050 _('rollback of last commit while not checked out '
1051 1051 'may lose data'), hint=_('use -f to force'))
1052 1052
1053 1053 ui.status(msg)
1054 1054 if dryrun:
1055 1055 return 0
1056 1056
1057 1057 parents = self.dirstate.parents()
1058 1058 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
1059 1059 if os.path.exists(self.join('undo.bookmarks')):
1060 1060 util.rename(self.join('undo.bookmarks'),
1061 1061 self.join('bookmarks'))
1062 1062 if os.path.exists(self.sjoin('undo.phaseroots')):
1063 1063 util.rename(self.sjoin('undo.phaseroots'),
1064 1064 self.sjoin('phaseroots'))
1065 1065 self.invalidate()
1066 1066
1067 1067 # Discard all cache entries to force reloading everything.
1068 1068 self._filecache.clear()
1069 1069
1070 1070 parentgone = (parents[0] not in self.changelog.nodemap or
1071 1071 parents[1] not in self.changelog.nodemap)
1072 1072 if parentgone:
1073 1073 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1074 1074 try:
1075 1075 branch = self.opener.read('undo.branch')
1076 1076 self.dirstate.setbranch(encoding.tolocal(branch))
1077 1077 except IOError:
1078 1078 ui.warn(_('named branch could not be reset: '
1079 1079 'current branch is still \'%s\'\n')
1080 1080 % self.dirstate.branch())
1081 1081
1082 1082 self.dirstate.invalidate()
1083 1083 parents = tuple([p.rev() for p in self.parents()])
1084 1084 if len(parents) > 1:
1085 1085 ui.status(_('working directory now based on '
1086 1086 'revisions %d and %d\n') % parents)
1087 1087 else:
1088 1088 ui.status(_('working directory now based on '
1089 1089 'revision %d\n') % parents)
1090 1090 # TODO: if we know which new heads may result from this rollback, pass
1091 1091 # them to destroy(), which will prevent the branchhead cache from being
1092 1092 # invalidated.
1093 1093 self.destroyed()
1094 1094 return 0
1095 1095
1096 1096 def invalidatecaches(self):
1097 1097
1098 1098 if '_tagscache' in vars(self):
1099 1099 # can't use delattr on proxy
1100 1100 del self.__dict__['_tagscache']
1101 1101
1102 1102 self.unfiltered()._branchcache = None # in UTF-8
1103 1103 self.unfiltered()._branchcachetip = None
1104 self.invalidatevolatilesets()
1105
1106 def invalidatevolatilesets(self):
1107 self.filteredrevcache.clear()
1104 1108 obsolete.clearobscaches(self)
1105 self.filteredrevcache.clear()
1106 1109
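With the helper in place, code paths that only perturb volatile state (phase movement, new obsolescence markers, bookmark updates, working-directory parent changes) can clear exactly these sets instead of going through the full invalidatecaches(). A hedged sketch of such a caller — the surrounding function is illustrative, not part of this change:

    def movebookmark(repo, mark, node):
        marks = repo._bookmarks
        marks[mark] = node
        marks.write()
        # visibility may change (e.g. hidden changesets pinned by bookmarks),
        # so drop only the filtered-revision and obsolescence caches
        repo.invalidatevolatilesets()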
1107 1110 def invalidatedirstate(self):
1108 1111 '''Invalidates the dirstate, causing the next call to dirstate
1109 1112 to check if it was modified since the last time it was read,
1110 1113 rereading it if it has.
1111 1114
1112 1115 This is different from dirstate.invalidate() in that it doesn't always
1113 1116 reread the dirstate. Use dirstate.invalidate() if you want to
1114 1117 explicitly read the dirstate again (i.e. restoring it to a previous
1115 1118 known good state).'''
1116 1119 if hasunfilteredcache(self, 'dirstate'):
1117 1120 for k in self.dirstate._filecache:
1118 1121 try:
1119 1122 delattr(self.dirstate, k)
1120 1123 except AttributeError:
1121 1124 pass
1122 1125 delattr(self.unfiltered(), 'dirstate')
1123 1126
1124 1127 def invalidate(self):
1125 1128 unfiltered = self.unfiltered() # all filecaches are stored on unfiltered
1126 1129 for k in self._filecache:
1127 1130 # dirstate is invalidated separately in invalidatedirstate()
1128 1131 if k == 'dirstate':
1129 1132 continue
1130 1133
1131 1134 try:
1132 1135 delattr(unfiltered, k)
1133 1136 except AttributeError:
1134 1137 pass
1135 1138 self.invalidatecaches()
1136 1139
1137 1140 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1138 1141 try:
1139 1142 l = lock.lock(lockname, 0, releasefn, desc=desc)
1140 1143 except error.LockHeld, inst:
1141 1144 if not wait:
1142 1145 raise
1143 1146 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1144 1147 (desc, inst.locker))
1145 1148 # default to 600 seconds timeout
1146 1149 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1147 1150 releasefn, desc=desc)
1148 1151 if acquirefn:
1149 1152 acquirefn()
1150 1153 return l
1151 1154
1152 1155 def _afterlock(self, callback):
1153 1156 """add a callback to the current repository lock.
1154 1157
1155 1158 The callback will be executed on lock release."""
1156 1159 l = self._lockref and self._lockref()
1157 1160 if l:
1158 1161 l.postrelease.append(callback)
1159 1162 else:
1160 1163 callback()
1161 1164
1162 1165 def lock(self, wait=True):
1163 1166 '''Lock the repository store (.hg/store) and return a weak reference
1164 1167 to the lock. Use this before modifying the store (e.g. committing or
1165 1168 stripping). If you are opening a transaction, get a lock as well.'''
1166 1169 l = self._lockref and self._lockref()
1167 1170 if l is not None and l.held:
1168 1171 l.lock()
1169 1172 return l
1170 1173
1171 1174 def unlock():
1172 1175 self.store.write()
1173 1176 if hasunfilteredcache(self, '_phasecache'):
1174 1177 self._phasecache.write()
1175 1178 for k, ce in self._filecache.items():
1176 1179 if k == 'dirstate':
1177 1180 continue
1178 1181 ce.refresh()
1179 1182
1180 1183 l = self._lock(self.sjoin("lock"), wait, unlock,
1181 1184 self.invalidate, _('repository %s') % self.origroot)
1182 1185 self._lockref = weakref.ref(l)
1183 1186 return l
1184 1187
1185 1188 def wlock(self, wait=True):
1186 1189 '''Lock the non-store parts of the repository (everything under
1187 1190 .hg except .hg/store) and return a weak reference to the lock.
1188 1191 Use this before modifying files in .hg.'''
1189 1192 l = self._wlockref and self._wlockref()
1190 1193 if l is not None and l.held:
1191 1194 l.lock()
1192 1195 return l
1193 1196
1194 1197 def unlock():
1195 1198 self.dirstate.write()
1196 1199 ce = self._filecache.get('dirstate')
1197 1200 if ce:
1198 1201 ce.refresh()
1199 1202
1200 1203 l = self._lock(self.join("wlock"), wait, unlock,
1201 1204 self.invalidatedirstate, _('working directory of %s') %
1202 1205 self.origroot)
1203 1206 self._wlockref = weakref.ref(l)
1204 1207 return l
1205 1208
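When both locks are needed, wlock() is taken before lock() and they are released in reverse order, exactly as rollback() does above; transactions are opened only while the store lock is held. A minimal caller sketch following that pattern:

    wlock = lock = None
    try:
        wlock = repo.wlock()          # guards .hg/ (dirstate, bookmarks, ...)
        lock = repo.lock()            # guards .hg/store
        tr = repo.transaction('example')
        try:
            # ... write store data through the transaction ...
            tr.close()
        finally:
            tr.release()
    finally:
        release(lock, wlock)          # 'release' comes from mercurial.lock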
1206 1209 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1207 1210 """
1208 1211 commit an individual file as part of a larger transaction
1209 1212 """
1210 1213
1211 1214 fname = fctx.path()
1212 1215 text = fctx.data()
1213 1216 flog = self.file(fname)
1214 1217 fparent1 = manifest1.get(fname, nullid)
1215 1218 fparent2 = fparent2o = manifest2.get(fname, nullid)
1216 1219
1217 1220 meta = {}
1218 1221 copy = fctx.renamed()
1219 1222 if copy and copy[0] != fname:
1220 1223 # Mark the new revision of this file as a copy of another
1221 1224 # file. This copy data will effectively act as a parent
1222 1225 # of this new revision. If this is a merge, the first
1223 1226 # parent will be the nullid (meaning "look up the copy data")
1224 1227 # and the second one will be the other parent. For example:
1225 1228 #
1226 1229 # 0 --- 1 --- 3 rev1 changes file foo
1227 1230 # \ / rev2 renames foo to bar and changes it
1228 1231 # \- 2 -/ rev3 should have bar with all changes and
1229 1232 # should record that bar descends from
1230 1233 # bar in rev2 and foo in rev1
1231 1234 #
1232 1235 # this allows this merge to succeed:
1233 1236 #
1234 1237 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1235 1238 # \ / merging rev3 and rev4 should use bar@rev2
1236 1239 # \- 2 --- 4 as the merge base
1237 1240 #
1238 1241
1239 1242 cfname = copy[0]
1240 1243 crev = manifest1.get(cfname)
1241 1244 newfparent = fparent2
1242 1245
1243 1246 if manifest2: # branch merge
1244 1247 if fparent2 == nullid or crev is None: # copied on remote side
1245 1248 if cfname in manifest2:
1246 1249 crev = manifest2[cfname]
1247 1250 newfparent = fparent1
1248 1251
1249 1252 # find source in nearest ancestor if we've lost track
1250 1253 if not crev:
1251 1254 self.ui.debug(" %s: searching for copy revision for %s\n" %
1252 1255 (fname, cfname))
1253 1256 for ancestor in self[None].ancestors():
1254 1257 if cfname in ancestor:
1255 1258 crev = ancestor[cfname].filenode()
1256 1259 break
1257 1260
1258 1261 if crev:
1259 1262 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1260 1263 meta["copy"] = cfname
1261 1264 meta["copyrev"] = hex(crev)
1262 1265 fparent1, fparent2 = nullid, newfparent
1263 1266 else:
1264 1267 self.ui.warn(_("warning: can't find ancestor for '%s' "
1265 1268 "copied from '%s'!\n") % (fname, cfname))
1266 1269
1267 1270 elif fparent2 != nullid:
1268 1271 # is one parent an ancestor of the other?
1269 1272 fparentancestor = flog.ancestor(fparent1, fparent2)
1270 1273 if fparentancestor == fparent1:
1271 1274 fparent1, fparent2 = fparent2, nullid
1272 1275 elif fparentancestor == fparent2:
1273 1276 fparent2 = nullid
1274 1277
1275 1278 # is the file changed?
1276 1279 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1277 1280 changelist.append(fname)
1278 1281 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1279 1282
1280 1283 # are just the flags changed during merge?
1281 1284 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1282 1285 changelist.append(fname)
1283 1286
1284 1287 return fparent1
1285 1288
1286 1289 @unfilteredmethod
1287 1290 def commit(self, text="", user=None, date=None, match=None, force=False,
1288 1291 editor=False, extra={}):
1289 1292 """Add a new revision to current repository.
1290 1293
1291 1294 Revision information is gathered from the working directory,
1292 1295 match can be used to filter the committed files. If editor is
1293 1296 supplied, it is called to get a commit message.
1294 1297 """
1295 1298
1296 1299 def fail(f, msg):
1297 1300 raise util.Abort('%s: %s' % (f, msg))
1298 1301
1299 1302 if not match:
1300 1303 match = matchmod.always(self.root, '')
1301 1304
1302 1305 if not force:
1303 1306 vdirs = []
1304 1307 match.dir = vdirs.append
1305 1308 match.bad = fail
1306 1309
1307 1310 wlock = self.wlock()
1308 1311 try:
1309 1312 wctx = self[None]
1310 1313 merge = len(wctx.parents()) > 1
1311 1314
1312 1315 if (not force and merge and match and
1313 1316 (match.files() or match.anypats())):
1314 1317 raise util.Abort(_('cannot partially commit a merge '
1315 1318 '(do not specify files or patterns)'))
1316 1319
1317 1320 changes = self.status(match=match, clean=force)
1318 1321 if force:
1319 1322 changes[0].extend(changes[6]) # mq may commit unchanged files
1320 1323
1321 1324 # check subrepos
1322 1325 subs = []
1323 1326 commitsubs = set()
1324 1327 newstate = wctx.substate.copy()
1325 1328 # only manage subrepos and .hgsubstate if .hgsub is present
1326 1329 if '.hgsub' in wctx:
1327 1330 # we'll decide whether to track this ourselves, thanks
1328 1331 if '.hgsubstate' in changes[0]:
1329 1332 changes[0].remove('.hgsubstate')
1330 1333 if '.hgsubstate' in changes[2]:
1331 1334 changes[2].remove('.hgsubstate')
1332 1335
1333 1336 # compare current state to last committed state
1334 1337 # build new substate based on last committed state
1335 1338 oldstate = wctx.p1().substate
1336 1339 for s in sorted(newstate.keys()):
1337 1340 if not match(s):
1338 1341 # ignore working copy, use old state if present
1339 1342 if s in oldstate:
1340 1343 newstate[s] = oldstate[s]
1341 1344 continue
1342 1345 if not force:
1343 1346 raise util.Abort(
1344 1347 _("commit with new subrepo %s excluded") % s)
1345 1348 if wctx.sub(s).dirty(True):
1346 1349 if not self.ui.configbool('ui', 'commitsubrepos'):
1347 1350 raise util.Abort(
1348 1351 _("uncommitted changes in subrepo %s") % s,
1349 1352 hint=_("use --subrepos for recursive commit"))
1350 1353 subs.append(s)
1351 1354 commitsubs.add(s)
1352 1355 else:
1353 1356 bs = wctx.sub(s).basestate()
1354 1357 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1355 1358 if oldstate.get(s, (None, None, None))[1] != bs:
1356 1359 subs.append(s)
1357 1360
1358 1361 # check for removed subrepos
1359 1362 for p in wctx.parents():
1360 1363 r = [s for s in p.substate if s not in newstate]
1361 1364 subs += [s for s in r if match(s)]
1362 1365 if subs:
1363 1366 if (not match('.hgsub') and
1364 1367 '.hgsub' in (wctx.modified() + wctx.added())):
1365 1368 raise util.Abort(
1366 1369 _("can't commit subrepos without .hgsub"))
1367 1370 changes[0].insert(0, '.hgsubstate')
1368 1371
1369 1372 elif '.hgsub' in changes[2]:
1370 1373 # clean up .hgsubstate when .hgsub is removed
1371 1374 if ('.hgsubstate' in wctx and
1372 1375 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1373 1376 changes[2].insert(0, '.hgsubstate')
1374 1377
1375 1378 # make sure all explicit patterns are matched
1376 1379 if not force and match.files():
1377 1380 matched = set(changes[0] + changes[1] + changes[2])
1378 1381
1379 1382 for f in match.files():
1380 1383 f = self.dirstate.normalize(f)
1381 1384 if f == '.' or f in matched or f in wctx.substate:
1382 1385 continue
1383 1386 if f in changes[3]: # missing
1384 1387 fail(f, _('file not found!'))
1385 1388 if f in vdirs: # visited directory
1386 1389 d = f + '/'
1387 1390 for mf in matched:
1388 1391 if mf.startswith(d):
1389 1392 break
1390 1393 else:
1391 1394 fail(f, _("no match under directory!"))
1392 1395 elif f not in self.dirstate:
1393 1396 fail(f, _("file not tracked!"))
1394 1397
1395 1398 if (not force and not extra.get("close") and not merge
1396 1399 and not (changes[0] or changes[1] or changes[2])
1397 1400 and wctx.branch() == wctx.p1().branch()):
1398 1401 return None
1399 1402
1400 1403 if merge and changes[3]:
1401 1404 raise util.Abort(_("cannot commit merge with missing files"))
1402 1405
1403 1406 ms = mergemod.mergestate(self)
1404 1407 for f in changes[0]:
1405 1408 if f in ms and ms[f] == 'u':
1406 1409 raise util.Abort(_("unresolved merge conflicts "
1407 1410 "(see hg help resolve)"))
1408 1411
1409 1412 cctx = context.workingctx(self, text, user, date, extra, changes)
1410 1413 if editor:
1411 1414 cctx._text = editor(self, cctx, subs)
1412 1415 edited = (text != cctx._text)
1413 1416
1414 1417 # commit subs and write new state
1415 1418 if subs:
1416 1419 for s in sorted(commitsubs):
1417 1420 sub = wctx.sub(s)
1418 1421 self.ui.status(_('committing subrepository %s\n') %
1419 1422 subrepo.subrelpath(sub))
1420 1423 sr = sub.commit(cctx._text, user, date)
1421 1424 newstate[s] = (newstate[s][0], sr)
1422 1425 subrepo.writestate(self, newstate)
1423 1426
1424 1427 # Save commit message in case this transaction gets rolled back
1425 1428 # (e.g. by a pretxncommit hook). Leave the content alone on
1426 1429 # the assumption that the user will use the same editor again.
1427 1430 msgfn = self.savecommitmessage(cctx._text)
1428 1431
1429 1432 p1, p2 = self.dirstate.parents()
1430 1433 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1431 1434 try:
1432 1435 self.hook("precommit", throw=True, parent1=hookp1,
1433 1436 parent2=hookp2)
1434 1437 ret = self.commitctx(cctx, True)
1435 1438 except: # re-raises
1436 1439 if edited:
1437 1440 self.ui.write(
1438 1441 _('note: commit message saved in %s\n') % msgfn)
1439 1442 raise
1440 1443
1441 1444 # update bookmarks, dirstate and mergestate
1442 1445 bookmarks.update(self, [p1, p2], ret)
1443 1446 for f in changes[0] + changes[1]:
1444 1447 self.dirstate.normal(f)
1445 1448 for f in changes[2]:
1446 1449 self.dirstate.drop(f)
1447 1450 self.dirstate.setparents(ret)
1448 1451 ms.reset()
1449 1452 finally:
1450 1453 wlock.release()
1451 1454
1452 1455 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1453 1456 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1454 1457 self._afterlock(commithook)
1455 1458 return ret
1456 1459
1457 1460 @unfilteredmethod
1458 1461 def commitctx(self, ctx, error=False):
1459 1462 """Add a new revision to current repository.
1460 1463 Revision information is passed via the context argument.
1461 1464 """
1462 1465
1463 1466 tr = lock = None
1464 1467 removed = list(ctx.removed())
1465 1468 p1, p2 = ctx.p1(), ctx.p2()
1466 1469 user = ctx.user()
1467 1470
1468 1471 lock = self.lock()
1469 1472 try:
1470 1473 tr = self.transaction("commit")
1471 1474 trp = weakref.proxy(tr)
1472 1475
1473 1476 if ctx.files():
1474 1477 m1 = p1.manifest().copy()
1475 1478 m2 = p2.manifest()
1476 1479
1477 1480 # check in files
1478 1481 new = {}
1479 1482 changed = []
1480 1483 linkrev = len(self)
1481 1484 for f in sorted(ctx.modified() + ctx.added()):
1482 1485 self.ui.note(f + "\n")
1483 1486 try:
1484 1487 fctx = ctx[f]
1485 1488 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1486 1489 changed)
1487 1490 m1.set(f, fctx.flags())
1488 1491 except OSError, inst:
1489 1492 self.ui.warn(_("trouble committing %s!\n") % f)
1490 1493 raise
1491 1494 except IOError, inst:
1492 1495 errcode = getattr(inst, 'errno', errno.ENOENT)
1493 1496 if error or errcode and errcode != errno.ENOENT:
1494 1497 self.ui.warn(_("trouble committing %s!\n") % f)
1495 1498 raise
1496 1499 else:
1497 1500 removed.append(f)
1498 1501
1499 1502 # update manifest
1500 1503 m1.update(new)
1501 1504 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1502 1505 drop = [f for f in removed if f in m1]
1503 1506 for f in drop:
1504 1507 del m1[f]
1505 1508 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1506 1509 p2.manifestnode(), (new, drop))
1507 1510 files = changed + removed
1508 1511 else:
1509 1512 mn = p1.manifestnode()
1510 1513 files = []
1511 1514
1512 1515 # update changelog
1513 1516 self.changelog.delayupdate()
1514 1517 n = self.changelog.add(mn, files, ctx.description(),
1515 1518 trp, p1.node(), p2.node(),
1516 1519 user, ctx.date(), ctx.extra().copy())
1517 1520 p = lambda: self.changelog.writepending() and self.root or ""
1518 1521 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1519 1522 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1520 1523 parent2=xp2, pending=p)
1521 1524 self.changelog.finalize(trp)
1522 1525 # set the new commit in its proper phase
1523 1526 targetphase = phases.newcommitphase(self.ui)
1524 1527 if targetphase:
1525 1528 # retract boundary does not alter the parent changeset.
1526 1529 # if a parent has a higher phase, the resulting phase will
1527 1530 # be compliant anyway
1528 1531 #
1529 1532 # if minimal phase was 0 we don't need to retract anything
1530 1533 phases.retractboundary(self, targetphase, [n])
1531 1534 tr.close()
1532 1535 self.updatebranchcache()
1533 1536 return n
1534 1537 finally:
1535 1538 if tr:
1536 1539 tr.release()
1537 1540 lock.release()
1538 1541
1539 1542 @unfilteredmethod
1540 1543 def destroyed(self, newheadnodes=None):
1541 1544 '''Inform the repository that nodes have been destroyed.
1542 1545 Intended for use by strip and rollback, so there's a common
1543 1546 place for anything that has to be done after destroying history.
1544 1547
1545 1548 If you know the branchheadcache was up to date before nodes were removed
1546 1549 and you also know the set of candidate new heads that may have resulted
1547 1550 from the destruction, you can set newheadnodes. This will enable the
1548 1551 code to update the branchheads cache, rather than having future code
1549 1552 decide it's invalid and regenerating it from scratch.
1550 1553 '''
1551 1554 # If we have info, newheadnodes, on how to update the branch cache, do
1552 1555 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1553 1556 # will be caught the next time it is read.
1554 1557 if newheadnodes:
1555 1558 tiprev = len(self) - 1
1556 1559 ctxgen = (self[node] for node in newheadnodes
1557 1560 if self.changelog.hasnode(node))
1558 1561 self._updatebranchcache(self._branchcache, ctxgen)
1559 1562 self._writebranchcache(self._branchcache, self.changelog.tip(),
1560 1563 tiprev)
1561 1564
1562 1565 # Ensure the persistent tag cache is updated. Doing it now
1563 1566 # means that the tag cache only has to worry about destroyed
1564 1567 # heads immediately after a strip/rollback. That in turn
1565 1568 # guarantees that "cachetip == currenttip" (comparing both rev
1566 1569 # and node) always means no nodes have been added or destroyed.
1567 1570
1568 1571 # XXX this is suboptimal when qrefresh'ing: we strip the current
1569 1572 # head, refresh the tag cache, then immediately add a new head.
1570 1573 # But I think doing it this way is necessary for the "instant
1571 1574 # tag cache retrieval" case to work.
1572 1575 self.invalidatecaches()
1573 1576
1574 1577 # Discard all cache entries to force reloading everything.
1575 1578 self._filecache.clear()
1576 1579
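A hedged usage sketch of the optimization destroyed() describes above: a strip-like caller that already knows its candidate new heads can pass them so the branch cache is updated in place rather than rebuilt. The caller name and candidate_heads variable are illustrative assumptions, not names from this changeset.

    def after_history_destruction(repo, candidate_heads=None):
        if candidate_heads:
            # branch cache updated in place using the supplied candidates
            repo.destroyed(newheadnodes=candidate_heads)
        else:
            # caches are invalidated and regenerated lazily on the next read
            repo.destroyed()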
1577 1580 def walk(self, match, node=None):
1578 1581 '''
1579 1582 walk recursively through the directory tree or a given
1580 1583 changeset, finding all files matched by the match
1581 1584 function
1582 1585 '''
1583 1586 return self[node].walk(match)
1584 1587
1585 1588 def status(self, node1='.', node2=None, match=None,
1586 1589 ignored=False, clean=False, unknown=False,
1587 1590 listsubrepos=False):
1588 1591 """return status of files between two nodes or node and working
1589 1592 directory.
1590 1593
1591 1594 If node1 is None, use the first dirstate parent instead.
1592 1595 If node2 is None, compare node1 with working directory.
1593 1596 """
1594 1597
1595 1598 def mfmatches(ctx):
1596 1599 mf = ctx.manifest().copy()
1597 1600 if match.always():
1598 1601 return mf
1599 1602 for fn in mf.keys():
1600 1603 if not match(fn):
1601 1604 del mf[fn]
1602 1605 return mf
1603 1606
1604 1607 if isinstance(node1, context.changectx):
1605 1608 ctx1 = node1
1606 1609 else:
1607 1610 ctx1 = self[node1]
1608 1611 if isinstance(node2, context.changectx):
1609 1612 ctx2 = node2
1610 1613 else:
1611 1614 ctx2 = self[node2]
1612 1615
1613 1616 working = ctx2.rev() is None
1614 1617 parentworking = working and ctx1 == self['.']
1615 1618 match = match or matchmod.always(self.root, self.getcwd())
1616 1619 listignored, listclean, listunknown = ignored, clean, unknown
1617 1620
1618 1621 # load earliest manifest first for caching reasons
1619 1622 if not working and ctx2.rev() < ctx1.rev():
1620 1623 ctx2.manifest()
1621 1624
1622 1625 if not parentworking:
1623 1626 def bad(f, msg):
1624 1627 # 'f' may be a directory pattern from 'match.files()',
1625 1628 # so 'f not in ctx1' is not enough
1626 1629 if f not in ctx1 and f not in ctx1.dirs():
1627 1630 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1628 1631 match.bad = bad
1629 1632
1630 1633 if working: # we need to scan the working dir
1631 1634 subrepos = []
1632 1635 if '.hgsub' in self.dirstate:
1633 1636 subrepos = ctx2.substate.keys()
1634 1637 s = self.dirstate.status(match, subrepos, listignored,
1635 1638 listclean, listunknown)
1636 1639 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1637 1640
1638 1641 # check for any possibly clean files
1639 1642 if parentworking and cmp:
1640 1643 fixup = []
1641 1644 # do a full compare of any files that might have changed
1642 1645 for f in sorted(cmp):
1643 1646 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1644 1647 or ctx1[f].cmp(ctx2[f])):
1645 1648 modified.append(f)
1646 1649 else:
1647 1650 fixup.append(f)
1648 1651
1649 1652 # update dirstate for files that are actually clean
1650 1653 if fixup:
1651 1654 if listclean:
1652 1655 clean += fixup
1653 1656
1654 1657 try:
1655 1658 # updating the dirstate is optional
1656 1659 # so we don't wait on the lock
1657 1660 wlock = self.wlock(False)
1658 1661 try:
1659 1662 for f in fixup:
1660 1663 self.dirstate.normal(f)
1661 1664 finally:
1662 1665 wlock.release()
1663 1666 except error.LockError:
1664 1667 pass
1665 1668
1666 1669 if not parentworking:
1667 1670 mf1 = mfmatches(ctx1)
1668 1671 if working:
1669 1672 # we are comparing working dir against non-parent
1670 1673 # generate a pseudo-manifest for the working dir
1671 1674 mf2 = mfmatches(self['.'])
1672 1675 for f in cmp + modified + added:
1673 1676 mf2[f] = None
1674 1677 mf2.set(f, ctx2.flags(f))
1675 1678 for f in removed:
1676 1679 if f in mf2:
1677 1680 del mf2[f]
1678 1681 else:
1679 1682 # we are comparing two revisions
1680 1683 deleted, unknown, ignored = [], [], []
1681 1684 mf2 = mfmatches(ctx2)
1682 1685
1683 1686 modified, added, clean = [], [], []
1684 1687 withflags = mf1.withflags() | mf2.withflags()
1685 1688 for fn in mf2:
1686 1689 if fn in mf1:
1687 1690 if (fn not in deleted and
1688 1691 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1689 1692 (mf1[fn] != mf2[fn] and
1690 1693 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1691 1694 modified.append(fn)
1692 1695 elif listclean:
1693 1696 clean.append(fn)
1694 1697 del mf1[fn]
1695 1698 elif fn not in deleted:
1696 1699 added.append(fn)
1697 1700 removed = mf1.keys()
1698 1701
1699 1702 if working and modified and not self.dirstate._checklink:
1700 1703 # Symlink placeholders may get non-symlink-like contents
1701 1704 # via user error or dereferencing by NFS or Samba servers,
1702 1705 # so we filter out any placeholders that don't look like a
1703 1706 # symlink
1704 1707 sane = []
1705 1708 for f in modified:
1706 1709 if ctx2.flags(f) == 'l':
1707 1710 d = ctx2[f].data()
1708 1711 if len(d) >= 1024 or '\n' in d or util.binary(d):
1709 1712 self.ui.debug('ignoring suspect symlink placeholder'
1710 1713 ' "%s"\n' % f)
1711 1714 continue
1712 1715 sane.append(f)
1713 1716 modified = sane
1714 1717
1715 1718 r = modified, added, removed, deleted, unknown, ignored, clean
1716 1719
1717 1720 if listsubrepos:
1718 1721 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1719 1722 if working:
1720 1723 rev2 = None
1721 1724 else:
1722 1725 rev2 = ctx2.substate[subpath][1]
1723 1726 try:
1724 1727 submatch = matchmod.narrowmatcher(subpath, match)
1725 1728 s = sub.status(rev2, match=submatch, ignored=listignored,
1726 1729 clean=listclean, unknown=listunknown,
1727 1730 listsubrepos=True)
1728 1731 for rfiles, sfiles in zip(r, s):
1729 1732 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1730 1733 except error.LookupError:
1731 1734 self.ui.status(_("skipping missing subrepository: %s\n")
1732 1735 % subpath)
1733 1736
1734 1737 for l in r:
1735 1738 l.sort()
1736 1739 return r
1737 1740
1738 1741 def heads(self, start=None):
1739 1742 heads = self.changelog.heads(start)
1740 1743 # sort the output in rev descending order
1741 1744 return sorted(heads, key=self.changelog.rev, reverse=True)
1742 1745
1743 1746 def branchheads(self, branch=None, start=None, closed=False):
1744 1747 '''return a (possibly filtered) list of heads for the given branch
1745 1748
1746 1749 Heads are returned in topological order, from newest to oldest.
1747 1750 If branch is None, use the dirstate branch.
1748 1751 If start is not None, return only heads reachable from start.
1749 1752 If closed is True, return heads that are marked as closed as well.
1750 1753 '''
1751 1754 if branch is None:
1752 1755 branch = self[None].branch()
1753 1756 branches = self.branchmap()
1754 1757 if branch not in branches:
1755 1758 return []
1756 1759 # the cache returns heads ordered lowest to highest
1757 1760 bheads = list(reversed(branches[branch]))
1758 1761 if start is not None:
1759 1762 # filter out the heads that cannot be reached from startrev
1760 1763 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1761 1764 bheads = [h for h in bheads if h in fbheads]
1762 1765 if not closed:
1763 1766 bheads = [h for h in bheads if not self[h].closesbranch()]
1764 1767 return bheads
1765 1768
1766 1769 def branches(self, nodes):
1767 1770 if not nodes:
1768 1771 nodes = [self.changelog.tip()]
1769 1772 b = []
1770 1773 for n in nodes:
1771 1774 t = n
1772 1775 while True:
1773 1776 p = self.changelog.parents(n)
1774 1777 if p[1] != nullid or p[0] == nullid:
1775 1778 b.append((t, n, p[0], p[1]))
1776 1779 break
1777 1780 n = p[0]
1778 1781 return b
1779 1782
1780 1783 def between(self, pairs):
1781 1784 r = []
1782 1785
1783 1786 for top, bottom in pairs:
1784 1787 n, l, i = top, [], 0
1785 1788 f = 1
1786 1789
1787 1790 while n != bottom and n != nullid:
1788 1791 p = self.changelog.parents(n)[0]
1789 1792 if i == f:
1790 1793 l.append(n)
1791 1794 f = f * 2
1792 1795 n = p
1793 1796 i += 1
1794 1797
1795 1798 r.append(l)
1796 1799
1797 1800 return r
1798 1801
1799 1802 def pull(self, remote, heads=None, force=False):
1800 1803 # don't open a transaction for nothing or you break future useful
1801 1804 # rollback calls
1802 1805 tr = None
1803 1806 trname = 'pull\n' + util.hidepassword(remote.url())
1804 1807 lock = self.lock()
1805 1808 try:
1806 1809 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1807 1810 force=force)
1808 1811 common, fetch, rheads = tmp
1809 1812 if not fetch:
1810 1813 self.ui.status(_("no changes found\n"))
1811 1814 added = []
1812 1815 result = 0
1813 1816 else:
1814 1817 tr = self.transaction(trname)
1815 1818 if heads is None and list(common) == [nullid]:
1816 1819 self.ui.status(_("requesting all changes\n"))
1817 1820 elif heads is None and remote.capable('changegroupsubset'):
1818 1821 # issue1320, avoid a race if remote changed after discovery
1819 1822 heads = rheads
1820 1823
1821 1824 if remote.capable('getbundle'):
1822 1825 cg = remote.getbundle('pull', common=common,
1823 1826 heads=heads or rheads)
1824 1827 elif heads is None:
1825 1828 cg = remote.changegroup(fetch, 'pull')
1826 1829 elif not remote.capable('changegroupsubset'):
1827 1830 raise util.Abort(_("partial pull cannot be done because "
1828 1831 "other repository doesn't support "
1829 1832 "changegroupsubset."))
1830 1833 else:
1831 1834 cg = remote.changegroupsubset(fetch, heads, 'pull')
1832 1835 clstart = len(self.changelog)
1833 1836 result = self.addchangegroup(cg, 'pull', remote.url())
1834 1837 clend = len(self.changelog)
1835 1838 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1836 1839
1837 1840 # compute target subset
1838 1841 if heads is None:
1839 1842 # We pulled everything possible
1840 1843 # sync on everything common
1841 1844 subset = common + added
1842 1845 else:
1843 1846 # We pulled a specific subset
1844 1847 # sync on this subset
1845 1848 subset = heads
1846 1849
1847 1850 # Get remote phases data from remote
1848 1851 remotephases = remote.listkeys('phases')
1849 1852 publishing = bool(remotephases.get('publishing', False))
1850 1853 if remotephases and not publishing:
1851 1854 # remote is new and non-publishing
1852 1855 pheads, _dr = phases.analyzeremotephases(self, subset,
1853 1856 remotephases)
1854 1857 phases.advanceboundary(self, phases.public, pheads)
1855 1858 phases.advanceboundary(self, phases.draft, subset)
1856 1859 else:
1857 1860 # Remote is old or publishing; all common changesets
1858 1861 # should be seen as public
1859 1862 phases.advanceboundary(self, phases.public, subset)
1860 1863
1861 1864 if obsolete._enabled:
1862 1865 self.ui.debug('fetching remote obsolete markers\n')
1863 1866 remoteobs = remote.listkeys('obsolete')
1864 1867 if 'dump0' in remoteobs:
1865 1868 if tr is None:
1866 1869 tr = self.transaction(trname)
1867 1870 for key in sorted(remoteobs, reverse=True):
1868 1871 if key.startswith('dump'):
1869 1872 data = base85.b85decode(remoteobs[key])
1870 1873 self.obsstore.mergemarkers(tr, data)
1871 self.filteredrevcache.clear()
1874 self.invalidatevolatilesets()
1872 1875 if tr is not None:
1873 1876 tr.close()
1874 1877 finally:
1875 1878 if tr is not None:
1876 1879 tr.release()
1877 1880 lock.release()
1878 1881
1879 1882 return result
1880 1883
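The call inserted in pull() above replaces the pair obsolete.clearobscaches(self) / self.filteredrevcache.clear() with a single self.invalidatevolatilesets(), which is the point of this changeset. A minimal standalone sketch of that grouping, using stand-in caches rather than the real repository attributes (editor's illustration, not the changeset's actual code):

    class volatilecaches(object):
        def __init__(self):
            self.filteredrevcache = {}   # stand-in: filter name -> hidden revs
            self.obscache = {}           # stand-in for the obsolescence caches

        def invalidatevolatilesets(self):
            # drop every cache whose contents depend on volatile sets
            self.filteredrevcache.clear()
            self.obscache.clear()

    caches = volatilecaches()
    caches.filteredrevcache['visible'] = set([1, 2, 3])
    caches.invalidatevolatilesets()
    assert not caches.filteredrevcache and not caches.obscache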
1881 1884 def checkpush(self, force, revs):
1882 1885 """Extensions can override this function if additional checks have
1883 1886 to be performed before pushing, or call it if they override push
1884 1887 command.
1885 1888 """
1886 1889 pass
1887 1890
1888 1891 def push(self, remote, force=False, revs=None, newbranch=False):
1889 1892 '''Push outgoing changesets (limited by revs) from the current
1890 1893 repository to remote. Return an integer:
1891 1894 - None means nothing to push
1892 1895 - 0 means HTTP error
1893 1896 - 1 means we pushed and remote head count is unchanged *or*
1894 1897 we have outgoing changesets but refused to push
1895 1898 - other values as described by addchangegroup()
1896 1899 '''
1897 1900 # there are two ways to push to remote repo:
1898 1901 #
1899 1902 # addchangegroup assumes local user can lock remote
1900 1903 # repo (local filesystem, old ssh servers).
1901 1904 #
1902 1905 # unbundle assumes local user cannot lock remote repo (new ssh
1903 1906 # servers, http servers).
1904 1907
1905 1908 if not remote.canpush():
1906 1909 raise util.Abort(_("destination does not support push"))
1907 1910 unfi = self.unfiltered()
1908 1911 # get local lock as we might write phase data
1909 1912 locallock = self.lock()
1910 1913 try:
1911 1914 self.checkpush(force, revs)
1912 1915 lock = None
1913 1916 unbundle = remote.capable('unbundle')
1914 1917 if not unbundle:
1915 1918 lock = remote.lock()
1916 1919 try:
1917 1920 # discovery
1918 1921 fci = discovery.findcommonincoming
1919 1922 commoninc = fci(unfi, remote, force=force)
1920 1923 common, inc, remoteheads = commoninc
1921 1924 fco = discovery.findcommonoutgoing
1922 1925 outgoing = fco(unfi, remote, onlyheads=revs,
1923 1926 commoninc=commoninc, force=force)
1924 1927
1925 1928
1926 1929 if not outgoing.missing:
1927 1930 # nothing to push
1928 1931 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1929 1932 ret = None
1930 1933 else:
1931 1934 # something to push
1932 1935 if not force:
1933 1936 # if self.obsstore is false (empty) --> no obsolete markers,
1934 1937 # so we can skip the iteration
1935 1938 if unfi.obsstore:
1936 1939 # these messages are defined here because of the 80-char line limit
1937 1940 mso = _("push includes obsolete changeset: %s!")
1938 1941 msu = _("push includes unstable changeset: %s!")
1939 1942 msb = _("push includes bumped changeset: %s!")
1940 1943 msd = _("push includes divergent changeset: %s!")
1941 1944 # If we are pushing and there is at least one
1942 1945 # obsolete or unstable changeset in missing, at
1943 1946 # least one of the missingheads will be obsolete or
1944 1947 # unstable. So checking heads only is ok
1945 1948 for node in outgoing.missingheads:
1946 1949 ctx = unfi[node]
1947 1950 if ctx.obsolete():
1948 1951 raise util.Abort(mso % ctx)
1949 1952 elif ctx.unstable():
1950 1953 raise util.Abort(msu % ctx)
1951 1954 elif ctx.bumped():
1952 1955 raise util.Abort(msb % ctx)
1953 1956 elif ctx.divergent():
1954 1957 raise util.Abort(msd % ctx)
1955 1958 discovery.checkheads(unfi, remote, outgoing,
1956 1959 remoteheads, newbranch,
1957 1960 bool(inc))
1958 1961
1959 1962 # create a changegroup from local
1960 1963 if revs is None and not outgoing.excluded:
1961 1964 # push everything,
1962 1965 # use the fast path, no race possible on push
1963 1966 cg = self._changegroup(outgoing.missing, 'push')
1964 1967 else:
1965 1968 cg = self.getlocalbundle('push', outgoing)
1966 1969
1967 1970 # apply changegroup to remote
1968 1971 if unbundle:
1969 1972 # local repo finds heads on server, finds out what
1970 1973 # revs it must push. once revs transferred, if server
1971 1974 # finds it has different heads (someone else won
1972 1975 # commit/push race), server aborts.
1973 1976 if force:
1974 1977 remoteheads = ['force']
1975 1978 # ssh: return remote's addchangegroup()
1976 1979 # http: return remote's addchangegroup() or 0 for error
1977 1980 ret = remote.unbundle(cg, remoteheads, 'push')
1978 1981 else:
1979 1982 # we return an integer indicating remote head count
1980 1983 # change
1981 1984 ret = remote.addchangegroup(cg, 'push', self.url())
1982 1985
1983 1986 if ret:
1984 1987 # push succeeded, synchronize the target of the push
1985 1988 cheads = outgoing.missingheads
1986 1989 elif revs is None:
1987 1990 # All-out push failed; synchronize all common
1988 1991 cheads = outgoing.commonheads
1989 1992 else:
1990 1993 # I want cheads = heads(::missingheads and ::commonheads)
1991 1994 # (missingheads is revs with secret changeset filtered out)
1992 1995 #
1993 1996 # This can be expressed as:
1994 1997 # cheads = ( (missingheads and ::commonheads)
1995 1998 # + (commonheads and ::missingheads))"
1996 1999 # )
1997 2000 #
1998 2001 # while trying to push we already computed the following:
1999 2002 # common = (::commonheads)
2000 2003 # missing = ((commonheads::missingheads) - commonheads)
2001 2004 #
2002 2005 # We can pick:
2003 2006 # * missingheads part of common (::commonheads)
2004 2007 common = set(outgoing.common)
2005 2008 cheads = [node for node in revs if node in common]
2006 2009 # and
2007 2010 # * commonheads parents on missing
2008 2011 revset = unfi.set('%ln and parents(roots(%ln))',
2009 2012 outgoing.commonheads,
2010 2013 outgoing.missing)
2011 2014 cheads.extend(c.node() for c in revset)
2012 2015 # even when we don't push, exchanging phase data is useful
2013 2016 remotephases = remote.listkeys('phases')
2014 2017 if not remotephases: # old server or public only repo
2015 2018 phases.advanceboundary(self, phases.public, cheads)
2016 2019 # don't push any phase data as there is nothing to push
2017 2020 else:
2018 2021 ana = phases.analyzeremotephases(self, cheads, remotephases)
2019 2022 pheads, droots = ana
2020 2023 ### Apply remote phase on local
2021 2024 if remotephases.get('publishing', False):
2022 2025 phases.advanceboundary(self, phases.public, cheads)
2023 2026 else: # publish = False
2024 2027 phases.advanceboundary(self, phases.public, pheads)
2025 2028 phases.advanceboundary(self, phases.draft, cheads)
2026 2029 ### Apply local phase on remote
2027 2030
2028 2031 # Get the list of all revs draft on remote but public here.
2029 2032 # XXX Beware that the revset breaks if droots is not strictly
2030 2033 # XXX roots; we may want to ensure it is, but that is costly
2031 2034 outdated = unfi.set('heads((%ln::%ln) and public())',
2032 2035 droots, cheads)
2033 2036 for newremotehead in outdated:
2034 2037 r = remote.pushkey('phases',
2035 2038 newremotehead.hex(),
2036 2039 str(phases.draft),
2037 2040 str(phases.public))
2038 2041 if not r:
2039 2042 self.ui.warn(_('updating %s to public failed!\n')
2040 2043 % newremotehead)
2041 2044 self.ui.debug('try to push obsolete markers to remote\n')
2042 2045 if (obsolete._enabled and self.obsstore and
2043 2046 'obsolete' in remote.listkeys('namespaces')):
2044 2047 rslts = []
2045 2048 remotedata = self.listkeys('obsolete')
2046 2049 for key in sorted(remotedata, reverse=True):
2047 2050 # reverse sort to ensure we end with dump0
2048 2051 data = remotedata[key]
2049 2052 rslts.append(remote.pushkey('obsolete', key, '', data))
2050 2053 if [r for r in rslts if not r]:
2051 2054 msg = _('failed to push some obsolete markers!\n')
2052 2055 self.ui.warn(msg)
2053 2056 finally:
2054 2057 if lock is not None:
2055 2058 lock.release()
2056 2059 finally:
2057 2060 locallock.release()
2058 2061
2059 2062 self.ui.debug("checking for updated bookmarks\n")
2060 2063 rb = remote.listkeys('bookmarks')
2061 2064 for k in rb.keys():
2062 2065 if k in unfi._bookmarks:
2063 2066 nr, nl = rb[k], hex(self._bookmarks[k])
2064 2067 if nr in unfi:
2065 2068 cr = unfi[nr]
2066 2069 cl = unfi[nl]
2067 2070 if bookmarks.validdest(unfi, cr, cl):
2068 2071 r = remote.pushkey('bookmarks', k, nr, nl)
2069 2072 if r:
2070 2073 self.ui.status(_("updating bookmark %s\n") % k)
2071 2074 else:
2072 2075 self.ui.warn(_('updating bookmark %s'
2073 2076 ' failed!\n') % k)
2074 2077
2075 2078 return ret
2076 2079
2077 2080 def changegroupinfo(self, nodes, source):
2078 2081 if self.ui.verbose or source == 'bundle':
2079 2082 self.ui.status(_("%d changesets found\n") % len(nodes))
2080 2083 if self.ui.debugflag:
2081 2084 self.ui.debug("list of changesets:\n")
2082 2085 for node in nodes:
2083 2086 self.ui.debug("%s\n" % hex(node))
2084 2087
2085 2088 def changegroupsubset(self, bases, heads, source):
2086 2089 """Compute a changegroup consisting of all the nodes that are
2087 2090 descendants of any of the bases and ancestors of any of the heads.
2088 2091 Return a chunkbuffer object whose read() method will return
2089 2092 successive changegroup chunks.
2090 2093
2091 2094 It is fairly complex as determining which filenodes and which
2092 2095 manifest nodes need to be included for the changeset to be complete
2093 2096 is non-trivial.
2094 2097
2095 2098 Another wrinkle is doing the reverse, figuring out which changeset in
2096 2099 the changegroup a particular filenode or manifestnode belongs to.
2097 2100 """
2098 2101 cl = self.changelog
2099 2102 if not bases:
2100 2103 bases = [nullid]
2101 2104 csets, bases, heads = cl.nodesbetween(bases, heads)
2102 2105 # We assume that all ancestors of bases are known
2103 2106 common = cl.ancestors([cl.rev(n) for n in bases])
2104 2107 return self._changegroupsubset(common, csets, heads, source)
2105 2108
2106 2109 def getlocalbundle(self, source, outgoing):
2107 2110 """Like getbundle, but taking a discovery.outgoing as an argument.
2108 2111
2109 2112 This is only implemented for local repos and reuses potentially
2110 2113 precomputed sets in outgoing."""
2111 2114 if not outgoing.missing:
2112 2115 return None
2113 2116 return self._changegroupsubset(outgoing.common,
2114 2117 outgoing.missing,
2115 2118 outgoing.missingheads,
2116 2119 source)
2117 2120
2118 2121 def getbundle(self, source, heads=None, common=None):
2119 2122 """Like changegroupsubset, but returns the set difference between the
2120 2123 ancestors of heads and the ancestors of common.
2121 2124
2122 2125 If heads is None, use the local heads. If common is None, use [nullid].
2123 2126
2124 2127 The nodes in common might not all be known locally due to the way the
2125 2128 current discovery protocol works.
2126 2129 """
2127 2130 cl = self.changelog
2128 2131 if common:
2129 2132 hasnode = cl.hasnode
2130 2133 common = [n for n in common if hasnode(n)]
2131 2134 else:
2132 2135 common = [nullid]
2133 2136 if not heads:
2134 2137 heads = cl.heads()
2135 2138 return self.getlocalbundle(source,
2136 2139 discovery.outgoing(cl, common, heads))
2137 2140
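getbundle()'s docstring describes the bundle contents as a set difference: the ancestors of heads minus the ancestors of common. A tiny, hedged illustration of that arithmetic on plain revision sets (helper name is an assumption for illustration):

    def missing_revs(ancestors_of_heads, ancestors_of_common):
        # revisions the recipient lacks, per the description above
        return ancestors_of_heads - ancestors_of_common

    assert missing_revs(set([0, 1, 2, 3, 4]), set([0, 1, 2])) == set([3, 4])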
2138 2141 @unfilteredmethod
2139 2142 def _changegroupsubset(self, commonrevs, csets, heads, source):
2140 2143
2141 2144 cl = self.changelog
2142 2145 mf = self.manifest
2143 2146 mfs = {} # needed manifests
2144 2147 fnodes = {} # needed file nodes
2145 2148 changedfiles = set()
2146 2149 fstate = ['', {}]
2147 2150 count = [0, 0]
2148 2151
2149 2152 # can we go through the fast path ?
2150 2153 heads.sort()
2151 2154 if heads == sorted(self.heads()):
2152 2155 return self._changegroup(csets, source)
2153 2156
2154 2157 # slow path
2155 2158 self.hook('preoutgoing', throw=True, source=source)
2156 2159 self.changegroupinfo(csets, source)
2157 2160
2158 2161 # filter any nodes that claim to be part of the known set
2159 2162 def prune(revlog, missing):
2160 2163 rr, rl = revlog.rev, revlog.linkrev
2161 2164 return [n for n in missing
2162 2165 if rl(rr(n)) not in commonrevs]
2163 2166
2164 2167 progress = self.ui.progress
2165 2168 _bundling = _('bundling')
2166 2169 _changesets = _('changesets')
2167 2170 _manifests = _('manifests')
2168 2171 _files = _('files')
2169 2172
2170 2173 def lookup(revlog, x):
2171 2174 if revlog == cl:
2172 2175 c = cl.read(x)
2173 2176 changedfiles.update(c[3])
2174 2177 mfs.setdefault(c[0], x)
2175 2178 count[0] += 1
2176 2179 progress(_bundling, count[0],
2177 2180 unit=_changesets, total=count[1])
2178 2181 return x
2179 2182 elif revlog == mf:
2180 2183 clnode = mfs[x]
2181 2184 mdata = mf.readfast(x)
2182 2185 for f, n in mdata.iteritems():
2183 2186 if f in changedfiles:
2184 2187 fnodes[f].setdefault(n, clnode)
2185 2188 count[0] += 1
2186 2189 progress(_bundling, count[0],
2187 2190 unit=_manifests, total=count[1])
2188 2191 return clnode
2189 2192 else:
2190 2193 progress(_bundling, count[0], item=fstate[0],
2191 2194 unit=_files, total=count[1])
2192 2195 return fstate[1][x]
2193 2196
2194 2197 bundler = changegroup.bundle10(lookup)
2195 2198 reorder = self.ui.config('bundle', 'reorder', 'auto')
2196 2199 if reorder == 'auto':
2197 2200 reorder = None
2198 2201 else:
2199 2202 reorder = util.parsebool(reorder)
2200 2203
2201 2204 def gengroup():
2202 2205 # Create a changenode group generator that will call our functions
2203 2206 # back to lookup the owning changenode and collect information.
2204 2207 count[:] = [0, len(csets)]
2205 2208 for chunk in cl.group(csets, bundler, reorder=reorder):
2206 2209 yield chunk
2207 2210 progress(_bundling, None)
2208 2211
2209 2212 # Create a generator for the manifestnodes that calls our lookup
2210 2213 # and data collection functions back.
2211 2214 for f in changedfiles:
2212 2215 fnodes[f] = {}
2213 2216 count[:] = [0, len(mfs)]
2214 2217 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2215 2218 yield chunk
2216 2219 progress(_bundling, None)
2217 2220
2218 2221 mfs.clear()
2219 2222
2220 2223 # Go through all our files in order sorted by name.
2221 2224 count[:] = [0, len(changedfiles)]
2222 2225 for fname in sorted(changedfiles):
2223 2226 filerevlog = self.file(fname)
2224 2227 if not len(filerevlog):
2225 2228 raise util.Abort(_("empty or missing revlog for %s")
2226 2229 % fname)
2227 2230 fstate[0] = fname
2228 2231 fstate[1] = fnodes.pop(fname, {})
2229 2232
2230 2233 nodelist = prune(filerevlog, fstate[1])
2231 2234 if nodelist:
2232 2235 count[0] += 1
2233 2236 yield bundler.fileheader(fname)
2234 2237 for chunk in filerevlog.group(nodelist, bundler, reorder):
2235 2238 yield chunk
2236 2239
2237 2240 # Signal that no more groups are left.
2238 2241 yield bundler.close()
2239 2242 progress(_bundling, None)
2240 2243
2241 2244 if csets:
2242 2245 self.hook('outgoing', node=hex(csets[0]), source=source)
2243 2246
2244 2247 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2245 2248
2246 2249 def changegroup(self, basenodes, source):
2247 2250 # to avoid a race we use changegroupsubset() (issue1320)
2248 2251 return self.changegroupsubset(basenodes, self.heads(), source)
2249 2252
2250 2253 @unfilteredmethod
2251 2254 def _changegroup(self, nodes, source):
2252 2255 """Compute the changegroup of all nodes that we have that a recipient
2253 2256 doesn't. Return a chunkbuffer object whose read() method will return
2254 2257 successive changegroup chunks.
2255 2258
2256 2259 This is much easier than the previous function as we can assume that
2257 2260 the recipient has any changenode we aren't sending them.
2258 2261
2259 2262 nodes is the set of nodes to send"""
2260 2263
2261 2264 cl = self.changelog
2262 2265 mf = self.manifest
2263 2266 mfs = {}
2264 2267 changedfiles = set()
2265 2268 fstate = ['']
2266 2269 count = [0, 0]
2267 2270
2268 2271 self.hook('preoutgoing', throw=True, source=source)
2269 2272 self.changegroupinfo(nodes, source)
2270 2273
2271 2274 revset = set([cl.rev(n) for n in nodes])
2272 2275
2273 2276 def gennodelst(log):
2274 2277 ln, llr = log.node, log.linkrev
2275 2278 return [ln(r) for r in log if llr(r) in revset]
2276 2279
2277 2280 progress = self.ui.progress
2278 2281 _bundling = _('bundling')
2279 2282 _changesets = _('changesets')
2280 2283 _manifests = _('manifests')
2281 2284 _files = _('files')
2282 2285
2283 2286 def lookup(revlog, x):
2284 2287 if revlog == cl:
2285 2288 c = cl.read(x)
2286 2289 changedfiles.update(c[3])
2287 2290 mfs.setdefault(c[0], x)
2288 2291 count[0] += 1
2289 2292 progress(_bundling, count[0],
2290 2293 unit=_changesets, total=count[1])
2291 2294 return x
2292 2295 elif revlog == mf:
2293 2296 count[0] += 1
2294 2297 progress(_bundling, count[0],
2295 2298 unit=_manifests, total=count[1])
2296 2299 return cl.node(revlog.linkrev(revlog.rev(x)))
2297 2300 else:
2298 2301 progress(_bundling, count[0], item=fstate[0],
2299 2302 total=count[1], unit=_files)
2300 2303 return cl.node(revlog.linkrev(revlog.rev(x)))
2301 2304
2302 2305 bundler = changegroup.bundle10(lookup)
2303 2306 reorder = self.ui.config('bundle', 'reorder', 'auto')
2304 2307 if reorder == 'auto':
2305 2308 reorder = None
2306 2309 else:
2307 2310 reorder = util.parsebool(reorder)
2308 2311
2309 2312 def gengroup():
2310 2313 '''yield a sequence of changegroup chunks (strings)'''
2311 2314 # construct a list of all changed files
2312 2315
2313 2316 count[:] = [0, len(nodes)]
2314 2317 for chunk in cl.group(nodes, bundler, reorder=reorder):
2315 2318 yield chunk
2316 2319 progress(_bundling, None)
2317 2320
2318 2321 count[:] = [0, len(mfs)]
2319 2322 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2320 2323 yield chunk
2321 2324 progress(_bundling, None)
2322 2325
2323 2326 count[:] = [0, len(changedfiles)]
2324 2327 for fname in sorted(changedfiles):
2325 2328 filerevlog = self.file(fname)
2326 2329 if not len(filerevlog):
2327 2330 raise util.Abort(_("empty or missing revlog for %s")
2328 2331 % fname)
2329 2332 fstate[0] = fname
2330 2333 nodelist = gennodelst(filerevlog)
2331 2334 if nodelist:
2332 2335 count[0] += 1
2333 2336 yield bundler.fileheader(fname)
2334 2337 for chunk in filerevlog.group(nodelist, bundler, reorder):
2335 2338 yield chunk
2336 2339 yield bundler.close()
2337 2340 progress(_bundling, None)
2338 2341
2339 2342 if nodes:
2340 2343 self.hook('outgoing', node=hex(nodes[0]), source=source)
2341 2344
2342 2345 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2343 2346
2344 2347 @unfilteredmethod
2345 2348 def addchangegroup(self, source, srctype, url, emptyok=False):
2346 2349 """Add the changegroup returned by source.read() to this repo.
2347 2350 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2348 2351 the URL of the repo where this changegroup is coming from.
2349 2352
2350 2353 Return an integer summarizing the change to this repo:
2351 2354 - nothing changed or no source: 0
2352 2355 - more heads than before: 1+added heads (2..n)
2353 2356 - fewer heads than before: -1-removed heads (-2..-n)
2354 2357 - number of heads stays the same: 1
2355 2358 """
2356 2359 def csmap(x):
2357 2360 self.ui.debug("add changeset %s\n" % short(x))
2358 2361 return len(cl)
2359 2362
2360 2363 def revmap(x):
2361 2364 return cl.rev(x)
2362 2365
2363 2366 if not source:
2364 2367 return 0
2365 2368
2366 2369 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2367 2370
2368 2371 changesets = files = revisions = 0
2369 2372 efiles = set()
2370 2373
2371 2374 # write changelog data to temp files so concurrent readers will not see
2372 2375 # an inconsistent view
2373 2376 cl = self.changelog
2374 2377 cl.delayupdate()
2375 2378 oldheads = cl.heads()
2376 2379
2377 2380 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2378 2381 try:
2379 2382 trp = weakref.proxy(tr)
2380 2383 # pull off the changeset group
2381 2384 self.ui.status(_("adding changesets\n"))
2382 2385 clstart = len(cl)
2383 2386 class prog(object):
2384 2387 step = _('changesets')
2385 2388 count = 1
2386 2389 ui = self.ui
2387 2390 total = None
2388 2391 def __call__(self):
2389 2392 self.ui.progress(self.step, self.count, unit=_('chunks'),
2390 2393 total=self.total)
2391 2394 self.count += 1
2392 2395 pr = prog()
2393 2396 source.callback = pr
2394 2397
2395 2398 source.changelogheader()
2396 2399 srccontent = cl.addgroup(source, csmap, trp)
2397 2400 if not (srccontent or emptyok):
2398 2401 raise util.Abort(_("received changelog group is empty"))
2399 2402 clend = len(cl)
2400 2403 changesets = clend - clstart
2401 2404 for c in xrange(clstart, clend):
2402 2405 efiles.update(self[c].files())
2403 2406 efiles = len(efiles)
2404 2407 self.ui.progress(_('changesets'), None)
2405 2408
2406 2409 # pull off the manifest group
2407 2410 self.ui.status(_("adding manifests\n"))
2408 2411 pr.step = _('manifests')
2409 2412 pr.count = 1
2410 2413 pr.total = changesets # manifests <= changesets
2411 2414 # no need to check for empty manifest group here:
2412 2415 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2413 2416 # no new manifest will be created and the manifest group will
2414 2417 # be empty during the pull
2415 2418 source.manifestheader()
2416 2419 self.manifest.addgroup(source, revmap, trp)
2417 2420 self.ui.progress(_('manifests'), None)
2418 2421
2419 2422 needfiles = {}
2420 2423 if self.ui.configbool('server', 'validate', default=False):
2421 2424 # validate incoming csets have their manifests
2422 2425 for cset in xrange(clstart, clend):
2423 2426 mfest = self.changelog.read(self.changelog.node(cset))[0]
2424 2427 mfest = self.manifest.readdelta(mfest)
2425 2428 # store file nodes we must see
2426 2429 for f, n in mfest.iteritems():
2427 2430 needfiles.setdefault(f, set()).add(n)
2428 2431
2429 2432 # process the files
2430 2433 self.ui.status(_("adding file changes\n"))
2431 2434 pr.step = _('files')
2432 2435 pr.count = 1
2433 2436 pr.total = efiles
2434 2437 source.callback = None
2435 2438
2436 2439 while True:
2437 2440 chunkdata = source.filelogheader()
2438 2441 if not chunkdata:
2439 2442 break
2440 2443 f = chunkdata["filename"]
2441 2444 self.ui.debug("adding %s revisions\n" % f)
2442 2445 pr()
2443 2446 fl = self.file(f)
2444 2447 o = len(fl)
2445 2448 if not fl.addgroup(source, revmap, trp):
2446 2449 raise util.Abort(_("received file revlog group is empty"))
2447 2450 revisions += len(fl) - o
2448 2451 files += 1
2449 2452 if f in needfiles:
2450 2453 needs = needfiles[f]
2451 2454 for new in xrange(o, len(fl)):
2452 2455 n = fl.node(new)
2453 2456 if n in needs:
2454 2457 needs.remove(n)
2455 2458 if not needs:
2456 2459 del needfiles[f]
2457 2460 self.ui.progress(_('files'), None)
2458 2461
2459 2462 for f, needs in needfiles.iteritems():
2460 2463 fl = self.file(f)
2461 2464 for n in needs:
2462 2465 try:
2463 2466 fl.rev(n)
2464 2467 except error.LookupError:
2465 2468 raise util.Abort(
2466 2469 _('missing file data for %s:%s - run hg verify') %
2467 2470 (f, hex(n)))
2468 2471
2469 2472 dh = 0
2470 2473 if oldheads:
2471 2474 heads = cl.heads()
2472 2475 dh = len(heads) - len(oldheads)
2473 2476 for h in heads:
2474 2477 if h not in oldheads and self[h].closesbranch():
2475 2478 dh -= 1
2476 2479 htext = ""
2477 2480 if dh:
2478 2481 htext = _(" (%+d heads)") % dh
2479 2482
2480 2483 self.ui.status(_("added %d changesets"
2481 2484 " with %d changes to %d files%s\n")
2482 2485 % (changesets, revisions, files, htext))
2483 obsolete.clearobscaches(self)
2484 self.filteredrevcache.clear()
2486 self.invalidatevolatilesets()
2485 2487
2486 2488 if changesets > 0:
2487 2489 p = lambda: cl.writepending() and self.root or ""
2488 2490 self.hook('pretxnchangegroup', throw=True,
2489 2491 node=hex(cl.node(clstart)), source=srctype,
2490 2492 url=url, pending=p)
2491 2493
2492 2494 added = [cl.node(r) for r in xrange(clstart, clend)]
2493 2495 publishing = self.ui.configbool('phases', 'publish', True)
2494 2496 if srctype == 'push':
2495 2497 # Old servers cannot push the boundary themselves.
2496 2498 # New servers won't push the boundary if the changeset already
2497 2499 # existed locally as secret
2498 2500 #
2499 2501 # We should not use 'added' here but the list of all changes in
2500 2502 # the bundle
2501 2503 if publishing:
2502 2504 phases.advanceboundary(self, phases.public, srccontent)
2503 2505 else:
2504 2506 phases.advanceboundary(self, phases.draft, srccontent)
2505 2507 phases.retractboundary(self, phases.draft, added)
2506 2508 elif srctype != 'strip':
2507 2509 # publishing only alters behavior during push
2508 2510 #
2509 2511 # strip should not touch boundary at all
2510 2512 phases.retractboundary(self, phases.draft, added)
2511 2513
2512 2514 # make changelog see real files again
2513 2515 cl.finalize(trp)
2514 2516
2515 2517 tr.close()
2516 2518
2517 2519 if changesets > 0:
2518 2520 self.updatebranchcache()
2519 2521 def runhooks():
2520 2522 # forcefully update the on-disk branch cache
2521 2523 self.ui.debug("updating the branch cache\n")
2522 2524 self.hook("changegroup", node=hex(cl.node(clstart)),
2523 2525 source=srctype, url=url)
2524 2526
2525 2527 for n in added:
2526 2528 self.hook("incoming", node=hex(n), source=srctype,
2527 2529 url=url)
2528 2530 self._afterlock(runhooks)
2529 2531
2530 2532 finally:
2531 2533 tr.release()
2532 2534 # never return 0 here:
2533 2535 if dh < 0:
2534 2536 return dh - 1
2535 2537 else:
2536 2538 return dh + 1
2537 2539
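addchangegroup()'s return value encodes the change in head count as stated in its docstring; the sketch below only restates the dh adjustment made at the end of the method above, as a standalone, hedged illustration:

    def encode_head_delta(dh):
        # dh is newheads - oldheads, adjusted for closed branches
        return dh - 1 if dh < 0 else dh + 1

    assert encode_head_delta(0) == 1     # head count unchanged
    assert encode_head_delta(2) == 3     # two heads added -> 1 + 2
    assert encode_head_delta(-1) == -2   # one head removed -> -1 - 1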
2538 2540 def stream_in(self, remote, requirements):
2539 2541 lock = self.lock()
2540 2542 try:
2541 2543 # Save remote branchmap. We will use it later
2542 2544 # to speed up branchcache creation
2543 2545 rbranchmap = None
2544 2546 if remote.capable("branchmap"):
2545 2547 rbranchmap = remote.branchmap()
2546 2548
2547 2549 fp = remote.stream_out()
2548 2550 l = fp.readline()
2549 2551 try:
2550 2552 resp = int(l)
2551 2553 except ValueError:
2552 2554 raise error.ResponseError(
2553 2555 _('unexpected response from remote server:'), l)
2554 2556 if resp == 1:
2555 2557 raise util.Abort(_('operation forbidden by server'))
2556 2558 elif resp == 2:
2557 2559 raise util.Abort(_('locking the remote repository failed'))
2558 2560 elif resp != 0:
2559 2561 raise util.Abort(_('the server sent an unknown error code'))
2560 2562 self.ui.status(_('streaming all changes\n'))
2561 2563 l = fp.readline()
2562 2564 try:
2563 2565 total_files, total_bytes = map(int, l.split(' ', 1))
2564 2566 except (ValueError, TypeError):
2565 2567 raise error.ResponseError(
2566 2568 _('unexpected response from remote server:'), l)
2567 2569 self.ui.status(_('%d files to transfer, %s of data\n') %
2568 2570 (total_files, util.bytecount(total_bytes)))
2569 2571 handled_bytes = 0
2570 2572 self.ui.progress(_('clone'), 0, total=total_bytes)
2571 2573 start = time.time()
2572 2574 for i in xrange(total_files):
2573 2575 # XXX doesn't support '\n' or '\r' in filenames
2574 2576 l = fp.readline()
2575 2577 try:
2576 2578 name, size = l.split('\0', 1)
2577 2579 size = int(size)
2578 2580 except (ValueError, TypeError):
2579 2581 raise error.ResponseError(
2580 2582 _('unexpected response from remote server:'), l)
2581 2583 if self.ui.debugflag:
2582 2584 self.ui.debug('adding %s (%s)\n' %
2583 2585 (name, util.bytecount(size)))
2584 2586 # for backwards compat, name was partially encoded
2585 2587 ofp = self.sopener(store.decodedir(name), 'w')
2586 2588 for chunk in util.filechunkiter(fp, limit=size):
2587 2589 handled_bytes += len(chunk)
2588 2590 self.ui.progress(_('clone'), handled_bytes,
2589 2591 total=total_bytes)
2590 2592 ofp.write(chunk)
2591 2593 ofp.close()
2592 2594 elapsed = time.time() - start
2593 2595 if elapsed <= 0:
2594 2596 elapsed = 0.001
2595 2597 self.ui.progress(_('clone'), None)
2596 2598 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2597 2599 (util.bytecount(total_bytes), elapsed,
2598 2600 util.bytecount(total_bytes / elapsed)))
2599 2601
2600 2602 # new requirements = old non-format requirements +
2601 2603 # new format-related
2602 2604 # requirements from the streamed-in repository
2603 2605 requirements.update(set(self.requirements) - self.supportedformats)
2604 2606 self._applyrequirements(requirements)
2605 2607 self._writerequirements()
2606 2608
2607 2609 if rbranchmap:
2608 2610 rbheads = []
2609 2611 for bheads in rbranchmap.itervalues():
2610 2612 rbheads.extend(bheads)
2611 2613
2612 2614 self.branchcache = rbranchmap
2613 2615 if rbheads:
2614 2616 rtiprev = max((int(self.changelog.rev(node))
2615 2617 for node in rbheads))
2616 2618 self._writebranchcache(self.branchcache,
2617 2619 self[rtiprev].node(), rtiprev)
2618 2620 self.invalidate()
2619 2621 return len(self.heads()) + 1
2620 2622 finally:
2621 2623 lock.release()
2622 2624
2623 2625 def clone(self, remote, heads=[], stream=False):
2624 2626 '''clone remote repository.
2625 2627
2626 2628 keyword arguments:
2627 2629 heads: list of revs to clone (forces use of pull)
2628 2630 stream: use streaming clone if possible'''
2629 2631
2630 2632 # now, all clients that can request uncompressed clones can
2631 2633 # read repo formats supported by all servers that can serve
2632 2634 # them.
2633 2635
2634 2636 # if revlog format changes, client will have to check version
2635 2637 # and format flags on "stream" capability, and use
2636 2638 # uncompressed only if compatible.
2637 2639
2638 2640 if not stream:
2639 2641 # if the server explicitly prefers to stream (for fast LANs)
2640 2642 stream = remote.capable('stream-preferred')
2641 2643
2642 2644 if stream and not heads:
2643 2645 # 'stream' means remote revlog format is revlogv1 only
2644 2646 if remote.capable('stream'):
2645 2647 return self.stream_in(remote, set(('revlogv1',)))
2646 2648 # otherwise, 'streamreqs' contains the remote revlog format
2647 2649 streamreqs = remote.capable('streamreqs')
2648 2650 if streamreqs:
2649 2651 streamreqs = set(streamreqs.split(','))
2650 2652 # if we support it, stream in and adjust our requirements
2651 2653 if not streamreqs - self.supportedformats:
2652 2654 return self.stream_in(remote, streamreqs)
2653 2655 return self.pull(remote, heads)
2654 2656
2655 2657 def pushkey(self, namespace, key, old, new):
2656 2658 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2657 2659 old=old, new=new)
2658 2660 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2659 2661 ret = pushkey.push(self, namespace, key, old, new)
2660 2662 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2661 2663 ret=ret)
2662 2664 return ret
2663 2665
2664 2666 def listkeys(self, namespace):
2665 2667 self.hook('prelistkeys', throw=True, namespace=namespace)
2666 2668 self.ui.debug('listing keys for "%s"\n' % namespace)
2667 2669 values = pushkey.list(self, namespace)
2668 2670 self.hook('listkeys', namespace=namespace, values=values)
2669 2671 return values
2670 2672
2671 2673 def debugwireargs(self, one, two, three=None, four=None, five=None):
2672 2674 '''used to test argument passing over the wire'''
2673 2675 return "%s %s %s %s %s" % (one, two, three, four, five)
2674 2676
2675 2677 def savecommitmessage(self, text):
2676 2678 fp = self.opener('last-message.txt', 'wb')
2677 2679 try:
2678 2680 fp.write(text)
2679 2681 finally:
2680 2682 fp.close()
2681 2683 return self.pathto(fp.name[len(self.root) + 1:])
2682 2684
2683 2685 # used to avoid circular references so destructors work
2684 2686 def aftertrans(files):
2685 2687 renamefiles = [tuple(t) for t in files]
2686 2688 def a():
2687 2689 for src, dest in renamefiles:
2688 2690 try:
2689 2691 util.rename(src, dest)
2690 2692 except OSError: # journal file does not yet exist
2691 2693 pass
2692 2694 return a
2693 2695
2694 2696 def undoname(fn):
2695 2697 base, name = os.path.split(fn)
2696 2698 assert name.startswith('journal')
2697 2699 return os.path.join(base, name.replace('journal', 'undo', 1))
2698 2700
2699 2701 def instance(ui, path, create):
2700 2702 return localrepository(ui, util.urllocalpath(path), create)
2701 2703
2702 2704 def islocal(path):
2703 2705 return True
@@ -1,402 +1,399 b''
1 1 """ Mercurial phases support code
2 2
3 3 ---
4 4
5 5 Copyright 2011 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
6 6 Logilab SA <contact@logilab.fr>
7 7 Augie Fackler <durin42@gmail.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License version 2 or any later version.
11 11
12 12 ---
13 13
14 14 This module implements most phase logic in mercurial.
15 15
16 16
17 17 Basic Concept
18 18 =============
19 19
20 20 A 'changeset phase' is an indicator that tells us how a changeset is
21 21 manipulated and communicated. The details of each phase is described
22 22 below, here we describe the properties they have in common.
23 23
24 24 Like bookmarks, phases are not stored in history and thus are not
25 25 permanent and leave no audit trail.
26 26
27 27 First, no changeset can be in two phases at once. Phases are ordered,
28 28 so they can be considered from lowest to highest. The default, lowest
29 29 phase is 'public' - this is the normal phase of existing changesets. A
30 30 child changeset can not be in a lower phase than its parents.
31 31
32 32 These phases share a hierarchy of traits:
33 33
34 34 immutable shared
35 35 public: X X
36 36 draft: X
37 37 secret:
38 38
39 39 Local commits are draft by default.
40 40
41 41 Phase Movement and Exchange
42 42 ===========================
43 43
44 44 Phase data is exchanged by pushkey on pull and push. Some servers have
45 45 a publish option set; we call such a server a "publishing server".
46 46 Pushing a draft changeset to a publishing server changes the phase to
47 47 public.
48 48
49 49 A small list of facts/rules defines the exchange of phases:
50 50
51 51 * old client never changes server states
52 52 * pull never changes server states
53 53 * changesets on publishing and old servers are seen as public by the client
54 54 * any secret changeset seen in another repository is lowered to at
55 55 least draft
56 56
57 57 Here is the final table summing up the 49 possible use cases of phase
58 58 exchange:
59 59
60 60 server
61 61 old publish non-publish
62 62 N X N D P N D P
63 63 old client
64 64 pull
65 65 N - X/X - X/D X/P - X/D X/P
66 66 X - X/X - X/D X/P - X/D X/P
67 67 push
68 68 X X/X X/X X/P X/P X/P X/D X/D X/P
69 69 new client
70 70 pull
71 71 N - P/X - P/D P/P - D/D P/P
72 72 D - P/X - P/D P/P - D/D P/P
73 73 P - P/X - P/D P/P - P/D P/P
74 74 push
75 75 D P/X P/X P/P P/P P/P D/D D/D P/P
76 76 P P/X P/X P/P P/P P/P P/P P/P P/P
77 77
78 78 Legend:
79 79
80 80 A/B = final state on client / state on server
81 81
82 82 * N = new/not present,
83 83 * P = public,
84 84 * D = draft,
85 85 * X = not tracked (i.e., the old client or server has no internal
86 86 way of recording the phase.)
87 87
88 88 passive = only pushes
89 89
90 90
91 91 A cell here can be read like this:
92 92
93 93 "When a new client pushes a draft changeset (D) to a publishing
94 94 server where it's not present (N), it's marked public on both
95 95 sides (P/P)."
96 96
97 97 Note: old clients behave as a publishing server with draft-only content
98 98 - other people see it as public
99 99 - content is pushed as draft
100 100
101 101 """
102 102
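The ordering invariant stated in the docstring above ("a child changeset can not be in a lower phase than its parents") can be captured in a two-line check; this is an editor's illustration only, with hypothetical names:

    PUBLIC, DRAFT, SECRET = range(3)   # lowest (most public) to highest

    def legal_child_phase(parent_phase, child_phase):
        return child_phase >= parent_phase

    assert legal_child_phase(PUBLIC, DRAFT)        # draft child of a public parent: ok
    assert not legal_child_phase(DRAFT, PUBLIC)    # public child of a draft parent: not ok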
103 103 import errno
104 104 from node import nullid, nullrev, bin, hex, short
105 105 from i18n import _
106 106 import util, error
107 import obsolete
108 107
109 108 allphases = public, draft, secret = range(3)
110 109 trackedphases = allphases[1:]
111 110 phasenames = ['public', 'draft', 'secret']
112 111
113 112 def _filterunknown(ui, changelog, phaseroots):
114 113 """remove unknown nodes from the phase boundary
115 114
116 115 Nothing is lost as unknown nodes only hold data for their descendants.
117 116 """
118 117 updated = False
119 118 nodemap = changelog.nodemap # to filter unknown nodes
120 119 for phase, nodes in enumerate(phaseroots):
121 120 missing = [node for node in nodes if node not in nodemap]
122 121 if missing:
123 122 for mnode in missing:
124 123 ui.debug(
125 124 'removing unknown node %s from %i-phase boundary\n'
126 125 % (short(mnode), phase))
127 126 nodes.symmetric_difference_update(missing)
128 127 updated = True
129 128 return updated
130 129
131 130 def _readroots(repo, phasedefaults=None):
132 131 """Read phase roots from disk
133 132
134 133 phasedefaults is a list of fn(repo, roots) callables, which are
135 134 executed if the phase roots file does not exist. When phases are
136 135 being initialized on an existing repository, this could be used to
137 136 set selected changesets' phase to something other than public.
138 137
139 138 Return (roots, dirty) where dirty is true if roots differ from
140 139 what is being stored.
141 140 """
142 141 repo = repo.unfiltered()
143 142 dirty = False
144 143 roots = [set() for i in allphases]
145 144 try:
146 145 f = repo.sopener('phaseroots')
147 146 try:
148 147 for line in f:
149 148 phase, nh = line.split()
150 149 roots[int(phase)].add(bin(nh))
151 150 finally:
152 151 f.close()
153 152 except IOError, inst:
154 153 if inst.errno != errno.ENOENT:
155 154 raise
156 155 if phasedefaults:
157 156 for f in phasedefaults:
158 157 roots = f(repo, roots)
159 158 dirty = True
160 159 if _filterunknown(repo.ui, repo.changelog, roots):
161 160 dirty = True
162 161 return roots, dirty
163 162
164 163 class phasecache(object):
165 164 def __init__(self, repo, phasedefaults, _load=True):
166 165 if _load:
167 166 # Cheap trick to allow shallow-copy without copy module
168 167 self.phaseroots, self.dirty = _readroots(repo, phasedefaults)
169 168 self.opener = repo.sopener
170 169 self._phaserevs = None
171 170
172 171 def copy(self):
173 172 # Shallow copy meant to ensure isolation in
174 173 # advance/retractboundary(), nothing more.
175 174 ph = phasecache(None, None, _load=False)
176 175 ph.phaseroots = self.phaseroots[:]
177 176 ph.dirty = self.dirty
178 177 ph.opener = self.opener
179 178 ph._phaserevs = self._phaserevs
180 179 return ph
181 180
182 181 def replace(self, phcache):
183 182 for a in 'phaseroots dirty opener _phaserevs'.split():
184 183 setattr(self, a, getattr(phcache, a))
185 184
186 185 def getphaserevs(self, repo, rebuild=False):
187 186 if rebuild or self._phaserevs is None:
188 187 repo = repo.unfiltered()
189 188 revs = [public] * len(repo.changelog)
190 189 for phase in trackedphases:
191 190 roots = map(repo.changelog.rev, self.phaseroots[phase])
192 191 if roots:
193 192 for rev in roots:
194 193 revs[rev] = phase
195 194 for rev in repo.changelog.descendants(roots):
196 195 revs[rev] = phase
197 196 self._phaserevs = revs
198 197 return self._phaserevs
199 198
200 199 def phase(self, repo, rev):
201 200 # We need a repo argument here to be able to build _phaserevs
202 201 # if necessary. The repository instance is not stored in
203 202 # phasecache to avoid reference cycles. The changelog instance
204 203 # is not stored because it is a filecache() property and can
205 204 # be replaced without us being notified.
206 205 if rev == nullrev:
207 206 return public
208 207 if self._phaserevs is None or rev >= len(self._phaserevs):
209 208 self._phaserevs = self.getphaserevs(repo, rebuild=True)
210 209 return self._phaserevs[rev]
211 210
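getphaserevs() above builds a per-revision phase table: every revision defaults to public, then each tracked phase stamps its roots and all of their descendants. A standalone, hedged sketch of that idea, using an explicit children map in place of changelog.descendants() (names and structure are assumptions for illustration):

    PUBLIC, DRAFT, SECRET = range(3)

    def compute_phases(nrevs, roots_by_phase, children):
        revs = [PUBLIC] * nrevs
        for phase in (DRAFT, SECRET):          # lowest tracked phase first
            stack = list(roots_by_phase.get(phase, ()))
            while stack:
                rev = stack.pop()
                revs[rev] = phase              # later (higher) phases override
                stack.extend(children.get(rev, ()))
        return revs

    # linear history 0 -> 1 -> 2 -> 3, draft root at 2, secret root at 3
    assert compute_phases(4, {DRAFT: [2], SECRET: [3]},
                          {0: [1], 1: [2], 2: [3]}) == [PUBLIC, PUBLIC, DRAFT, SECRET]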
212 211 def write(self):
213 212 if not self.dirty:
214 213 return
215 214 f = self.opener('phaseroots', 'w', atomictemp=True)
216 215 try:
217 216 for phase, roots in enumerate(self.phaseroots):
218 217 for h in roots:
219 218 f.write('%i %s\n' % (phase, hex(h)))
220 219 finally:
221 220 f.close()
222 221 self.dirty = False
223 222
224 223 def _updateroots(self, phase, newroots):
225 224 self.phaseroots[phase] = newroots
226 225 self._phaserevs = None
227 226 self.dirty = True
228 227
229 228 def advanceboundary(self, repo, targetphase, nodes):
230 229 # Be careful to preserve shallow-copied values: do not update
231 230 # phaseroots values, replace them.
232 231
233 232 repo = repo.unfiltered()
234 233 delroots = [] # list of roots deleted by this path
235 234 for phase in xrange(targetphase + 1, len(allphases)):
236 235 # filter nodes that are not in a compatible phase already
237 236 nodes = [n for n in nodes
238 237 if self.phase(repo, repo[n].rev()) >= phase]
239 238 if not nodes:
240 239 break # no roots to move anymore
241 240 olds = self.phaseroots[phase]
242 241 roots = set(ctx.node() for ctx in repo.set(
243 242 'roots((%ln::) - (%ln::%ln))', olds, olds, nodes))
244 243 if olds != roots:
245 244 self._updateroots(phase, roots)
246 245 # some roots may need to be declared for lower phases
247 246 delroots.extend(olds - roots)
248 247 # declare deleted root in the target phase
249 248 if targetphase != 0:
250 249 self.retractboundary(repo, targetphase, delroots)
251 obsolete.clearobscaches(repo)
252 repo.filteredrevcache.clear()
250 repo.invalidatevolatilesets()
253 251
254 252 def retractboundary(self, repo, targetphase, nodes):
255 253 # Be careful to preserve shallow-copied values: do not update
256 254 # phaseroots values, replace them.
257 255
258 256 repo = repo.unfiltered()
259 257 currentroots = self.phaseroots[targetphase]
260 258 newroots = [n for n in nodes
261 259 if self.phase(repo, repo[n].rev()) < targetphase]
262 260 if newroots:
263 261 if nullid in newroots:
264 262 raise util.Abort(_('cannot change null revision phase'))
265 263 currentroots = currentroots.copy()
266 264 currentroots.update(newroots)
267 265 ctxs = repo.set('roots(%ln::)', currentroots)
268 266 currentroots.intersection_update(ctx.node() for ctx in ctxs)
269 267 self._updateroots(targetphase, currentroots)
270 obsolete.clearobscaches(repo)
271 repo.filteredrevcache.clear()
268 repo.invalidatevolatilesets()
272 269
273 270 def advanceboundary(repo, targetphase, nodes):
274 271 """Add nodes to a phase, changing other nodes' phases if necessary.
275 272
276 273 This function moves the boundary *forward*: all nodes
277 274 are set in the target phase or kept in a *lower* phase.
278 275
279 276 Simplify the boundary to contain phase roots only."""
280 277 phcache = repo._phasecache.copy()
281 278 phcache.advanceboundary(repo, targetphase, nodes)
282 279 repo._phasecache.replace(phcache)
283 280
284 281 def retractboundary(repo, targetphase, nodes):
285 282 """Set nodes back to a phase, changing other nodes' phases if
286 283 necessary.
287 284
288 285 This function moves the boundary *backward*: all nodes
289 286 are set in the target phase or kept in a *higher* phase.
290 287
291 288 Simplify the boundary to contain phase roots only."""
292 289 phcache = repo._phasecache.copy()
293 290 phcache.retractboundary(repo, targetphase, nodes)
294 291 repo._phasecache.replace(phcache)
295 292
296 293 def listphases(repo):
297 294 """List phase roots for serialization over pushkey"""
298 295 keys = {}
299 296 value = '%i' % draft
300 297 for root in repo._phasecache.phaseroots[draft]:
301 298 keys[hex(root)] = value
302 299
303 300 if repo.ui.configbool('phases', 'publish', True):
304 301 # Add extra data to let the remote know we are a publishing
305 302 # repo. Publishing repos can't just pretend they are old repos.
306 303 # When pushing to a publishing repo, the client still needs to
307 304 # push the phase boundary
308 305 #
309 306 # Push does not only push changesets. It also pushes phase data.
310 307 # New phase data may apply to common changesets which won't be
311 308 # pushed (as they are common). Here is a very simple example:
312 309 #
313 310 # 1) repo A pushes changeset X as draft to repo B
314 311 # 2) repo B makes changeset X public
315 312 # 3) repo B pushes to repo A. X is not pushed but the data that
316 313 # X is now public should be
317 314 #
318 315 # The server can't handle it on its own as it has no idea of
319 316 # client phase data.
320 317 keys['publishing'] = 'True'
321 318 return keys
322 319
323 320 def pushphase(repo, nhex, oldphasestr, newphasestr):
324 321 """Advance the phase of a node, as requested over pushkey"""
325 322 repo = repo.unfiltered()
326 323 lock = repo.lock()
327 324 try:
328 325 currentphase = repo[nhex].phase()
329 326 newphase = abs(int(newphasestr)) # let's avoid negative index surprise
330 327 oldphase = abs(int(oldphasestr)) # let's avoid negative index surprise
331 328 if currentphase == oldphase and newphase < oldphase:
332 329 advanceboundary(repo, newphase, [bin(nhex)])
333 330 return 1
334 331 elif currentphase == newphase:
335 332 # raced, but got correct result
336 333 return 1
337 334 else:
338 335 return 0
339 336 finally:
340 337 lock.release()
341 338
342 339 def analyzeremotephases(repo, subset, roots):
343 340 """Compute phase heads and roots in a subset of nodes from a root dict
344 341
345 342 * subset is heads of the subset
346 343 * roots is {<nodeid> => phase} mapping. key and value are string.
347 344
348 345 Accepts unknown elements in the input
349 346 """
350 347 repo = repo.unfiltered()
351 348 # build list from dictionary
352 349 draftroots = []
353 350 nodemap = repo.changelog.nodemap # to filter unknown nodes
354 351 for nhex, phase in roots.iteritems():
355 352 if nhex == 'publishing': # ignore data related to publish option
356 353 continue
357 354 node = bin(nhex)
358 355 phase = int(phase)
359 356 if phase == 0:
360 357 if node != nullid:
361 358 repo.ui.warn(_('ignoring inconsistent public root'
362 359 ' from remote: %s\n') % nhex)
363 360 elif phase == 1:
364 361 if node in nodemap:
365 362 draftroots.append(node)
366 363 else:
367 364 repo.ui.warn(_('ignoring unexpected root from remote: %i %s\n')
368 365 % (phase, nhex))
369 366 # compute heads
370 367 publicheads = newheads(repo, subset, draftroots)
371 368 return publicheads, draftroots
372 369
373 370 def newheads(repo, heads, roots):
374 371 """compute the new heads of a subset minus another
375 372
376 373 * `heads`: defines the first subset
377 374 * `roots`: defines the second, which we subtract from the first"""
378 375 repo = repo.unfiltered()
379 376 revset = repo.set('heads((%ln + parents(%ln)) - (%ln::%ln))',
380 377 heads, roots, roots, heads)
381 378 return [c.node() for c in revset]
382 379
383 380
384 381 def newcommitphase(ui):
385 382 """helper to get the target phase of a new commit
386 383
387 384 Handles all possible values for the phases.new-commit option.
388 385
389 386 """
390 387 v = ui.config('phases', 'new-commit', draft)
391 388 try:
392 389 return phasenames.index(v)
393 390 except ValueError:
394 391 try:
395 392 return int(v)
396 393 except ValueError:
397 394 msg = _("phases.new-commit: not a valid phase name ('%s')")
398 395 raise error.ConfigError(msg % v)
399 396
400 397 def hassecret(repo):
401 398 """utility function that checks if a repo has any secret changesets."""
402 399 return bool(repo._phasecache.phaseroots[2])