##// END OF EJS Templates
push: make locking of source optional (issue3684)...
Pierre-Yves David -
r19097:3f5e75c2 stable
parent child Browse files
Show More
@@ -1,2587 +1,2608
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete, repoview
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 import branchmap
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # always read/write/delete the cached value on the unfiltered repo
        unfi = repo.unfiltered()
        return super(repofilecache, self).__get__(unfi, type)

    def __set__(self, repo, value):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__set__(unfi, value)

    def __delete__(self, repo):
        unfi = repo.unfiltered()
        return super(repofilecache, self).__delete__(unfi)
32 32
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # store files live under .hg/store, so join via the store path
        return obj.sjoin(fname)
37 37
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        # compute and cache the value on the unfiltered repo, never a view
        return super(unfilteredpropertycache, self).__get__(repo.unfiltered())
43 43
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store directly on the (possibly filtered) proxy object so each
        # filtered view keeps its own cached value
        object.__setattr__(obj, self.name, value)
49 49
50 50
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
54 54
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    # preserve the wrapped function's identity: the bare wrapper used to
    # shadow the original's name and docstring in tracebacks and help()
    wrapper.__name__ = orig.__name__
    wrapper.__doc__ = orig.__doc__
    return wrapper
60 60
# wire-protocol capabilities advertised by a modern local peer
MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
# legacy peers additionally support the pre-getbundle changegroupsubset call
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
63 63
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        # NOTE(review): caps defaults to the shared MODERNCAPS set;
        # callers must not mutate the default
        peer.peerrepository.__init__(self)
        # expose only the 'served' view of the repo (hides e.g. secret
        # changesets from peers)
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # local peers expose the underlying repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
123 123
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    # the four methods below forward pre-getbundle protocol calls to the
    # underlying repository

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)
142 142
143 143 class localrepository(object):
144 144
    # revlog formats this class knows how to read/write
    supportedformats = set(('revlogv1', 'generaldelta'))
    # full set of supported repository requirements
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    # requirements that translate into revlog opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    # default requirements for newly created repositories
    requirements = ['revlogv1']
    # None on the unfiltered repo; repoview subclasses override this
    filtername = None
151 151
152 152 def _baserequirements(self, create):
153 153 return self.requirements[:]
154 154
    def __init__(self, baseui, path=None, create=False):
        '''Open (or, with create=True, initialize) the repository at *path*.

        Raises error.RepoError when the repository is missing (without
        create) or already exists (with create).
        '''
        # working directory vfs and legacy alias
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        # .hg/ vfs and legacy alias
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no .hg/hgrc: run with the base configuration
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    # fncache/dotencode only make sense on top of the store
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: treat as an old, requirement-less repo
                requirements = set()

        # honor .hg/sharedpath indirection (shared repositories)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        # weakrefs to the live transaction/lock/wlock, if any
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}
253 253
    def close(self):
        # nothing to release for a local repository; part of the peer API
        pass
256 256
    def _restrictcapabilities(self, caps):
        # hook point for subclasses/extensions to trim advertised capabilities
        return caps
259 259
260 260 def _applyrequirements(self, requirements):
261 261 self.requirements = requirements
262 262 self.sopener.options = dict((r, 1) for r in requirements
263 263 if r in self.openerreqs)
264 264
265 265 def _writerequirements(self):
266 266 reqfile = self.opener("requires", "w")
267 267 for r in sorted(self.requirements):
268 268 reqfile.write("%s\n" % r)
269 269 reqfile.close()
270 270
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # path is itself a registered subrepo: legal
                    return True
                else:
                    # path is inside a subrepo: delegate the decision to it
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # walk up one directory level and try again
                parts.pop()
        return False
308 308
    def peer(self):
        '''return a fresh localpeer wrapping this repository'''
        return localpeer(self) # not cached to avoid reference cycle
311 311
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
317 317
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
325 325
    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node mapping, invalidated when .hg/bookmarks changes
        return bookmarks.bmstore(self)
329 329
    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, or None
        return bookmarks.readcurrent(self)
333 333
334 334 def bookmarkheads(self, bookmark):
335 335 name = bookmark.split('@', 1)[0]
336 336 heads = []
337 337 for mark, n in self._bookmarks.iteritems():
338 338 if mark.split('@', 1)[0] == name:
339 339 heads.append(n)
340 340 return heads
341 341
    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, invalidated when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)
345 345
    @storecache('obsstore')
    def obsstore(self):
        '''obsolescence marker store for this repository'''
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store
354 354
    @storecache('00changelog.i')
    def changelog(self):
        '''the changelog revlog, including pending changes when running
        inside a pretxn* hook (HG_PENDING set for this repo)'''
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
363 363
    @storecache('00manifest.i')
    def manifest(self):
        '''the manifest revlog'''
        return manifest.manifest(self.sopener)
367 367
    @repofilecache('dirstate')
    def dirstate(self):
        '''the working directory state, with parent validation'''
        # warn at most once about unknown working parents
        warned = [0]
        def validate(node):
            # map dirstate parents that are missing from the changelog to
            # nullid instead of crashing
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
383 383
    def __getitem__(self, changeid):
        '''return the context for *changeid* (None means working directory)'''
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)
388 388
    def __contains__(self, changeid):
        '''True if *changeid* resolves to a changeset in this repo'''
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False
394 394
    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True
397 397
    def __len__(self):
        '''number of revisions visible through this repo (view)'''
        return len(self.changelog)
400 400
    def __iter__(self):
        '''iterate over visible revision numbers'''
        return iter(self.changelog)
403 403
404 404 def revs(self, expr, *args):
405 405 '''Return a list of revisions matching the given revset'''
406 406 expr = revset.formatspec(expr, *args)
407 407 m = revset.match(None, expr)
408 408 return [r for r in m(self, list(self))]
409 409
    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]
417 417
    def url(self):
        '''URL form of this repository's location (file: scheme)'''
        return 'file:' + self.root
420 420
    def hook(self, name, throw=False, **args):
        '''run the named hook; with throw=True an abort propagates'''
        return hook.hook(self.ui, self, name, throw, **args)
423 423
424 424 @unfilteredmethod
425 425 def _tag(self, names, node, message, local, user, date, extra={}):
426 426 if isinstance(names, str):
427 427 names = (names,)
428 428
429 429 branches = self.branchmap()
430 430 for name in names:
431 431 self.hook('pretag', throw=True, node=hex(node), tag=name,
432 432 local=local)
433 433 if name in branches:
434 434 self.ui.warn(_("warning: tag %s conflicts with existing"
435 435 " branch name\n") % name)
436 436
437 437 def writetags(fp, names, munge, prevtags):
438 438 fp.seek(0, 2)
439 439 if prevtags and prevtags[-1] != '\n':
440 440 fp.write('\n')
441 441 for name in names:
442 442 m = munge and munge(name) or name
443 443 if (self._tagscache.tagtypes and
444 444 name in self._tagscache.tagtypes):
445 445 old = self.tags().get(name, nullid)
446 446 fp.write('%s %s\n' % (hex(old), m))
447 447 fp.write('%s %s\n' % (hex(node), m))
448 448 fp.close()
449 449
450 450 prevtags = ''
451 451 if local:
452 452 try:
453 453 fp = self.opener('localtags', 'r+')
454 454 except IOError:
455 455 fp = self.opener('localtags', 'a')
456 456 else:
457 457 prevtags = fp.read()
458 458
459 459 # local tags are stored in the current charset
460 460 writetags(fp, names, None, prevtags)
461 461 for name in names:
462 462 self.hook('tag', node=hex(node), tag=name, local=local)
463 463 return
464 464
465 465 try:
466 466 fp = self.wfile('.hgtags', 'rb+')
467 467 except IOError, e:
468 468 if e.errno != errno.ENOENT:
469 469 raise
470 470 fp = self.wfile('.hgtags', 'ab')
471 471 else:
472 472 prevtags = fp.read()
473 473
474 474 # committed tags are stored in UTF-8
475 475 writetags(fp, names, encoding.fromlocal, prevtags)
476 476
477 477 fp.close()
478 478
479 479 self.invalidatecaches()
480 480
481 481 if '.hgtags' not in self.dirstate:
482 482 self[None].add(['.hgtags'])
483 483
484 484 m = matchmod.exact(self.root, '', ['.hgtags'])
485 485 tagnode = self.commit(message, user, date, extra=extra, match=m)
486 486
487 487 for name in names:
488 488 self.hook('tag', node=hex(node), tag=name, local=local)
489 489
490 490 return tagnode
491 491
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag when .hgtags has uncommitted changes anywhere
            # in the working copy status (modified/added/removed/...)
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
521 521
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # lazily filled by tagslist() and nodetags()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
544 544
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # on a filtered view the cached tags may reference hidden
            # nodes, so recompute rather than using _tagscache
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
560 560
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        # 'tip' is implicit and always present
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
593 593
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
604 604
605 605 def tagslist(self):
606 606 '''return a list of tags ordered by revision'''
607 607 if not self._tagscache.tagslist:
608 608 l = []
609 609 for t, n in self.tags().iteritems():
610 610 r = self.changelog.rev(n)
611 611 l.append((r, t, n))
612 612 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
613 613
614 614 return self._tagscache.tagslist
615 615
616 616 def nodetags(self, node):
617 617 '''return the tags associated with a node'''
618 618 if not self._tagscache.nodetagscache:
619 619 nodetagscache = {}
620 620 for t, n in self._tagscache.tags.iteritems():
621 621 nodetagscache.setdefault(n, []).append(t)
622 622 for tags in nodetagscache.itervalues():
623 623 tags.sort()
624 624 self._tagscache.nodetagscache = nodetagscache
625 625 return self._tagscache.nodetagscache.get(node, [])
626 626
627 627 def nodebookmarks(self, node):
628 628 marks = []
629 629 for bookmark, n in self._bookmarks.iteritems():
630 630 if n == node:
631 631 marks.append(bookmark)
632 632 return sorted(marks)
633 633
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        # refresh the per-filter cache, then serve this view's entry
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
638 638
639 639
640 640 def _branchtip(self, heads):
641 641 '''return the tipmost branch head in heads'''
642 642 tip = heads[-1]
643 643 for h in reversed(heads):
644 644 if not self[h].closesbranch():
645 645 tip = h
646 646 break
647 647 return tip
648 648
    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])
654 654
655 655 def branchtags(self):
656 656 '''return a dict where branch names map to the tipmost head of
657 657 the branch, open heads come before closed'''
658 658 bt = {}
659 659 for bn, heads in self.branchmap().iteritems():
660 660 bt[bn] = self._branchtip(heads)
661 661 return bt
662 662
    def lookup(self, key):
        '''resolve *key* (rev, node, tag, bookmark, ...) to a node'''
        return self[key].node()
665 665
666 666 def lookupbranch(self, key, remote=None):
667 667 repo = remote or self
668 668 if key in repo.branchmap():
669 669 return key
670 670
671 671 repo = (remote and remote.local()) and remote or self
672 672 return repo[key].branch()
673 673
674 674 def known(self, nodes):
675 675 nm = self.changelog.nodemap
676 676 pc = self._phasecache
677 677 result = []
678 678 for n in nodes:
679 679 r = nm.get(n)
680 680 resp = not (r is None or pc.phase(self, r) >= phases.secret)
681 681 result.append(resp)
682 682 return result
683 683
    def local(self):
        '''part of the peer API: a local repo returns itself'''
        return self
686 686
    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works
689 689
    def join(self, f):
        '''join *f* onto the .hg directory path'''
        return os.path.join(self.path, f)
692 692
    def wjoin(self, f):
        '''join *f* onto the working directory root'''
        return os.path.join(self.root, f)
695 695
    def file(self, f):
        '''return the filelog for tracked file *f* (leading '/' stripped)'''
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
700 700
    def changectx(self, changeid):
        '''legacy alias for repo[changeid]'''
        return self[changeid]
703 703
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
707 707
    def setparents(self, p1, p2=nullid):
        '''set the working directory parents, fixing up copy records'''
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # drop copy records whose source and dest both exist in p1;
            # they no longer describe an add-with-copy
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
722 722
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
727 727
    def getcwd(self):
        '''current working directory, relative to the repo root'''
        return self.dirstate.getcwd()
730 730
    def pathto(self, f, cwd=None):
        '''return repo-relative path *f* expressed relative to *cwd*'''
        return self.dirstate.pathto(f, cwd)
733 733
    def wfile(self, f, mode='r'):
        '''open file *f* from the working directory'''
        return self.wopener(f, mode)
736 736
    def _link(self, f):
        '''True if working directory file *f* is a symlink'''
        return self.wvfs.islink(f)
739 739
740 740 def _loadfilter(self, filter):
741 741 if filter not in self.filterpats:
742 742 l = []
743 743 for pat, cmd in self.ui.configitems(filter):
744 744 if cmd == '!':
745 745 continue
746 746 mf = matchmod.match(self.root, '', [pat])
747 747 fn = None
748 748 params = cmd
749 749 for name, filterfn in self._datafilters.iteritems():
750 750 if cmd.startswith(name):
751 751 fn = filterfn
752 752 params = cmd[len(name):].lstrip()
753 753 break
754 754 if not fn:
755 755 fn = lambda s, c, **kwargs: util.filter(s, c)
756 756 # Wrap old filters not supporting keyword arguments
757 757 if not inspect.getargspec(fn)[2]:
758 758 oldfn = fn
759 759 fn = lambda s, c, **kwargs: oldfn(s, c)
760 760 l.append((mf, fn, params))
761 761 self.filterpats[filter] = l
762 762 return self.filterpats[filter]
763 763
764 764 def _filter(self, filterpats, filename, data):
765 765 for mf, fn, cmd in filterpats:
766 766 if mf(filename):
767 767 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
768 768 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
769 769 break
770 770
771 771 return data
772 772
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # filters applied when reading working directory files (wread)
        return self._loadfilter('encode')
776 776
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # filters applied when writing working directory files (wwrite)
        return self._loadfilter('decode')
780 780
    def adddatafilter(self, name, filter):
        '''register an in-process data filter under *name*'''
        self._datafilters[name] = filter
783 783
784 784 def wread(self, filename):
785 785 if self._link(filename):
786 786 data = self.wvfs.readlink(filename)
787 787 else:
788 788 data = self.wopener.read(filename)
789 789 return self._filter(self._encodefilterpats, filename, data)
790 790
    def wwrite(self, filename, data, flags):
        '''write *data* to the working directory, applying decode filters;
        flags 'l' creates a symlink, 'x' sets the executable bit'''
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
799 799
    def wwritedata(self, filename, data):
        '''apply decode filters to *data* without touching the disk'''
        return self._filter(self._decodefilterpats, filename, data)
802 802
    def transaction(self, desc):
        '''open a new transaction (or nest into the running one)'''
        # dereference the weakref to the live transaction, if any
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        # keep only a weakref so an abandoned transaction can be collected
        self._transref = weakref.ref(tr)
        return tr
822 822
    def _journalfiles(self):
        '''(vfs, name) pairs of all journal files backed up for rollback'''
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
830 830
831 831 def undofiles(self):
832 832 return [vfs.join(undoname(x)) for vfs, x in self._journalfiles()]
833 833
    def _writejournal(self, desc):
        '''snapshot rollback state (dirstate, branch, bookmarks, phases)
        into journal.* files before a transaction starts'''
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # record the pre-transaction length plus the description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))
845 845
    def recover(self):
        '''roll back an interrupted transaction; return True if one was
        found and rolled back'''
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
860 860
    def rollback(self, dryrun=False, force=False):
        '''undo the last transaction; returns 0 on success, 1 when there
        is nothing to roll back'''
        wlock = lock = None
        try:
            # take both locks: rollback touches store and working dir state
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
873 873
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        '''implementation of rollback(); both locks must be held'''
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        # rolling back a commit while checked out elsewhere could orphan
        # working directory changes; require -f in that case
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore dirstate/branch when a working dir parent was
        # removed by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
939 939
940 940 def invalidatecaches(self):
941 941
942 942 if '_tagscache' in vars(self):
943 943 # can't use delattr on proxy
944 944 del self.__dict__['_tagscache']
945 945
946 946 self.unfiltered()._branchcaches.clear()
947 947 self.invalidatevolatilesets()
948 948
    def invalidatevolatilesets(self):
        # Drop caches derived from volatile state: the per-filter revision
        # caches and any sets computed from obsolescence markers.
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
952 952
953 953 def invalidatedirstate(self):
954 954 '''Invalidates the dirstate, causing the next call to dirstate
955 955 to check if it was modified since the last time it was read,
956 956 rereading it if it has.
957 957
958 958 This is different to dirstate.invalidate() that it doesn't always
959 959 rereads the dirstate. Use dirstate.invalidate() if you want to
960 960 explicitly read the dirstate again (i.e. restoring it to a previous
961 961 known good state).'''
962 962 if hasunfilteredcache(self, 'dirstate'):
963 963 for k in self.dirstate._filecache:
964 964 try:
965 965 delattr(self.dirstate, k)
966 966 except AttributeError:
967 967 pass
968 968 delattr(self.unfiltered(), 'dirstate')
969 969
970 970 def invalidate(self):
971 971 unfiltered = self.unfiltered() # all file caches are stored unfiltered
972 972 for k in self._filecache:
973 973 # dirstate is invalidated separately in invalidatedirstate()
974 974 if k == 'dirstate':
975 975 continue
976 976
977 977 try:
978 978 delattr(unfiltered, k)
979 979 except AttributeError:
980 980 pass
981 981 self.invalidatecaches()
982 982
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* and return the lock object.

        releasefn -- called when the lock is released.
        acquirefn -- called right after the lock is acquired, if not None.
        desc      -- human-readable description used in warnings.

        With wait=False a held lock raises error.LockHeld immediately;
        otherwise we warn and retry for up to ui.timeout seconds
        (default 600).
        """
        try:
            # first attempt never blocks (timeout 0)
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
997 997
998 998 def _afterlock(self, callback):
999 999 """add a callback to the current repository lock.
1000 1000
1001 1001 The callback will be executed on lock release."""
1002 1002 l = self._lockref and self._lockref()
1003 1003 if l:
1004 1004 l.postrelease.append(callback)
1005 1005 else:
1006 1006 callback()
1007 1007
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # re-entrant: if this process already holds a live store lock,
        # bump its count and hand it back
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            # flush pending store/phase state and refresh the store file
            # caches when the last holder releases the lock
            self.store.write()
            if hasunfilteredcache(self, '_phasecache'):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        # only a weakref is kept: dropping all strong references releases
        # the lock via the lock object's finalization
        self._lockref = weakref.ref(l)
        return l
1030 1030
1031 1031 def wlock(self, wait=True):
1032 1032 '''Lock the non-store parts of the repository (everything under
1033 1033 .hg except .hg/store) and return a weak reference to the lock.
1034 1034 Use this before modifying files in .hg.'''
1035 1035 l = self._wlockref and self._wlockref()
1036 1036 if l is not None and l.held:
1037 1037 l.lock()
1038 1038 return l
1039 1039
1040 1040 def unlock():
1041 1041 self.dirstate.write()
1042 1042 self._filecache['dirstate'].refresh()
1043 1043
1044 1044 l = self._lock(self.join("wlock"), wait, unlock,
1045 1045 self.invalidatedirstate, _('working directory of %s') %
1046 1046 self.origroot)
1047 1047 self._wlockref = weakref.ref(l)
1048 1048 return l
1049 1049
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx       -- filectx providing path, data and rename metadata
        manifest1  -- manifest of the first parent
        manifest2  -- manifest of the second parent (empty dict-like if not
                      a merge)
        linkrev    -- changelog revision this filelog entry will link to
        tr         -- transaction (proxy) the write is part of
        changelist -- list the filename is appended to when content or
                      flags actually changed

        Returns the new filelog node, or the reused parent node when
        nothing but flags changed.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        # fparent2o keeps the original second parent so the flags-only
        # check at the bottom can tell whether parents were rearranged
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1129 1129
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing to
        commit.  Raises util.Abort on user errors (partial merge commit,
        unresolved conflicts, unmatched explicit files, ...).

        NOTE: ``extra`` uses a mutable default; it is only read here
        (extra.get) and passed on, never mutated.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit dir patterns can be
            # validated against actual matches below
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, changes)

            # bail out early when nothing changed (unless forced, closing a
            # branch head, merging, or switching named branch)
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # defaults bind the values now; the hook may run much later,
            # after the lock is released (see _afterlock)
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1297 1297
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When ``error`` is true, IOErrors while committing individual files
        are fatal; otherwise an ENOENT is treated as a file removal.
        Returns the new changelog node.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            # a proxy keeps the transaction collectable if we leak a ref
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            # missing file: record as a removal instead
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            # lets pretxncommit hooks see the pending changelog entry
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retract boundary does not alter parent changesets.
                # if a parent has a higher phase the result will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1379 1379
1380 1380 @unfilteredmethod
1381 1381 def destroying(self):
1382 1382 '''Inform the repository that nodes are about to be destroyed.
1383 1383 Intended for use by strip and rollback, so there's a common
1384 1384 place for anything that has to be done before destroying history.
1385 1385
1386 1386 This is mostly useful for saving state that is in memory and waiting
1387 1387 to be flushed when the current lock is released. Because a call to
1388 1388 destroyed is imminent, the repo will be invalidated causing those
1389 1389 changes to stay in memory (waiting for the next unlock), or vanish
1390 1390 completely.
1391 1391 '''
1392 1392 # When using the same lock to commit and strip, the phasecache is left
1393 1393 # dirty after committing. Then when we strip, the repo is invalidated,
1394 1394 # causing those changes to disappear.
1395 1395 if '_phasecache' in vars(self):
1396 1396 self._phasecache.write()
1397 1397
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        Rewrites the phasecache, refreshes the 'served' branch cache and
        the tag cache, then invalidates all file-backed caches.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1431 1431
1432 1432 def walk(self, match, node=None):
1433 1433 '''
1434 1434 walk recursively through the directory tree or a given
1435 1435 changeset, finding all files matched by the match
1436 1436 function
1437 1437 '''
1438 1438 return self[node].walk(match)
1439 1439
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of sorted file lists:
        (modified, added, removed, deleted, unknown, ignored, clean).
        The ignored/clean/unknown lists are only populated when the
        corresponding flag is set.
        """

        def mfmatches(ctx):
            # restrict a manifest copy to the files accepted by 'match'
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = sorted(ctx2.substate)
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            # rebuild modified/added/clean by walking mf2 against mf1;
            # whatever is left in mf1 afterwards was removed
            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn, mf2node in mf2.iteritems():
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2node and
                          (mf2node or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r
1592 1592
1593 1593 def heads(self, start=None):
1594 1594 heads = self.changelog.heads(start)
1595 1595 # sort the output in rev descending order
1596 1596 return sorted(heads, key=self.changelog.rev, reverse=True)
1597 1597
1598 1598 def branchheads(self, branch=None, start=None, closed=False):
1599 1599 '''return a (possibly filtered) list of heads for the given branch
1600 1600
1601 1601 Heads are returned in topological order, from newest to oldest.
1602 1602 If branch is None, use the dirstate branch.
1603 1603 If start is not None, return only heads reachable from start.
1604 1604 If closed is True, return heads that are marked as closed as well.
1605 1605 '''
1606 1606 if branch is None:
1607 1607 branch = self[None].branch()
1608 1608 branches = self.branchmap()
1609 1609 if branch not in branches:
1610 1610 return []
1611 1611 # the cache returns heads ordered lowest to highest
1612 1612 bheads = list(reversed(branches[branch]))
1613 1613 if start is not None:
1614 1614 # filter out the heads that cannot be reached from startrev
1615 1615 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1616 1616 bheads = [h for h in bheads if h in fbheads]
1617 1617 if not closed:
1618 1618 bheads = [h for h in bheads if not self[h].closesbranch()]
1619 1619 return bheads
1620 1620
1621 1621 def branches(self, nodes):
1622 1622 if not nodes:
1623 1623 nodes = [self.changelog.tip()]
1624 1624 b = []
1625 1625 for n in nodes:
1626 1626 t = n
1627 1627 while True:
1628 1628 p = self.changelog.parents(n)
1629 1629 if p[1] != nullid or p[0] == nullid:
1630 1630 b.append((t, n, p[0], p[1]))
1631 1631 break
1632 1632 n = p[0]
1633 1633 return b
1634 1634
1635 1635 def between(self, pairs):
1636 1636 r = []
1637 1637
1638 1638 for top, bottom in pairs:
1639 1639 n, l, i = top, [], 0
1640 1640 f = 1
1641 1641
1642 1642 while n != bottom and n != nullid:
1643 1643 p = self.changelog.parents(n)[0]
1644 1644 if i == f:
1645 1645 l.append(n)
1646 1646 f = f * 2
1647 1647 n = p
1648 1648 i += 1
1649 1649
1650 1650 r.append(l)
1651 1651
1652 1652 return r
1653 1653
    def pull(self, remote, heads=None, force=False):
        """Pull changes from *remote* into this repository.

        heads -- optional list of remote heads to limit the pull to.
        force -- pull even unrelated changesets.

        Also synchronizes phases and obsolescence markers with the remote.
        Returns the result of addchangegroup() (0 when nothing was found).
        """
        # don't open transaction for nothing or you break future useful
        # rollback call
        tr = None
        trname = 'pull\n' + util.hidepassword(remote.url())
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                added = []
                result = 0
            else:
                tr = self.transaction(trname)
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                # pick the best changegroup retrieval method the remote
                # supports: getbundle > full changegroup > changegroupsubset
                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                      "other repository doesn't support "
                                      "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                # we use unfiltered changelog here because hidden revision must
                # be taken in account for phase synchronization. They may
                # becomes public and becomes visible again.
                cl = self.unfiltered().changelog
                clstart = len(cl)
                result = self.addchangegroup(cg, 'pull', remote.url())
                clend = len(cl)
                added = [cl.node(r) for r in xrange(clstart, clend)]

            # compute target subset
            if heads is None:
                # We pulled every thing possible
                # sync on everything common
                subset = common + added
            else:
                # We pulled a specific subset
                # sync on this subset
                subset = heads

            # Get remote phases data from remote
            remotephases = remote.listkeys('phases')
            publishing = bool(remotephases.get('publishing', False))
            if remotephases and not publishing:
                # remote is new and unpublishing
                pheads, _dr = phases.analyzeremotephases(self, subset,
                                                         remotephases)
                phases.advanceboundary(self, phases.public, pheads)
                phases.advanceboundary(self, phases.draft, subset)
            else:
                # Remote is old or publishing all common changesets
                # should be seen as public
                phases.advanceboundary(self, phases.public, subset)

            def gettransaction():
                # lazily open a transaction for obsmarker exchange only
                # when one is actually needed
                if tr is None:
                    return self.transaction(trname)
                return tr

            obstr = obsolete.syncpull(self, remote, gettransaction)
            if obstr is not None:
                tr = obstr

            if tr is not None:
                tr.close()
        finally:
            if tr is not None:
                tr.release()
            lock.release()

        return result
1737 1737
1738 1738 def checkpush(self, force, revs):
1739 1739 """Extensions can override this function if additional checks have
1740 1740 to be performed before pushing, or call it if they override push
1741 1741 command.
1742 1742 """
1743 1743 pass
1744 1744
1745 1745 def push(self, remote, force=False, revs=None, newbranch=False):
1746 1746 '''Push outgoing changesets (limited by revs) from the current
1747 1747 repository to remote. Return an integer:
1748 1748 - None means nothing to push
1749 1749 - 0 means HTTP error
1750 1750 - 1 means we pushed and remote head count is unchanged *or*
1751 1751 we have outgoing changesets but refused to push
1752 1752 - other values as described by addchangegroup()
1753 1753 '''
1754 1754 # there are two ways to push to remote repo:
1755 1755 #
1756 1756 # addchangegroup assumes local user can lock remote
1757 1757 # repo (local filesystem, old ssh servers).
1758 1758 #
1759 1759 # unbundle assumes local user cannot lock remote repo (new ssh
1760 1760 # servers, http servers).
1761 1761
1762 1762 if not remote.canpush():
1763 1763 raise util.Abort(_("destination does not support push"))
1764 1764 unfi = self.unfiltered()
1765 1765 def localphasemove(nodes, phase=phases.public):
1766 1766 """move <nodes> to <phase> in the local source repo"""
1767 phases.advanceboundary(self, phase, nodes)
1767 if locallock is not None:
1768 phases.advanceboundary(self, phase, nodes)
1769 else:
1770 # repo is not locked, do not change any phases!
1771 # Informs the user that phases should have been moved when
1772 # applicable.
1773 actualmoves = [n for n in nodes if phase < self[n].phase()]
1774 phasestr = phases.phasenames[phase]
1775 if actualmoves:
1776 self.ui.status(_('cannot lock source repo, skipping local'
1777 ' %s phase update\n') % phasestr)
1768 1778 # get local lock as we might write phase data
1769 locallock = self.lock()
1779 locallock = None
1780 try:
1781 locallock = self.lock()
1782 except IOError, err:
1783 if err.errno != errno.EACCES:
1784 raise
1785 # source repo cannot be locked.
1786 # We do not abort the push, but just disable the local phase
1787 # synchronisation.
1788 msg = 'cannot lock source repository: %s\n' % err
1789 self.ui.debug(msg)
1770 1790 try:
1771 1791 self.checkpush(force, revs)
1772 1792 lock = None
1773 1793 unbundle = remote.capable('unbundle')
1774 1794 if not unbundle:
1775 1795 lock = remote.lock()
1776 1796 try:
1777 1797 # discovery
1778 1798 fci = discovery.findcommonincoming
1779 1799 commoninc = fci(unfi, remote, force=force)
1780 1800 common, inc, remoteheads = commoninc
1781 1801 fco = discovery.findcommonoutgoing
1782 1802 outgoing = fco(unfi, remote, onlyheads=revs,
1783 1803 commoninc=commoninc, force=force)
1784 1804
1785 1805
1786 1806 if not outgoing.missing:
1787 1807 # nothing to push
1788 1808 scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
1789 1809 ret = None
1790 1810 else:
1791 1811 # something to push
1792 1812 if not force:
1793 1813 # if self.obsstore == False --> no obsolete
1794 1814 # then, save the iteration
1795 1815 if unfi.obsstore:
1796 1816 # this message are here for 80 char limit reason
1797 1817 mso = _("push includes obsolete changeset: %s!")
1798 1818 mst = "push includes %s changeset: %s!"
1799 1819 # plain versions for i18n tool to detect them
1800 1820 _("push includes unstable changeset: %s!")
1801 1821 _("push includes bumped changeset: %s!")
1802 1822 _("push includes divergent changeset: %s!")
1803 1823 # If we are to push if there is at least one
1804 1824 # obsolete or unstable changeset in missing, at
1805 1825 # least one of the missinghead will be obsolete or
1806 1826 # unstable. So checking heads only is ok
1807 1827 for node in outgoing.missingheads:
1808 1828 ctx = unfi[node]
1809 1829 if ctx.obsolete():
1810 1830 raise util.Abort(mso % ctx)
1811 1831 elif ctx.troubled():
1812 1832 raise util.Abort(_(mst)
1813 1833 % (ctx.troubles()[0],
1814 1834 ctx))
1815 1835 discovery.checkheads(unfi, remote, outgoing,
1816 1836 remoteheads, newbranch,
1817 1837 bool(inc))
1818 1838
1819 1839 # create a changegroup from local
1820 1840 if revs is None and not outgoing.excluded:
1821 1841 # push everything,
1822 1842 # use the fast path, no race possible on push
1823 1843 cg = self._changegroup(outgoing.missing, 'push')
1824 1844 else:
1825 1845 cg = self.getlocalbundle('push', outgoing)
1826 1846
1827 1847 # apply changegroup to remote
1828 1848 if unbundle:
1829 1849 # local repo finds heads on server, finds out what
1830 1850 # revs it must push. once revs transferred, if server
1831 1851 # finds it has different heads (someone else won
1832 1852 # commit/push race), server aborts.
1833 1853 if force:
1834 1854 remoteheads = ['force']
1835 1855 # ssh: return remote's addchangegroup()
1836 1856 # http: return remote's addchangegroup() or 0 for error
1837 1857 ret = remote.unbundle(cg, remoteheads, 'push')
1838 1858 else:
1839 1859 # we return an integer indicating remote head count
1840 1860 # change
1841 1861 ret = remote.addchangegroup(cg, 'push', self.url())
1842 1862
1843 1863 if ret:
1844 1864 # push succeed, synchronize target of the push
1845 1865 cheads = outgoing.missingheads
1846 1866 elif revs is None:
1847 1867 # All out push fails. synchronize all common
1848 1868 cheads = outgoing.commonheads
1849 1869 else:
1850 1870 # I want cheads = heads(::missingheads and ::commonheads)
1851 1871 # (missingheads is revs with secret changeset filtered out)
1852 1872 #
1853 1873 # This can be expressed as:
1854 1874 # cheads = ( (missingheads and ::commonheads)
1855 1875 # + (commonheads and ::missingheads))"
1856 1876 # )
1857 1877 #
1858 1878 # while trying to push we already computed the following:
1859 1879 # common = (::commonheads)
1860 1880 # missing = ((commonheads::missingheads) - commonheads)
1861 1881 #
1862 1882 # We can pick:
1863 1883 # * missingheads part of common (::commonheads)
1864 1884 common = set(outgoing.common)
1865 1885 cheads = [node for node in revs if node in common]
1866 1886 # and
1867 1887 # * commonheads parents on missing
1868 1888 revset = unfi.set('%ln and parents(roots(%ln))',
1869 1889 outgoing.commonheads,
1870 1890 outgoing.missing)
1871 1891 cheads.extend(c.node() for c in revset)
1872 1892 # even when we don't push, exchanging phase data is useful
1873 1893 remotephases = remote.listkeys('phases')
1874 1894 if (self.ui.configbool('ui', '_usedassubrepo', False)
1875 1895 and remotephases # server supports phases
1876 1896 and ret is None # nothing was pushed
1877 1897 and remotephases.get('publishing', False)):
1878 1898 # When:
1879 1899 # - this is a subrepo push
1880 1900 # - and remote support phase
1881 1901 # - and no changeset was pushed
1882 1902 # - and remote is publishing
1883 1903 # We may be in issue 3871 case!
1884 1904 # We drop the possible phase synchronisation done by
1885 1905 # courtesy to publish changesets possibly locally draft
1886 1906 # on the remote.
1887 1907 remotephases = {'publishing': 'True'}
1888 1908 if not remotephases: # old server or public only repo
1889 1909 localphasemove(cheads)
1890 1910 # don't push any phase data as there is nothing to push
1891 1911 else:
1892 1912 ana = phases.analyzeremotephases(self, cheads, remotephases)
1893 1913 pheads, droots = ana
1894 1914 ### Apply remote phase on local
1895 1915 if remotephases.get('publishing', False):
1896 1916 localphasemove(cheads)
1897 1917 else: # publish = False
1898 1918 localphasemove(pheads)
1899 1919 localphasemove(cheads, phases.draft)
1900 1920 ### Apply local phase on remote
1901 1921
1902 1922 # Get the list of all revs draft on remote by public here.
1903 1923 # XXX Beware that revset break if droots is not strictly
1904 1924 # XXX root we may want to ensure it is but it is costly
1905 1925 outdated = unfi.set('heads((%ln::%ln) and public())',
1906 1926 droots, cheads)
1907 1927 for newremotehead in outdated:
1908 1928 r = remote.pushkey('phases',
1909 1929 newremotehead.hex(),
1910 1930 str(phases.draft),
1911 1931 str(phases.public))
1912 1932 if not r:
1913 1933 self.ui.warn(_('updating %s to public failed!\n')
1914 1934 % newremotehead)
1915 1935 self.ui.debug('try to push obsolete markers to remote\n')
1916 1936 obsolete.syncpush(self, remote)
1917 1937 finally:
1918 1938 if lock is not None:
1919 1939 lock.release()
1920 1940 finally:
1921 locallock.release()
1941 if locallock is not None:
1942 locallock.release()
1922 1943
1923 1944 self.ui.debug("checking for updated bookmarks\n")
1924 1945 rb = remote.listkeys('bookmarks')
1925 1946 for k in rb.keys():
1926 1947 if k in unfi._bookmarks:
1927 1948 nr, nl = rb[k], hex(self._bookmarks[k])
1928 1949 if nr in unfi:
1929 1950 cr = unfi[nr]
1930 1951 cl = unfi[nl]
1931 1952 if bookmarks.validdest(unfi, cr, cl):
1932 1953 r = remote.pushkey('bookmarks', k, nr, nl)
1933 1954 if r:
1934 1955 self.ui.status(_("updating bookmark %s\n") % k)
1935 1956 else:
1936 1957 self.ui.warn(_('updating bookmark %s'
1937 1958 ' failed!\n') % k)
1938 1959
1939 1960 return ret
1940 1961
1941 1962 def changegroupinfo(self, nodes, source):
1942 1963 if self.ui.verbose or source == 'bundle':
1943 1964 self.ui.status(_("%d changesets found\n") % len(nodes))
1944 1965 if self.ui.debugflag:
1945 1966 self.ui.debug("list of changesets:\n")
1946 1967 for node in nodes:
1947 1968 self.ui.debug("%s\n" % hex(node))
1948 1969
1949 1970 def changegroupsubset(self, bases, heads, source):
1950 1971 """Compute a changegroup consisting of all the nodes that are
1951 1972 descendants of any of the bases and ancestors of any of the heads.
1952 1973 Return a chunkbuffer object whose read() method will return
1953 1974 successive changegroup chunks.
1954 1975
1955 1976 It is fairly complex as determining which filenodes and which
1956 1977 manifest nodes need to be included for the changeset to be complete
1957 1978 is non-trivial.
1958 1979
1959 1980 Another wrinkle is doing the reverse, figuring out which changeset in
1960 1981 the changegroup a particular filenode or manifestnode belongs to.
1961 1982 """
1962 1983 cl = self.changelog
1963 1984 if not bases:
1964 1985 bases = [nullid]
1965 1986 csets, bases, heads = cl.nodesbetween(bases, heads)
1966 1987 # We assume that all ancestors of bases are known
1967 1988 common = cl.ancestors([cl.rev(n) for n in bases])
1968 1989 return self._changegroupsubset(common, csets, heads, source)
1969 1990
1970 1991 def getlocalbundle(self, source, outgoing):
1971 1992 """Like getbundle, but taking a discovery.outgoing as an argument.
1972 1993
1973 1994 This is only implemented for local repos and reuses potentially
1974 1995 precomputed sets in outgoing."""
1975 1996 if not outgoing.missing:
1976 1997 return None
1977 1998 return self._changegroupsubset(outgoing.common,
1978 1999 outgoing.missing,
1979 2000 outgoing.missingheads,
1980 2001 source)
1981 2002
1982 2003 def getbundle(self, source, heads=None, common=None):
1983 2004 """Like changegroupsubset, but returns the set difference between the
1984 2005 ancestors of heads and the ancestors common.
1985 2006
1986 2007 If heads is None, use the local heads. If common is None, use [nullid].
1987 2008
1988 2009 The nodes in common might not all be known locally due to the way the
1989 2010 current discovery protocol works.
1990 2011 """
1991 2012 cl = self.changelog
1992 2013 if common:
1993 2014 hasnode = cl.hasnode
1994 2015 common = [n for n in common if hasnode(n)]
1995 2016 else:
1996 2017 common = [nullid]
1997 2018 if not heads:
1998 2019 heads = cl.heads()
1999 2020 return self.getlocalbundle(source,
2000 2021 discovery.outgoing(cl, common, heads))
2001 2022
2002 2023 @unfilteredmethod
2003 2024 def _changegroupsubset(self, commonrevs, csets, heads, source):
2004 2025
2005 2026 cl = self.changelog
2006 2027 mf = self.manifest
2007 2028 mfs = {} # needed manifests
2008 2029 fnodes = {} # needed file nodes
2009 2030 changedfiles = set()
2010 2031 fstate = ['', {}]
2011 2032 count = [0, 0]
2012 2033
2013 2034 # can we go through the fast path ?
2014 2035 heads.sort()
2015 2036 if heads == sorted(self.heads()):
2016 2037 return self._changegroup(csets, source)
2017 2038
2018 2039 # slow path
2019 2040 self.hook('preoutgoing', throw=True, source=source)
2020 2041 self.changegroupinfo(csets, source)
2021 2042
2022 2043 # filter any nodes that claim to be part of the known set
2023 2044 def prune(revlog, missing):
2024 2045 rr, rl = revlog.rev, revlog.linkrev
2025 2046 return [n for n in missing
2026 2047 if rl(rr(n)) not in commonrevs]
2027 2048
2028 2049 progress = self.ui.progress
2029 2050 _bundling = _('bundling')
2030 2051 _changesets = _('changesets')
2031 2052 _manifests = _('manifests')
2032 2053 _files = _('files')
2033 2054
2034 2055 def lookup(revlog, x):
2035 2056 if revlog == cl:
2036 2057 c = cl.read(x)
2037 2058 changedfiles.update(c[3])
2038 2059 mfs.setdefault(c[0], x)
2039 2060 count[0] += 1
2040 2061 progress(_bundling, count[0],
2041 2062 unit=_changesets, total=count[1])
2042 2063 return x
2043 2064 elif revlog == mf:
2044 2065 clnode = mfs[x]
2045 2066 mdata = mf.readfast(x)
2046 2067 for f, n in mdata.iteritems():
2047 2068 if f in changedfiles:
2048 2069 fnodes[f].setdefault(n, clnode)
2049 2070 count[0] += 1
2050 2071 progress(_bundling, count[0],
2051 2072 unit=_manifests, total=count[1])
2052 2073 return clnode
2053 2074 else:
2054 2075 progress(_bundling, count[0], item=fstate[0],
2055 2076 unit=_files, total=count[1])
2056 2077 return fstate[1][x]
2057 2078
2058 2079 bundler = changegroup.bundle10(lookup)
2059 2080 reorder = self.ui.config('bundle', 'reorder', 'auto')
2060 2081 if reorder == 'auto':
2061 2082 reorder = None
2062 2083 else:
2063 2084 reorder = util.parsebool(reorder)
2064 2085
2065 2086 def gengroup():
2066 2087 # Create a changenode group generator that will call our functions
2067 2088 # back to lookup the owning changenode and collect information.
2068 2089 count[:] = [0, len(csets)]
2069 2090 for chunk in cl.group(csets, bundler, reorder=reorder):
2070 2091 yield chunk
2071 2092 progress(_bundling, None)
2072 2093
2073 2094 # Create a generator for the manifestnodes that calls our lookup
2074 2095 # and data collection functions back.
2075 2096 for f in changedfiles:
2076 2097 fnodes[f] = {}
2077 2098 count[:] = [0, len(mfs)]
2078 2099 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2079 2100 yield chunk
2080 2101 progress(_bundling, None)
2081 2102
2082 2103 mfs.clear()
2083 2104
2084 2105 # Go through all our files in order sorted by name.
2085 2106 count[:] = [0, len(changedfiles)]
2086 2107 for fname in sorted(changedfiles):
2087 2108 filerevlog = self.file(fname)
2088 2109 if not len(filerevlog):
2089 2110 raise util.Abort(_("empty or missing revlog for %s")
2090 2111 % fname)
2091 2112 fstate[0] = fname
2092 2113 fstate[1] = fnodes.pop(fname, {})
2093 2114
2094 2115 nodelist = prune(filerevlog, fstate[1])
2095 2116 if nodelist:
2096 2117 count[0] += 1
2097 2118 yield bundler.fileheader(fname)
2098 2119 for chunk in filerevlog.group(nodelist, bundler, reorder):
2099 2120 yield chunk
2100 2121
2101 2122 # Signal that no more groups are left.
2102 2123 yield bundler.close()
2103 2124 progress(_bundling, None)
2104 2125
2105 2126 if csets:
2106 2127 self.hook('outgoing', node=hex(csets[0]), source=source)
2107 2128
2108 2129 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2109 2130
2110 2131 def changegroup(self, basenodes, source):
2111 2132 # to avoid a race we use changegroupsubset() (issue1320)
2112 2133 return self.changegroupsubset(basenodes, self.heads(), source)
2113 2134
2114 2135 @unfilteredmethod
2115 2136 def _changegroup(self, nodes, source):
2116 2137 """Compute the changegroup of all nodes that we have that a recipient
2117 2138 doesn't. Return a chunkbuffer object whose read() method will return
2118 2139 successive changegroup chunks.
2119 2140
2120 2141 This is much easier than the previous function as we can assume that
2121 2142 the recipient has any changenode we aren't sending them.
2122 2143
2123 2144 nodes is the set of nodes to send"""
2124 2145
2125 2146 cl = self.changelog
2126 2147 mf = self.manifest
2127 2148 mfs = {}
2128 2149 changedfiles = set()
2129 2150 fstate = ['']
2130 2151 count = [0, 0]
2131 2152
2132 2153 self.hook('preoutgoing', throw=True, source=source)
2133 2154 self.changegroupinfo(nodes, source)
2134 2155
2135 2156 revset = set([cl.rev(n) for n in nodes])
2136 2157
2137 2158 def gennodelst(log):
2138 2159 ln, llr = log.node, log.linkrev
2139 2160 return [ln(r) for r in log if llr(r) in revset]
2140 2161
2141 2162 progress = self.ui.progress
2142 2163 _bundling = _('bundling')
2143 2164 _changesets = _('changesets')
2144 2165 _manifests = _('manifests')
2145 2166 _files = _('files')
2146 2167
2147 2168 def lookup(revlog, x):
2148 2169 if revlog == cl:
2149 2170 c = cl.read(x)
2150 2171 changedfiles.update(c[3])
2151 2172 mfs.setdefault(c[0], x)
2152 2173 count[0] += 1
2153 2174 progress(_bundling, count[0],
2154 2175 unit=_changesets, total=count[1])
2155 2176 return x
2156 2177 elif revlog == mf:
2157 2178 count[0] += 1
2158 2179 progress(_bundling, count[0],
2159 2180 unit=_manifests, total=count[1])
2160 2181 return cl.node(revlog.linkrev(revlog.rev(x)))
2161 2182 else:
2162 2183 progress(_bundling, count[0], item=fstate[0],
2163 2184 total=count[1], unit=_files)
2164 2185 return cl.node(revlog.linkrev(revlog.rev(x)))
2165 2186
2166 2187 bundler = changegroup.bundle10(lookup)
2167 2188 reorder = self.ui.config('bundle', 'reorder', 'auto')
2168 2189 if reorder == 'auto':
2169 2190 reorder = None
2170 2191 else:
2171 2192 reorder = util.parsebool(reorder)
2172 2193
2173 2194 def gengroup():
2174 2195 '''yield a sequence of changegroup chunks (strings)'''
2175 2196 # construct a list of all changed files
2176 2197
2177 2198 count[:] = [0, len(nodes)]
2178 2199 for chunk in cl.group(nodes, bundler, reorder=reorder):
2179 2200 yield chunk
2180 2201 progress(_bundling, None)
2181 2202
2182 2203 count[:] = [0, len(mfs)]
2183 2204 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2184 2205 yield chunk
2185 2206 progress(_bundling, None)
2186 2207
2187 2208 count[:] = [0, len(changedfiles)]
2188 2209 for fname in sorted(changedfiles):
2189 2210 filerevlog = self.file(fname)
2190 2211 if not len(filerevlog):
2191 2212 raise util.Abort(_("empty or missing revlog for %s")
2192 2213 % fname)
2193 2214 fstate[0] = fname
2194 2215 nodelist = gennodelst(filerevlog)
2195 2216 if nodelist:
2196 2217 count[0] += 1
2197 2218 yield bundler.fileheader(fname)
2198 2219 for chunk in filerevlog.group(nodelist, bundler, reorder):
2199 2220 yield chunk
2200 2221 yield bundler.close()
2201 2222 progress(_bundling, None)
2202 2223
2203 2224 if nodes:
2204 2225 self.hook('outgoing', node=hex(nodes[0]), source=source)
2205 2226
2206 2227 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2207 2228
2208 2229 @unfilteredmethod
2209 2230 def addchangegroup(self, source, srctype, url, emptyok=False):
2210 2231 """Add the changegroup returned by source.read() to this repo.
2211 2232 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2212 2233 the URL of the repo where this changegroup is coming from.
2213 2234
2214 2235 Return an integer summarizing the change to this repo:
2215 2236 - nothing changed or no source: 0
2216 2237 - more heads than before: 1+added heads (2..n)
2217 2238 - fewer heads than before: -1-removed heads (-2..-n)
2218 2239 - number of heads stays the same: 1
2219 2240 """
2220 2241 def csmap(x):
2221 2242 self.ui.debug("add changeset %s\n" % short(x))
2222 2243 return len(cl)
2223 2244
2224 2245 def revmap(x):
2225 2246 return cl.rev(x)
2226 2247
2227 2248 if not source:
2228 2249 return 0
2229 2250
2230 2251 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2231 2252
2232 2253 changesets = files = revisions = 0
2233 2254 efiles = set()
2234 2255
2235 2256 # write changelog data to temp files so concurrent readers will not see
2236 2257 # inconsistent view
2237 2258 cl = self.changelog
2238 2259 cl.delayupdate()
2239 2260 oldheads = cl.heads()
2240 2261
2241 2262 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2242 2263 try:
2243 2264 trp = weakref.proxy(tr)
2244 2265 # pull off the changeset group
2245 2266 self.ui.status(_("adding changesets\n"))
2246 2267 clstart = len(cl)
2247 2268 class prog(object):
2248 2269 step = _('changesets')
2249 2270 count = 1
2250 2271 ui = self.ui
2251 2272 total = None
2252 2273 def __call__(self):
2253 2274 self.ui.progress(self.step, self.count, unit=_('chunks'),
2254 2275 total=self.total)
2255 2276 self.count += 1
2256 2277 pr = prog()
2257 2278 source.callback = pr
2258 2279
2259 2280 source.changelogheader()
2260 2281 srccontent = cl.addgroup(source, csmap, trp)
2261 2282 if not (srccontent or emptyok):
2262 2283 raise util.Abort(_("received changelog group is empty"))
2263 2284 clend = len(cl)
2264 2285 changesets = clend - clstart
2265 2286 for c in xrange(clstart, clend):
2266 2287 efiles.update(self[c].files())
2267 2288 efiles = len(efiles)
2268 2289 self.ui.progress(_('changesets'), None)
2269 2290
2270 2291 # pull off the manifest group
2271 2292 self.ui.status(_("adding manifests\n"))
2272 2293 pr.step = _('manifests')
2273 2294 pr.count = 1
2274 2295 pr.total = changesets # manifests <= changesets
2275 2296 # no need to check for empty manifest group here:
2276 2297 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2277 2298 # no new manifest will be created and the manifest group will
2278 2299 # be empty during the pull
2279 2300 source.manifestheader()
2280 2301 self.manifest.addgroup(source, revmap, trp)
2281 2302 self.ui.progress(_('manifests'), None)
2282 2303
2283 2304 needfiles = {}
2284 2305 if self.ui.configbool('server', 'validate', default=False):
2285 2306 # validate incoming csets have their manifests
2286 2307 for cset in xrange(clstart, clend):
2287 2308 mfest = self.changelog.read(self.changelog.node(cset))[0]
2288 2309 mfest = self.manifest.readdelta(mfest)
2289 2310 # store file nodes we must see
2290 2311 for f, n in mfest.iteritems():
2291 2312 needfiles.setdefault(f, set()).add(n)
2292 2313
2293 2314 # process the files
2294 2315 self.ui.status(_("adding file changes\n"))
2295 2316 pr.step = _('files')
2296 2317 pr.count = 1
2297 2318 pr.total = efiles
2298 2319 source.callback = None
2299 2320
2300 2321 while True:
2301 2322 chunkdata = source.filelogheader()
2302 2323 if not chunkdata:
2303 2324 break
2304 2325 f = chunkdata["filename"]
2305 2326 self.ui.debug("adding %s revisions\n" % f)
2306 2327 pr()
2307 2328 fl = self.file(f)
2308 2329 o = len(fl)
2309 2330 if not fl.addgroup(source, revmap, trp):
2310 2331 raise util.Abort(_("received file revlog group is empty"))
2311 2332 revisions += len(fl) - o
2312 2333 files += 1
2313 2334 if f in needfiles:
2314 2335 needs = needfiles[f]
2315 2336 for new in xrange(o, len(fl)):
2316 2337 n = fl.node(new)
2317 2338 if n in needs:
2318 2339 needs.remove(n)
2319 2340 else:
2320 2341 raise util.Abort(
2321 2342 _("received spurious file revlog entry"))
2322 2343 if not needs:
2323 2344 del needfiles[f]
2324 2345 self.ui.progress(_('files'), None)
2325 2346
2326 2347 for f, needs in needfiles.iteritems():
2327 2348 fl = self.file(f)
2328 2349 for n in needs:
2329 2350 try:
2330 2351 fl.rev(n)
2331 2352 except error.LookupError:
2332 2353 raise util.Abort(
2333 2354 _('missing file data for %s:%s - run hg verify') %
2334 2355 (f, hex(n)))
2335 2356
2336 2357 dh = 0
2337 2358 if oldheads:
2338 2359 heads = cl.heads()
2339 2360 dh = len(heads) - len(oldheads)
2340 2361 for h in heads:
2341 2362 if h not in oldheads and self[h].closesbranch():
2342 2363 dh -= 1
2343 2364 htext = ""
2344 2365 if dh:
2345 2366 htext = _(" (%+d heads)") % dh
2346 2367
2347 2368 self.ui.status(_("added %d changesets"
2348 2369 " with %d changes to %d files%s\n")
2349 2370 % (changesets, revisions, files, htext))
2350 2371 self.invalidatevolatilesets()
2351 2372
2352 2373 if changesets > 0:
2353 2374 p = lambda: cl.writepending() and self.root or ""
2354 2375 self.hook('pretxnchangegroup', throw=True,
2355 2376 node=hex(cl.node(clstart)), source=srctype,
2356 2377 url=url, pending=p)
2357 2378
2358 2379 added = [cl.node(r) for r in xrange(clstart, clend)]
2359 2380 publishing = self.ui.configbool('phases', 'publish', True)
2360 2381 if srctype == 'push':
2361 2382 # Old server can not push the boundary themself.
2362 2383 # New server won't push the boundary if changeset already
2363 2384 # existed locally as secrete
2364 2385 #
2365 2386 # We should not use added here but the list of all change in
2366 2387 # the bundle
2367 2388 if publishing:
2368 2389 phases.advanceboundary(self, phases.public, srccontent)
2369 2390 else:
2370 2391 phases.advanceboundary(self, phases.draft, srccontent)
2371 2392 phases.retractboundary(self, phases.draft, added)
2372 2393 elif srctype != 'strip':
2373 2394 # publishing only alter behavior during push
2374 2395 #
2375 2396 # strip should not touch boundary at all
2376 2397 phases.retractboundary(self, phases.draft, added)
2377 2398
2378 2399 # make changelog see real files again
2379 2400 cl.finalize(trp)
2380 2401
2381 2402 tr.close()
2382 2403
2383 2404 if changesets > 0:
2384 2405 if srctype != 'strip':
2385 2406 # During strip, branchcache is invalid but coming call to
2386 2407 # `destroyed` will repair it.
2387 2408 # In other case we can safely update cache on disk.
2388 2409 branchmap.updatecache(self.filtered('served'))
2389 2410 def runhooks():
2390 2411 # forcefully update the on-disk branch cache
2391 2412 self.ui.debug("updating the branch cache\n")
2392 2413 self.hook("changegroup", node=hex(cl.node(clstart)),
2393 2414 source=srctype, url=url)
2394 2415
2395 2416 for n in added:
2396 2417 self.hook("incoming", node=hex(n), source=srctype,
2397 2418 url=url)
2398 2419
2399 2420 newheads = [h for h in self.heads() if h not in oldheads]
2400 2421 self.ui.log("incoming",
2401 2422 "%s incoming changes - new heads: %s\n",
2402 2423 len(added),
2403 2424 ', '.join([hex(c[:6]) for c in newheads]))
2404 2425 self._afterlock(runhooks)
2405 2426
2406 2427 finally:
2407 2428 tr.release()
2408 2429 # never return 0 here:
2409 2430 if dh < 0:
2410 2431 return dh - 1
2411 2432 else:
2412 2433 return dh + 1
2413 2434
2414 2435 def stream_in(self, remote, requirements):
2415 2436 lock = self.lock()
2416 2437 try:
2417 2438 # Save remote branchmap. We will use it later
2418 2439 # to speed up branchcache creation
2419 2440 rbranchmap = None
2420 2441 if remote.capable("branchmap"):
2421 2442 rbranchmap = remote.branchmap()
2422 2443
2423 2444 fp = remote.stream_out()
2424 2445 l = fp.readline()
2425 2446 try:
2426 2447 resp = int(l)
2427 2448 except ValueError:
2428 2449 raise error.ResponseError(
2429 2450 _('unexpected response from remote server:'), l)
2430 2451 if resp == 1:
2431 2452 raise util.Abort(_('operation forbidden by server'))
2432 2453 elif resp == 2:
2433 2454 raise util.Abort(_('locking the remote repository failed'))
2434 2455 elif resp != 0:
2435 2456 raise util.Abort(_('the server sent an unknown error code'))
2436 2457 self.ui.status(_('streaming all changes\n'))
2437 2458 l = fp.readline()
2438 2459 try:
2439 2460 total_files, total_bytes = map(int, l.split(' ', 1))
2440 2461 except (ValueError, TypeError):
2441 2462 raise error.ResponseError(
2442 2463 _('unexpected response from remote server:'), l)
2443 2464 self.ui.status(_('%d files to transfer, %s of data\n') %
2444 2465 (total_files, util.bytecount(total_bytes)))
2445 2466 handled_bytes = 0
2446 2467 self.ui.progress(_('clone'), 0, total=total_bytes)
2447 2468 start = time.time()
2448 2469 for i in xrange(total_files):
2449 2470 # XXX doesn't support '\n' or '\r' in filenames
2450 2471 l = fp.readline()
2451 2472 try:
2452 2473 name, size = l.split('\0', 1)
2453 2474 size = int(size)
2454 2475 except (ValueError, TypeError):
2455 2476 raise error.ResponseError(
2456 2477 _('unexpected response from remote server:'), l)
2457 2478 if self.ui.debugflag:
2458 2479 self.ui.debug('adding %s (%s)\n' %
2459 2480 (name, util.bytecount(size)))
2460 2481 # for backwards compat, name was partially encoded
2461 2482 ofp = self.sopener(store.decodedir(name), 'w')
2462 2483 for chunk in util.filechunkiter(fp, limit=size):
2463 2484 handled_bytes += len(chunk)
2464 2485 self.ui.progress(_('clone'), handled_bytes,
2465 2486 total=total_bytes)
2466 2487 ofp.write(chunk)
2467 2488 ofp.close()
2468 2489 elapsed = time.time() - start
2469 2490 if elapsed <= 0:
2470 2491 elapsed = 0.001
2471 2492 self.ui.progress(_('clone'), None)
2472 2493 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2473 2494 (util.bytecount(total_bytes), elapsed,
2474 2495 util.bytecount(total_bytes / elapsed)))
2475 2496
2476 2497 # new requirements = old non-format requirements +
2477 2498 # new format-related
2478 2499 # requirements from the streamed-in repository
2479 2500 requirements.update(set(self.requirements) - self.supportedformats)
2480 2501 self._applyrequirements(requirements)
2481 2502 self._writerequirements()
2482 2503
2483 2504 if rbranchmap:
2484 2505 rbheads = []
2485 2506 for bheads in rbranchmap.itervalues():
2486 2507 rbheads.extend(bheads)
2487 2508
2488 2509 if rbheads:
2489 2510 rtiprev = max((int(self.changelog.rev(node))
2490 2511 for node in rbheads))
2491 2512 cache = branchmap.branchcache(rbranchmap,
2492 2513 self[rtiprev].node(),
2493 2514 rtiprev)
2494 2515 # Try to stick it as low as possible
2495 2516 # filter above served are unlikely to be fetch from a clone
2496 2517 for candidate in ('base', 'immutable', 'served'):
2497 2518 rview = self.filtered(candidate)
2498 2519 if cache.validfor(rview):
2499 2520 self._branchcaches[candidate] = cache
2500 2521 cache.write(rview)
2501 2522 break
2502 2523 self.invalidate()
2503 2524 return len(self.heads()) + 1
2504 2525 finally:
2505 2526 lock.release()
2506 2527
2507 2528 def clone(self, remote, heads=[], stream=False):
2508 2529 '''clone remote repository.
2509 2530
2510 2531 keyword arguments:
2511 2532 heads: list of revs to clone (forces use of pull)
2512 2533 stream: use streaming clone if possible'''
2513 2534
2514 2535 # now, all clients that can request uncompressed clones can
2515 2536 # read repo formats supported by all servers that can serve
2516 2537 # them.
2517 2538
2518 2539 # if revlog format changes, client will have to check version
2519 2540 # and format flags on "stream" capability, and use
2520 2541 # uncompressed only if compatible.
2521 2542
2522 2543 if not stream:
2523 2544 # if the server explicitly prefers to stream (for fast LANs)
2524 2545 stream = remote.capable('stream-preferred')
2525 2546
2526 2547 if stream and not heads:
2527 2548 # 'stream' means remote revlog format is revlogv1 only
2528 2549 if remote.capable('stream'):
2529 2550 return self.stream_in(remote, set(('revlogv1',)))
2530 2551 # otherwise, 'streamreqs' contains the remote revlog format
2531 2552 streamreqs = remote.capable('streamreqs')
2532 2553 if streamreqs:
2533 2554 streamreqs = set(streamreqs.split(','))
2534 2555 # if we support it, stream in and adjust our requirements
2535 2556 if not streamreqs - self.supportedformats:
2536 2557 return self.stream_in(remote, streamreqs)
2537 2558 return self.pull(remote, heads)
2538 2559
2539 2560 def pushkey(self, namespace, key, old, new):
2540 2561 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2541 2562 old=old, new=new)
2542 2563 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2543 2564 ret = pushkey.push(self, namespace, key, old, new)
2544 2565 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2545 2566 ret=ret)
2546 2567 return ret
2547 2568
2548 2569 def listkeys(self, namespace):
2549 2570 self.hook('prelistkeys', throw=True, namespace=namespace)
2550 2571 self.ui.debug('listing keys for "%s"\n' % namespace)
2551 2572 values = pushkey.list(self, namespace)
2552 2573 self.hook('listkeys', namespace=namespace, values=values)
2553 2574 return values
2554 2575
2555 2576 def debugwireargs(self, one, two, three=None, four=None, five=None):
2556 2577 '''used to test argument passing over the wire'''
2557 2578 return "%s %s %s %s %s" % (one, two, three, four, five)
2558 2579
2559 2580 def savecommitmessage(self, text):
2560 2581 fp = self.opener('last-message.txt', 'wb')
2561 2582 try:
2562 2583 fp.write(text)
2563 2584 finally:
2564 2585 fp.close()
2565 2586 return self.pathto(fp.name[len(self.root) + 1:])
2566 2587
2567 2588 # used to avoid circular references so destructors work
2568 2589 def aftertrans(files):
2569 2590 renamefiles = [tuple(t) for t in files]
2570 2591 def a():
2571 2592 for vfs, src, dest in renamefiles:
2572 2593 try:
2573 2594 vfs.rename(src, dest)
2574 2595 except OSError: # journal file does not yet exist
2575 2596 pass
2576 2597 return a
2577 2598
2578 2599 def undoname(fn):
2579 2600 base, name = os.path.split(fn)
2580 2601 assert name.startswith('journal')
2581 2602 return os.path.join(base, name.replace('journal', 'undo', 1))
2582 2603
2583 2604 def instance(ui, path, create):
2584 2605 return localrepository(ui, util.urllocalpath(path), create)
2585 2606
2586 2607 def islocal(path):
2587 2608 return True
@@ -1,1066 +1,1104
1 1 $ "$TESTDIR/hghave" killdaemons || exit 80
2 2
3 3 $ cat >> $HGRCPATH <<EOF
4 4 > [extensions]
5 5 > graphlog=
6 6 > EOF
7 7 $ hgph() { hg log -G --template "{rev} {phase} {desc} - {node|short}\n" $*; }
8 8
9 9 $ mkcommit() {
10 10 > echo "$1" > "$1"
11 11 > hg add "$1"
12 12 > message="$1"
13 13 > shift
14 14 > hg ci -m "$message" $*
15 15 > }
16 16
17 17 $ hg init alpha
18 18 $ cd alpha
19 19 $ mkcommit a-A
20 20 $ mkcommit a-B
21 21 $ mkcommit a-C
22 22 $ mkcommit a-D
23 23 $ hgph
24 24 @ 3 draft a-D - b555f63b6063
25 25 |
26 26 o 2 draft a-C - 54acac6f23ab
27 27 |
28 28 o 1 draft a-B - 548a3d25dbf0
29 29 |
30 30 o 0 draft a-A - 054250a37db4
31 31
32 32
33 33 $ hg init ../beta
34 34 $ hg push -r 1 ../beta
35 35 pushing to ../beta
36 36 searching for changes
37 37 adding changesets
38 38 adding manifests
39 39 adding file changes
40 40 added 2 changesets with 2 changes to 2 files
41 41 $ hgph
42 42 @ 3 draft a-D - b555f63b6063
43 43 |
44 44 o 2 draft a-C - 54acac6f23ab
45 45 |
46 46 o 1 public a-B - 548a3d25dbf0
47 47 |
48 48 o 0 public a-A - 054250a37db4
49 49
50 50
51 51 $ cd ../beta
52 52 $ hgph
53 53 o 1 public a-B - 548a3d25dbf0
54 54 |
55 55 o 0 public a-A - 054250a37db4
56 56
57 57 $ hg up -q
58 58 $ mkcommit b-A
59 59 $ hgph
60 60 @ 2 draft b-A - f54f1bb90ff3
61 61 |
62 62 o 1 public a-B - 548a3d25dbf0
63 63 |
64 64 o 0 public a-A - 054250a37db4
65 65
66 66 $ hg pull ../alpha
67 67 pulling from ../alpha
68 68 searching for changes
69 69 adding changesets
70 70 adding manifests
71 71 adding file changes
72 72 added 2 changesets with 2 changes to 2 files (+1 heads)
73 73 (run 'hg heads' to see heads, 'hg merge' to merge)
74 74 $ hgph
75 75 o 4 public a-D - b555f63b6063
76 76 |
77 77 o 3 public a-C - 54acac6f23ab
78 78 |
79 79 | @ 2 draft b-A - f54f1bb90ff3
80 80 |/
81 81 o 1 public a-B - 548a3d25dbf0
82 82 |
83 83 o 0 public a-A - 054250a37db4
84 84
85 85
86 86 pull did not update ../alpha state.
87 87 push from alpha to beta should update phase even if nothing is transferred
88 88
89 89 $ cd ../alpha
90 90 $ hgph # not updated by remote pull
91 91 @ 3 draft a-D - b555f63b6063
92 92 |
93 93 o 2 draft a-C - 54acac6f23ab
94 94 |
95 95 o 1 public a-B - 548a3d25dbf0
96 96 |
97 97 o 0 public a-A - 054250a37db4
98 98
99 99 $ hg push ../beta
100 100 pushing to ../beta
101 101 searching for changes
102 102 no changes found
103 103 [1]
104 104 $ hgph
105 105 @ 3 public a-D - b555f63b6063
106 106 |
107 107 o 2 public a-C - 54acac6f23ab
108 108 |
109 109 o 1 public a-B - 548a3d25dbf0
110 110 |
111 111 o 0 public a-A - 054250a37db4
112 112
113 113
114 114 update must update phase of common changeset too
115 115
116 116 $ hg pull ../beta # getting b-A
117 117 pulling from ../beta
118 118 searching for changes
119 119 adding changesets
120 120 adding manifests
121 121 adding file changes
122 122 added 1 changesets with 1 changes to 1 files (+1 heads)
123 123 (run 'hg heads' to see heads, 'hg merge' to merge)
124 124
125 125 $ cd ../beta
126 126 $ hgph # not updated by remote pull
127 127 o 4 public a-D - b555f63b6063
128 128 |
129 129 o 3 public a-C - 54acac6f23ab
130 130 |
131 131 | @ 2 draft b-A - f54f1bb90ff3
132 132 |/
133 133 o 1 public a-B - 548a3d25dbf0
134 134 |
135 135 o 0 public a-A - 054250a37db4
136 136
137 137 $ hg pull ../alpha
138 138 pulling from ../alpha
139 139 searching for changes
140 140 no changes found
141 141 $ hgph
142 142 o 4 public a-D - b555f63b6063
143 143 |
144 144 o 3 public a-C - 54acac6f23ab
145 145 |
146 146 | @ 2 public b-A - f54f1bb90ff3
147 147 |/
148 148 o 1 public a-B - 548a3d25dbf0
149 149 |
150 150 o 0 public a-A - 054250a37db4
151 151
152 152
153 153 Publish configuration option
154 154 ----------------------------
155 155
156 156 Pull
157 157 ````
158 158
159 159 changegroup are added without phase movement
160 160
161 161 $ hg bundle -a ../base.bundle
162 162 5 changesets found
163 163 $ cd ..
164 164 $ hg init mu
165 165 $ cd mu
166 166 $ cat > .hg/hgrc << EOF
167 167 > [phases]
168 168 > publish=0
169 169 > EOF
170 170 $ hg unbundle ../base.bundle
171 171 adding changesets
172 172 adding manifests
173 173 adding file changes
174 174 added 5 changesets with 5 changes to 5 files (+1 heads)
175 175 (run 'hg heads' to see heads, 'hg merge' to merge)
176 176 $ hgph
177 177 o 4 draft a-D - b555f63b6063
178 178 |
179 179 o 3 draft a-C - 54acac6f23ab
180 180 |
181 181 | o 2 draft b-A - f54f1bb90ff3
182 182 |/
183 183 o 1 draft a-B - 548a3d25dbf0
184 184 |
185 185 o 0 draft a-A - 054250a37db4
186 186
187 187 $ cd ..
188 188
189 189 Pulling from publish=False to publish=False does not move boundary.
190 190
191 191 $ hg init nu
192 192 $ cd nu
193 193 $ cat > .hg/hgrc << EOF
194 194 > [phases]
195 195 > publish=0
196 196 > EOF
197 197 $ hg pull ../mu -r 54acac6f23ab
198 198 pulling from ../mu
199 199 adding changesets
200 200 adding manifests
201 201 adding file changes
202 202 added 3 changesets with 3 changes to 3 files
203 203 (run 'hg update' to get a working copy)
204 204 $ hgph
205 205 o 2 draft a-C - 54acac6f23ab
206 206 |
207 207 o 1 draft a-B - 548a3d25dbf0
208 208 |
209 209 o 0 draft a-A - 054250a37db4
210 210
211 211
212 212 Even for common
213 213
214 214 $ hg pull ../mu -r f54f1bb90ff3
215 215 pulling from ../mu
216 216 searching for changes
217 217 adding changesets
218 218 adding manifests
219 219 adding file changes
220 220 added 1 changesets with 1 changes to 1 files (+1 heads)
221 221 (run 'hg heads' to see heads, 'hg merge' to merge)
222 222 $ hgph
223 223 o 3 draft b-A - f54f1bb90ff3
224 224 |
225 225 | o 2 draft a-C - 54acac6f23ab
226 226 |/
227 227 o 1 draft a-B - 548a3d25dbf0
228 228 |
229 229 o 0 draft a-A - 054250a37db4
230 230
231 231
232 232
233 233 Pulling from Publish=True to Publish=False moves the boundary in the common set.
234 234 we are in nu
235 235
236 236 $ hg pull ../alpha -r b555f63b6063
237 237 pulling from ../alpha
238 238 searching for changes
239 239 adding changesets
240 240 adding manifests
241 241 adding file changes
242 242 added 1 changesets with 1 changes to 1 files
243 243 (run 'hg update' to get a working copy)
244 244 $ hgph # f54f1bb90ff3 stay draft, not ancestor of -r
245 245 o 4 public a-D - b555f63b6063
246 246 |
247 247 | o 3 draft b-A - f54f1bb90ff3
248 248 | |
249 249 o | 2 public a-C - 54acac6f23ab
250 250 |/
251 251 o 1 public a-B - 548a3d25dbf0
252 252 |
253 253 o 0 public a-A - 054250a37db4
254 254
255 255
256 256 pulling from Publish=False to publish=False with some public
257 257
258 258 $ hg up -q f54f1bb90ff3
259 259 $ mkcommit n-A
260 260 $ mkcommit n-B
261 261 $ hgph
262 262 @ 6 draft n-B - 145e75495359
263 263 |
264 264 o 5 draft n-A - d6bcb4f74035
265 265 |
266 266 | o 4 public a-D - b555f63b6063
267 267 | |
268 268 o | 3 draft b-A - f54f1bb90ff3
269 269 | |
270 270 | o 2 public a-C - 54acac6f23ab
271 271 |/
272 272 o 1 public a-B - 548a3d25dbf0
273 273 |
274 274 o 0 public a-A - 054250a37db4
275 275
276 276 $ cd ../mu
277 277 $ hg pull ../nu
278 278 pulling from ../nu
279 279 searching for changes
280 280 adding changesets
281 281 adding manifests
282 282 adding file changes
283 283 added 2 changesets with 2 changes to 2 files
284 284 (run 'hg update' to get a working copy)
285 285 $ hgph
286 286 o 6 draft n-B - 145e75495359
287 287 |
288 288 o 5 draft n-A - d6bcb4f74035
289 289 |
290 290 | o 4 public a-D - b555f63b6063
291 291 | |
292 292 | o 3 public a-C - 54acac6f23ab
293 293 | |
294 294 o | 2 draft b-A - f54f1bb90ff3
295 295 |/
296 296 o 1 public a-B - 548a3d25dbf0
297 297 |
298 298 o 0 public a-A - 054250a37db4
299 299
300 300 $ cd ..
301 301
302 302 pulling into publish=True
303 303
304 304 $ cd alpha
305 305 $ hgph
306 306 o 4 public b-A - f54f1bb90ff3
307 307 |
308 308 | @ 3 public a-D - b555f63b6063
309 309 | |
310 310 | o 2 public a-C - 54acac6f23ab
311 311 |/
312 312 o 1 public a-B - 548a3d25dbf0
313 313 |
314 314 o 0 public a-A - 054250a37db4
315 315
316 316 $ hg pull ../mu
317 317 pulling from ../mu
318 318 searching for changes
319 319 adding changesets
320 320 adding manifests
321 321 adding file changes
322 322 added 2 changesets with 2 changes to 2 files
323 323 (run 'hg update' to get a working copy)
324 324 $ hgph
325 325 o 6 draft n-B - 145e75495359
326 326 |
327 327 o 5 draft n-A - d6bcb4f74035
328 328 |
329 329 o 4 public b-A - f54f1bb90ff3
330 330 |
331 331 | @ 3 public a-D - b555f63b6063
332 332 | |
333 333 | o 2 public a-C - 54acac6f23ab
334 334 |/
335 335 o 1 public a-B - 548a3d25dbf0
336 336 |
337 337 o 0 public a-A - 054250a37db4
338 338
339 339 $ cd ..
340 340
341 341 pulling back into original repo
342 342
343 343 $ cd nu
344 344 $ hg pull ../alpha
345 345 pulling from ../alpha
346 346 searching for changes
347 347 no changes found
348 348 $ hgph
349 349 @ 6 public n-B - 145e75495359
350 350 |
351 351 o 5 public n-A - d6bcb4f74035
352 352 |
353 353 | o 4 public a-D - b555f63b6063
354 354 | |
355 355 o | 3 public b-A - f54f1bb90ff3
356 356 | |
357 357 | o 2 public a-C - 54acac6f23ab
358 358 |/
359 359 o 1 public a-B - 548a3d25dbf0
360 360 |
361 361 o 0 public a-A - 054250a37db4
362 362
363 363
364 364 Push
365 365 ````
366 366
367 367 (inserted)
368 368
369 369 Test that phases are pushed even when there is nothing to push
370 370 (this might be tested later but it is very convenient to not alter the tests too much)
371 371
372 372 Push back to alpha
373 373
374 374 $ hg push ../alpha # from nu
375 375 pushing to ../alpha
376 376 searching for changes
377 377 no changes found
378 378 [1]
379 379 $ cd ..
380 380 $ cd alpha
381 381 $ hgph
382 382 o 6 public n-B - 145e75495359
383 383 |
384 384 o 5 public n-A - d6bcb4f74035
385 385 |
386 386 o 4 public b-A - f54f1bb90ff3
387 387 |
388 388 | @ 3 public a-D - b555f63b6063
389 389 | |
390 390 | o 2 public a-C - 54acac6f23ab
391 391 |/
392 392 o 1 public a-B - 548a3d25dbf0
393 393 |
394 394 o 0 public a-A - 054250a37db4
395 395
396 396
397 397 (end insertion)
398 398
399 399
400 400 initial setup
401 401
402 402 $ hg glog # of alpha
403 403 o changeset: 6:145e75495359
404 404 | tag: tip
405 405 | user: test
406 406 | date: Thu Jan 01 00:00:00 1970 +0000
407 407 | summary: n-B
408 408 |
409 409 o changeset: 5:d6bcb4f74035
410 410 | user: test
411 411 | date: Thu Jan 01 00:00:00 1970 +0000
412 412 | summary: n-A
413 413 |
414 414 o changeset: 4:f54f1bb90ff3
415 415 | parent: 1:548a3d25dbf0
416 416 | user: test
417 417 | date: Thu Jan 01 00:00:00 1970 +0000
418 418 | summary: b-A
419 419 |
420 420 | @ changeset: 3:b555f63b6063
421 421 | | user: test
422 422 | | date: Thu Jan 01 00:00:00 1970 +0000
423 423 | | summary: a-D
424 424 | |
425 425 | o changeset: 2:54acac6f23ab
426 426 |/ user: test
427 427 | date: Thu Jan 01 00:00:00 1970 +0000
428 428 | summary: a-C
429 429 |
430 430 o changeset: 1:548a3d25dbf0
431 431 | user: test
432 432 | date: Thu Jan 01 00:00:00 1970 +0000
433 433 | summary: a-B
434 434 |
435 435 o changeset: 0:054250a37db4
436 436 user: test
437 437 date: Thu Jan 01 00:00:00 1970 +0000
438 438 summary: a-A
439 439
440 440 $ mkcommit a-E
441 441 $ mkcommit a-F
442 442 $ mkcommit a-G
443 443 $ hg up d6bcb4f74035 -q
444 444 $ mkcommit a-H
445 445 created new head
446 446 $ hgph
447 447 @ 10 draft a-H - 967b449fbc94
448 448 |
449 449 | o 9 draft a-G - 3e27b6f1eee1
450 450 | |
451 451 | o 8 draft a-F - b740e3e5c05d
452 452 | |
453 453 | o 7 draft a-E - e9f537e46dea
454 454 | |
455 455 +---o 6 public n-B - 145e75495359
456 456 | |
457 457 o | 5 public n-A - d6bcb4f74035
458 458 | |
459 459 o | 4 public b-A - f54f1bb90ff3
460 460 | |
461 461 | o 3 public a-D - b555f63b6063
462 462 | |
463 463 | o 2 public a-C - 54acac6f23ab
464 464 |/
465 465 o 1 public a-B - 548a3d25dbf0
466 466 |
467 467 o 0 public a-A - 054250a37db4
468 468
469 469
470 470 Pulling from bundle does not alter phases of changeset not present in the bundle
471 471
472 472 $ hg bundle --base 1 -r 6 -r 3 ../partial-bundle.hg
473 473 5 changesets found
474 474 $ hg pull ../partial-bundle.hg
475 475 pulling from ../partial-bundle.hg
476 476 searching for changes
477 477 no changes found
478 478 $ hgph
479 479 @ 10 draft a-H - 967b449fbc94
480 480 |
481 481 | o 9 draft a-G - 3e27b6f1eee1
482 482 | |
483 483 | o 8 draft a-F - b740e3e5c05d
484 484 | |
485 485 | o 7 draft a-E - e9f537e46dea
486 486 | |
487 487 +---o 6 public n-B - 145e75495359
488 488 | |
489 489 o | 5 public n-A - d6bcb4f74035
490 490 | |
491 491 o | 4 public b-A - f54f1bb90ff3
492 492 | |
493 493 | o 3 public a-D - b555f63b6063
494 494 | |
495 495 | o 2 public a-C - 54acac6f23ab
496 496 |/
497 497 o 1 public a-B - 548a3d25dbf0
498 498 |
499 499 o 0 public a-A - 054250a37db4
500 500
501 501
502 502 Pushing to Publish=False (unknown changeset)
503 503
504 504 $ hg push ../mu -r b740e3e5c05d # a-F
505 505 pushing to ../mu
506 506 searching for changes
507 507 adding changesets
508 508 adding manifests
509 509 adding file changes
510 510 added 2 changesets with 2 changes to 2 files
511 511 $ hgph
512 512 @ 10 draft a-H - 967b449fbc94
513 513 |
514 514 | o 9 draft a-G - 3e27b6f1eee1
515 515 | |
516 516 | o 8 draft a-F - b740e3e5c05d
517 517 | |
518 518 | o 7 draft a-E - e9f537e46dea
519 519 | |
520 520 +---o 6 public n-B - 145e75495359
521 521 | |
522 522 o | 5 public n-A - d6bcb4f74035
523 523 | |
524 524 o | 4 public b-A - f54f1bb90ff3
525 525 | |
526 526 | o 3 public a-D - b555f63b6063
527 527 | |
528 528 | o 2 public a-C - 54acac6f23ab
529 529 |/
530 530 o 1 public a-B - 548a3d25dbf0
531 531 |
532 532 o 0 public a-A - 054250a37db4
533 533
534 534
535 535 $ cd ../mu
536 536 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
537 537 > # not ancestor of -r
538 538 o 8 draft a-F - b740e3e5c05d
539 539 |
540 540 o 7 draft a-E - e9f537e46dea
541 541 |
542 542 | o 6 draft n-B - 145e75495359
543 543 | |
544 544 | o 5 draft n-A - d6bcb4f74035
545 545 | |
546 546 o | 4 public a-D - b555f63b6063
547 547 | |
548 548 o | 3 public a-C - 54acac6f23ab
549 549 | |
550 550 | o 2 draft b-A - f54f1bb90ff3
551 551 |/
552 552 o 1 public a-B - 548a3d25dbf0
553 553 |
554 554 o 0 public a-A - 054250a37db4
555 555
556 556
557 557 Pushing to Publish=True (unknown changeset)
558 558
559 559 $ hg push ../beta -r b740e3e5c05d
560 560 pushing to ../beta
561 561 searching for changes
562 562 adding changesets
563 563 adding manifests
564 564 adding file changes
565 565 added 2 changesets with 2 changes to 2 files
566 566 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
567 567 > # not ancestor of -r
568 568 o 8 public a-F - b740e3e5c05d
569 569 |
570 570 o 7 public a-E - e9f537e46dea
571 571 |
572 572 | o 6 draft n-B - 145e75495359
573 573 | |
574 574 | o 5 draft n-A - d6bcb4f74035
575 575 | |
576 576 o | 4 public a-D - b555f63b6063
577 577 | |
578 578 o | 3 public a-C - 54acac6f23ab
579 579 | |
580 580 | o 2 draft b-A - f54f1bb90ff3
581 581 |/
582 582 o 1 public a-B - 548a3d25dbf0
583 583 |
584 584 o 0 public a-A - 054250a37db4
585 585
586 586
587 587 Pushing to Publish=True (common changeset)
588 588
589 589 $ cd ../beta
590 590 $ hg push ../alpha
591 591 pushing to ../alpha
592 592 searching for changes
593 593 no changes found
594 594 [1]
595 595 $ hgph
596 596 o 6 public a-F - b740e3e5c05d
597 597 |
598 598 o 5 public a-E - e9f537e46dea
599 599 |
600 600 o 4 public a-D - b555f63b6063
601 601 |
602 602 o 3 public a-C - 54acac6f23ab
603 603 |
604 604 | @ 2 public b-A - f54f1bb90ff3
605 605 |/
606 606 o 1 public a-B - 548a3d25dbf0
607 607 |
608 608 o 0 public a-A - 054250a37db4
609 609
610 610 $ cd ../alpha
611 611 $ hgph
612 612 @ 10 draft a-H - 967b449fbc94
613 613 |
614 614 | o 9 draft a-G - 3e27b6f1eee1
615 615 | |
616 616 | o 8 public a-F - b740e3e5c05d
617 617 | |
618 618 | o 7 public a-E - e9f537e46dea
619 619 | |
620 620 +---o 6 public n-B - 145e75495359
621 621 | |
622 622 o | 5 public n-A - d6bcb4f74035
623 623 | |
624 624 o | 4 public b-A - f54f1bb90ff3
625 625 | |
626 626 | o 3 public a-D - b555f63b6063
627 627 | |
628 628 | o 2 public a-C - 54acac6f23ab
629 629 |/
630 630 o 1 public a-B - 548a3d25dbf0
631 631 |
632 632 o 0 public a-A - 054250a37db4
633 633
634 634
635 635 Pushing to Publish=False (common changeset that change phase + unknown one)
636 636
637 637 $ hg push ../mu -r 967b449fbc94 -f
638 638 pushing to ../mu
639 639 searching for changes
640 640 adding changesets
641 641 adding manifests
642 642 adding file changes
643 643 added 1 changesets with 1 changes to 1 files (+1 heads)
644 644 $ hgph
645 645 @ 10 draft a-H - 967b449fbc94
646 646 |
647 647 | o 9 draft a-G - 3e27b6f1eee1
648 648 | |
649 649 | o 8 public a-F - b740e3e5c05d
650 650 | |
651 651 | o 7 public a-E - e9f537e46dea
652 652 | |
653 653 +---o 6 public n-B - 145e75495359
654 654 | |
655 655 o | 5 public n-A - d6bcb4f74035
656 656 | |
657 657 o | 4 public b-A - f54f1bb90ff3
658 658 | |
659 659 | o 3 public a-D - b555f63b6063
660 660 | |
661 661 | o 2 public a-C - 54acac6f23ab
662 662 |/
663 663 o 1 public a-B - 548a3d25dbf0
664 664 |
665 665 o 0 public a-A - 054250a37db4
666 666
667 667 $ cd ../mu
668 668 $ hgph # d6bcb4f74035 should have changed phase
669 669 > # 145e75495359 is still draft. not ancestor of -r
670 670 o 9 draft a-H - 967b449fbc94
671 671 |
672 672 | o 8 public a-F - b740e3e5c05d
673 673 | |
674 674 | o 7 public a-E - e9f537e46dea
675 675 | |
676 676 +---o 6 draft n-B - 145e75495359
677 677 | |
678 678 o | 5 public n-A - d6bcb4f74035
679 679 | |
680 680 | o 4 public a-D - b555f63b6063
681 681 | |
682 682 | o 3 public a-C - 54acac6f23ab
683 683 | |
684 684 o | 2 public b-A - f54f1bb90ff3
685 685 |/
686 686 o 1 public a-B - 548a3d25dbf0
687 687 |
688 688 o 0 public a-A - 054250a37db4
689 689
690 690
691 691
692 692 Pushing to Publish=True (common changeset from publish=False)
693 693
694 694 (in mu)
695 695 $ hg push ../alpha
696 696 pushing to ../alpha
697 697 searching for changes
698 698 no changes found
699 699 [1]
700 700 $ hgph
701 701 o 9 public a-H - 967b449fbc94
702 702 |
703 703 | o 8 public a-F - b740e3e5c05d
704 704 | |
705 705 | o 7 public a-E - e9f537e46dea
706 706 | |
707 707 +---o 6 public n-B - 145e75495359
708 708 | |
709 709 o | 5 public n-A - d6bcb4f74035
710 710 | |
711 711 | o 4 public a-D - b555f63b6063
712 712 | |
713 713 | o 3 public a-C - 54acac6f23ab
714 714 | |
715 715 o | 2 public b-A - f54f1bb90ff3
716 716 |/
717 717 o 1 public a-B - 548a3d25dbf0
718 718 |
719 719 o 0 public a-A - 054250a37db4
720 720
721 721 $ hgph -R ../alpha # a-H should have been synced to 0
722 722 @ 10 public a-H - 967b449fbc94
723 723 |
724 724 | o 9 draft a-G - 3e27b6f1eee1
725 725 | |
726 726 | o 8 public a-F - b740e3e5c05d
727 727 | |
728 728 | o 7 public a-E - e9f537e46dea
729 729 | |
730 730 +---o 6 public n-B - 145e75495359
731 731 | |
732 732 o | 5 public n-A - d6bcb4f74035
733 733 | |
734 734 o | 4 public b-A - f54f1bb90ff3
735 735 | |
736 736 | o 3 public a-D - b555f63b6063
737 737 | |
738 738 | o 2 public a-C - 54acac6f23ab
739 739 |/
740 740 o 1 public a-B - 548a3d25dbf0
741 741 |
742 742 o 0 public a-A - 054250a37db4
743 743
744 744
745 745
746 746 Discovering a locally secret changeset on a remote repository:
747 747
748 748 - should make it non-secret
749 749
750 750 $ cd ../alpha
751 751 $ mkcommit A-secret --config phases.new-commit=2
752 752 $ hgph
753 753 @ 11 secret A-secret - 435b5d83910c
754 754 |
755 755 o 10 public a-H - 967b449fbc94
756 756 |
757 757 | o 9 draft a-G - 3e27b6f1eee1
758 758 | |
759 759 | o 8 public a-F - b740e3e5c05d
760 760 | |
761 761 | o 7 public a-E - e9f537e46dea
762 762 | |
763 763 +---o 6 public n-B - 145e75495359
764 764 | |
765 765 o | 5 public n-A - d6bcb4f74035
766 766 | |
767 767 o | 4 public b-A - f54f1bb90ff3
768 768 | |
769 769 | o 3 public a-D - b555f63b6063
770 770 | |
771 771 | o 2 public a-C - 54acac6f23ab
772 772 |/
773 773 o 1 public a-B - 548a3d25dbf0
774 774 |
775 775 o 0 public a-A - 054250a37db4
776 776
777 777 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
778 778 1 changesets found
779 779 $ hg -R ../mu unbundle ../secret-bundle.hg
780 780 adding changesets
781 781 adding manifests
782 782 adding file changes
783 783 added 1 changesets with 1 changes to 1 files
784 784 (run 'hg update' to get a working copy)
785 785 $ hgph -R ../mu
786 786 o 10 draft A-secret - 435b5d83910c
787 787 |
788 788 o 9 public a-H - 967b449fbc94
789 789 |
790 790 | o 8 public a-F - b740e3e5c05d
791 791 | |
792 792 | o 7 public a-E - e9f537e46dea
793 793 | |
794 794 +---o 6 public n-B - 145e75495359
795 795 | |
796 796 o | 5 public n-A - d6bcb4f74035
797 797 | |
798 798 | o 4 public a-D - b555f63b6063
799 799 | |
800 800 | o 3 public a-C - 54acac6f23ab
801 801 | |
802 802 o | 2 public b-A - f54f1bb90ff3
803 803 |/
804 804 o 1 public a-B - 548a3d25dbf0
805 805 |
806 806 o 0 public a-A - 054250a37db4
807 807
808 808 $ hg pull ../mu
809 809 pulling from ../mu
810 810 searching for changes
811 811 no changes found
812 812 $ hgph
813 813 @ 11 draft A-secret - 435b5d83910c
814 814 |
815 815 o 10 public a-H - 967b449fbc94
816 816 |
817 817 | o 9 draft a-G - 3e27b6f1eee1
818 818 | |
819 819 | o 8 public a-F - b740e3e5c05d
820 820 | |
821 821 | o 7 public a-E - e9f537e46dea
822 822 | |
823 823 +---o 6 public n-B - 145e75495359
824 824 | |
825 825 o | 5 public n-A - d6bcb4f74035
826 826 | |
827 827 o | 4 public b-A - f54f1bb90ff3
828 828 | |
829 829 | o 3 public a-D - b555f63b6063
830 830 | |
831 831 | o 2 public a-C - 54acac6f23ab
832 832 |/
833 833 o 1 public a-B - 548a3d25dbf0
834 834 |
835 835 o 0 public a-A - 054250a37db4
836 836
837 837
838 838 pushing locally public and draft changesets that are remotely secret should make them
839 839 appear on the remote side.
840 840
841 841
842 842 $ hg -R ../mu phase --secret --force 967b449fbc94
843 843 $ hg push -r 435b5d83910c ../mu
844 844 pushing to ../mu
845 845 searching for changes
846 846 abort: push creates new remote head 435b5d83910c!
847 847 (did you forget to merge? use push -f to force)
848 848 [255]
849 849 $ hg push -fr 435b5d83910c ../mu # because the push will create new visible head
850 850 pushing to ../mu
851 851 searching for changes
852 852 adding changesets
853 853 adding manifests
854 854 adding file changes
855 855 added 0 changesets with 0 changes to 2 files
856 856 $ hgph -R ../mu
857 857 o 10 draft A-secret - 435b5d83910c
858 858 |
859 859 o 9 public a-H - 967b449fbc94
860 860 |
861 861 | o 8 public a-F - b740e3e5c05d
862 862 | |
863 863 | o 7 public a-E - e9f537e46dea
864 864 | |
865 865 +---o 6 public n-B - 145e75495359
866 866 | |
867 867 o | 5 public n-A - d6bcb4f74035
868 868 | |
869 869 | o 4 public a-D - b555f63b6063
870 870 | |
871 871 | o 3 public a-C - 54acac6f23ab
872 872 | |
873 873 o | 2 public b-A - f54f1bb90ff3
874 874 |/
875 875 o 1 public a-B - 548a3d25dbf0
876 876 |
877 877 o 0 public a-A - 054250a37db4
878 878
879 879
880 880 pull new changeset with common draft locally
881 881
882 882 $ hg up -q 967b449fbc94 # create a new root for draft
883 883 $ mkcommit 'alpha-more'
884 884 created new head
885 885 $ hg push -fr . ../mu
886 886 pushing to ../mu
887 887 searching for changes
888 888 adding changesets
889 889 adding manifests
890 890 adding file changes
891 891 added 1 changesets with 1 changes to 1 files (+1 heads)
892 892 $ cd ../mu
893 893 $ hg phase --secret --force 1c5cfd894796
894 894 $ hg up -q 435b5d83910c
895 895 $ mkcommit 'mu-more'
896 896 $ cd ../alpha
897 897 $ hg pull ../mu
898 898 pulling from ../mu
899 899 searching for changes
900 900 adding changesets
901 901 adding manifests
902 902 adding file changes
903 903 added 1 changesets with 1 changes to 1 files
904 904 (run 'hg update' to get a working copy)
905 905 $ hgph
906 906 o 13 draft mu-more - 5237fb433fc8
907 907 |
908 908 | @ 12 draft alpha-more - 1c5cfd894796
909 909 | |
910 910 o | 11 draft A-secret - 435b5d83910c
911 911 |/
912 912 o 10 public a-H - 967b449fbc94
913 913 |
914 914 | o 9 draft a-G - 3e27b6f1eee1
915 915 | |
916 916 | o 8 public a-F - b740e3e5c05d
917 917 | |
918 918 | o 7 public a-E - e9f537e46dea
919 919 | |
920 920 +---o 6 public n-B - 145e75495359
921 921 | |
922 922 o | 5 public n-A - d6bcb4f74035
923 923 | |
924 924 o | 4 public b-A - f54f1bb90ff3
925 925 | |
926 926 | o 3 public a-D - b555f63b6063
927 927 | |
928 928 | o 2 public a-C - 54acac6f23ab
929 929 |/
930 930 o 1 public a-B - 548a3d25dbf0
931 931 |
932 932 o 0 public a-A - 054250a37db4
933 933
934 934
935 935 Test that secret changesets are properly ignored on the remote side even when existing locally
936 936
937 937 $ cd ..
938 938 $ hg clone -qU -r b555f63b6063 -r f54f1bb90ff3 beta gamma
939 939
940 940 # pathological case are
941 941 #
942 942 # * secret remotely
943 943 # * known locally
944 944 # * repo have uncommon changeset
945 945
946 946 $ hg -R beta phase --secret --force f54f1bb90ff3
947 947 $ hg -R gamma phase --draft --force f54f1bb90ff3
948 948
949 949 $ cd gamma
950 950 $ hg pull ../beta
951 951 pulling from ../beta
952 952 searching for changes
953 953 adding changesets
954 954 adding manifests
955 955 adding file changes
956 956 added 2 changesets with 2 changes to 2 files
957 957 (run 'hg update' to get a working copy)
958 958 $ hg phase f54f1bb90ff3
959 959 2: draft
960 960
961 961 same over the wire
962 962
963 963 $ cd ../beta
964 964 $ hg serve -p $HGPORT -d --pid-file=../beta.pid -E ../beta-error.log
965 965 $ cat ../beta.pid >> $DAEMON_PIDS
966 966 $ cd ../gamma
967 967
968 968 $ hg pull http://localhost:$HGPORT/
969 969 pulling from http://localhost:$HGPORT/
970 970 searching for changes
971 971 no changes found
972 972 $ hg phase f54f1bb90ff3
973 973 2: draft
974 974
975 975 check that secret local on both side are not synced to public
976 976
977 977 $ hg push -r b555f63b6063 http://localhost:$HGPORT/
978 978 pushing to http://localhost:$HGPORT/
979 979 searching for changes
980 980 no changes found
981 981 [1]
982 982 $ hg phase f54f1bb90ff3
983 983 2: draft
984 984
985 985 put the changeset in the draft state again
986 986 (first test after this one expect to be able to copy)
987 987
988 988 $ cd ..
989 989
990 990
991 991 Test Clone behavior
992 992
993 993 A. Clone without secret changeset
994 994
995 995 1. cloning non-publishing repository
996 996 (Phase should be preserved)
997 997
998 998 # make sure there is no secret so we can use a copy clone
999 999
1000 1000 $ hg -R mu phase --draft 'secret()'
1001 1001
1002 1002 $ hg clone -U mu Tau
1003 1003 $ hgph -R Tau
1004 1004 o 12 draft mu-more - 5237fb433fc8
1005 1005 |
1006 1006 | o 11 draft alpha-more - 1c5cfd894796
1007 1007 | |
1008 1008 o | 10 draft A-secret - 435b5d83910c
1009 1009 |/
1010 1010 o 9 public a-H - 967b449fbc94
1011 1011 |
1012 1012 | o 8 public a-F - b740e3e5c05d
1013 1013 | |
1014 1014 | o 7 public a-E - e9f537e46dea
1015 1015 | |
1016 1016 +---o 6 public n-B - 145e75495359
1017 1017 | |
1018 1018 o | 5 public n-A - d6bcb4f74035
1019 1019 | |
1020 1020 | o 4 public a-D - b555f63b6063
1021 1021 | |
1022 1022 | o 3 public a-C - 54acac6f23ab
1023 1023 | |
1024 1024 o | 2 public b-A - f54f1bb90ff3
1025 1025 |/
1026 1026 o 1 public a-B - 548a3d25dbf0
1027 1027 |
1028 1028 o 0 public a-A - 054250a37db4
1029 1029
1030 1030
1031 1031 2. cloning publishing repository
1032 1032
1033 1033 (everything should be public)
1034 1034
1035 1035 $ hg clone -U alpha Upsilon
1036 1036 $ hgph -R Upsilon
1037 1037 o 13 public mu-more - 5237fb433fc8
1038 1038 |
1039 1039 | o 12 public alpha-more - 1c5cfd894796
1040 1040 | |
1041 1041 o | 11 public A-secret - 435b5d83910c
1042 1042 |/
1043 1043 o 10 public a-H - 967b449fbc94
1044 1044 |
1045 1045 | o 9 public a-G - 3e27b6f1eee1
1046 1046 | |
1047 1047 | o 8 public a-F - b740e3e5c05d
1048 1048 | |
1049 1049 | o 7 public a-E - e9f537e46dea
1050 1050 | |
1051 1051 +---o 6 public n-B - 145e75495359
1052 1052 | |
1053 1053 o | 5 public n-A - d6bcb4f74035
1054 1054 | |
1055 1055 o | 4 public b-A - f54f1bb90ff3
1056 1056 | |
1057 1057 | o 3 public a-D - b555f63b6063
1058 1058 | |
1059 1059 | o 2 public a-C - 54acac6f23ab
1060 1060 |/
1061 1061 o 1 public a-B - 548a3d25dbf0
1062 1062 |
1063 1063 o 0 public a-A - 054250a37db4
1064 1064
1065
1066 Pushing from an unlockable repo
1067 --------------------------------
1068 (issue3684)
1069
1070 Inability to lock the source repo should not prevent the push. It will prevent
1071 the retrieval of remote phases during push. For example, pushing to a publishing
1072 server won't turn changeset public.
1073
1074 1. Test that push is not prevented
1075
1076 $ hg init Phi
1077 $ cd Upsilon
1078 $ chmod -R -w .hg
1079 $ hg push ../Phi
1080 pushing to ../Phi
1081 searching for changes
1082 adding changesets
1083 adding manifests
1084 adding file changes
1085 added 14 changesets with 14 changes to 14 files (+3 heads)
1086 $ chmod -R +w .hg
1087
1088 2. Test that failed phases movement are reported
1089
1090 $ hg phase --force --draft 3
1091 $ chmod -R -w .hg
1092 $ hg push ../Phi
1093 pushing to ../Phi
1094 searching for changes
1095 no changes found
1096 cannot lock source repo, skipping local public phase update
1097 [1]
1098 $ chmod -R +w .hg
1099 $ hgph Upsilon
1100
1101 $ cd ..
1102
1065 1103 $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
1066 1104
General Comments 0
You need to be logged in to leave comments. Login now