obsolete: mark unreachable extinct changesets as hidden...
Pierre-Yves.David@ens-lyon.org
r17208:8018f234 default
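In short: with this change, extinct changesets (obsolete changesets with no non-obsolete descendants) are hidden unless they remain reachable from the working directory parent, a bookmark, or a tag. The hidden set is no longer a plain set initialised in __init__; it is computed lazily from the obsolescence store via a revset query. A minimal sketch of that query, assuming an existing localrepository instance `repo`:

    repo.revs('extinct() - ::(. + bookmark() + tagged())')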
@@ -1,2573 +1,2586
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
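For context: the filecache decorator (from scmutil) caches a property and invalidates it when the backing file under .hg/ changes; storecache resolves the name against the store instead, so it tracks files under .hg/store (see _phasecache and obsstore below). A usage sketch with a hypothetical file name and parser:

    @storecache('somefile')
    def _something(self):
        # recomputed only after .hg/store/somefile changes on disk
        return parsesomefile(self.sopener)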
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wopener = scmutil.opener(path, expand=True)
121 121 self.wvfs = self.wopener
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.opener = scmutil.opener(self.path)
127 127 self.vfs = self.opener
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phases if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup
133 133 self._phasedefaults = []
134 # hiddenrevs: revs that should be hidden by commands and tools
135 #
136 # This set is carried on the repo to ease initialisation and lazy
137 # loading; it'll probably move back to changelog for efficiency and
138 # consistency reasons
139 self.hiddenrevs = set()
140 134 try:
141 135 self.ui.readconfig(self.join("hgrc"), self.root)
142 136 extensions.loadall(self.ui)
143 137 except IOError:
144 138 pass
145 139
146 140 if not self.vfs.isdir():
147 141 if create:
148 142 if not self.wvfs.exists():
149 143 self.wvfs.makedirs()
150 144 self.vfs.makedir(notindexed=True)
151 145 requirements = self._baserequirements(create)
152 146 if self.ui.configbool('format', 'usestore', True):
153 147 self.vfs.mkdir("store")
154 148 requirements.append("store")
155 149 if self.ui.configbool('format', 'usefncache', True):
156 150 requirements.append("fncache")
157 151 if self.ui.configbool('format', 'dotencode', True):
158 152 requirements.append('dotencode')
159 153 # create an invalid changelog
160 154 self.vfs.append(
161 155 "00changelog.i",
162 156 '\0\0\0\2' # represents revlogv2
163 157 ' dummy changelog to prevent using the old repo layout'
164 158 )
165 159 if self.ui.configbool('format', 'generaldelta', False):
166 160 requirements.append("generaldelta")
167 161 requirements = set(requirements)
168 162 else:
169 163 raise error.RepoError(_("repository %s not found") % path)
170 164 elif create:
171 165 raise error.RepoError(_("repository %s already exists") % path)
172 166 else:
173 167 try:
174 168 requirements = scmutil.readrequires(self.vfs, self.supported)
175 169 except IOError, inst:
176 170 if inst.errno != errno.ENOENT:
177 171 raise
178 172 requirements = set()
179 173
180 174 self.sharedpath = self.path
181 175 try:
182 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
183 177 if not os.path.exists(s):
184 178 raise error.RepoError(
185 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
186 180 self.sharedpath = s
187 181 except IOError, inst:
188 182 if inst.errno != errno.ENOENT:
189 183 raise
190 184
191 185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
192 186 self.spath = self.store.path
193 187 self.sopener = self.store.opener
194 188 self.svfs = self.sopener
195 189 self.sjoin = self.store.join
196 190 self.opener.createmode = self.store.createmode
197 191 self._applyrequirements(requirements)
198 192 if create:
199 193 self._writerequirements()
200 194
201 195
202 196 self._branchcache = None
203 197 self._branchcachetip = None
204 198 self.filterpats = {}
205 199 self._datafilters = {}
206 200 self._transref = self._lockref = self._wlockref = None
207 201
208 202 # A cache for various files under .hg/ that tracks file changes,
209 203 # (used by the filecache decorator)
210 204 #
211 205 # Maps a property name to its util.filecacheentry
212 206 self._filecache = {}
213 207
214 208 def close(self):
215 209 pass
216 210
217 211 def _restrictcapabilities(self, caps):
218 212 return caps
219 213
220 214 def _applyrequirements(self, requirements):
221 215 self.requirements = requirements
222 216 self.sopener.options = dict((r, 1) for r in requirements
223 217 if r in self.openerreqs)
224 218
225 219 def _writerequirements(self):
226 220 reqfile = self.opener("requires", "w")
227 221 for r in self.requirements:
228 222 reqfile.write("%s\n" % r)
229 223 reqfile.close()
230 224
231 225 def _checknested(self, path):
232 226 """Determine if path is a legal nested repository."""
233 227 if not path.startswith(self.root):
234 228 return False
235 229 subpath = path[len(self.root) + 1:]
236 230 normsubpath = util.pconvert(subpath)
237 231
238 232 # XXX: Checking against the current working copy is wrong in
239 233 # the sense that it can reject things like
240 234 #
241 235 # $ hg cat -r 10 sub/x.txt
242 236 #
243 237 # if sub/ is no longer a subrepository in the working copy
244 238 # parent revision.
245 239 #
246 240 # However, it can of course also allow things that would have
247 241 # been rejected before, such as the above cat command if sub/
248 242 # is a subrepository now, but was a normal directory before.
249 243 # The old path auditor would have rejected by mistake since it
250 244 # panics when it sees sub/.hg/.
251 245 #
252 246 # All in all, checking against the working copy seems sensible
253 247 # since we want to prevent access to nested repositories on
254 248 # the filesystem *now*.
255 249 ctx = self[None]
256 250 parts = util.splitpath(subpath)
257 251 while parts:
258 252 prefix = '/'.join(parts)
259 253 if prefix in ctx.substate:
260 254 if prefix == normsubpath:
261 255 return True
262 256 else:
263 257 sub = ctx.sub(prefix)
264 258 return sub.checknested(subpath[len(prefix) + 1:])
265 259 else:
266 260 parts.pop()
267 261 return False
268 262
269 263 def peer(self):
270 264 return localpeer(self) # not cached to avoid reference cycle
271 265
272 266 @filecache('bookmarks')
273 267 def _bookmarks(self):
274 268 return bookmarks.read(self)
275 269
276 270 @filecache('bookmarks.current')
277 271 def _bookmarkcurrent(self):
278 272 return bookmarks.readcurrent(self)
279 273
280 274 def _writebookmarks(self, marks):
281 275 bookmarks.write(self)
282 276
283 277 def bookmarkheads(self, bookmark):
284 278 name = bookmark.split('@', 1)[0]
285 279 heads = []
286 280 for mark, n in self._bookmarks.iteritems():
287 281 if mark.split('@', 1)[0] == name:
288 282 heads.append(n)
289 283 return heads
290 284
291 285 @storecache('phaseroots')
292 286 def _phasecache(self):
293 287 return phases.phasecache(self, self._phasedefaults)
294 288
295 289 @storecache('obsstore')
296 290 def obsstore(self):
297 291 store = obsolete.obsstore(self.sopener)
298 292 return store
299 293
294 @propertycache
295 def hiddenrevs(self):
296 """hiddenrevs: revs that should be hidden by command and tools
297
298 This set is carried on the repo to ease initialisation and lazy
299 loading it'll probably move back to changelog for efficienty and
300 consistency reason
301
302 Note that the hiddenrevs will needs invalidations when
303 - a new changesets is added (possible unstable above extinct)
304 - a new obsolete marker is added (possible new extinct changeset)
305 """
306 hidden = set()
307 if self.obsstore:
308 ### hide extinct changesets that are not accessible by any means
309 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
310 hidden.update(self.revs(hiddenquery))
311 return hidden
312
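As the docstring notes, this cached set goes stale when changesets or obsolescence markers are added. Because propertycache stores the computed value in the instance dictionary, dropping the attribute forces the revset query to run again on the next access; a sketch of a hypothetical invalidation helper (not part of this change):

    def invalidatehidden(repo):
        # drop the cached value so hiddenrevs is recomputed lazily
        if 'hiddenrevs' in vars(repo):
            del repo.hiddenrevs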
300 313 @storecache('00changelog.i')
301 314 def changelog(self):
302 315 c = changelog.changelog(self.sopener)
303 316 if 'HG_PENDING' in os.environ:
304 317 p = os.environ['HG_PENDING']
305 318 if p.startswith(self.root):
306 319 c.readpending('00changelog.i.a')
307 320 return c
308 321
309 322 @storecache('00manifest.i')
310 323 def manifest(self):
311 324 return manifest.manifest(self.sopener)
312 325
313 326 @filecache('dirstate')
314 327 def dirstate(self):
315 328 warned = [0]
316 329 def validate(node):
317 330 try:
318 331 self.changelog.rev(node)
319 332 return node
320 333 except error.LookupError:
321 334 if not warned[0]:
322 335 warned[0] = True
323 336 self.ui.warn(_("warning: ignoring unknown"
324 337 " working parent %s!\n") % short(node))
325 338 return nullid
326 339
327 340 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
328 341
329 342 def __getitem__(self, changeid):
330 343 if changeid is None:
331 344 return context.workingctx(self)
332 345 return context.changectx(self, changeid)
333 346
334 347 def __contains__(self, changeid):
335 348 try:
336 349 return bool(self.lookup(changeid))
337 350 except error.RepoLookupError:
338 351 return False
339 352
340 353 def __nonzero__(self):
341 354 return True
342 355
343 356 def __len__(self):
344 357 return len(self.changelog)
345 358
346 359 def __iter__(self):
347 360 for i in xrange(len(self)):
348 361 yield i
349 362
350 363 def revs(self, expr, *args):
351 364 '''Return a list of revisions matching the given revset'''
352 365 expr = revset.formatspec(expr, *args)
353 366 m = revset.match(None, expr)
354 367 return [r for r in m(self, range(len(self)))]
355 368
356 369 def set(self, expr, *args):
357 370 '''
358 371 Yield a context for each matching revision, after doing arg
359 372 replacement via revset.formatspec
360 373 '''
361 374 for r in self.revs(expr, *args):
362 375 yield self[r]
363 376
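revs() and set() accept printf-style substitution through revset.formatspec; a small usage sketch with hypothetical arguments:

    # revisions descending from rev 0 that are not extinct
    revs = repo.revs('%d:: and not extinct()', 0)
    # changectx objects for the heads of a named branch
    for ctx in repo.set('head() and branch(%s)', 'default'):
        print ctx.rev()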
364 377 def url(self):
365 378 return 'file:' + self.root
366 379
367 380 def hook(self, name, throw=False, **args):
368 381 return hook.hook(self.ui, self, name, throw, **args)
369 382
370 383 tag_disallowed = ':\r\n'
371 384
372 385 def _tag(self, names, node, message, local, user, date, extra={}):
373 386 if isinstance(names, str):
374 387 allchars = names
375 388 names = (names,)
376 389 else:
377 390 allchars = ''.join(names)
378 391 for c in self.tag_disallowed:
379 392 if c in allchars:
380 393 raise util.Abort(_('%r cannot be used in a tag name') % c)
381 394
382 395 branches = self.branchmap()
383 396 for name in names:
384 397 self.hook('pretag', throw=True, node=hex(node), tag=name,
385 398 local=local)
386 399 if name in branches:
387 400 self.ui.warn(_("warning: tag %s conflicts with existing"
388 401 " branch name\n") % name)
389 402
390 403 def writetags(fp, names, munge, prevtags):
391 404 fp.seek(0, 2)
392 405 if prevtags and prevtags[-1] != '\n':
393 406 fp.write('\n')
394 407 for name in names:
395 408 m = munge and munge(name) or name
396 409 if (self._tagscache.tagtypes and
397 410 name in self._tagscache.tagtypes):
398 411 old = self.tags().get(name, nullid)
399 412 fp.write('%s %s\n' % (hex(old), m))
400 413 fp.write('%s %s\n' % (hex(node), m))
401 414 fp.close()
402 415
403 416 prevtags = ''
404 417 if local:
405 418 try:
406 419 fp = self.opener('localtags', 'r+')
407 420 except IOError:
408 421 fp = self.opener('localtags', 'a')
409 422 else:
410 423 prevtags = fp.read()
411 424
412 425 # local tags are stored in the current charset
413 426 writetags(fp, names, None, prevtags)
414 427 for name in names:
415 428 self.hook('tag', node=hex(node), tag=name, local=local)
416 429 return
417 430
418 431 try:
419 432 fp = self.wfile('.hgtags', 'rb+')
420 433 except IOError, e:
421 434 if e.errno != errno.ENOENT:
422 435 raise
423 436 fp = self.wfile('.hgtags', 'ab')
424 437 else:
425 438 prevtags = fp.read()
426 439
427 440 # committed tags are stored in UTF-8
428 441 writetags(fp, names, encoding.fromlocal, prevtags)
429 442
430 443 fp.close()
431 444
432 445 self.invalidatecaches()
433 446
434 447 if '.hgtags' not in self.dirstate:
435 448 self[None].add(['.hgtags'])
436 449
437 450 m = matchmod.exact(self.root, '', ['.hgtags'])
438 451 tagnode = self.commit(message, user, date, extra=extra, match=m)
439 452
440 453 for name in names:
441 454 self.hook('tag', node=hex(node), tag=name, local=local)
442 455
443 456 return tagnode
444 457
445 458 def tag(self, names, node, message, local, user, date):
446 459 '''tag a revision with one or more symbolic names.
447 460
448 461 names is a list of strings or, when adding a single tag, names may be a
449 462 string.
450 463
451 464 if local is True, the tags are stored in a per-repository file.
452 465 otherwise, they are stored in the .hgtags file, and a new
453 466 changeset is committed with the change.
454 467
455 468 keyword arguments:
456 469
457 470 local: whether to store tags in non-version-controlled file
458 471 (default False)
459 472
460 473 message: commit message to use if committing
461 474
462 475 user: name of user to use if committing
463 476
464 477 date: date tuple to use if committing'''
465 478
466 479 if not local:
467 480 for x in self.status()[:5]:
468 481 if '.hgtags' in x:
469 482 raise util.Abort(_('working copy of .hgtags is changed '
470 483 '(please commit .hgtags manually)'))
471 484
472 485 self.tags() # instantiate the cache
473 486 self._tag(names, node, message, local, user, date)
474 487
475 488 @propertycache
476 489 def _tagscache(self):
477 490 '''Returns a tagscache object that contains various tags related
478 491 caches.'''
479 492
480 493 # This simplifies its cache management by having one decorated
481 494 # function (this one) and the rest simply fetch things from it.
482 495 class tagscache(object):
483 496 def __init__(self):
484 497 # These two define the set of tags for this repository. tags
485 498 # maps tag name to node; tagtypes maps tag name to 'global' or
486 499 # 'local'. (Global tags are defined by .hgtags across all
487 500 # heads, and local tags are defined in .hg/localtags.)
488 501 # They constitute the in-memory cache of tags.
489 502 self.tags = self.tagtypes = None
490 503
491 504 self.nodetagscache = self.tagslist = None
492 505
493 506 cache = tagscache()
494 507 cache.tags, cache.tagtypes = self._findtags()
495 508
496 509 return cache
497 510
498 511 def tags(self):
499 512 '''return a mapping of tag to node'''
500 513 t = {}
501 514 for k, v in self._tagscache.tags.iteritems():
502 515 try:
503 516 # ignore tags to unknown nodes
504 517 self.changelog.rev(v)
505 518 t[k] = v
506 519 except (error.LookupError, ValueError):
507 520 pass
508 521 return t
509 522
510 523 def _findtags(self):
511 524 '''Do the hard work of finding tags. Return a pair of dicts
512 525 (tags, tagtypes) where tags maps tag name to node, and tagtypes
513 526 maps tag name to a string like \'global\' or \'local\'.
514 527 Subclasses or extensions are free to add their own tags, but
515 528 should be aware that the returned dicts will be retained for the
516 529 duration of the localrepo object.'''
517 530
518 531 # XXX what tagtype should subclasses/extensions use? Currently
519 532 # mq and bookmarks add tags, but do not set the tagtype at all.
520 533 # Should each extension invent its own tag type? Should there
521 534 # be one tagtype for all such "virtual" tags? Or is the status
522 535 # quo fine?
523 536
524 537 alltags = {} # map tag name to (node, hist)
525 538 tagtypes = {}
526 539
527 540 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
528 541 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
529 542
530 543 # Build the return dicts. Have to re-encode tag names because
531 544 # the tags module always uses UTF-8 (in order not to lose info
532 545 # writing to the cache), but the rest of Mercurial wants them in
533 546 # local encoding.
534 547 tags = {}
535 548 for (name, (node, hist)) in alltags.iteritems():
536 549 if node != nullid:
537 550 tags[encoding.tolocal(name)] = node
538 551 tags['tip'] = self.changelog.tip()
539 552 tagtypes = dict([(encoding.tolocal(name), value)
540 553 for (name, value) in tagtypes.iteritems()])
541 554 return (tags, tagtypes)
542 555
543 556 def tagtype(self, tagname):
544 557 '''
545 558 return the type of the given tag. result can be:
546 559
547 560 'local' : a local tag
548 561 'global' : a global tag
549 562 None : tag does not exist
550 563 '''
551 564
552 565 return self._tagscache.tagtypes.get(tagname)
553 566
554 567 def tagslist(self):
555 568 '''return a list of tags ordered by revision'''
556 569 if not self._tagscache.tagslist:
557 570 l = []
558 571 for t, n in self.tags().iteritems():
559 572 r = self.changelog.rev(n)
560 573 l.append((r, t, n))
561 574 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
562 575
563 576 return self._tagscache.tagslist
564 577
565 578 def nodetags(self, node):
566 579 '''return the tags associated with a node'''
567 580 if not self._tagscache.nodetagscache:
568 581 nodetagscache = {}
569 582 for t, n in self._tagscache.tags.iteritems():
570 583 nodetagscache.setdefault(n, []).append(t)
571 584 for tags in nodetagscache.itervalues():
572 585 tags.sort()
573 586 self._tagscache.nodetagscache = nodetagscache
574 587 return self._tagscache.nodetagscache.get(node, [])
575 588
576 589 def nodebookmarks(self, node):
577 590 marks = []
578 591 for bookmark, n in self._bookmarks.iteritems():
579 592 if n == node:
580 593 marks.append(bookmark)
581 594 return sorted(marks)
582 595
583 596 def _branchtags(self, partial, lrev):
584 597 # TODO: rename this function?
585 598 tiprev = len(self) - 1
586 599 if lrev != tiprev:
587 600 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
588 601 self._updatebranchcache(partial, ctxgen)
589 602 self._writebranchcache(partial, self.changelog.tip(), tiprev)
590 603
591 604 return partial
592 605
593 606 def updatebranchcache(self):
594 607 tip = self.changelog.tip()
595 608 if self._branchcache is not None and self._branchcachetip == tip:
596 609 return
597 610
598 611 oldtip = self._branchcachetip
599 612 self._branchcachetip = tip
600 613 if oldtip is None or oldtip not in self.changelog.nodemap:
601 614 partial, last, lrev = self._readbranchcache()
602 615 else:
603 616 lrev = self.changelog.rev(oldtip)
604 617 partial = self._branchcache
605 618
606 619 self._branchtags(partial, lrev)
607 620 # this private cache holds all heads (not just the branch tips)
608 621 self._branchcache = partial
609 622
610 623 def branchmap(self):
611 624 '''returns a dictionary {branch: [branchheads]}'''
612 625 self.updatebranchcache()
613 626 return self._branchcache
614 627
615 628 def _branchtip(self, heads):
616 629 '''return the tipmost branch head in heads'''
617 630 tip = heads[-1]
618 631 for h in reversed(heads):
619 632 if not self[h].closesbranch():
620 633 tip = h
621 634 break
622 635 return tip
623 636
624 637 def branchtip(self, branch):
625 638 '''return the tip node for a given branch'''
626 639 if branch not in self.branchmap():
627 640 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
628 641 return self._branchtip(self.branchmap()[branch])
629 642
630 643 def branchtags(self):
631 644 '''return a dict where branch names map to the tipmost head of
632 645 the branch, open heads come before closed'''
633 646 bt = {}
634 647 for bn, heads in self.branchmap().iteritems():
635 648 bt[bn] = self._branchtip(heads)
636 649 return bt
637 650
638 651 def _readbranchcache(self):
639 652 partial = {}
640 653 try:
641 654 f = self.opener("cache/branchheads")
642 655 lines = f.read().split('\n')
643 656 f.close()
644 657 except (IOError, OSError):
645 658 return {}, nullid, nullrev
646 659
647 660 try:
648 661 last, lrev = lines.pop(0).split(" ", 1)
649 662 last, lrev = bin(last), int(lrev)
650 663 if lrev >= len(self) or self[lrev].node() != last:
651 664 # invalidate the cache
652 665 raise ValueError('invalidating branch cache (tip differs)')
653 666 for l in lines:
654 667 if not l:
655 668 continue
656 669 node, label = l.split(" ", 1)
657 670 label = encoding.tolocal(label.strip())
658 671 if not node in self:
659 672 raise ValueError('invalidating branch cache because node '+
660 673 '%s does not exist' % node)
661 674 partial.setdefault(label, []).append(bin(node))
662 675 except KeyboardInterrupt:
663 676 raise
664 677 except Exception, inst:
665 678 if self.ui.debugflag:
666 679 self.ui.warn(str(inst), '\n')
667 680 partial, last, lrev = {}, nullid, nullrev
668 681 return partial, last, lrev
669 682
670 683 def _writebranchcache(self, branches, tip, tiprev):
671 684 try:
672 685 f = self.opener("cache/branchheads", "w", atomictemp=True)
673 686 f.write("%s %s\n" % (hex(tip), tiprev))
674 687 for label, nodes in branches.iteritems():
675 688 for node in nodes:
676 689 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
677 690 f.close()
678 691 except (IOError, OSError):
679 692 pass
680 693
681 694 def _updatebranchcache(self, partial, ctxgen):
682 695 """Given a branchhead cache, partial, that may have extra nodes or be
683 696 missing heads, and a generator of nodes that are at least a superset of
684 697 the missing heads, this function updates partial to be correct.
685 698 """
686 699 # collect new branch entries
687 700 newbranches = {}
688 701 for c in ctxgen:
689 702 newbranches.setdefault(c.branch(), []).append(c.node())
690 703 # if older branchheads are reachable from new ones, they aren't
691 704 # really branchheads. Note checking parents is insufficient:
692 705 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
693 706 for branch, newnodes in newbranches.iteritems():
694 707 bheads = partial.setdefault(branch, [])
695 708 # Remove candidate heads that no longer are in the repo (e.g., as
696 709 # the result of a strip that just happened). Avoid using 'node in
697 710 # self' here because that dives down into branchcache code somewhat
698 711 # recursively.
699 712 bheadrevs = [self.changelog.rev(node) for node in bheads
700 713 if self.changelog.hasnode(node)]
701 714 newheadrevs = [self.changelog.rev(node) for node in newnodes
702 715 if self.changelog.hasnode(node)]
703 716 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
704 717 # Remove duplicates - nodes that are in newheadrevs and are already
705 718 # in bheadrevs. This can happen if you strip a node whose parent
706 719 # was already a head (because they're on different branches).
707 720 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
708 721
709 722 # Starting from tip means fewer passes over reachable. If we know
710 723 # the new candidates are not ancestors of existing heads, we don't
711 724 # have to examine ancestors of existing heads
712 725 if ctxisnew:
713 726 iterrevs = sorted(newheadrevs)
714 727 else:
715 728 iterrevs = list(bheadrevs)
716 729
717 730 # This loop prunes out two kinds of heads - heads that are
718 731 # superseded by a head in newheadrevs, and newheadrevs that are not
719 732 # heads because an existing head is their descendant.
720 733 while iterrevs:
721 734 latest = iterrevs.pop()
722 735 if latest not in bheadrevs:
723 736 continue
724 737 ancestors = set(self.changelog.ancestors([latest],
725 738 bheadrevs[0]))
726 739 if ancestors:
727 740 bheadrevs = [b for b in bheadrevs if b not in ancestors]
728 741 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
729 742
730 743 # There may be branches that cease to exist when the last commit in the
731 744 # branch was stripped. This code filters them out. Note that the
732 745 # branch that ceased to exist may not be in newbranches because
733 746 # newbranches is the set of candidate heads, which when you strip the
734 747 # last commit in a branch will be the parent branch.
735 748 for branch in partial:
736 749 nodes = [head for head in partial[branch]
737 750 if self.changelog.hasnode(head)]
738 751 if not nodes:
739 752 del partial[branch]
740 753
741 754 def lookup(self, key):
742 755 return self[key].node()
743 756
744 757 def lookupbranch(self, key, remote=None):
745 758 repo = remote or self
746 759 if key in repo.branchmap():
747 760 return key
748 761
749 762 repo = (remote and remote.local()) and remote or self
750 763 return repo[key].branch()
751 764
752 765 def known(self, nodes):
753 766 nm = self.changelog.nodemap
754 767 pc = self._phasecache
755 768 result = []
756 769 for n in nodes:
757 770 r = nm.get(n)
758 771 resp = not (r is None or pc.phase(self, r) >= phases.secret)
759 772 result.append(resp)
760 773 return result
761 774
762 775 def local(self):
763 776 return self
764 777
765 778 def cancopy(self):
766 779 return self.local() # so statichttprepo's override of local() works
767 780
768 781 def join(self, f):
769 782 return os.path.join(self.path, f)
770 783
771 784 def wjoin(self, f):
772 785 return os.path.join(self.root, f)
773 786
774 787 def file(self, f):
775 788 if f[0] == '/':
776 789 f = f[1:]
777 790 return filelog.filelog(self.sopener, f)
778 791
779 792 def changectx(self, changeid):
780 793 return self[changeid]
781 794
782 795 def parents(self, changeid=None):
783 796 '''get list of changectxs for parents of changeid'''
784 797 return self[changeid].parents()
785 798
786 799 def setparents(self, p1, p2=nullid):
787 800 copies = self.dirstate.setparents(p1, p2)
788 801 if copies:
789 802 # Adjust copy records, the dirstate cannot do it, it
790 803 # requires access to parents manifests. Preserve them
791 804 # only for entries added to first parent.
792 805 pctx = self[p1]
793 806 for f in copies:
794 807 if f not in pctx and copies[f] in pctx:
795 808 self.dirstate.copy(copies[f], f)
796 809
797 810 def filectx(self, path, changeid=None, fileid=None):
798 811 """changeid can be a changeset revision, node, or tag.
799 812 fileid can be a file revision or node."""
800 813 return context.filectx(self, path, changeid, fileid)
801 814
802 815 def getcwd(self):
803 816 return self.dirstate.getcwd()
804 817
805 818 def pathto(self, f, cwd=None):
806 819 return self.dirstate.pathto(f, cwd)
807 820
808 821 def wfile(self, f, mode='r'):
809 822 return self.wopener(f, mode)
810 823
811 824 def _link(self, f):
812 825 return os.path.islink(self.wjoin(f))
813 826
814 827 def _loadfilter(self, filter):
815 828 if filter not in self.filterpats:
816 829 l = []
817 830 for pat, cmd in self.ui.configitems(filter):
818 831 if cmd == '!':
819 832 continue
820 833 mf = matchmod.match(self.root, '', [pat])
821 834 fn = None
822 835 params = cmd
823 836 for name, filterfn in self._datafilters.iteritems():
824 837 if cmd.startswith(name):
825 838 fn = filterfn
826 839 params = cmd[len(name):].lstrip()
827 840 break
828 841 if not fn:
829 842 fn = lambda s, c, **kwargs: util.filter(s, c)
830 843 # Wrap old filters not supporting keyword arguments
831 844 if not inspect.getargspec(fn)[2]:
832 845 oldfn = fn
833 846 fn = lambda s, c, **kwargs: oldfn(s, c)
834 847 l.append((mf, fn, params))
835 848 self.filterpats[filter] = l
836 849 return self.filterpats[filter]
837 850
838 851 def _filter(self, filterpats, filename, data):
839 852 for mf, fn, cmd in filterpats:
840 853 if mf(filename):
841 854 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
842 855 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
843 856 break
844 857
845 858 return data
846 859
847 860 @propertycache
848 861 def _encodefilterpats(self):
849 862 return self._loadfilter('encode')
850 863
851 864 @propertycache
852 865 def _decodefilterpats(self):
853 866 return self._loadfilter('decode')
854 867
855 868 def adddatafilter(self, name, filter):
856 869 self._datafilters[name] = filter
857 870
858 871 def wread(self, filename):
859 872 if self._link(filename):
860 873 data = os.readlink(self.wjoin(filename))
861 874 else:
862 875 data = self.wopener.read(filename)
863 876 return self._filter(self._encodefilterpats, filename, data)
864 877
865 878 def wwrite(self, filename, data, flags):
866 879 data = self._filter(self._decodefilterpats, filename, data)
867 880 if 'l' in flags:
868 881 self.wopener.symlink(data, filename)
869 882 else:
870 883 self.wopener.write(filename, data)
871 884 if 'x' in flags:
872 885 util.setflags(self.wjoin(filename), False, True)
873 886
874 887 def wwritedata(self, filename, data):
875 888 return self._filter(self._decodefilterpats, filename, data)
876 889
877 890 def transaction(self, desc):
878 891 tr = self._transref and self._transref() or None
879 892 if tr and tr.running():
880 893 return tr.nest()
881 894
882 895 # abort here if the journal already exists
883 896 if os.path.exists(self.sjoin("journal")):
884 897 raise error.RepoError(
885 898 _("abandoned transaction found - run hg recover"))
886 899
887 900 self._writejournal(desc)
888 901 renames = [(x, undoname(x)) for x in self._journalfiles()]
889 902
890 903 tr = transaction.transaction(self.ui.warn, self.sopener,
891 904 self.sjoin("journal"),
892 905 aftertrans(renames),
893 906 self.store.createmode)
894 907 self._transref = weakref.ref(tr)
895 908 return tr
896 909
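A sketch of the protocol callers are expected to follow with the returned transaction (commitctx below does exactly this):

    tr = repo.transaction('some-operation')  # hypothetical description
    try:
        # ... write data through the transaction ...
        tr.close()    # commit: the journal files become the undo files
    finally:
        tr.release()  # rolls the journal back if close() was never reached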
897 910 def _journalfiles(self):
898 911 return (self.sjoin('journal'), self.join('journal.dirstate'),
899 912 self.join('journal.branch'), self.join('journal.desc'),
900 913 self.join('journal.bookmarks'),
901 914 self.sjoin('journal.phaseroots'))
902 915
903 916 def undofiles(self):
904 917 return [undoname(x) for x in self._journalfiles()]
905 918
906 919 def _writejournal(self, desc):
907 920 self.opener.write("journal.dirstate",
908 921 self.opener.tryread("dirstate"))
909 922 self.opener.write("journal.branch",
910 923 encoding.fromlocal(self.dirstate.branch()))
911 924 self.opener.write("journal.desc",
912 925 "%d\n%s\n" % (len(self), desc))
913 926 self.opener.write("journal.bookmarks",
914 927 self.opener.tryread("bookmarks"))
915 928 self.sopener.write("journal.phaseroots",
916 929 self.sopener.tryread("phaseroots"))
917 930
918 931 def recover(self):
919 932 lock = self.lock()
920 933 try:
921 934 if os.path.exists(self.sjoin("journal")):
922 935 self.ui.status(_("rolling back interrupted transaction\n"))
923 936 transaction.rollback(self.sopener, self.sjoin("journal"),
924 937 self.ui.warn)
925 938 self.invalidate()
926 939 return True
927 940 else:
928 941 self.ui.warn(_("no interrupted transaction available\n"))
929 942 return False
930 943 finally:
931 944 lock.release()
932 945
933 946 def rollback(self, dryrun=False, force=False):
934 947 wlock = lock = None
935 948 try:
936 949 wlock = self.wlock()
937 950 lock = self.lock()
938 951 if os.path.exists(self.sjoin("undo")):
939 952 return self._rollback(dryrun, force)
940 953 else:
941 954 self.ui.warn(_("no rollback information available\n"))
942 955 return 1
943 956 finally:
944 957 release(lock, wlock)
945 958
946 959 def _rollback(self, dryrun, force):
947 960 ui = self.ui
948 961 try:
949 962 args = self.opener.read('undo.desc').splitlines()
950 963 (oldlen, desc, detail) = (int(args[0]), args[1], None)
951 964 if len(args) >= 3:
952 965 detail = args[2]
953 966 oldtip = oldlen - 1
954 967
955 968 if detail and ui.verbose:
956 969 msg = (_('repository tip rolled back to revision %s'
957 970 ' (undo %s: %s)\n')
958 971 % (oldtip, desc, detail))
959 972 else:
960 973 msg = (_('repository tip rolled back to revision %s'
961 974 ' (undo %s)\n')
962 975 % (oldtip, desc))
963 976 except IOError:
964 977 msg = _('rolling back unknown transaction\n')
965 978 desc = None
966 979
967 980 if not force and self['.'] != self['tip'] and desc == 'commit':
968 981 raise util.Abort(
969 982 _('rollback of last commit while not checked out '
970 983 'may lose data'), hint=_('use -f to force'))
971 984
972 985 ui.status(msg)
973 986 if dryrun:
974 987 return 0
975 988
976 989 parents = self.dirstate.parents()
977 990 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
978 991 if os.path.exists(self.join('undo.bookmarks')):
979 992 util.rename(self.join('undo.bookmarks'),
980 993 self.join('bookmarks'))
981 994 if os.path.exists(self.sjoin('undo.phaseroots')):
982 995 util.rename(self.sjoin('undo.phaseroots'),
983 996 self.sjoin('phaseroots'))
984 997 self.invalidate()
985 998
986 999 parentgone = (parents[0] not in self.changelog.nodemap or
987 1000 parents[1] not in self.changelog.nodemap)
988 1001 if parentgone:
989 1002 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
990 1003 try:
991 1004 branch = self.opener.read('undo.branch')
992 1005 self.dirstate.setbranch(branch)
993 1006 except IOError:
994 1007 ui.warn(_('named branch could not be reset: '
995 1008 'current branch is still \'%s\'\n')
996 1009 % self.dirstate.branch())
997 1010
998 1011 self.dirstate.invalidate()
999 1012 parents = tuple([p.rev() for p in self.parents()])
1000 1013 if len(parents) > 1:
1001 1014 ui.status(_('working directory now based on '
1002 1015 'revisions %d and %d\n') % parents)
1003 1016 else:
1004 1017 ui.status(_('working directory now based on '
1005 1018 'revision %d\n') % parents)
1006 1019 # TODO: if we know which new heads may result from this rollback, pass
1007 1020 # them to destroy(), which will prevent the branchhead cache from being
1008 1021 # invalidated.
1009 1022 self.destroyed()
1010 1023 return 0
1011 1024
1012 1025 def invalidatecaches(self):
1013 1026 def delcache(name):
1014 1027 try:
1015 1028 delattr(self, name)
1016 1029 except AttributeError:
1017 1030 pass
1018 1031
1019 1032 delcache('_tagscache')
1020 1033
1021 1034 self._branchcache = None # in UTF-8
1022 1035 self._branchcachetip = None
1023 1036
1024 1037 def invalidatedirstate(self):
1025 1038 '''Invalidates the dirstate, causing the next call to dirstate
1026 1039 to check if it was modified since the last time it was read,
1027 1040 rereading it if it has.
1028 1041
1029 1042 This is different from dirstate.invalidate() in that it doesn't always
1030 1043 reread the dirstate. Use dirstate.invalidate() if you want to
1031 1044 explicitly read the dirstate again (i.e. restoring it to a previous
1032 1045 known good state).'''
1033 1046 if 'dirstate' in self.__dict__:
1034 1047 for k in self.dirstate._filecache:
1035 1048 try:
1036 1049 delattr(self.dirstate, k)
1037 1050 except AttributeError:
1038 1051 pass
1039 1052 delattr(self, 'dirstate')
1040 1053
1041 1054 def invalidate(self):
1042 1055 for k in self._filecache:
1043 1056 # dirstate is invalidated separately in invalidatedirstate()
1044 1057 if k == 'dirstate':
1045 1058 continue
1046 1059
1047 1060 try:
1048 1061 delattr(self, k)
1049 1062 except AttributeError:
1050 1063 pass
1051 1064 self.invalidatecaches()
1052 1065
1053 1066 # Discard all cache entries to force reloading everything.
1054 1067 self._filecache.clear()
1055 1068
1056 1069 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1057 1070 try:
1058 1071 l = lock.lock(lockname, 0, releasefn, desc=desc)
1059 1072 except error.LockHeld, inst:
1060 1073 if not wait:
1061 1074 raise
1062 1075 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1063 1076 (desc, inst.locker))
1064 1077 # default to 600 seconds timeout
1065 1078 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1066 1079 releasefn, desc=desc)
1067 1080 if acquirefn:
1068 1081 acquirefn()
1069 1082 return l
1070 1083
1071 1084 def _afterlock(self, callback):
1072 1085 """add a callback to the current repository lock.
1073 1086
1074 1087 The callback will be executed on lock release."""
1075 1088 l = self._lockref and self._lockref()
1076 1089 if l:
1077 1090 l.postrelease.append(callback)
1078 1091 else:
1079 1092 callback()
1080 1093
1081 1094 def lock(self, wait=True):
1082 1095 '''Lock the repository store (.hg/store) and return a weak reference
1083 1096 to the lock. Use this before modifying the store (e.g. committing or
1084 1097 stripping). If you are opening a transaction, get a lock as well.'''
1085 1098 l = self._lockref and self._lockref()
1086 1099 if l is not None and l.held:
1087 1100 l.lock()
1088 1101 return l
1089 1102
1090 1103 def unlock():
1091 1104 self.store.write()
1092 1105 if '_phasecache' in vars(self):
1093 1106 self._phasecache.write()
1094 1107 for k, ce in self._filecache.items():
1095 1108 if k == 'dirstate':
1096 1109 continue
1097 1110 ce.refresh()
1098 1111
1099 1112 l = self._lock(self.sjoin("lock"), wait, unlock,
1100 1113 self.invalidate, _('repository %s') % self.origroot)
1101 1114 self._lockref = weakref.ref(l)
1102 1115 return l
1103 1116
1104 1117 def wlock(self, wait=True):
1105 1118 '''Lock the non-store parts of the repository (everything under
1106 1119 .hg except .hg/store) and return a weak reference to the lock.
1107 1120 Use this before modifying files in .hg.'''
1108 1121 l = self._wlockref and self._wlockref()
1109 1122 if l is not None and l.held:
1110 1123 l.lock()
1111 1124 return l
1112 1125
1113 1126 def unlock():
1114 1127 self.dirstate.write()
1115 1128 ce = self._filecache.get('dirstate')
1116 1129 if ce:
1117 1130 ce.refresh()
1118 1131
1119 1132 l = self._lock(self.join("wlock"), wait, unlock,
1120 1133 self.invalidatedirstate, _('working directory of %s') %
1121 1134 self.origroot)
1122 1135 self._wlockref = weakref.ref(l)
1123 1136 return l
1124 1137
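When both locks are needed, callers take wlock before lock and release them together, as rollback() above does; a minimal sketch:

    wlock = repo.wlock()  # working directory lock first
    lock = repo.lock()    # then the store lock
    try:
        pass  # ... modify the store and the working directory ...
    finally:
        release(lock, wlock)  # the lock.release helper imported at the top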
1125 1138 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1126 1139 """
1127 1140 commit an individual file as part of a larger transaction
1128 1141 """
1129 1142
1130 1143 fname = fctx.path()
1131 1144 text = fctx.data()
1132 1145 flog = self.file(fname)
1133 1146 fparent1 = manifest1.get(fname, nullid)
1134 1147 fparent2 = fparent2o = manifest2.get(fname, nullid)
1135 1148
1136 1149 meta = {}
1137 1150 copy = fctx.renamed()
1138 1151 if copy and copy[0] != fname:
1139 1152 # Mark the new revision of this file as a copy of another
1140 1153 # file. This copy data will effectively act as a parent
1141 1154 # of this new revision. If this is a merge, the first
1142 1155 # parent will be the nullid (meaning "look up the copy data")
1143 1156 # and the second one will be the other parent. For example:
1144 1157 #
1145 1158 # 0 --- 1 --- 3 rev1 changes file foo
1146 1159 # \ / rev2 renames foo to bar and changes it
1147 1160 # \- 2 -/ rev3 should have bar with all changes and
1148 1161 # should record that bar descends from
1149 1162 # bar in rev2 and foo in rev1
1150 1163 #
1151 1164 # this allows this merge to succeed:
1152 1165 #
1153 1166 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1154 1167 # \ / merging rev3 and rev4 should use bar@rev2
1155 1168 # \- 2 --- 4 as the merge base
1156 1169 #
1157 1170
1158 1171 cfname = copy[0]
1159 1172 crev = manifest1.get(cfname)
1160 1173 newfparent = fparent2
1161 1174
1162 1175 if manifest2: # branch merge
1163 1176 if fparent2 == nullid or crev is None: # copied on remote side
1164 1177 if cfname in manifest2:
1165 1178 crev = manifest2[cfname]
1166 1179 newfparent = fparent1
1167 1180
1168 1181 # find source in nearest ancestor if we've lost track
1169 1182 if not crev:
1170 1183 self.ui.debug(" %s: searching for copy revision for %s\n" %
1171 1184 (fname, cfname))
1172 1185 for ancestor in self[None].ancestors():
1173 1186 if cfname in ancestor:
1174 1187 crev = ancestor[cfname].filenode()
1175 1188 break
1176 1189
1177 1190 if crev:
1178 1191 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1179 1192 meta["copy"] = cfname
1180 1193 meta["copyrev"] = hex(crev)
1181 1194 fparent1, fparent2 = nullid, newfparent
1182 1195 else:
1183 1196 self.ui.warn(_("warning: can't find ancestor for '%s' "
1184 1197 "copied from '%s'!\n") % (fname, cfname))
1185 1198
1186 1199 elif fparent2 != nullid:
1187 1200 # is one parent an ancestor of the other?
1188 1201 fparentancestor = flog.ancestor(fparent1, fparent2)
1189 1202 if fparentancestor == fparent1:
1190 1203 fparent1, fparent2 = fparent2, nullid
1191 1204 elif fparentancestor == fparent2:
1192 1205 fparent2 = nullid
1193 1206
1194 1207 # is the file changed?
1195 1208 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1196 1209 changelist.append(fname)
1197 1210 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1198 1211
1199 1212 # are just the flags changed during merge?
1200 1213 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1201 1214 changelist.append(fname)
1202 1215
1203 1216 return fparent1
1204 1217
1205 1218 def commit(self, text="", user=None, date=None, match=None, force=False,
1206 1219 editor=False, extra={}):
1207 1220 """Add a new revision to current repository.
1208 1221
1209 1222 Revision information is gathered from the working directory,
1210 1223 match can be used to filter the committed files. If editor is
1211 1224 supplied, it is called to get a commit message.
1212 1225 """
1213 1226
1214 1227 def fail(f, msg):
1215 1228 raise util.Abort('%s: %s' % (f, msg))
1216 1229
1217 1230 if not match:
1218 1231 match = matchmod.always(self.root, '')
1219 1232
1220 1233 if not force:
1221 1234 vdirs = []
1222 1235 match.dir = vdirs.append
1223 1236 match.bad = fail
1224 1237
1225 1238 wlock = self.wlock()
1226 1239 try:
1227 1240 wctx = self[None]
1228 1241 merge = len(wctx.parents()) > 1
1229 1242
1230 1243 if (not force and merge and match and
1231 1244 (match.files() or match.anypats())):
1232 1245 raise util.Abort(_('cannot partially commit a merge '
1233 1246 '(do not specify files or patterns)'))
1234 1247
1235 1248 changes = self.status(match=match, clean=force)
1236 1249 if force:
1237 1250 changes[0].extend(changes[6]) # mq may commit unchanged files
1238 1251
1239 1252 # check subrepos
1240 1253 subs = []
1241 1254 commitsubs = set()
1242 1255 newstate = wctx.substate.copy()
1243 1256 # only manage subrepos and .hgsubstate if .hgsub is present
1244 1257 if '.hgsub' in wctx:
1245 1258 # we'll decide whether to track this ourselves, thanks
1246 1259 if '.hgsubstate' in changes[0]:
1247 1260 changes[0].remove('.hgsubstate')
1248 1261 if '.hgsubstate' in changes[2]:
1249 1262 changes[2].remove('.hgsubstate')
1250 1263
1251 1264 # compare current state to last committed state
1252 1265 # build new substate based on last committed state
1253 1266 oldstate = wctx.p1().substate
1254 1267 for s in sorted(newstate.keys()):
1255 1268 if not match(s):
1256 1269 # ignore working copy, use old state if present
1257 1270 if s in oldstate:
1258 1271 newstate[s] = oldstate[s]
1259 1272 continue
1260 1273 if not force:
1261 1274 raise util.Abort(
1262 1275 _("commit with new subrepo %s excluded") % s)
1263 1276 if wctx.sub(s).dirty(True):
1264 1277 if not self.ui.configbool('ui', 'commitsubrepos'):
1265 1278 raise util.Abort(
1266 1279 _("uncommitted changes in subrepo %s") % s,
1267 1280 hint=_("use --subrepos for recursive commit"))
1268 1281 subs.append(s)
1269 1282 commitsubs.add(s)
1270 1283 else:
1271 1284 bs = wctx.sub(s).basestate()
1272 1285 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1273 1286 if oldstate.get(s, (None, None, None))[1] != bs:
1274 1287 subs.append(s)
1275 1288
1276 1289 # check for removed subrepos
1277 1290 for p in wctx.parents():
1278 1291 r = [s for s in p.substate if s not in newstate]
1279 1292 subs += [s for s in r if match(s)]
1280 1293 if subs:
1281 1294 if (not match('.hgsub') and
1282 1295 '.hgsub' in (wctx.modified() + wctx.added())):
1283 1296 raise util.Abort(
1284 1297 _("can't commit subrepos without .hgsub"))
1285 1298 changes[0].insert(0, '.hgsubstate')
1286 1299
1287 1300 elif '.hgsub' in changes[2]:
1288 1301 # clean up .hgsubstate when .hgsub is removed
1289 1302 if ('.hgsubstate' in wctx and
1290 1303 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1291 1304 changes[2].insert(0, '.hgsubstate')
1292 1305
1293 1306 # make sure all explicit patterns are matched
1294 1307 if not force and match.files():
1295 1308 matched = set(changes[0] + changes[1] + changes[2])
1296 1309
1297 1310 for f in match.files():
1298 1311 if f == '.' or f in matched or f in wctx.substate:
1299 1312 continue
1300 1313 if f in changes[3]: # missing
1301 1314 fail(f, _('file not found!'))
1302 1315 if f in vdirs: # visited directory
1303 1316 d = f + '/'
1304 1317 for mf in matched:
1305 1318 if mf.startswith(d):
1306 1319 break
1307 1320 else:
1308 1321 fail(f, _("no match under directory!"))
1309 1322 elif f not in self.dirstate:
1310 1323 fail(f, _("file not tracked!"))
1311 1324
1312 1325 if (not force and not extra.get("close") and not merge
1313 1326 and not (changes[0] or changes[1] or changes[2])
1314 1327 and wctx.branch() == wctx.p1().branch()):
1315 1328 return None
1316 1329
1317 1330 if merge and changes[3]:
1318 1331 raise util.Abort(_("cannot commit merge with missing files"))
1319 1332
1320 1333 ms = mergemod.mergestate(self)
1321 1334 for f in changes[0]:
1322 1335 if f in ms and ms[f] == 'u':
1323 1336 raise util.Abort(_("unresolved merge conflicts "
1324 1337 "(see hg help resolve)"))
1325 1338
1326 1339 cctx = context.workingctx(self, text, user, date, extra, changes)
1327 1340 if editor:
1328 1341 cctx._text = editor(self, cctx, subs)
1329 1342 edited = (text != cctx._text)
1330 1343
1331 1344 # commit subs and write new state
1332 1345 if subs:
1333 1346 for s in sorted(commitsubs):
1334 1347 sub = wctx.sub(s)
1335 1348 self.ui.status(_('committing subrepository %s\n') %
1336 1349 subrepo.subrelpath(sub))
1337 1350 sr = sub.commit(cctx._text, user, date)
1338 1351 newstate[s] = (newstate[s][0], sr)
1339 1352 subrepo.writestate(self, newstate)
1340 1353
1341 1354 # Save commit message in case this transaction gets rolled back
1342 1355 # (e.g. by a pretxncommit hook). Leave the content alone on
1343 1356 # the assumption that the user will use the same editor again.
1344 1357 msgfn = self.savecommitmessage(cctx._text)
1345 1358
1346 1359 p1, p2 = self.dirstate.parents()
1347 1360 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1348 1361 try:
1349 1362 self.hook("precommit", throw=True, parent1=hookp1,
1350 1363 parent2=hookp2)
1351 1364 ret = self.commitctx(cctx, True)
1352 1365 except: # re-raises
1353 1366 if edited:
1354 1367 self.ui.write(
1355 1368 _('note: commit message saved in %s\n') % msgfn)
1356 1369 raise
1357 1370
1358 1371 # update bookmarks, dirstate and mergestate
1359 1372 bookmarks.update(self, [p1, p2], ret)
1360 1373 for f in changes[0] + changes[1]:
1361 1374 self.dirstate.normal(f)
1362 1375 for f in changes[2]:
1363 1376 self.dirstate.drop(f)
1364 1377 self.dirstate.setparents(ret)
1365 1378 ms.reset()
1366 1379 finally:
1367 1380 wlock.release()
1368 1381
1369 1382 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1370 1383 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1371 1384 self._afterlock(commithook)
1372 1385 return ret
1373 1386
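A minimal sketch of invoking commit() (message and user are hypothetical):

    node = repo.commit(text='fix a bug', user='someone@example.org')
    if node is None:
        pass  # nothing changed, so no new revision was created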
1374 1387 def commitctx(self, ctx, error=False):
1375 1388 """Add a new revision to current repository.
1376 1389 Revision information is passed via the context argument.
1377 1390 """
1378 1391
1379 1392 tr = lock = None
1380 1393 removed = list(ctx.removed())
1381 1394 p1, p2 = ctx.p1(), ctx.p2()
1382 1395 user = ctx.user()
1383 1396
1384 1397 lock = self.lock()
1385 1398 try:
1386 1399 tr = self.transaction("commit")
1387 1400 trp = weakref.proxy(tr)
1388 1401
1389 1402 if ctx.files():
1390 1403 m1 = p1.manifest().copy()
1391 1404 m2 = p2.manifest()
1392 1405
1393 1406 # check in files
1394 1407 new = {}
1395 1408 changed = []
1396 1409 linkrev = len(self)
1397 1410 for f in sorted(ctx.modified() + ctx.added()):
1398 1411 self.ui.note(f + "\n")
1399 1412 try:
1400 1413 fctx = ctx[f]
1401 1414 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1402 1415 changed)
1403 1416 m1.set(f, fctx.flags())
1404 1417 except OSError, inst:
1405 1418 self.ui.warn(_("trouble committing %s!\n") % f)
1406 1419 raise
1407 1420 except IOError, inst:
1408 1421 errcode = getattr(inst, 'errno', errno.ENOENT)
1409 1422 if error or errcode and errcode != errno.ENOENT:
1410 1423 self.ui.warn(_("trouble committing %s!\n") % f)
1411 1424 raise
1412 1425 else:
1413 1426 removed.append(f)
1414 1427
1415 1428 # update manifest
1416 1429 m1.update(new)
1417 1430 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1418 1431 drop = [f for f in removed if f in m1]
1419 1432 for f in drop:
1420 1433 del m1[f]
1421 1434 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1422 1435 p2.manifestnode(), (new, drop))
1423 1436 files = changed + removed
1424 1437 else:
1425 1438 mn = p1.manifestnode()
1426 1439 files = []
1427 1440
1428 1441 # update changelog
1429 1442 self.changelog.delayupdate()
1430 1443 n = self.changelog.add(mn, files, ctx.description(),
1431 1444 trp, p1.node(), p2.node(),
1432 1445 user, ctx.date(), ctx.extra().copy())
1433 1446 p = lambda: self.changelog.writepending() and self.root or ""
1434 1447 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1435 1448 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1436 1449 parent2=xp2, pending=p)
1437 1450 self.changelog.finalize(trp)
1438 1451 # set the new commit in its proper phase
1439 1452 targetphase = phases.newcommitphase(self.ui)
1440 1453 if targetphase:
1441 1454 # retract boundary does not alter parent changesets.
1442 1455 # if a parent has a higher phase, the resulting phase will
1443 1456 # be compliant anyway
1444 1457 #
1445 1458 # if minimal phase was 0 we don't need to retract anything
1446 1459 phases.retractboundary(self, targetphase, [n])
1447 1460 tr.close()
1448 1461 self.updatebranchcache()
1449 1462 return n
1450 1463 finally:
1451 1464 if tr:
1452 1465 tr.release()
1453 1466 lock.release()
1454 1467
1455 1468 def destroyed(self, newheadnodes=None):
1456 1469 '''Inform the repository that nodes have been destroyed.
1457 1470 Intended for use by strip and rollback, so there's a common
1458 1471 place for anything that has to be done after destroying history.
1459 1472
1460 1473 If you know the branchhead cache was up to date before nodes were
1461 1474 removed and you also know the set of candidate new heads that may have
1462 1475 resulted from the destruction, you can set newheadnodes. This will
1463 1476 enable the code to update the branchheads cache, rather than having
1464 1477 future code decide it's invalid and regenerating it from scratch.
1465 1478 '''
1466 1479 # If we have info, newheadnodes, on how to update the branch cache, do
1467 1480 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1468 1481 # will be caught the next time it is read.
1469 1482 if newheadnodes:
1470 1483 tiprev = len(self) - 1
1471 1484 ctxgen = (self[node] for node in newheadnodes
1472 1485 if self.changelog.hasnode(node))
1473 1486 self._updatebranchcache(self._branchcache, ctxgen)
1474 1487 self._writebranchcache(self._branchcache, self.changelog.tip(),
1475 1488 tiprev)
1476 1489
1477 1490 # Ensure the persistent tag cache is updated. Doing it now
1478 1491 # means that the tag cache only has to worry about destroyed
1479 1492 # heads immediately after a strip/rollback. That in turn
1480 1493 # guarantees that "cachetip == currenttip" (comparing both rev
1481 1494 # and node) always means no nodes have been added or destroyed.
1482 1495
1483 1496 # XXX this is suboptimal when qrefresh'ing: we strip the current
1484 1497 # head, refresh the tag cache, then immediately add a new head.
1485 1498 # But I think doing it this way is necessary for the "instant
1486 1499 # tag cache retrieval" case to work.
1487 1500 self.invalidatecaches()
1488 1501
1489 1502 def walk(self, match, node=None):
1490 1503 '''
1491 1504 walk recursively through the directory tree or a given
1492 1505 changeset, finding all files matched by the match
1493 1506 function
1494 1507 '''
1495 1508 return self[node].walk(match)
1496 1509
1497 1510 def status(self, node1='.', node2=None, match=None,
1498 1511 ignored=False, clean=False, unknown=False,
1499 1512 listsubrepos=False):
1500 1513 """return status of files between two nodes or node and working
1501 1514 directory.
1502 1515
1503 1516 If node1 is None, use the first dirstate parent instead.
1504 1517 If node2 is None, compare node1 with working directory.
1505 1518 """
1506 1519
1507 1520 def mfmatches(ctx):
1508 1521 mf = ctx.manifest().copy()
1509 1522 if match.always():
1510 1523 return mf
1511 1524 for fn in mf.keys():
1512 1525 if not match(fn):
1513 1526 del mf[fn]
1514 1527 return mf
1515 1528
1516 1529 if isinstance(node1, context.changectx):
1517 1530 ctx1 = node1
1518 1531 else:
1519 1532 ctx1 = self[node1]
1520 1533 if isinstance(node2, context.changectx):
1521 1534 ctx2 = node2
1522 1535 else:
1523 1536 ctx2 = self[node2]
1524 1537
1525 1538 working = ctx2.rev() is None
1526 1539 parentworking = working and ctx1 == self['.']
1527 1540 match = match or matchmod.always(self.root, self.getcwd())
1528 1541 listignored, listclean, listunknown = ignored, clean, unknown
1529 1542
1530 1543 # load earliest manifest first for caching reasons
1531 1544 if not working and ctx2.rev() < ctx1.rev():
1532 1545 ctx2.manifest()
1533 1546
1534 1547 if not parentworking:
1535 1548 def bad(f, msg):
1536 1549 # 'f' may be a directory pattern from 'match.files()',
1537 1550 # so 'f not in ctx1' is not enough
1538 1551 if f not in ctx1 and f not in ctx1.dirs():
1539 1552 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1540 1553 match.bad = bad
1541 1554
1542 1555 if working: # we need to scan the working dir
1543 1556 subrepos = []
1544 1557 if '.hgsub' in self.dirstate:
1545 1558 subrepos = ctx2.substate.keys()
1546 1559 s = self.dirstate.status(match, subrepos, listignored,
1547 1560 listclean, listunknown)
1548 1561 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1549 1562
1550 1563 # check for any possibly clean files
1551 1564 if parentworking and cmp:
1552 1565 fixup = []
1553 1566 # do a full compare of any files that might have changed
1554 1567 for f in sorted(cmp):
1555 1568 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1556 1569 or ctx1[f].cmp(ctx2[f])):
1557 1570 modified.append(f)
1558 1571 else:
1559 1572 fixup.append(f)
1560 1573
1561 1574 # update dirstate for files that are actually clean
1562 1575 if fixup:
1563 1576 if listclean:
1564 1577 clean += fixup
1565 1578
1566 1579 try:
1567 1580 # updating the dirstate is optional
1568 1581 # so we don't wait on the lock
1569 1582 wlock = self.wlock(False)
1570 1583 try:
1571 1584 for f in fixup:
1572 1585 self.dirstate.normal(f)
1573 1586 finally:
1574 1587 wlock.release()
1575 1588 except error.LockError:
1576 1589 pass
1577 1590
1578 1591 if not parentworking:
1579 1592 mf1 = mfmatches(ctx1)
1580 1593 if working:
1581 1594 # we are comparing working dir against non-parent
1582 1595 # generate a pseudo-manifest for the working dir
1583 1596 mf2 = mfmatches(self['.'])
1584 1597 for f in cmp + modified + added:
1585 1598 mf2[f] = None
1586 1599 mf2.set(f, ctx2.flags(f))
1587 1600 for f in removed:
1588 1601 if f in mf2:
1589 1602 del mf2[f]
1590 1603 else:
1591 1604 # we are comparing two revisions
1592 1605 deleted, unknown, ignored = [], [], []
1593 1606 mf2 = mfmatches(ctx2)
1594 1607
1595 1608 modified, added, clean = [], [], []
1596 1609 withflags = mf1.withflags() | mf2.withflags()
1597 1610 for fn in mf2:
1598 1611 if fn in mf1:
1599 1612 if (fn not in deleted and
1600 1613 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1601 1614 (mf1[fn] != mf2[fn] and
1602 1615 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1603 1616 modified.append(fn)
1604 1617 elif listclean:
1605 1618 clean.append(fn)
1606 1619 del mf1[fn]
1607 1620 elif fn not in deleted:
1608 1621 added.append(fn)
1609 1622 removed = mf1.keys()
1610 1623
1611 1624 if working and modified and not self.dirstate._checklink:
1612 1625 # Symlink placeholders may get non-symlink-like contents
1613 1626 # via user error or dereferencing by NFS or Samba servers,
1614 1627 # so we filter out any placeholders that don't look like a
1615 1628 # symlink
1616 1629 sane = []
1617 1630 for f in modified:
1618 1631 if ctx2.flags(f) == 'l':
1619 1632 d = ctx2[f].data()
1620 1633 if len(d) >= 1024 or '\n' in d or util.binary(d):
1621 1634 self.ui.debug('ignoring suspect symlink placeholder'
1622 1635 ' "%s"\n' % f)
1623 1636 continue
1624 1637 sane.append(f)
1625 1638 modified = sane
1626 1639
1627 1640 r = modified, added, removed, deleted, unknown, ignored, clean
1628 1641
1629 1642 if listsubrepos:
1630 1643 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1631 1644 if working:
1632 1645 rev2 = None
1633 1646 else:
1634 1647 rev2 = ctx2.substate[subpath][1]
1635 1648 try:
1636 1649 submatch = matchmod.narrowmatcher(subpath, match)
1637 1650 s = sub.status(rev2, match=submatch, ignored=listignored,
1638 1651 clean=listclean, unknown=listunknown,
1639 1652 listsubrepos=True)
1640 1653 for rfiles, sfiles in zip(r, s):
1641 1654 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1642 1655 except error.LookupError:
1643 1656 self.ui.status(_("skipping missing subrepository: %s\n")
1644 1657 % subpath)
1645 1658
1646 1659 for l in r:
1647 1660 l.sort()
1648 1661 return r
1649 1662
1650 1663 def heads(self, start=None):
1651 1664 heads = self.changelog.heads(start)
1652 1665 # sort the output in rev descending order
1653 1666 return sorted(heads, key=self.changelog.rev, reverse=True)
1654 1667
1655 1668 def branchheads(self, branch=None, start=None, closed=False):
1656 1669 '''return a (possibly filtered) list of heads for the given branch
1657 1670
1658 1671 Heads are returned in topological order, from newest to oldest.
1659 1672 If branch is None, use the dirstate branch.
1660 1673 If start is not None, return only heads reachable from start.
1661 1674 If closed is True, return heads that are marked as closed as well.
1662 1675 '''
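# Illustrative calls (hypothetical branch and node values):
#   repo.branchheads('default')               -> open heads, newest first
#   repo.branchheads('default', closed=True)  -> include closed heads
#   repo.branchheads('default', start=node)   -> only heads reachable
#                                                from 'node'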
1663 1676 if branch is None:
1664 1677 branch = self[None].branch()
1665 1678 branches = self.branchmap()
1666 1679 if branch not in branches:
1667 1680 return []
1668 1681 # the cache returns heads ordered lowest to highest
1669 1682 bheads = list(reversed(branches[branch]))
1670 1683 if start is not None:
1671 1684 # filter out the heads that cannot be reached from startrev
1672 1685 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1673 1686 bheads = [h for h in bheads if h in fbheads]
1674 1687 if not closed:
1675 1688 bheads = [h for h in bheads if not self[h].closesbranch()]
1676 1689 return bheads
1677 1690
1678 1691 def branches(self, nodes):
1679 1692 if not nodes:
1680 1693 nodes = [self.changelog.tip()]
1681 1694 b = []
1682 1695 for n in nodes:
1683 1696 t = n
1684 1697 while True:
1685 1698 p = self.changelog.parents(n)
1686 1699 if p[1] != nullid or p[0] == nullid:
1687 1700 b.append((t, n, p[0], p[1]))
1688 1701 break
1689 1702 n = p[0]
1690 1703 return b
1691 1704
1692 1705 def between(self, pairs):
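# Sketch of the sampling below: walk first parents from each 'top'
# toward 'bottom', recording the nodes at exponentially growing
# distances (1, 2, 4, 8, ...), so a pair spanning k changesets yields
# roughly log2(k) sample nodes for the legacy discovery protocol.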
1693 1706 r = []
1694 1707
1695 1708 for top, bottom in pairs:
1696 1709 n, l, i = top, [], 0
1697 1710 f = 1
1698 1711
1699 1712 while n != bottom and n != nullid:
1700 1713 p = self.changelog.parents(n)[0]
1701 1714 if i == f:
1702 1715 l.append(n)
1703 1716 f = f * 2
1704 1717 n = p
1705 1718 i += 1
1706 1719
1707 1720 r.append(l)
1708 1721
1709 1722 return r
1710 1723
1711 1724 def pull(self, remote, heads=None, force=False):
1712 1725 # don't open a transaction for nothing or you break future useful
1713 1726 # rollback calls
1714 1727 tr = None
1715 1728 trname = 'pull\n' + util.hidepassword(remote.url())
1716 1729 lock = self.lock()
1717 1730 try:
1718 1731 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1719 1732 force=force)
1720 1733 common, fetch, rheads = tmp
1721 1734 if not fetch:
1722 1735 self.ui.status(_("no changes found\n"))
1723 1736 added = []
1724 1737 result = 0
1725 1738 else:
1726 1739 tr = self.transaction(trname)
1727 1740 if heads is None and list(common) == [nullid]:
1728 1741 self.ui.status(_("requesting all changes\n"))
1729 1742 elif heads is None and remote.capable('changegroupsubset'):
1730 1743 # issue1320, avoid a race if remote changed after discovery
1731 1744 heads = rheads
1732 1745
1733 1746 if remote.capable('getbundle'):
1734 1747 cg = remote.getbundle('pull', common=common,
1735 1748 heads=heads or rheads)
1736 1749 elif heads is None:
1737 1750 cg = remote.changegroup(fetch, 'pull')
1738 1751 elif not remote.capable('changegroupsubset'):
1739 1752 raise util.Abort(_("partial pull cannot be done because "
1740 1753 "other repository doesn't support "
1741 1754 "changegroupsubset."))
1742 1755 else:
1743 1756 cg = remote.changegroupsubset(fetch, heads, 'pull')
1744 1757 clstart = len(self.changelog)
1745 1758 result = self.addchangegroup(cg, 'pull', remote.url())
1746 1759 clend = len(self.changelog)
1747 1760 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1748 1761
1749 1762 # compute target subset
1750 1763 if heads is None:
1751 1764 # We pulled everything possible
1752 1765 # sync on everything common
1753 1766 subset = common + added
1754 1767 else:
1755 1768 # We pulled a specific subset
1756 1769 # sync on this subset
1757 1770 subset = heads
1758 1771
1759 1772 # Get remote phases data from remote
1760 1773 remotephases = remote.listkeys('phases')
1761 1774 publishing = bool(remotephases.get('publishing', False))
1762 1775 if remotephases and not publishing:
1763 1776 # remote is new and non-publishing
1764 1777 pheads, _dr = phases.analyzeremotephases(self, subset,
1765 1778 remotephases)
1766 1779 phases.advanceboundary(self, phases.public, pheads)
1767 1780 phases.advanceboundary(self, phases.draft, subset)
1768 1781 else:
1769 1782 # Remote is old or publishing; all common changesets
1770 1783 # should be seen as public
1771 1784 phases.advanceboundary(self, phases.public, subset)
1772 1785
1773 1786 remoteobs = remote.listkeys('obsolete')
1774 1787 if 'dump' in remoteobs:
1775 1788 if tr is None:
1776 1789 tr = self.transaction(trname)
1777 1790 data = base85.b85decode(remoteobs['dump'])
1778 1791 self.obsstore.mergemarkers(tr, data)
1779 1792 if tr is not None:
1780 1793 tr.close()
1781 1794 finally:
1782 1795 if tr is not None:
1783 1796 tr.release()
1784 1797 lock.release()
1785 1798
1786 1799 return result
1787 1800
1788 1801 def checkpush(self, force, revs):
1789 1802 """Extensions can override this function if additional checks have
1790 1803 to be performed before pushing, or call it if they override push
1791 1804 command.
1792 1805 """
1793 1806 pass
1794 1807
1795 1808 def push(self, remote, force=False, revs=None, newbranch=False):
1796 1809 '''Push outgoing changesets (limited by revs) from the current
1797 1810 repository to remote. Return an integer:
1798 1811 - None means nothing to push
1799 1812 - 0 means HTTP error
1800 1813 - 1 means we pushed and remote head count is unchanged *or*
1801 1814 we have outgoing changesets but refused to push
1802 1815 - other values as described by addchangegroup()
1803 1816 '''
1804 1817 # there are two ways to push to remote repo:
1805 1818 #
1806 1819 # addchangegroup assumes local user can lock remote
1807 1820 # repo (local filesystem, old ssh servers).
1808 1821 #
1809 1822 # unbundle assumes local user cannot lock remote repo (new ssh
1810 1823 # servers, http servers).
1811 1824
1812 1825 if not remote.canpush():
1813 1826 raise util.Abort(_("destination does not support push"))
1814 1827 # get local lock as we might write phase data
1815 1828 locallock = self.lock()
1816 1829 try:
1817 1830 self.checkpush(force, revs)
1818 1831 lock = None
1819 1832 unbundle = remote.capable('unbundle')
1820 1833 if not unbundle:
1821 1834 lock = remote.lock()
1822 1835 try:
1823 1836 # discovery
1824 1837 fci = discovery.findcommonincoming
1825 1838 commoninc = fci(self, remote, force=force)
1826 1839 common, inc, remoteheads = commoninc
1827 1840 fco = discovery.findcommonoutgoing
1828 1841 outgoing = fco(self, remote, onlyheads=revs,
1829 1842 commoninc=commoninc, force=force)
1830 1843
1831 1844
1832 1845 if not outgoing.missing:
1833 1846 # nothing to push
1834 1847 scmutil.nochangesfound(self.ui, outgoing.excluded)
1835 1848 ret = None
1836 1849 else:
1837 1850 # something to push
1838 1851 if not force:
1839 1852 # if self.obsstore is empty --> no obsolete markers,
1840 1853 # so we can skip the iteration
1841 1854 if self.obsstore:
1842 1855 # these messages are defined here because of the 80-char line limit
1843 1856 mso = _("push includes an obsolete changeset: %s!")
1844 1857 msu = _("push includes an unstable changeset: %s!")
1845 1858 # If there is at least one obsolete or unstable
1846 1859 # changeset in missing, then at least one of the
1847 1860 # missing heads will be obsolete or unstable.
1848 1861 # So checking heads only is ok
1849 1862 for node in outgoing.missingheads:
1850 1863 ctx = self[node]
1851 1864 if ctx.obsolete():
1852 1865 raise util.Abort(_(mso) % ctx)
1853 1866 elif ctx.unstable():
1854 1867 raise util.Abort(_(msu) % ctx)
1855 1868 discovery.checkheads(self, remote, outgoing,
1856 1869 remoteheads, newbranch,
1857 1870 bool(inc))
1858 1871
1859 1872 # create a changegroup from local
1860 1873 if revs is None and not outgoing.excluded:
1861 1874 # push everything,
1862 1875 # use the fast path, no race possible on push
1863 1876 cg = self._changegroup(outgoing.missing, 'push')
1864 1877 else:
1865 1878 cg = self.getlocalbundle('push', outgoing)
1866 1879
1867 1880 # apply changegroup to remote
1868 1881 if unbundle:
1869 1882 # local repo finds heads on server, finds out what
1870 1883 # revs it must push. once revs transferred, if server
1871 1884 # finds it has different heads (someone else won
1872 1885 # commit/push race), server aborts.
1873 1886 if force:
1874 1887 remoteheads = ['force']
1875 1888 # ssh: return remote's addchangegroup()
1876 1889 # http: return remote's addchangegroup() or 0 for error
1877 1890 ret = remote.unbundle(cg, remoteheads, 'push')
1878 1891 else:
1879 1892 # we return an integer indicating remote head count
1880 1893 # change
1881 1894 ret = remote.addchangegroup(cg, 'push', self.url())
1882 1895
1883 1896 if ret:
1884 1897 # push succeeded, synchronize the target of the push
1885 1898 cheads = outgoing.missingheads
1886 1899 elif revs is None:
1887 1900 # All-out push failed. Synchronize all common
1888 1901 cheads = outgoing.commonheads
1889 1902 else:
1890 1903 # I want cheads = heads(::missingheads and ::commonheads)
1891 1904 # (missingheads is revs with secret changeset filtered out)
1892 1905 #
1893 1906 # This can be expressed as:
1894 1907 # cheads = ( (missingheads and ::commonheads)
1895 1908 # + (commonheads and ::missingheads)
1896 1909 # )
1897 1910 #
1898 1911 # while trying to push we already computed the following:
1899 1912 # common = (::commonheads)
1900 1913 # missing = ((commonheads::missingheads) - commonheads)
1901 1914 #
1902 1915 # We can pick:
1903 1916 # * missingheads part of common (::commonheads)
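# Worked sketch of the two picks below (illustrative):
#  1) revs already contained in '::commonheads' are kept
#     directly via the 'node in common' test,
#  2) commonheads sitting just below the missing set are
#     recovered by the 'parents(roots(missing))' revset.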
1904 1917 common = set(outgoing.common)
1905 1918 cheads = [node for node in revs if node in common]
1906 1919 # and
1907 1920 # * commonheads parents on missing
1908 1921 revset = self.set('%ln and parents(roots(%ln))',
1909 1922 outgoing.commonheads,
1910 1923 outgoing.missing)
1911 1924 cheads.extend(c.node() for c in revset)
1912 1925 # even when we don't push, exchanging phase data is useful
1913 1926 remotephases = remote.listkeys('phases')
1914 1927 if not remotephases: # old server or public only repo
1915 1928 phases.advanceboundary(self, phases.public, cheads)
1916 1929 # don't push any phase data as there is nothing to push
1917 1930 else:
1918 1931 ana = phases.analyzeremotephases(self, cheads, remotephases)
1919 1932 pheads, droots = ana
1920 1933 ### Apply remote phase on local
1921 1934 if remotephases.get('publishing', False):
1922 1935 phases.advanceboundary(self, phases.public, cheads)
1923 1936 else: # publish = False
1924 1937 phases.advanceboundary(self, phases.public, pheads)
1925 1938 phases.advanceboundary(self, phases.draft, cheads)
1926 1939 ### Apply local phase on remote
1927 1940
1928 1941 # Get the list of all revs that are draft on remote but public here.
1929 1942 # XXX Beware that the revset breaks if droots is not strictly
1930 1943 # XXX roots; we may want to ensure it is, but that is costly
1931 1944 outdated = self.set('heads((%ln::%ln) and public())',
1932 1945 droots, cheads)
1933 1946 for newremotehead in outdated:
1934 1947 r = remote.pushkey('phases',
1935 1948 newremotehead.hex(),
1936 1949 str(phases.draft),
1937 1950 str(phases.public))
1938 1951 if not r:
1939 1952 self.ui.warn(_('updating %s to public failed!\n')
1940 1953 % newremotehead)
1941 1954 if ('obsolete' in remote.listkeys('namespaces')
1942 1955 and self.obsstore):
1943 1956 data = self.listkeys('obsolete')['dump']
1944 1957 r = remote.pushkey('obsolete', 'dump', '', data)
1945 1958 if not r:
1946 1959 self.ui.warn(_('failed to push obsolete markers!\n'))
1947 1960 finally:
1948 1961 if lock is not None:
1949 1962 lock.release()
1950 1963 finally:
1951 1964 locallock.release()
1952 1965
1953 1966 self.ui.debug("checking for updated bookmarks\n")
1954 1967 rb = remote.listkeys('bookmarks')
1955 1968 for k in rb.keys():
1956 1969 if k in self._bookmarks:
1957 1970 nr, nl = rb[k], hex(self._bookmarks[k])
1958 1971 if nr in self:
1959 1972 cr = self[nr]
1960 1973 cl = self[nl]
1961 1974 if cl in cr.descendants():
1962 1975 r = remote.pushkey('bookmarks', k, nr, nl)
1963 1976 if r:
1964 1977 self.ui.status(_("updating bookmark %s\n") % k)
1965 1978 else:
1966 1979 self.ui.warn(_('updating bookmark %s'
1967 1980 ' failed!\n') % k)
1968 1981
1969 1982 return ret
1970 1983
1971 1984 def changegroupinfo(self, nodes, source):
1972 1985 if self.ui.verbose or source == 'bundle':
1973 1986 self.ui.status(_("%d changesets found\n") % len(nodes))
1974 1987 if self.ui.debugflag:
1975 1988 self.ui.debug("list of changesets:\n")
1976 1989 for node in nodes:
1977 1990 self.ui.debug("%s\n" % hex(node))
1978 1991
1979 1992 def changegroupsubset(self, bases, heads, source):
1980 1993 """Compute a changegroup consisting of all the nodes that are
1981 1994 descendants of any of the bases and ancestors of any of the heads.
1982 1995 Return a chunkbuffer object whose read() method will return
1983 1996 successive changegroup chunks.
1984 1997
1985 1998 It is fairly complex as determining which filenodes and which
1986 1999 manifest nodes need to be included for the changeset to be complete
1987 2000 is non-trivial.
1988 2001
1989 2002 Another wrinkle is doing the reverse, figuring out which changeset in
1990 2003 the changegroup a particular filenode or manifestnode belongs to.
1991 2004 """
1992 2005 cl = self.changelog
1993 2006 if not bases:
1994 2007 bases = [nullid]
1995 2008 csets, bases, heads = cl.nodesbetween(bases, heads)
1996 2009 # We assume that all ancestors of bases are known
1997 2010 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1998 2011 return self._changegroupsubset(common, csets, heads, source)
1999 2012
2000 2013 def getlocalbundle(self, source, outgoing):
2001 2014 """Like getbundle, but taking a discovery.outgoing as an argument.
2002 2015
2003 2016 This is only implemented for local repos and reuses potentially
2004 2017 precomputed sets in outgoing."""
2005 2018 if not outgoing.missing:
2006 2019 return None
2007 2020 return self._changegroupsubset(outgoing.common,
2008 2021 outgoing.missing,
2009 2022 outgoing.missingheads,
2010 2023 source)
2011 2024
2012 2025 def getbundle(self, source, heads=None, common=None):
2013 2026 """Like changegroupsubset, but returns the set difference between the
2014 2027 ancestors of heads and the ancestors common.
2015 2028
2016 2029 If heads is None, use the local heads. If common is None, use [nullid].
2017 2030
2018 2031 The nodes in common might not all be known locally due to the way the
2019 2032 current discovery protocol works.
2020 2033 """
2021 2034 cl = self.changelog
2022 2035 if common:
2023 2036 nm = cl.nodemap
2024 2037 common = [n for n in common if n in nm]
2025 2038 else:
2026 2039 common = [nullid]
2027 2040 if not heads:
2028 2041 heads = cl.heads()
2029 2042 return self.getlocalbundle(source,
2030 2043 discovery.outgoing(cl, common, heads))
2031 2044
2032 2045 def _changegroupsubset(self, commonrevs, csets, heads, source):
2033 2046
2034 2047 cl = self.changelog
2035 2048 mf = self.manifest
2036 2049 mfs = {} # needed manifests
2037 2050 fnodes = {} # needed file nodes
2038 2051 changedfiles = set()
2039 2052 fstate = ['', {}]
2040 2053 count = [0, 0]
2041 2054
2042 2055 # can we go through the fast path?
2043 2056 heads.sort()
2044 2057 if heads == sorted(self.heads()):
2045 2058 return self._changegroup(csets, source)
2046 2059
2047 2060 # slow path
2048 2061 self.hook('preoutgoing', throw=True, source=source)
2049 2062 self.changegroupinfo(csets, source)
2050 2063
2051 2064 # filter any nodes that claim to be part of the known set
2052 2065 def prune(revlog, missing):
2053 2066 rr, rl = revlog.rev, revlog.linkrev
2054 2067 return [n for n in missing
2055 2068 if rl(rr(n)) not in commonrevs]
2056 2069
2057 2070 progress = self.ui.progress
2058 2071 _bundling = _('bundling')
2059 2072 _changesets = _('changesets')
2060 2073 _manifests = _('manifests')
2061 2074 _files = _('files')
2062 2075
2063 2076 def lookup(revlog, x):
2064 2077 if revlog == cl:
2065 2078 c = cl.read(x)
2066 2079 changedfiles.update(c[3])
2067 2080 mfs.setdefault(c[0], x)
2068 2081 count[0] += 1
2069 2082 progress(_bundling, count[0],
2070 2083 unit=_changesets, total=count[1])
2071 2084 return x
2072 2085 elif revlog == mf:
2073 2086 clnode = mfs[x]
2074 2087 mdata = mf.readfast(x)
2075 2088 for f, n in mdata.iteritems():
2076 2089 if f in changedfiles:
2077 2090 fnodes[f].setdefault(n, clnode)
2078 2091 count[0] += 1
2079 2092 progress(_bundling, count[0],
2080 2093 unit=_manifests, total=count[1])
2081 2094 return clnode
2082 2095 else:
2083 2096 progress(_bundling, count[0], item=fstate[0],
2084 2097 unit=_files, total=count[1])
2085 2098 return fstate[1][x]
2086 2099
2087 2100 bundler = changegroup.bundle10(lookup)
2088 2101 reorder = self.ui.config('bundle', 'reorder', 'auto')
2089 2102 if reorder == 'auto':
2090 2103 reorder = None
2091 2104 else:
2092 2105 reorder = util.parsebool(reorder)
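# 'bundle.reorder' is effectively tri-state: 'auto' (None) lets each
# revlog decide whether reordering is worthwhile, while an explicit
# boolean forces reordering on or off for the whole bundle.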
2093 2106
2094 2107 def gengroup():
2095 2108 # Create a changenode group generator that will call our functions
2096 2109 # back to lookup the owning changenode and collect information.
2097 2110 count[:] = [0, len(csets)]
2098 2111 for chunk in cl.group(csets, bundler, reorder=reorder):
2099 2112 yield chunk
2100 2113 progress(_bundling, None)
2101 2114
2102 2115 # Create a generator for the manifestnodes that calls our lookup
2103 2116 # and data collection functions back.
2104 2117 for f in changedfiles:
2105 2118 fnodes[f] = {}
2106 2119 count[:] = [0, len(mfs)]
2107 2120 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2108 2121 yield chunk
2109 2122 progress(_bundling, None)
2110 2123
2111 2124 mfs.clear()
2112 2125
2113 2126 # Go through all our files in order sorted by name.
2114 2127 count[:] = [0, len(changedfiles)]
2115 2128 for fname in sorted(changedfiles):
2116 2129 filerevlog = self.file(fname)
2117 2130 if not len(filerevlog):
2118 2131 raise util.Abort(_("empty or missing revlog for %s")
2119 2132 % fname)
2120 2133 fstate[0] = fname
2121 2134 fstate[1] = fnodes.pop(fname, {})
2122 2135
2123 2136 nodelist = prune(filerevlog, fstate[1])
2124 2137 if nodelist:
2125 2138 count[0] += 1
2126 2139 yield bundler.fileheader(fname)
2127 2140 for chunk in filerevlog.group(nodelist, bundler, reorder):
2128 2141 yield chunk
2129 2142
2130 2143 # Signal that no more groups are left.
2131 2144 yield bundler.close()
2132 2145 progress(_bundling, None)
2133 2146
2134 2147 if csets:
2135 2148 self.hook('outgoing', node=hex(csets[0]), source=source)
2136 2149
2137 2150 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2138 2151
2139 2152 def changegroup(self, basenodes, source):
2140 2153 # to avoid a race we use changegroupsubset() (issue1320)
2141 2154 return self.changegroupsubset(basenodes, self.heads(), source)
2142 2155
2143 2156 def _changegroup(self, nodes, source):
2144 2157 """Compute the changegroup of all nodes that we have that a recipient
2145 2158 doesn't. Return a chunkbuffer object whose read() method will return
2146 2159 successive changegroup chunks.
2147 2160
2148 2161 This is much easier than the previous function as we can assume that
2149 2162 the recipient has any changenode we aren't sending them.
2150 2163
2151 2164 nodes is the set of nodes to send"""
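# Fast-path sketch: because the recipient is assumed to have every
# ancestor of 'nodes', lookup() below can map each manifest and file
# revision straight back to its changelog node via linkrev.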
2152 2165
2153 2166 cl = self.changelog
2154 2167 mf = self.manifest
2155 2168 mfs = {}
2156 2169 changedfiles = set()
2157 2170 fstate = ['']
2158 2171 count = [0, 0]
2159 2172
2160 2173 self.hook('preoutgoing', throw=True, source=source)
2161 2174 self.changegroupinfo(nodes, source)
2162 2175
2163 2176 revset = set([cl.rev(n) for n in nodes])
2164 2177
2165 2178 def gennodelst(log):
2166 2179 ln, llr = log.node, log.linkrev
2167 2180 return [ln(r) for r in log if llr(r) in revset]
2168 2181
2169 2182 progress = self.ui.progress
2170 2183 _bundling = _('bundling')
2171 2184 _changesets = _('changesets')
2172 2185 _manifests = _('manifests')
2173 2186 _files = _('files')
2174 2187
2175 2188 def lookup(revlog, x):
2176 2189 if revlog == cl:
2177 2190 c = cl.read(x)
2178 2191 changedfiles.update(c[3])
2179 2192 mfs.setdefault(c[0], x)
2180 2193 count[0] += 1
2181 2194 progress(_bundling, count[0],
2182 2195 unit=_changesets, total=count[1])
2183 2196 return x
2184 2197 elif revlog == mf:
2185 2198 count[0] += 1
2186 2199 progress(_bundling, count[0],
2187 2200 unit=_manifests, total=count[1])
2188 2201 return cl.node(revlog.linkrev(revlog.rev(x)))
2189 2202 else:
2190 2203 progress(_bundling, count[0], item=fstate[0],
2191 2204 total=count[1], unit=_files)
2192 2205 return cl.node(revlog.linkrev(revlog.rev(x)))
2193 2206
2194 2207 bundler = changegroup.bundle10(lookup)
2195 2208 reorder = self.ui.config('bundle', 'reorder', 'auto')
2196 2209 if reorder == 'auto':
2197 2210 reorder = None
2198 2211 else:
2199 2212 reorder = util.parsebool(reorder)
2200 2213
2201 2214 def gengroup():
2202 2215 '''yield a sequence of changegroup chunks (strings)'''
2203 2216 # construct a list of all changed files
2204 2217
2205 2218 count[:] = [0, len(nodes)]
2206 2219 for chunk in cl.group(nodes, bundler, reorder=reorder):
2207 2220 yield chunk
2208 2221 progress(_bundling, None)
2209 2222
2210 2223 count[:] = [0, len(mfs)]
2211 2224 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2212 2225 yield chunk
2213 2226 progress(_bundling, None)
2214 2227
2215 2228 count[:] = [0, len(changedfiles)]
2216 2229 for fname in sorted(changedfiles):
2217 2230 filerevlog = self.file(fname)
2218 2231 if not len(filerevlog):
2219 2232 raise util.Abort(_("empty or missing revlog for %s")
2220 2233 % fname)
2221 2234 fstate[0] = fname
2222 2235 nodelist = gennodelst(filerevlog)
2223 2236 if nodelist:
2224 2237 count[0] += 1
2225 2238 yield bundler.fileheader(fname)
2226 2239 for chunk in filerevlog.group(nodelist, bundler, reorder):
2227 2240 yield chunk
2228 2241 yield bundler.close()
2229 2242 progress(_bundling, None)
2230 2243
2231 2244 if nodes:
2232 2245 self.hook('outgoing', node=hex(nodes[0]), source=source)
2233 2246
2234 2247 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2235 2248
2236 2249 def addchangegroup(self, source, srctype, url, emptyok=False):
2237 2250 """Add the changegroup returned by source.read() to this repo.
2238 2251 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2239 2252 the URL of the repo where this changegroup is coming from.
2240 2253
2241 2254 Return an integer summarizing the change to this repo:
2242 2255 - nothing changed or no source: 0
2243 2256 - more heads than before: 1+added heads (2..n)
2244 2257 - fewer heads than before: -1-removed heads (-2..-n)
2245 2258 - number of heads stays the same: 1
2246 2259 """
2247 2260 def csmap(x):
2248 2261 self.ui.debug("add changeset %s\n" % short(x))
2249 2262 return len(cl)
2250 2263
2251 2264 def revmap(x):
2252 2265 return cl.rev(x)
2253 2266
2254 2267 if not source:
2255 2268 return 0
2256 2269
2257 2270 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2258 2271
2259 2272 changesets = files = revisions = 0
2260 2273 efiles = set()
2261 2274
2262 2275 # write changelog data to temp files so concurrent readers will not see
2263 2276 # an inconsistent view
2264 2277 cl = self.changelog
2265 2278 cl.delayupdate()
2266 2279 oldheads = cl.heads()
2267 2280
2268 2281 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2269 2282 try:
2270 2283 trp = weakref.proxy(tr)
2271 2284 # pull off the changeset group
2272 2285 self.ui.status(_("adding changesets\n"))
2273 2286 clstart = len(cl)
2274 2287 class prog(object):
2275 2288 step = _('changesets')
2276 2289 count = 1
2277 2290 ui = self.ui
2278 2291 total = None
2279 2292 def __call__(self):
2280 2293 self.ui.progress(self.step, self.count, unit=_('chunks'),
2281 2294 total=self.total)
2282 2295 self.count += 1
2283 2296 pr = prog()
2284 2297 source.callback = pr
2285 2298
2286 2299 source.changelogheader()
2287 2300 srccontent = cl.addgroup(source, csmap, trp)
2288 2301 if not (srccontent or emptyok):
2289 2302 raise util.Abort(_("received changelog group is empty"))
2290 2303 clend = len(cl)
2291 2304 changesets = clend - clstart
2292 2305 for c in xrange(clstart, clend):
2293 2306 efiles.update(self[c].files())
2294 2307 efiles = len(efiles)
2295 2308 self.ui.progress(_('changesets'), None)
2296 2309
2297 2310 # pull off the manifest group
2298 2311 self.ui.status(_("adding manifests\n"))
2299 2312 pr.step = _('manifests')
2300 2313 pr.count = 1
2301 2314 pr.total = changesets # manifests <= changesets
2302 2315 # no need to check for empty manifest group here:
2303 2316 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2304 2317 # no new manifest will be created and the manifest group will
2305 2318 # be empty during the pull
2306 2319 source.manifestheader()
2307 2320 self.manifest.addgroup(source, revmap, trp)
2308 2321 self.ui.progress(_('manifests'), None)
2309 2322
2310 2323 needfiles = {}
2311 2324 if self.ui.configbool('server', 'validate', default=False):
2312 2325 # validate incoming csets have their manifests
2313 2326 for cset in xrange(clstart, clend):
2314 2327 mfest = self.changelog.read(self.changelog.node(cset))[0]
2315 2328 mfest = self.manifest.readdelta(mfest)
2316 2329 # store file nodes we must see
2317 2330 for f, n in mfest.iteritems():
2318 2331 needfiles.setdefault(f, set()).add(n)
2319 2332
2320 2333 # process the files
2321 2334 self.ui.status(_("adding file changes\n"))
2322 2335 pr.step = _('files')
2323 2336 pr.count = 1
2324 2337 pr.total = efiles
2325 2338 source.callback = None
2326 2339
2327 2340 while True:
2328 2341 chunkdata = source.filelogheader()
2329 2342 if not chunkdata:
2330 2343 break
2331 2344 f = chunkdata["filename"]
2332 2345 self.ui.debug("adding %s revisions\n" % f)
2333 2346 pr()
2334 2347 fl = self.file(f)
2335 2348 o = len(fl)
2336 2349 if not fl.addgroup(source, revmap, trp):
2337 2350 raise util.Abort(_("received file revlog group is empty"))
2338 2351 revisions += len(fl) - o
2339 2352 files += 1
2340 2353 if f in needfiles:
2341 2354 needs = needfiles[f]
2342 2355 for new in xrange(o, len(fl)):
2343 2356 n = fl.node(new)
2344 2357 if n in needs:
2345 2358 needs.remove(n)
2346 2359 if not needs:
2347 2360 del needfiles[f]
2348 2361 self.ui.progress(_('files'), None)
2349 2362
2350 2363 for f, needs in needfiles.iteritems():
2351 2364 fl = self.file(f)
2352 2365 for n in needs:
2353 2366 try:
2354 2367 fl.rev(n)
2355 2368 except error.LookupError:
2356 2369 raise util.Abort(
2357 2370 _('missing file data for %s:%s - run hg verify') %
2358 2371 (f, hex(n)))
2359 2372
2360 2373 dh = 0
2361 2374 if oldheads:
2362 2375 heads = cl.heads()
2363 2376 dh = len(heads) - len(oldheads)
2364 2377 for h in heads:
2365 2378 if h not in oldheads and self[h].closesbranch():
2366 2379 dh -= 1
2367 2380 htext = ""
2368 2381 if dh:
2369 2382 htext = _(" (%+d heads)") % dh
2370 2383
2371 2384 self.ui.status(_("added %d changesets"
2372 2385 " with %d changes to %d files%s\n")
2373 2386 % (changesets, revisions, files, htext))
2374 2387
2375 2388 if changesets > 0:
2376 2389 p = lambda: cl.writepending() and self.root or ""
2377 2390 self.hook('pretxnchangegroup', throw=True,
2378 2391 node=hex(cl.node(clstart)), source=srctype,
2379 2392 url=url, pending=p)
2380 2393
2381 2394 added = [cl.node(r) for r in xrange(clstart, clend)]
2382 2395 publishing = self.ui.configbool('phases', 'publish', True)
2383 2396 if srctype == 'push':
2384 2397 # Old servers can not push the boundary themselves.
2385 2398 # New servers won't push the boundary if the changeset already
2386 2399 # existed locally as secret
2387 2400 #
2388 2401 # We should not use added here but the list of all changes in
2389 2402 # the bundle
2390 2403 if publishing:
2391 2404 phases.advanceboundary(self, phases.public, srccontent)
2392 2405 else:
2393 2406 phases.advanceboundary(self, phases.draft, srccontent)
2394 2407 phases.retractboundary(self, phases.draft, added)
2395 2408 elif srctype != 'strip':
2396 2409 # publishing only alters behavior during push
2397 2410 #
2398 2411 # strip should not touch boundary at all
2399 2412 phases.retractboundary(self, phases.draft, added)
2400 2413
2401 2414 # make changelog see real files again
2402 2415 cl.finalize(trp)
2403 2416
2404 2417 tr.close()
2405 2418
2406 2419 if changesets > 0:
2407 2420 def runhooks():
2408 2421 # forcefully update the on-disk branch cache
2409 2422 self.ui.debug("updating the branch cache\n")
2410 2423 self.updatebranchcache()
2411 2424 self.hook("changegroup", node=hex(cl.node(clstart)),
2412 2425 source=srctype, url=url)
2413 2426
2414 2427 for n in added:
2415 2428 self.hook("incoming", node=hex(n), source=srctype,
2416 2429 url=url)
2417 2430 self._afterlock(runhooks)
2418 2431
2419 2432 finally:
2420 2433 tr.release()
2421 2434 # never return 0 here:
2422 2435 if dh < 0:
2423 2436 return dh - 1
2424 2437 else:
2425 2438 return dh + 1
2426 2439
2427 2440 def stream_in(self, remote, requirements):
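# Stream protocol sketch, as consumed below: one status line
# ('0' ok, '1' forbidden, '2' remote lock failed), then a line with
# '<total_files> <total_bytes>', then for each file a '<name>\0<size>'
# header followed by exactly <size> raw bytes.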
2428 2441 lock = self.lock()
2429 2442 try:
2430 2443 fp = remote.stream_out()
2431 2444 l = fp.readline()
2432 2445 try:
2433 2446 resp = int(l)
2434 2447 except ValueError:
2435 2448 raise error.ResponseError(
2436 2449 _('unexpected response from remote server:'), l)
2437 2450 if resp == 1:
2438 2451 raise util.Abort(_('operation forbidden by server'))
2439 2452 elif resp == 2:
2440 2453 raise util.Abort(_('locking the remote repository failed'))
2441 2454 elif resp != 0:
2442 2455 raise util.Abort(_('the server sent an unknown error code'))
2443 2456 self.ui.status(_('streaming all changes\n'))
2444 2457 l = fp.readline()
2445 2458 try:
2446 2459 total_files, total_bytes = map(int, l.split(' ', 1))
2447 2460 except (ValueError, TypeError):
2448 2461 raise error.ResponseError(
2449 2462 _('unexpected response from remote server:'), l)
2450 2463 self.ui.status(_('%d files to transfer, %s of data\n') %
2451 2464 (total_files, util.bytecount(total_bytes)))
2452 2465 handled_bytes = 0
2453 2466 self.ui.progress(_('clone'), 0, total=total_bytes)
2454 2467 start = time.time()
2455 2468 for i in xrange(total_files):
2456 2469 # XXX doesn't support '\n' or '\r' in filenames
2457 2470 l = fp.readline()
2458 2471 try:
2459 2472 name, size = l.split('\0', 1)
2460 2473 size = int(size)
2461 2474 except (ValueError, TypeError):
2462 2475 raise error.ResponseError(
2463 2476 _('unexpected response from remote server:'), l)
2464 2477 if self.ui.debugflag:
2465 2478 self.ui.debug('adding %s (%s)\n' %
2466 2479 (name, util.bytecount(size)))
2467 2480 # for backwards compat, name was partially encoded
2468 2481 ofp = self.sopener(store.decodedir(name), 'w')
2469 2482 for chunk in util.filechunkiter(fp, limit=size):
2470 2483 handled_bytes += len(chunk)
2471 2484 self.ui.progress(_('clone'), handled_bytes,
2472 2485 total=total_bytes)
2473 2486 ofp.write(chunk)
2474 2487 ofp.close()
2475 2488 elapsed = time.time() - start
2476 2489 if elapsed <= 0:
2477 2490 elapsed = 0.001
2478 2491 self.ui.progress(_('clone'), None)
2479 2492 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2480 2493 (util.bytecount(total_bytes), elapsed,
2481 2494 util.bytecount(total_bytes / elapsed)))
2482 2495
2483 2496 # new requirements = old non-format requirements +
2484 2497 # new format-related requirements
2485 2498 # from the streamed-in repository
2486 2499 requirements.update(set(self.requirements) - self.supportedformats)
2487 2500 self._applyrequirements(requirements)
2488 2501 self._writerequirements()
2489 2502
2490 2503 self.invalidate()
2491 2504 return len(self.heads()) + 1
2492 2505 finally:
2493 2506 lock.release()
2494 2507
2495 2508 def clone(self, remote, heads=[], stream=False):
2496 2509 '''clone remote repository.
2497 2510
2498 2511 keyword arguments:
2499 2512 heads: list of revs to clone (forces use of pull)
2500 2513 stream: use streaming clone if possible'''
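# Decision sketch: a streaming clone is attempted only when 'stream'
# is requested (or the server advertises 'stream-preferred') and no
# specific heads were asked for; otherwise we fall back to pull().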
2501 2514
2502 2515 # now, all clients that can request uncompressed clones can
2503 2516 # read repo formats supported by all servers that can serve
2504 2517 # them.
2505 2518
2506 2519 # if revlog format changes, client will have to check version
2507 2520 # and format flags on "stream" capability, and use
2508 2521 # uncompressed only if compatible.
2509 2522
2510 2523 if not stream:
2511 2524 # if the server explicitly prefers to stream (for fast LANs)
2512 2525 stream = remote.capable('stream-preferred')
2513 2526
2514 2527 if stream and not heads:
2515 2528 # 'stream' means remote revlog format is revlogv1 only
2516 2529 if remote.capable('stream'):
2517 2530 return self.stream_in(remote, set(('revlogv1',)))
2518 2531 # otherwise, 'streamreqs' contains the remote revlog format
2519 2532 streamreqs = remote.capable('streamreqs')
2520 2533 if streamreqs:
2521 2534 streamreqs = set(streamreqs.split(','))
2522 2535 # if we support it, stream in and adjust our requirements
2523 2536 if not streamreqs - self.supportedformats:
2524 2537 return self.stream_in(remote, streamreqs)
2525 2538 return self.pull(remote, heads)
2526 2539
2527 2540 def pushkey(self, namespace, key, old, new):
2528 2541 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2529 2542 old=old, new=new)
2530 2543 ret = pushkey.push(self, namespace, key, old, new)
2531 2544 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2532 2545 ret=ret)
2533 2546 return ret
2534 2547
2535 2548 def listkeys(self, namespace):
2536 2549 self.hook('prelistkeys', throw=True, namespace=namespace)
2537 2550 values = pushkey.list(self, namespace)
2538 2551 self.hook('listkeys', namespace=namespace, values=values)
2539 2552 return values
2540 2553
2541 2554 def debugwireargs(self, one, two, three=None, four=None, five=None):
2542 2555 '''used to test argument passing over the wire'''
2543 2556 return "%s %s %s %s %s" % (one, two, three, four, five)
2544 2557
2545 2558 def savecommitmessage(self, text):
2546 2559 fp = self.opener('last-message.txt', 'wb')
2547 2560 try:
2548 2561 fp.write(text)
2549 2562 finally:
2550 2563 fp.close()
2551 2564 return self.pathto(fp.name[len(self.root)+1:])
2552 2565
2553 2566 # used to avoid circular references so destructors work
2554 2567 def aftertrans(files):
2555 2568 renamefiles = [tuple(t) for t in files]
2556 2569 def a():
2557 2570 for src, dest in renamefiles:
2558 2571 try:
2559 2572 util.rename(src, dest)
2560 2573 except OSError: # journal file does not yet exist
2561 2574 pass
2562 2575 return a
2563 2576
2564 2577 def undoname(fn):
2565 2578 base, name = os.path.split(fn)
2566 2579 assert name.startswith('journal')
2567 2580 return os.path.join(base, name.replace('journal', 'undo', 1))
2568 2581
2569 2582 def instance(ui, path, create):
2570 2583 return localrepository(ui, util.urllocalpath(path), create)
2571 2584
2572 2585 def islocal(path):
2573 2586 return True
@@ -1,388 +1,359
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > [phases]
5 5 > # public changeset are not obsolete
6 6 > publish=false
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
14 14 > hg id --debug -ir "desc('$1')"
15 15 > }
16 16
17 17
18 18 $ hg init tmpa
19 19 $ cd tmpa
20 20
21 21 Killing a single changeset without replacement
22 22
23 23 $ mkcommit kill_me
24 24 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
25 25 $ hg debugobsolete
26 26 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
27 27 $ cd ..
28 28
29 29 Killing a single changeset with replacement
30 30
31 31 $ hg init tmpb
32 32 $ cd tmpb
33 33 $ mkcommit a
34 34 $ mkcommit b
35 35 $ mkcommit original_c
36 36 $ hg up "desc('b')"
37 37 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
38 38 $ mkcommit new_c
39 39 created new head
40 40 $ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
41 41 $ hg debugobsolete
42 42 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
43 43
44 44 do it again (it reads the obsstore before adding a new changeset)
45 45
46 46 $ hg up '.^'
47 47 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
48 48 $ mkcommit new_2_c
49 49 created new head
50 50 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
51 51 $ hg debugobsolete
52 52 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
53 53 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
54 54
55 55 Register two markers with a missing node
56 56
57 57 $ hg up '.^'
58 58 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 59 $ mkcommit new_3_c
60 60 created new head
61 61 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
62 62 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
63 63 $ hg debugobsolete
64 64 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
65 65 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
66 66 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
67 67 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
68 68
69 69 Check that graphlog detects that a changeset is obsolete:
70 70
71 71 $ hg glog
72 72 @ changeset: 5:5601fb93a350
73 73 | tag: tip
74 74 | parent: 1:7c3bad9141dc
75 75 | user: test
76 76 | date: Thu Jan 01 00:00:00 1970 +0000
77 77 | summary: add new_3_c
78 78 |
79 | x changeset: 4:ca819180edb9
80 |/ parent: 1:7c3bad9141dc
81 | user: test
82 | date: Thu Jan 01 00:00:00 1970 +0000
83 | summary: add new_2_c
84 |
85 | x changeset: 3:cdbce2fbb163
86 |/ parent: 1:7c3bad9141dc
87 | user: test
88 | date: Thu Jan 01 00:00:00 1970 +0000
89 | summary: add new_c
90 |
91 | x changeset: 2:245bde4270cd
92 |/ user: test
93 | date: Thu Jan 01 00:00:00 1970 +0000
94 | summary: add original_c
95 |
96 79 o changeset: 1:7c3bad9141dc
97 80 | user: test
98 81 | date: Thu Jan 01 00:00:00 1970 +0000
99 82 | summary: add b
100 83 |
101 84 o changeset: 0:1f0dee641bb7
102 85 user: test
103 86 date: Thu Jan 01 00:00:00 1970 +0000
104 87 summary: add a
105 88
106 89
107 90 Check that public changesets are not accounted as obsolete:
108 91
109 92 $ hg phase --public 2
110 93 $ hg --config 'extensions.graphlog=' glog
111 94 @ changeset: 5:5601fb93a350
112 95 | tag: tip
113 96 | parent: 1:7c3bad9141dc
114 97 | user: test
115 98 | date: Thu Jan 01 00:00:00 1970 +0000
116 99 | summary: add new_3_c
117 100 |
118 | x changeset: 4:ca819180edb9
119 |/ parent: 1:7c3bad9141dc
120 | user: test
121 | date: Thu Jan 01 00:00:00 1970 +0000
122 | summary: add new_2_c
123 |
124 | x changeset: 3:cdbce2fbb163
125 |/ parent: 1:7c3bad9141dc
126 | user: test
127 | date: Thu Jan 01 00:00:00 1970 +0000
128 | summary: add new_c
129 |
130 101 | o changeset: 2:245bde4270cd
131 102 |/ user: test
132 103 | date: Thu Jan 01 00:00:00 1970 +0000
133 104 | summary: add original_c
134 105 |
135 106 o changeset: 1:7c3bad9141dc
136 107 | user: test
137 108 | date: Thu Jan 01 00:00:00 1970 +0000
138 109 | summary: add b
139 110 |
140 111 o changeset: 0:1f0dee641bb7
141 112 user: test
142 113 date: Thu Jan 01 00:00:00 1970 +0000
143 114 summary: add a
144 115
145 116
146 117 $ cd ..
147 118
148 119 Exchange Test
149 120 ============================
150 121
151 122 Destination repo does not have any data
152 123 ---------------------------------------
153 124
154 125 Try to pull markers
155 126 (extinct changesets are excluded but markers are pushed)
156 127
157 128 $ hg init tmpc
158 129 $ cd tmpc
159 130 $ hg pull ../tmpb
160 131 pulling from ../tmpb
161 132 requesting all changes
162 133 adding changesets
163 134 adding manifests
164 135 adding file changes
165 136 added 4 changesets with 4 changes to 4 files (+1 heads)
166 137 (run 'hg heads' to see heads, 'hg merge' to merge)
167 138 $ hg debugobsolete
168 139 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
169 140 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
170 141 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
171 142 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
172 143
173 144 Rollback/Transaction support
174 145
175 146 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
176 147 $ hg debugobsolete
177 148 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
178 149 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
179 150 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
180 151 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
181 152 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
182 153 $ hg rollback -n
183 154 repository tip rolled back to revision 3 (undo debugobsolete)
184 155 $ hg rollback
185 156 repository tip rolled back to revision 3 (undo debugobsolete)
186 157 $ hg debugobsolete
187 158 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
188 159 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
189 160 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
190 161 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
191 162
192 163 $ cd ..
193 164
194 165 Try to push markers
195 166
196 167 $ hg init tmpd
197 168 $ hg -R tmpb push tmpd
198 169 pushing to tmpd
199 170 searching for changes
200 171 adding changesets
201 172 adding manifests
202 173 adding file changes
203 174 added 4 changesets with 4 changes to 4 files (+1 heads)
204 175 $ hg -R tmpd debugobsolete
205 176 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
206 177 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
207 178 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
208 179 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
209 180
210 181
211 182 Destination repo has existing data
212 183 ---------------------------------------
213 184
214 185 On pull
215 186
216 187 $ hg init tmpe
217 188 $ cd tmpe
218 189 $ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
219 190 $ hg pull ../tmpb
220 191 pulling from ../tmpb
221 192 requesting all changes
222 193 adding changesets
223 194 adding manifests
224 195 adding file changes
225 196 added 4 changesets with 4 changes to 4 files (+1 heads)
226 197 (run 'hg heads' to see heads, 'hg merge' to merge)
227 198 $ hg debugobsolete
228 199 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
229 200 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
230 201 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
231 202 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
232 203 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
233 204
234 205
235 206 On push
236 207
237 208 $ hg push ../tmpc
238 209 pushing to ../tmpc
239 210 searching for changes
240 211 no changes found
241 212 [1]
242 213 $ hg -R ../tmpc debugobsolete
243 214 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
244 215 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
245 216 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
246 217 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
247 218 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
248 219
249 220 Detect outgoing obsolete and unstable
250 221 ---------------------------------------
251 222
252 223
253 224 $ hg glog
254 225 o changeset: 3:5601fb93a350
255 226 | tag: tip
256 227 | parent: 1:7c3bad9141dc
257 228 | user: test
258 229 | date: Thu Jan 01 00:00:00 1970 +0000
259 230 | summary: add new_3_c
260 231 |
261 232 | o changeset: 2:245bde4270cd
262 233 |/ user: test
263 234 | date: Thu Jan 01 00:00:00 1970 +0000
264 235 | summary: add original_c
265 236 |
266 237 o changeset: 1:7c3bad9141dc
267 238 | user: test
268 239 | date: Thu Jan 01 00:00:00 1970 +0000
269 240 | summary: add b
270 241 |
271 242 o changeset: 0:1f0dee641bb7
272 243 user: test
273 244 date: Thu Jan 01 00:00:00 1970 +0000
274 245 summary: add a
275 246
276 247 $ hg up 'desc("new_3_c")'
277 248 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
278 249 $ mkcommit original_d
279 250 $ mkcommit original_e
280 251 $ hg debugobsolete `getid original_d` -d '0 0'
281 252 $ hg log -r 'obsolete()'
282 253 changeset: 4:7c694bff0650
283 254 user: test
284 255 date: Thu Jan 01 00:00:00 1970 +0000
285 256 summary: add original_d
286 257
287 258 $ hg glog -r '::unstable()'
288 259 @ changeset: 5:6e572121998e
289 260 | tag: tip
290 261 | user: test
291 262 | date: Thu Jan 01 00:00:00 1970 +0000
292 263 | summary: add original_e
293 264 |
294 265 x changeset: 4:7c694bff0650
295 266 | user: test
296 267 | date: Thu Jan 01 00:00:00 1970 +0000
297 268 | summary: add original_d
298 269 |
299 270 o changeset: 3:5601fb93a350
300 271 | parent: 1:7c3bad9141dc
301 272 | user: test
302 273 | date: Thu Jan 01 00:00:00 1970 +0000
303 274 | summary: add new_3_c
304 275 |
305 276 o changeset: 1:7c3bad9141dc
306 277 | user: test
307 278 | date: Thu Jan 01 00:00:00 1970 +0000
308 279 | summary: add b
309 280 |
310 281 o changeset: 0:1f0dee641bb7
311 282 user: test
312 283 date: Thu Jan 01 00:00:00 1970 +0000
313 284 summary: add a
314 285
315 286
316 287 refuse to push obsolete changeset
317 288
318 289 $ hg push ../tmpc/ -r 'desc("original_d")'
319 290 pushing to ../tmpc/
320 291 searching for changes
321 292 abort: push includes an obsolete changeset: 7c694bff0650!
322 293 [255]
323 294
324 295 refuse to push unstable changeset
325 296
326 297 $ hg push ../tmpc/
327 298 pushing to ../tmpc/
328 299 searching for changes
329 300 abort: push includes an unstable changeset: 6e572121998e!
330 301 [255]
331 302
332 303 Test that extinct changesets are properly detected
333 304
334 305 $ hg log -r 'extinct()'
335 306
336 307 Don't try to push extinct changesets
337 308
338 309 $ hg init ../tmpf
339 310 $ hg out ../tmpf
340 311 comparing with ../tmpf
341 312 searching for changes
342 313 changeset: 0:1f0dee641bb7
343 314 user: test
344 315 date: Thu Jan 01 00:00:00 1970 +0000
345 316 summary: add a
346 317
347 318 changeset: 1:7c3bad9141dc
348 319 user: test
349 320 date: Thu Jan 01 00:00:00 1970 +0000
350 321 summary: add b
351 322
352 323 changeset: 2:245bde4270cd
353 324 user: test
354 325 date: Thu Jan 01 00:00:00 1970 +0000
355 326 summary: add original_c
356 327
357 328 changeset: 3:5601fb93a350
358 329 parent: 1:7c3bad9141dc
359 330 user: test
360 331 date: Thu Jan 01 00:00:00 1970 +0000
361 332 summary: add new_3_c
362 333
363 334 changeset: 4:7c694bff0650
364 335 user: test
365 336 date: Thu Jan 01 00:00:00 1970 +0000
366 337 summary: add original_d
367 338
368 339 changeset: 5:6e572121998e
369 340 tag: tip
370 341 user: test
371 342 date: Thu Jan 01 00:00:00 1970 +0000
372 343 summary: add original_e
373 344
374 345 $ hg push ../tmpf -f # -f because we push unstable too
375 346 pushing to ../tmpf
376 347 searching for changes
377 348 adding changesets
378 349 adding manifests
379 350 adding file changes
380 351 added 6 changesets with 6 changes to 6 files (+1 heads)
381 352
382 353 no warning displayed
383 354
384 355 $ hg push ../tmpf
385 356 pushing to ../tmpf
386 357 searching for changes
387 358 no changes found
388 359 [1]