push: refuse to push bumped changeset...
Pierre-Yves David
r17834:743d04dd default
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from node import bin, hex, nullid, nullrev, short
from i18n import _
import peer, changegroup, subrepo, discovery, pushkey, obsolete
import changelog, dirstate, filelog, manifest, context, bookmarks, phases
import lock, transaction, store, encoding, base85
import scmutil, util, extensions, hook, error, revset
import match as matchmod
import merge as mergemod
import tags as tagsmod
from lock import release
import weakref, errno, os, time, inspect
propertycache = util.propertycache
filecache = scmutil.filecache

class storecache(filecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        return obj.sjoin(fname)

MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))

class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=MODERNCAPS):
        peer.peerrepository.__init__(self)
        self._repo = repo
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return discovery.visiblebranchmap(self._repo)

    def heads(self):
        return discovery.visibleheads(self._repo)

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None):
        return self._repo.getbundle(source, heads=heads, common=common)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return self._repo.addchangegroup(cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=LEGACYCAPS)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return self._repo.changegroup(basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return self._repo.changegroupsubset(bases, heads, source)

class localrepository(object):

    supportedformats = set(('revlogv1', 'generaldelta'))
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']

    def _baserequirements(self, create):
        return self.requirements[:]

    def __init__(self, baseui, path=None, create=False):
        self.wvfs = scmutil.vfs(path, expand=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not self.vfs.isdir():
            if create:
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = set()

        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcache = None
        self._branchcachetip = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

    def close(self):
        pass

    def _restrictcapabilities(self, caps):
        return caps

    def _applyrequirements(self, requirements):
        self.requirements = requirements
        self.sopener.options = dict((r, 1) for r in requirements
                                    if r in self.openerreqs)

    def _writerequirements(self):
        reqfile = self.opener("requires", "w")
        for r in self.requirements:
            reqfile.write("%s\n" % r)
        reqfile.close()

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

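    # Example of the delegation above (path names are illustrative): with
    # 'sub' tracked in the working copy's .hgsub, a call like
    # _checknested(self.root + '/sub/lib') finds the 'sub' prefix in
    # ctx.substate and asks that subrepo whether 'lib' nests legally.
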
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    @filecache('bookmarks')
    def _bookmarks(self):
        return bookmarks.read(self)

    @filecache('bookmarks.current')
    def _bookmarkcurrent(self):
        return bookmarks.readcurrent(self)

    def _writebookmarks(self, marks):
        bookmarks.write(self)

    def bookmarkheads(self, bookmark):
        name = bookmark.split('@', 1)[0]
        heads = []
        for mark, n in self._bookmarks.iteritems():
            if mark.split('@', 1)[0] == name:
                heads.append(n)
        return heads

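    # Note on the split('@', 1) in bookmarkheads() above: divergent
    # bookmarks share a base name plus an '@suffix' (e.g. 'foo' and
    # 'foo@remote'), so bookmarkheads('foo') returns the nodes of both.
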
    @storecache('phaseroots')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        store = obsolete.obsstore(self.sopener)
        if store and not obsolete._enabled:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @propertycache
    def hiddenrevs(self):
        """hiddenrevs: revs that should be hidden by commands and tools

        This set is carried on the repo to ease initialization and lazy
        loading; it'll probably move back to changelog for efficiency and
        consistency reasons.

        Note that hiddenrevs will need invalidation when
        - a new changeset is added (possibly unstable above extinct)
        - a new obsolete marker is added (possibly a new extinct changeset)

        hidden changesets cannot have non-hidden descendants
        """
        hidden = set()
        if self.obsstore:
            ### hide extinct changesets that are not accessible by any means
            hiddenquery = 'extinct() - ::(. + bookmark())'
            hidden.update(self.revs(hiddenquery))
        return hidden

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.sopener)

    @filecache('dirstate')
    def dirstate(self):
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    def __len__(self):
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return [r for r in m(self, list(self))]

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

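    # Minimal caller-side sketch for revs()/set() above (the revset is
    # illustrative; formatspec quotes interpolated arguments, so callers
    # need not escape user-supplied values):
    #
    #   for ctx in repo.set('branch(%s) and not obsolete()', 'default'):
    #       ...work with the changectx...
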
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    def _tag(self, names, node, message, local, user, date, extra={}):
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError, e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be
        a string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in a non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)

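    # Usage sketch for tag() above (values illustrative; repo['.'].node()
    # is simply the working directory parent):
    #
    #   repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False,
    #            None, None)
    #
    # With local=False this rewrites .hgtags and commits; with local=True
    # it only appends to .hg/localtags and nothing is committed.
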
    @propertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                r = self.changelog.rev(n)
                l.append((r, t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in self.changelog.revs(lrev + 1, tiprev))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def updatebranchcache(self):
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            return

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just the branch tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        if self.changelog.filteredrevs:
            # some changesets are excluded; we can't use the cache
            branchmap = {}
            self._updatebranchcache(branchmap, (self[r] for r in self))
            return branchmap
        else:
            self.updatebranchcache()
            return self._branchcache


    def _branchtip(self, heads):
        '''return the tipmost branch head in heads'''
        tip = heads[-1]
        for h in reversed(heads):
            if not self[h].closesbranch():
                tip = h
                break
        return tip

    def branchtip(self, branch):
        '''return the tip node for a given branch'''
        if branch not in self.branchmap():
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
        return self._branchtip(self.branchmap()[branch])

    def branchtags(self):
        '''return a dict where branch names map to the tipmost head of
        the branch, open heads come before closed'''
        bt = {}
        for bn, heads in self.branchmap().iteritems():
            bt[bn] = self._branchtip(heads)
        return bt

    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                if node not in self:
                    raise ValueError('invalidating branch cache because node '
                                     '%s does not exist' % node)
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.close()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, ctxgen):
        """Given a branchhead cache, partial, that may have extra nodes or be
        missing heads, and a generator of nodes that are at least a superset
        of the missing heads, this function updates partial to be correct.
        """
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            # Remove candidate heads that no longer are in the repo (e.g., as
            # the result of a strip that just happened). Avoid using 'node in
            # self' here because that dives down into branchcache code somewhat
            # recursively.
            bheadrevs = [self.changelog.rev(node) for node in bheads
                         if self.changelog.hasnode(node)]
            newheadrevs = [self.changelog.rev(node) for node in newnodes
                           if self.changelog.hasnode(node)]
            ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
            # Remove duplicates - nodes that are in newheadrevs and are already
            # in bheadrevs. This can happen if you strip a node whose parent
            # was already a head (because they're on different branches).
            bheadrevs = sorted(set(bheadrevs).union(newheadrevs))

            # Starting from tip means fewer passes over reachable. If we know
            # the new candidates are not ancestors of existing heads, we don't
            # have to examine ancestors of existing heads
            if ctxisnew:
                iterrevs = sorted(newheadrevs)
            else:
                iterrevs = list(bheadrevs)

            # This loop prunes out two kinds of heads - heads that are
            # superseded by a head in newheadrevs, and newheadrevs that are not
            # heads because an existing head is their descendant.
            while iterrevs:
                latest = iterrevs.pop()
                if latest not in bheadrevs:
                    continue
                ancestors = set(self.changelog.ancestors([latest],
                                                         bheadrevs[0]))
                if ancestors:
                    bheadrevs = [b for b in bheadrevs if b not in ancestors]
            partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]

        # There may be branches that cease to exist when the last commit in the
        # branch was stripped. This code filters them out. Note that the
        # branch that ceased to exist may not be in newbranches because
        # newbranches is the set of candidate heads, which when you strip the
        # last commit in a branch will be the parent branch.
        for branch in partial.keys():
            nodes = [head for head in partial[branch]
                     if self.changelog.hasnode(head)]
            if not nodes:
                del partial[branch]

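    # Worked example for the pruning loop above (revision numbers are
    # illustrative): branch 'a' has head rev 2 and a new commit rev 3 is a
    # descendant of 2. newheadrevs = [3] and bheadrevs becomes [2, 3];
    # popping 3, its ancestor set (bounded below by bheadrevs[0] == 2)
    # contains 2, so 2 is pruned and only rev 3's node remains a head.
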
    def lookup(self, key):
        return self[key].node()

    def lookupbranch(self, key, remote=None):
        repo = remote or self
        if key in repo.branchmap():
            return key

        repo = (remote and remote.local()) and remote or self
        return repo[key].branch()

    def known(self, nodes):
        nm = self.changelog.nodemap
        pc = self._phasecache
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or pc.phase(self, r) >= phases.secret)
            result.append(resp)
        return result

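    # known() backs the wire-protocol membership query used by discovery:
    # each answer is True only when the node exists locally *and* is not
    # secret, so secret changesets stay invisible to remote peers.
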
    def local(self):
        return self

    def cancopy(self):
        return self.local() # so statichttprepo's override of local() works

    def join(self, f):
        return os.path.join(self.path, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        copies = self.dirstate.setparents(p1, p2)
        if copies:
            # Adjust copy records: the dirstate cannot do it, since it
            # requires access to the parents' manifests. Preserve them
            # only for entries added to the first parent.
            pctx = self[p1]
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _loadfilter(self, filter):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @propertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

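    # Sketch of the hgrc configuration this filter machinery consumes
    # (patterns and commands are illustrative examples, not defaults;
    # commands without a registered data filter go through util.filter):
    #
    #   [encode]
    #   **.txt = dos2unix
    #   [decode]
    #   **.txt = unix2dos
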
    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def transaction(self, desc):
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        self._writejournal(desc)
        renames = [(x, undoname(x)) for x in self._journalfiles()]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr

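    # Caller-side transaction pattern (a sketch mirroring how commitctx()
    # below drives this method):
    #
    #   tr = repo.transaction('some-operation')
    #   try:
    #       ...append to journaled files via the transaction...
    #       tr.close()      # commit; aftertrans() renames journal to undo
    #   finally:
    #       tr.release()    # no-op after close(), rolls back otherwise
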
    def _journalfiles(self):
        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'),
                self.sjoin('journal.phaseroots'))

    def undofiles(self):
        return [undoname(x) for x in self._journalfiles()]

    def _writejournal(self, desc):
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                           self.sopener.tryread("phaseroots"))

    def recover(self):
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()

    def rollback(self, dryrun=False, force=False):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)

    def _rollback(self, dryrun, force):
        ui = self.ui
        try:
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
        if os.path.exists(self.join('undo.bookmarks')):
            util.rename(self.join('undo.bookmarks'),
                        self.join('bookmarks'))
        if os.path.exists(self.sjoin('undo.phaseroots')):
            util.rename(self.sjoin('undo.phaseroots'),
                        self.sjoin('phaseroots'))
        self.invalidate()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            util.rename(self.join('undo.dirstate'), self.join('dirstate'))
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def invalidatecaches(self):
        def delcache(name):
            try:
                delattr(self, name)
            except AttributeError:
                pass

        delcache('_tagscache')

        self._branchcache = None # in UTF-8
        self._branchcachetip = None
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want
        to explicitly read the dirstate again (i.e. restoring it to a
        previous known good state).'''
        if 'dirstate' in self.__dict__:
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self, 'dirstate')

    def invalidate(self):
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(self, k)
            except AttributeError:
                pass
        self.invalidatecaches()

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def _afterlock(self, callback):
        """add a callback to the current repository lock.

        The callback will be executed on lock release."""
        l = self._lockref and self._lockref()
        if l:
            l.postrelease.append(callback)
        else:
            callback()

    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.store.write()
            if '_phasecache' in vars(self):
                self._phasecache.write()
            for k, ce in self._filecache.items():
                if k == 'dirstate':
                    continue
                ce.refresh()

        l = self._lock(self.sjoin("lock"), wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        def unlock():
            self.dirstate.write()
            ce = self._filecache.get('dirstate')
            if ce:
                ce.refresh()

        l = self._lock(self.join("wlock"), wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

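    # Lock-ordering sketch: callers that need both locks take wlock first
    # and release in reverse, as rollback() above does:
    #
    #   wlock = repo.wlock()
    #   lock = repo.lock()
    #   try:
    #       ...modify store and working copy...
    #   finally:
    #       release(lock, wlock)
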
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                if '.hgsubstate' in changes[0]:
                    changes[0].remove('.hgsubstate')
                if '.hgsubstate' in changes[2]:
                    changes[2].remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    changes[0].insert(0, '.hgsubstate')

            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and changes[3]:
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret

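    # End-to-end sketch of a programmatic commit (caller-side; the exact
    # match object restricts the commit to one file, as _tag() does above):
    #
    #   m = matchmod.exact(repo.root, '', ['some/file.txt'])
    #   node = repo.commit('update some/file.txt', user=None, date=None,
    #                      match=m)
    #   # commit() returns None when there is nothing to commit
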
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit in its proper phase
            targetphase = phases.newcommitphase(self.ui)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, targetphase, [n])
            tr.close()
            self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    def destroyed(self, newheadnodes=None):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.

        If you know the branchhead cache was up to date before nodes were
        removed and you also know the set of candidate new heads that may
        have resulted from the destruction, you can set newheadnodes. This
        will enable the code to update the branchheads cache, rather than
        having future code decide it's invalid and regenerate it from
        scratch.
        '''
        # If we have info, newheadnodes, on how to update the branch cache, do
        # it. Otherwise, since nodes were destroyed, the cache is stale and
        # this will be caught the next time it is read.
        if newheadnodes:
            tiprev = len(self) - 1
            ctxgen = (self[node] for node in newheadnodes
                      if self.changelog.hasnode(node))
            self._updatebranchcache(self._branchcache, ctxgen)
            self._writebranchcache(self._branchcache, self.changelog.tip(),
                                   tiprev)

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

        # Discard all cache entries to force reloading everything.
        self._filecache.clear()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working
        directory.

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            if match.always():
                return mf
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                # 'f' may be a directory pattern from 'match.files()',
                # so 'f not in ctx1' is not enough
                if f not in ctx1 and f not in ctx1.dirs():
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            withflags = mf1.withflags() | mf2.withflags()
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        if working and modified and not self.dirstate._checklink:
            # Symlink placeholders may get non-symlink-like contents
            # via user error or dereferencing by NFS or Samba servers,
            # so we filter out any placeholders that don't look like a
            # symlink
            sane = []
            for f in modified:
                if ctx2.flags(f) == 'l':
                    d = ctx2[f].data()
                    if len(d) >= 1024 or '\n' in d or util.binary(d):
                        self.ui.debug('ignoring suspect symlink placeholder'
                                      ' "%s"\n' % f)
                        continue
                sane.append(f)
            modified = sane

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
1664 1664 clean=listclean, unknown=listunknown,
1665 1665 listsubrepos=True)
1666 1666 for rfiles, sfiles in zip(r, s):
1667 1667 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1668 1668 except error.LookupError:
1669 1669 self.ui.status(_("skipping missing subrepository: %s\n")
1670 1670 % subpath)
1671 1671
1672 1672 for l in r:
1673 1673 l.sort()
1674 1674 return r
1675 1675
1676 1676 def heads(self, start=None):
1677 1677 heads = self.changelog.heads(start)
1678 1678 # sort the output in rev descending order
1679 1679 return sorted(heads, key=self.changelog.rev, reverse=True)
1680 1680
1681 1681 def branchheads(self, branch=None, start=None, closed=False):
1682 1682 '''return a (possibly filtered) list of heads for the given branch
1683 1683
1684 1684 Heads are returned in topological order, from newest to oldest.
1685 1685 If branch is None, use the dirstate branch.
1686 1686 If start is not None, return only heads reachable from start.
1687 1687 If closed is True, return heads that are marked as closed as well.
1688 1688 '''
1689 1689 if branch is None:
1690 1690 branch = self[None].branch()
1691 1691 branches = self.branchmap()
1692 1692 if branch not in branches:
1693 1693 return []
1694 1694 # the cache returns heads ordered lowest to highest
1695 1695 bheads = list(reversed(branches[branch]))
1696 1696 if start is not None:
1697 1697 # filter out the heads that cannot be reached from startrev
1698 1698 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1699 1699 bheads = [h for h in bheads if h in fbheads]
1700 1700 if not closed:
1701 1701 bheads = [h for h in bheads if not self[h].closesbranch()]
1702 1702 return bheads
1703 1703
1704 1704 def branches(self, nodes):
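# For each starting node this walks first parents until it hits a
# merge or a root, returning (start, stop, p1, p2) tuples -- the
# linear runs that the legacy wire protocol exchanges.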
1705 1705 if not nodes:
1706 1706 nodes = [self.changelog.tip()]
1707 1707 b = []
1708 1708 for n in nodes:
1709 1709 t = n
1710 1710 while True:
1711 1711 p = self.changelog.parents(n)
1712 1712 if p[1] != nullid or p[0] == nullid:
1713 1713 b.append((t, n, p[0], p[1]))
1714 1714 break
1715 1715 n = p[0]
1716 1716 return b
1717 1717
1718 1718 def between(self, pairs):
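# For each (top, bottom) pair this walks first parents from top
# towards bottom, sampling nodes at exponentially growing distances
# (1, 2, 4, 8, ...); the legacy discovery protocol uses these samples
# to binary-search for the common boundary with old servers.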
1719 1719 r = []
1720 1720
1721 1721 for top, bottom in pairs:
1722 1722 n, l, i = top, [], 0
1723 1723 f = 1
1724 1724
1725 1725 while n != bottom and n != nullid:
1726 1726 p = self.changelog.parents(n)[0]
1727 1727 if i == f:
1728 1728 l.append(n)
1729 1729 f = f * 2
1730 1730 n = p
1731 1731 i += 1
1732 1732
1733 1733 r.append(l)
1734 1734
1735 1735 return r
1736 1736
1737 1737 def pull(self, remote, heads=None, force=False):
1738 1738 # don't open a transaction for nothing or you break future useful
1739 1739 # rollback calls
1740 1740 tr = None
1741 1741 trname = 'pull\n' + util.hidepassword(remote.url())
1742 1742 lock = self.lock()
1743 1743 try:
1744 1744 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1745 1745 force=force)
1746 1746 common, fetch, rheads = tmp
1747 1747 if not fetch:
1748 1748 self.ui.status(_("no changes found\n"))
1749 1749 added = []
1750 1750 result = 0
1751 1751 else:
1752 1752 tr = self.transaction(trname)
1753 1753 if heads is None and list(common) == [nullid]:
1754 1754 self.ui.status(_("requesting all changes\n"))
1755 1755 elif heads is None and remote.capable('changegroupsubset'):
1756 1756 # issue1320, avoid a race if remote changed after discovery
1757 1757 heads = rheads
1758 1758
1759 1759 if remote.capable('getbundle'):
1760 1760 cg = remote.getbundle('pull', common=common,
1761 1761 heads=heads or rheads)
1762 1762 elif heads is None:
1763 1763 cg = remote.changegroup(fetch, 'pull')
1764 1764 elif not remote.capable('changegroupsubset'):
1765 1765 raise util.Abort(_("partial pull cannot be done because "
1766 1766 "other repository doesn't support "
1767 1767 "changegroupsubset."))
1768 1768 else:
1769 1769 cg = remote.changegroupsubset(fetch, heads, 'pull')
1770 1770 clstart = len(self.changelog)
1771 1771 result = self.addchangegroup(cg, 'pull', remote.url())
1772 1772 clend = len(self.changelog)
1773 1773 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1774 1774
1775 1775 # compute target subset
1776 1776 if heads is None:
1777 1777 # We pulled everything possible
1778 1778 # sync on everything common
1779 1779 subset = common + added
1780 1780 else:
1781 1781 # We pulled a specific subset
1782 1782 # sync on this subset
1783 1783 subset = heads
1784 1784
1785 1785 # Get remote phases data from remote
1786 1786 remotephases = remote.listkeys('phases')
1787 1787 publishing = bool(remotephases.get('publishing', False))
1788 1788 if remotephases and not publishing:
1789 1789 # remote is new and non-publishing
1790 1790 pheads, _dr = phases.analyzeremotephases(self, subset,
1791 1791 remotephases)
1792 1792 phases.advanceboundary(self, phases.public, pheads)
1793 1793 phases.advanceboundary(self, phases.draft, subset)
1794 1794 else:
1795 1795 # Remote is old or publishing; all common changesets
1796 1796 # should be seen as public
1797 1797 phases.advanceboundary(self, phases.public, subset)
1798 1798
1799 1799 if obsolete._enabled:
1800 1800 self.ui.debug('fetching remote obsolete markers\n')
1801 1801 remoteobs = remote.listkeys('obsolete')
1802 1802 if 'dump0' in remoteobs:
1803 1803 if tr is None:
1804 1804 tr = self.transaction(trname)
1805 1805 for key in sorted(remoteobs, reverse=True):
1806 1806 if key.startswith('dump'):
1807 1807 data = base85.b85decode(remoteobs[key])
1808 1808 self.obsstore.mergemarkers(tr, data)
1809 1809 if tr is not None:
1810 1810 tr.close()
1811 1811 finally:
1812 1812 if tr is not None:
1813 1813 tr.release()
1814 1814 lock.release()
1815 1815
1816 1816 return result
1817 1817
1818 1818 def checkpush(self, force, revs):
1819 1819 """Extensions can override this function if additional checks have
1820 1820 to be performed before pushing, or call it if they override push
1821 1821 command.
1822 1822 """
1823 1823 pass
1824 1824
1825 1825 def push(self, remote, force=False, revs=None, newbranch=False):
1826 1826 '''Push outgoing changesets (limited by revs) from the current
1827 1827 repository to remote. Return an integer:
1828 1828 - None means nothing to push
1829 1829 - 0 means HTTP error
1830 1830 - 1 means we pushed and remote head count is unchanged *or*
1831 1831 we have outgoing changesets but refused to push
1832 1832 - other values as described by addchangegroup()
1833 1833 '''
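# Interpretation sketch (hypothetical caller, assuming a ui object is
# in scope):
#     ret = repo.push(remote)
#     if ret is None:
#         ui.status("nothing to push\n")
#     elif ret == 0:
#         ui.warn("push failed (HTTP error)\n")
#     else:
#         ui.status("push succeeded\n")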
1834 1834 # there are two ways to push to remote repo:
1835 1835 #
1836 1836 # addchangegroup assumes local user can lock remote
1837 1837 # repo (local filesystem, old ssh servers).
1838 1838 #
1839 1839 # unbundle assumes local user cannot lock remote repo (new ssh
1840 1840 # servers, http servers).
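# In code terms the split below is roughly:
#     if remote.capable('unbundle'):
#         remote.unbundle(cg, remoteheads, 'push')  # remote locks itself
#     else:
#         remote.lock()
#         remote.addchangegroup(cg, 'push', self.url())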
1841 1841
1842 1842 if not remote.canpush():
1843 1843 raise util.Abort(_("destination does not support push"))
1844 1844 # get local lock as we might write phase data
1845 1845 locallock = self.lock()
1846 1846 try:
1847 1847 self.checkpush(force, revs)
1848 1848 lock = None
1849 1849 unbundle = remote.capable('unbundle')
1850 1850 if not unbundle:
1851 1851 lock = remote.lock()
1852 1852 try:
1853 1853 # discovery
1854 1854 fci = discovery.findcommonincoming
1855 1855 commoninc = fci(self, remote, force=force)
1856 1856 common, inc, remoteheads = commoninc
1857 1857 fco = discovery.findcommonoutgoing
1858 1858 outgoing = fco(self, remote, onlyheads=revs,
1859 1859 commoninc=commoninc, force=force)
1860 1860
1861 1861
1862 1862 if not outgoing.missing:
1863 1863 # nothing to push
1864 1864 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1865 1865 ret = None
1866 1866 else:
1867 1867 # something to push
1868 1868 if not force:
1869 1869 # if self.obsstore is empty --> no obsolete markers,
1870 1870 # so we can skip the iteration below
1871 1871 if self.obsstore:
1872 1872 # these messages are defined here to respect the 80 char limit
1873 1873 mso = _("push includes obsolete changeset: %s!")
1874 1874 msu = _("push includes unstable changeset: %s!")
1875 msb = _("push includes bumped changeset: %s!")
1876 1877 # If there is at least one obsolete or unstable
1877 1878 # changeset in the missing set, then at least one
1878 1879 # of the missing heads will be obsolete or
1879 1880 # unstable. So checking heads only is ok.
1879 1880 for node in outgoing.missingheads:
1880 1881 ctx = self[node]
1881 1882 if ctx.obsolete():
1882 1883 raise util.Abort(_(mso) % ctx)
1883 1884 elif ctx.unstable():
1884 1885 raise util.Abort(_(msu) % ctx)
1886 elif ctx.bumped():
1887 raise util.Abort(_(msb) % ctx)
1885 1888 discovery.checkheads(self, remote, outgoing,
1886 1889 remoteheads, newbranch,
1887 1890 bool(inc))
1888 1891
1889 1892 # create a changegroup from local
1890 1893 if revs is None and not outgoing.excluded:
1891 1894 # push everything,
1892 1895 # use the fast path, no race possible on push
1893 1896 cg = self._changegroup(outgoing.missing, 'push')
1894 1897 else:
1895 1898 cg = self.getlocalbundle('push', outgoing)
1896 1899
1897 1900 # apply changegroup to remote
1898 1901 if unbundle:
1899 1902 # local repo finds heads on server, finds out what
1900 1903 # revs it must push. Once the revs are transferred, if the
1901 1904 # server finds it has different heads (someone else won the
1902 1905 # commit/push race), the server aborts.
1903 1906 if force:
1904 1907 remoteheads = ['force']
1905 1908 # ssh: return remote's addchangegroup()
1906 1909 # http: return remote's addchangegroup() or 0 for error
1907 1910 ret = remote.unbundle(cg, remoteheads, 'push')
1908 1911 else:
1909 1912 # we return an integer indicating remote head count
1910 1913 # change
1911 1914 ret = remote.addchangegroup(cg, 'push', self.url())
1912 1915
1913 1916 if ret:
1914 1917 # push succeeded, synchronize target of the push
1915 1918 cheads = outgoing.missingheads
1916 1919 elif revs is None:
1917 1920 # All-out push failed. Synchronize all common
1918 1921 cheads = outgoing.commonheads
1919 1922 else:
1920 1923 # I want cheads = heads(::missingheads and ::commonheads)
1921 1924 # (missingheads is revs with secret changesets filtered out)
1922 1925 #
1923 1926 # This can be expressed as:
1924 1927 #     cheads = ( (missingheads and ::commonheads)
1925 1928 #              + (commonheads and ::missingheads) )
1926 1929 #
1927 1930 #
1928 1931 # while trying to push we already computed the following:
1929 1932 # common = (::commonheads)
1930 1933 # missing = ((commonheads::missingheads) - commonheads)
1931 1934 #
1932 1935 # We can pick:
1933 1936 # * the missingheads that are part of common (::commonheads)
1934 1937 common = set(outgoing.common)
1935 1938 cheads = [node for node in revs if node in common]
1936 1939 # and
1937 1940 # * commonheads that are parents of roots of the missing set
1938 1941 revset = self.set('%ln and parents(roots(%ln))',
1939 1942 outgoing.commonheads,
1940 1943 outgoing.missing)
1941 1944 cheads.extend(c.node() for c in revset)
1942 1945 # even when we don't push, exchanging phase data is useful
1943 1946 remotephases = remote.listkeys('phases')
1944 1947 if not remotephases: # old server or public only repo
1945 1948 phases.advanceboundary(self, phases.public, cheads)
1946 1949 # don't push any phase data as there is nothing to push
1947 1950 else:
1948 1951 ana = phases.analyzeremotephases(self, cheads, remotephases)
1949 1952 pheads, droots = ana
1950 1953 ### Apply remote phase on local
1951 1954 if remotephases.get('publishing', False):
1952 1955 phases.advanceboundary(self, phases.public, cheads)
1953 1956 else: # publish = False
1954 1957 phases.advanceboundary(self, phases.public, pheads)
1955 1958 phases.advanceboundary(self, phases.draft, cheads)
1956 1959 ### Apply local phase on remote
1957 1960
1958 1961 # Get the list of all revs that are draft on remote but public here.
1959 1962 # XXX Beware that the revset breaks if droots is not strictly
1960 1963 # XXX roots; we may want to ensure it is, but that is costly
1961 1964 outdated = self.set('heads((%ln::%ln) and public())',
1962 1965 droots, cheads)
1963 1966 for newremotehead in outdated:
1964 1967 r = remote.pushkey('phases',
1965 1968 newremotehead.hex(),
1966 1969 str(phases.draft),
1967 1970 str(phases.public))
1968 1971 if not r:
1969 1972 self.ui.warn(_('updating %s to public failed!\n')
1970 1973 % newremotehead)
1971 1974 self.ui.debug('try to push obsolete markers to remote\n')
1972 1975 if (obsolete._enabled and self.obsstore and
1973 1976 'obsolete' in remote.listkeys('namespaces')):
1974 1977 rslts = []
1975 1978 remotedata = self.listkeys('obsolete')
1976 1979 for key in sorted(remotedata, reverse=True):
1977 1980 # reverse sort to ensure we end with dump0
1978 1981 data = remotedata[key]
1979 1982 rslts.append(remote.pushkey('obsolete', key, '', data))
1980 1983 if [r for r in rslts if not r]:
1981 1984 msg = _('failed to push some obsolete markers!\n')
1982 1985 self.ui.warn(msg)
1983 1986 finally:
1984 1987 if lock is not None:
1985 1988 lock.release()
1986 1989 finally:
1987 1990 locallock.release()
1988 1991
1989 1992 self.ui.debug("checking for updated bookmarks\n")
1990 1993 rb = remote.listkeys('bookmarks')
1991 1994 for k in rb.keys():
1992 1995 if k in self._bookmarks:
1993 1996 nr, nl = rb[k], hex(self._bookmarks[k])
1994 1997 if nr in self:
1995 1998 cr = self[nr]
1996 1999 cl = self[nl]
1997 2000 if bookmarks.validdest(self, cr, cl):
1998 2001 r = remote.pushkey('bookmarks', k, nr, nl)
1999 2002 if r:
2000 2003 self.ui.status(_("updating bookmark %s\n") % k)
2001 2004 else:
2002 2005 self.ui.warn(_('updating bookmark %s'
2003 2006 ' failed!\n') % k)
2004 2007
2005 2008 return ret
2006 2009
2007 2010 def changegroupinfo(self, nodes, source):
2008 2011 if self.ui.verbose or source == 'bundle':
2009 2012 self.ui.status(_("%d changesets found\n") % len(nodes))
2010 2013 if self.ui.debugflag:
2011 2014 self.ui.debug("list of changesets:\n")
2012 2015 for node in nodes:
2013 2016 self.ui.debug("%s\n" % hex(node))
2014 2017
2015 2018 def changegroupsubset(self, bases, heads, source):
2016 2019 """Compute a changegroup consisting of all the nodes that are
2017 2020 descendants of any of the bases and ancestors of any of the heads.
2018 2021 Return a chunkbuffer object whose read() method will return
2019 2022 successive changegroup chunks.
2020 2023
2021 2024 It is fairly complex as determining which filenodes and which
2022 2025 manifest nodes need to be included for the changeset to be complete
2023 2026 is non-trivial.
2024 2027
2025 2028 Another wrinkle is doing the reverse, figuring out which changeset in
2026 2029 the changegroup a particular filenode or manifestnode belongs to.
2027 2030 """
2028 2031 cl = self.changelog
2029 2032 if not bases:
2030 2033 bases = [nullid]
2031 2034 csets, bases, heads = cl.nodesbetween(bases, heads)
2032 2035 # We assume that all ancestors of bases are known
2033 2036 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2034 2037 return self._changegroupsubset(common, csets, heads, source)
2035 2038
2036 2039 def getlocalbundle(self, source, outgoing):
2037 2040 """Like getbundle, but taking a discovery.outgoing as an argument.
2038 2041
2039 2042 This is only implemented for local repos and reuses potentially
2040 2043 precomputed sets in outgoing."""
2041 2044 if not outgoing.missing:
2042 2045 return None
2043 2046 return self._changegroupsubset(outgoing.common,
2044 2047 outgoing.missing,
2045 2048 outgoing.missingheads,
2046 2049 source)
2047 2050
2048 2051 def getbundle(self, source, heads=None, common=None):
2049 2052 """Like changegroupsubset, but returns the set difference between the
2050 2053 ancestors of heads and the ancestors common.
2051 2054
2052 2055 If heads is None, use the local heads. If common is None, use [nullid].
2053 2056
2054 2057 The nodes in common might not all be known locally due to the way the
2055 2058 current discovery protocol works.
2056 2059 """
2057 2060 cl = self.changelog
2058 2061 if common:
2059 2062 nm = cl.nodemap
2060 2063 common = [n for n in common if n in nm]
2061 2064 else:
2062 2065 common = [nullid]
2063 2066 if not heads:
2064 2067 heads = cl.heads()
2065 2068 return self.getlocalbundle(source,
2066 2069 discovery.outgoing(cl, common, heads))
2067 2070
2068 2071 def _changegroupsubset(self, commonrevs, csets, heads, source):
2069 2072
2070 2073 cl = self.changelog
2071 2074 mf = self.manifest
2072 2075 mfs = {} # needed manifests
2073 2076 fnodes = {} # needed file nodes
2074 2077 changedfiles = set()
2075 2078 fstate = ['', {}]
2076 2079 count = [0, 0]
2077 2080
2078 2081 # can we go through the fast path?
2079 2082 heads.sort()
2080 2083 if heads == sorted(self.heads()):
2081 2084 return self._changegroup(csets, source)
2082 2085
2083 2086 # slow path
2084 2087 self.hook('preoutgoing', throw=True, source=source)
2085 2088 self.changegroupinfo(csets, source)
2086 2089
2087 2090 # filter any nodes that claim to be part of the known set
2088 2091 def prune(revlog, missing):
2089 2092 rr, rl = revlog.rev, revlog.linkrev
2090 2093 return [n for n in missing
2091 2094 if rl(rr(n)) not in commonrevs]
2092 2095
2093 2096 progress = self.ui.progress
2094 2097 _bundling = _('bundling')
2095 2098 _changesets = _('changesets')
2096 2099 _manifests = _('manifests')
2097 2100 _files = _('files')
2098 2101
2099 2102 def lookup(revlog, x):
2100 2103 if revlog == cl:
2101 2104 c = cl.read(x)
2102 2105 changedfiles.update(c[3])
2103 2106 mfs.setdefault(c[0], x)
2104 2107 count[0] += 1
2105 2108 progress(_bundling, count[0],
2106 2109 unit=_changesets, total=count[1])
2107 2110 return x
2108 2111 elif revlog == mf:
2109 2112 clnode = mfs[x]
2110 2113 mdata = mf.readfast(x)
2111 2114 for f, n in mdata.iteritems():
2112 2115 if f in changedfiles:
2113 2116 fnodes[f].setdefault(n, clnode)
2114 2117 count[0] += 1
2115 2118 progress(_bundling, count[0],
2116 2119 unit=_manifests, total=count[1])
2117 2120 return clnode
2118 2121 else:
2119 2122 progress(_bundling, count[0], item=fstate[0],
2120 2123 unit=_files, total=count[1])
2121 2124 return fstate[1][x]
2122 2125
2123 2126 bundler = changegroup.bundle10(lookup)
2124 2127 reorder = self.ui.config('bundle', 'reorder', 'auto')
2125 2128 if reorder == 'auto':
2126 2129 reorder = None
2127 2130 else:
2128 2131 reorder = util.parsebool(reorder)
2129 2132
2130 2133 def gengroup():
2131 2134 # Create a changenode group generator that will call our functions
2132 2135 # back to lookup the owning changenode and collect information.
2133 2136 count[:] = [0, len(csets)]
2134 2137 for chunk in cl.group(csets, bundler, reorder=reorder):
2135 2138 yield chunk
2136 2139 progress(_bundling, None)
2137 2140
2138 2141 # Create a generator for the manifestnodes that calls our lookup
2139 2142 # and data collection functions back.
2140 2143 for f in changedfiles:
2141 2144 fnodes[f] = {}
2142 2145 count[:] = [0, len(mfs)]
2143 2146 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2144 2147 yield chunk
2145 2148 progress(_bundling, None)
2146 2149
2147 2150 mfs.clear()
2148 2151
2149 2152 # Go through all our files in order sorted by name.
2150 2153 count[:] = [0, len(changedfiles)]
2151 2154 for fname in sorted(changedfiles):
2152 2155 filerevlog = self.file(fname)
2153 2156 if not len(filerevlog):
2154 2157 raise util.Abort(_("empty or missing revlog for %s")
2155 2158 % fname)
2156 2159 fstate[0] = fname
2157 2160 fstate[1] = fnodes.pop(fname, {})
2158 2161
2159 2162 nodelist = prune(filerevlog, fstate[1])
2160 2163 if nodelist:
2161 2164 count[0] += 1
2162 2165 yield bundler.fileheader(fname)
2163 2166 for chunk in filerevlog.group(nodelist, bundler, reorder):
2164 2167 yield chunk
2165 2168
2166 2169 # Signal that no more groups are left.
2167 2170 yield bundler.close()
2168 2171 progress(_bundling, None)
2169 2172
2170 2173 if csets:
2171 2174 self.hook('outgoing', node=hex(csets[0]), source=source)
2172 2175
2173 2176 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2174 2177
2175 2178 def changegroup(self, basenodes, source):
2176 2179 # to avoid a race we use changegroupsubset() (issue1320)
2177 2180 return self.changegroupsubset(basenodes, self.heads(), source)
2178 2181
2179 2182 def _changegroup(self, nodes, source):
2180 2183 """Compute the changegroup of all nodes that we have that a recipient
2181 2184 doesn't. Return a chunkbuffer object whose read() method will return
2182 2185 successive changegroup chunks.
2183 2186
2184 2187 This is much easier than the previous function as we can assume that
2185 2188 the recipient has any changenode we aren't sending them.
2186 2189
2187 2190 nodes is the set of nodes to send"""
2188 2191
2189 2192 cl = self.changelog
2190 2193 mf = self.manifest
2191 2194 mfs = {}
2192 2195 changedfiles = set()
2193 2196 fstate = ['']
2194 2197 count = [0, 0]
2195 2198
2196 2199 self.hook('preoutgoing', throw=True, source=source)
2197 2200 self.changegroupinfo(nodes, source)
2198 2201
2199 2202 revset = set([cl.rev(n) for n in nodes])
2200 2203
2201 2204 def gennodelst(log):
2202 2205 ln, llr = log.node, log.linkrev
2203 2206 return [ln(r) for r in log if llr(r) in revset]
2204 2207
2205 2208 progress = self.ui.progress
2206 2209 _bundling = _('bundling')
2207 2210 _changesets = _('changesets')
2208 2211 _manifests = _('manifests')
2209 2212 _files = _('files')
2210 2213
2211 2214 def lookup(revlog, x):
2212 2215 if revlog == cl:
2213 2216 c = cl.read(x)
2214 2217 changedfiles.update(c[3])
2215 2218 mfs.setdefault(c[0], x)
2216 2219 count[0] += 1
2217 2220 progress(_bundling, count[0],
2218 2221 unit=_changesets, total=count[1])
2219 2222 return x
2220 2223 elif revlog == mf:
2221 2224 count[0] += 1
2222 2225 progress(_bundling, count[0],
2223 2226 unit=_manifests, total=count[1])
2224 2227 return cl.node(revlog.linkrev(revlog.rev(x)))
2225 2228 else:
2226 2229 progress(_bundling, count[0], item=fstate[0],
2227 2230 total=count[1], unit=_files)
2228 2231 return cl.node(revlog.linkrev(revlog.rev(x)))
2229 2232
2230 2233 bundler = changegroup.bundle10(lookup)
2231 2234 reorder = self.ui.config('bundle', 'reorder', 'auto')
2232 2235 if reorder == 'auto':
2233 2236 reorder = None
2234 2237 else:
2235 2238 reorder = util.parsebool(reorder)
2236 2239
2237 2240 def gengroup():
2238 2241 '''yield a sequence of changegroup chunks (strings)'''
2239 2242 # construct a list of all changed files
2240 2243
2241 2244 count[:] = [0, len(nodes)]
2242 2245 for chunk in cl.group(nodes, bundler, reorder=reorder):
2243 2246 yield chunk
2244 2247 progress(_bundling, None)
2245 2248
2246 2249 count[:] = [0, len(mfs)]
2247 2250 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2248 2251 yield chunk
2249 2252 progress(_bundling, None)
2250 2253
2251 2254 count[:] = [0, len(changedfiles)]
2252 2255 for fname in sorted(changedfiles):
2253 2256 filerevlog = self.file(fname)
2254 2257 if not len(filerevlog):
2255 2258 raise util.Abort(_("empty or missing revlog for %s")
2256 2259 % fname)
2257 2260 fstate[0] = fname
2258 2261 nodelist = gennodelst(filerevlog)
2259 2262 if nodelist:
2260 2263 count[0] += 1
2261 2264 yield bundler.fileheader(fname)
2262 2265 for chunk in filerevlog.group(nodelist, bundler, reorder):
2263 2266 yield chunk
2264 2267 yield bundler.close()
2265 2268 progress(_bundling, None)
2266 2269
2267 2270 if nodes:
2268 2271 self.hook('outgoing', node=hex(nodes[0]), source=source)
2269 2272
2270 2273 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2271 2274
2272 2275 def addchangegroup(self, source, srctype, url, emptyok=False):
2273 2276 """Add the changegroup returned by source.read() to this repo.
2274 2277 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2275 2278 the URL of the repo where this changegroup is coming from.
2276 2279
2277 2280 Return an integer summarizing the change to this repo:
2278 2281 - nothing changed or no source: 0
2279 2282 - more heads than before: 1+added heads (2..n)
2280 2283 - fewer heads than before: -1-removed heads (-2..-n)
2281 2284 - number of heads stays the same: 1
2282 2285 """
2283 2286 def csmap(x):
2284 2287 self.ui.debug("add changeset %s\n" % short(x))
2285 2288 return len(cl)
2286 2289
2287 2290 def revmap(x):
2288 2291 return cl.rev(x)
2289 2292
2290 2293 if not source:
2291 2294 return 0
2292 2295
2293 2296 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2294 2297
2295 2298 changesets = files = revisions = 0
2296 2299 efiles = set()
2297 2300
2298 2301 # write changelog data to temp files so concurrent readers will not see
2299 2302 # an inconsistent view
2300 2303 cl = self.changelog
2301 2304 cl.delayupdate()
2302 2305 oldheads = cl.heads()
2303 2306
2304 2307 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2305 2308 try:
2306 2309 trp = weakref.proxy(tr)
2307 2310 # pull off the changeset group
2308 2311 self.ui.status(_("adding changesets\n"))
2309 2312 clstart = len(cl)
2310 2313 class prog(object):
2311 2314 step = _('changesets')
2312 2315 count = 1
2313 2316 ui = self.ui
2314 2317 total = None
2315 2318 def __call__(self):
2316 2319 self.ui.progress(self.step, self.count, unit=_('chunks'),
2317 2320 total=self.total)
2318 2321 self.count += 1
2319 2322 pr = prog()
2320 2323 source.callback = pr
2321 2324
2322 2325 source.changelogheader()
2323 2326 srccontent = cl.addgroup(source, csmap, trp)
2324 2327 if not (srccontent or emptyok):
2325 2328 raise util.Abort(_("received changelog group is empty"))
2326 2329 clend = len(cl)
2327 2330 changesets = clend - clstart
2328 2331 for c in xrange(clstart, clend):
2329 2332 efiles.update(self[c].files())
2330 2333 efiles = len(efiles)
2331 2334 self.ui.progress(_('changesets'), None)
2332 2335
2333 2336 # pull off the manifest group
2334 2337 self.ui.status(_("adding manifests\n"))
2335 2338 pr.step = _('manifests')
2336 2339 pr.count = 1
2337 2340 pr.total = changesets # manifests <= changesets
2338 2341 # no need to check for empty manifest group here:
2339 2342 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2340 2343 # no new manifest will be created and the manifest group will
2341 2344 # be empty during the pull
2342 2345 source.manifestheader()
2343 2346 self.manifest.addgroup(source, revmap, trp)
2344 2347 self.ui.progress(_('manifests'), None)
2345 2348
2346 2349 needfiles = {}
2347 2350 if self.ui.configbool('server', 'validate', default=False):
2348 2351 # validate incoming csets have their manifests
2349 2352 for cset in xrange(clstart, clend):
2350 2353 mfest = self.changelog.read(self.changelog.node(cset))[0]
2351 2354 mfest = self.manifest.readdelta(mfest)
2352 2355 # store file nodes we must see
2353 2356 for f, n in mfest.iteritems():
2354 2357 needfiles.setdefault(f, set()).add(n)
2355 2358
2356 2359 # process the files
2357 2360 self.ui.status(_("adding file changes\n"))
2358 2361 pr.step = _('files')
2359 2362 pr.count = 1
2360 2363 pr.total = efiles
2361 2364 source.callback = None
2362 2365
2363 2366 while True:
2364 2367 chunkdata = source.filelogheader()
2365 2368 if not chunkdata:
2366 2369 break
2367 2370 f = chunkdata["filename"]
2368 2371 self.ui.debug("adding %s revisions\n" % f)
2369 2372 pr()
2370 2373 fl = self.file(f)
2371 2374 o = len(fl)
2372 2375 if not fl.addgroup(source, revmap, trp):
2373 2376 raise util.Abort(_("received file revlog group is empty"))
2374 2377 revisions += len(fl) - o
2375 2378 files += 1
2376 2379 if f in needfiles:
2377 2380 needs = needfiles[f]
2378 2381 for new in xrange(o, len(fl)):
2379 2382 n = fl.node(new)
2380 2383 if n in needs:
2381 2384 needs.remove(n)
2382 2385 if not needs:
2383 2386 del needfiles[f]
2384 2387 self.ui.progress(_('files'), None)
2385 2388
2386 2389 for f, needs in needfiles.iteritems():
2387 2390 fl = self.file(f)
2388 2391 for n in needs:
2389 2392 try:
2390 2393 fl.rev(n)
2391 2394 except error.LookupError:
2392 2395 raise util.Abort(
2393 2396 _('missing file data for %s:%s - run hg verify') %
2394 2397 (f, hex(n)))
2395 2398
2396 2399 dh = 0
2397 2400 if oldheads:
2398 2401 heads = cl.heads()
2399 2402 dh = len(heads) - len(oldheads)
2400 2403 for h in heads:
2401 2404 if h not in oldheads and self[h].closesbranch():
2402 2405 dh -= 1
2403 2406 htext = ""
2404 2407 if dh:
2405 2408 htext = _(" (%+d heads)") % dh
2406 2409
2407 2410 self.ui.status(_("added %d changesets"
2408 2411 " with %d changes to %d files%s\n")
2409 2412 % (changesets, revisions, files, htext))
2410 2413 obsolete.clearobscaches(self)
2411 2414
2412 2415 if changesets > 0:
2413 2416 p = lambda: cl.writepending() and self.root or ""
2414 2417 self.hook('pretxnchangegroup', throw=True,
2415 2418 node=hex(cl.node(clstart)), source=srctype,
2416 2419 url=url, pending=p)
2417 2420
2418 2421 added = [cl.node(r) for r in xrange(clstart, clend)]
2419 2422 publishing = self.ui.configbool('phases', 'publish', True)
2420 2423 if srctype == 'push':
2421 2424 # Old servers can not push the boundary themselves.
2422 2425 # New servers won't push the boundary if the changeset already
2423 2426 # existed locally as secret
2424 2427 #
2425 2428 # We should not use added here but the list of all changes in
2426 2429 # the bundle
2427 2430 if publishing:
2428 2431 phases.advanceboundary(self, phases.public, srccontent)
2429 2432 else:
2430 2433 phases.advanceboundary(self, phases.draft, srccontent)
2431 2434 phases.retractboundary(self, phases.draft, added)
2432 2435 elif srctype != 'strip':
2433 2436 # publishing only alter behavior during push
2434 2437 #
2435 2438 # strip should not touch boundary at all
2436 2439 phases.retractboundary(self, phases.draft, added)
2437 2440
2438 2441 # make changelog see real files again
2439 2442 cl.finalize(trp)
2440 2443
2441 2444 tr.close()
2442 2445
2443 2446 if changesets > 0:
2444 2447 self.updatebranchcache()
2445 2448 def runhooks():
2446 2449 # forcefully update the on-disk branch cache
2447 2450 self.ui.debug("updating the branch cache\n")
2448 2451 self.hook("changegroup", node=hex(cl.node(clstart)),
2449 2452 source=srctype, url=url)
2450 2453
2451 2454 for n in added:
2452 2455 self.hook("incoming", node=hex(n), source=srctype,
2453 2456 url=url)
2454 2457 self._afterlock(runhooks)
2455 2458
2456 2459 finally:
2457 2460 tr.release()
2458 2461 # never return 0 here:
2459 2462 if dh < 0:
2460 2463 return dh - 1
2461 2464 else:
2462 2465 return dh + 1
2463 2466
2464 2467 def stream_in(self, remote, requirements):
2465 2468 lock = self.lock()
2466 2469 try:
2467 2470 # Save remote branchmap. We will use it later
2468 2471 # to speed up branchcache creation
2469 2472 rbranchmap = None
2470 2473 if remote.capable("branchmap"):
2471 2474 rbranchmap = remote.branchmap()
2472 2475
2473 2476 fp = remote.stream_out()
2474 2477 l = fp.readline()
2475 2478 try:
2476 2479 resp = int(l)
2477 2480 except ValueError:
2478 2481 raise error.ResponseError(
2479 2482 _('unexpected response from remote server:'), l)
2480 2483 if resp == 1:
2481 2484 raise util.Abort(_('operation forbidden by server'))
2482 2485 elif resp == 2:
2483 2486 raise util.Abort(_('locking the remote repository failed'))
2484 2487 elif resp != 0:
2485 2488 raise util.Abort(_('the server sent an unknown error code'))
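# The stream body parsed below is:
#     "<file count> <byte count>\n"
# then, for each file,
#     "<store path>\0<size>\n"
# followed by exactly <size> bytes of raw revlog data.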
2486 2489 self.ui.status(_('streaming all changes\n'))
2487 2490 l = fp.readline()
2488 2491 try:
2489 2492 total_files, total_bytes = map(int, l.split(' ', 1))
2490 2493 except (ValueError, TypeError):
2491 2494 raise error.ResponseError(
2492 2495 _('unexpected response from remote server:'), l)
2493 2496 self.ui.status(_('%d files to transfer, %s of data\n') %
2494 2497 (total_files, util.bytecount(total_bytes)))
2495 2498 handled_bytes = 0
2496 2499 self.ui.progress(_('clone'), 0, total=total_bytes)
2497 2500 start = time.time()
2498 2501 for i in xrange(total_files):
2499 2502 # XXX doesn't support '\n' or '\r' in filenames
2500 2503 l = fp.readline()
2501 2504 try:
2502 2505 name, size = l.split('\0', 1)
2503 2506 size = int(size)
2504 2507 except (ValueError, TypeError):
2505 2508 raise error.ResponseError(
2506 2509 _('unexpected response from remote server:'), l)
2507 2510 if self.ui.debugflag:
2508 2511 self.ui.debug('adding %s (%s)\n' %
2509 2512 (name, util.bytecount(size)))
2510 2513 # for backwards compat, name was partially encoded
2511 2514 ofp = self.sopener(store.decodedir(name), 'w')
2512 2515 for chunk in util.filechunkiter(fp, limit=size):
2513 2516 handled_bytes += len(chunk)
2514 2517 self.ui.progress(_('clone'), handled_bytes,
2515 2518 total=total_bytes)
2516 2519 ofp.write(chunk)
2517 2520 ofp.close()
2518 2521 elapsed = time.time() - start
2519 2522 if elapsed <= 0:
2520 2523 elapsed = 0.001
2521 2524 self.ui.progress(_('clone'), None)
2522 2525 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2523 2526 (util.bytecount(total_bytes), elapsed,
2524 2527 util.bytecount(total_bytes / elapsed)))
2525 2528
2526 2529 # new requirements = old non-format requirements +
2527 2530 #                    new format-related requirements
2528 2531 #                    from the streamed-in repository
2529 2532 requirements.update(set(self.requirements) - self.supportedformats)
2530 2533 self._applyrequirements(requirements)
2531 2534 self._writerequirements()
2532 2535
2533 2536 if rbranchmap:
2534 2537 rbheads = []
2535 2538 for bheads in rbranchmap.itervalues():
2536 2539 rbheads.extend(bheads)
2537 2540
2538 2541 self.branchcache = rbranchmap
2539 2542 if rbheads:
2540 2543 rtiprev = max((int(self.changelog.rev(node))
2541 2544 for node in rbheads))
2542 2545 self._writebranchcache(self.branchcache,
2543 2546 self[rtiprev].node(), rtiprev)
2544 2547 self.invalidate()
2545 2548 return len(self.heads()) + 1
2546 2549 finally:
2547 2550 lock.release()
2548 2551
2549 2552 def clone(self, remote, heads=[], stream=False):
2550 2553 '''clone remote repository.
2551 2554
2552 2555 keyword arguments:
2553 2556 heads: list of revs to clone (forces use of pull)
2554 2557 stream: use streaming clone if possible'''
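# Decision sketch: with stream=True (or a 'stream-preferred' server)
# and no head restriction, this tries a streaming clone first and only
# falls back to pull() when the repository formats do not match.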
2555 2558
2556 2559 # now, all clients that can request uncompressed clones can
2557 2560 # read repo formats supported by all servers that can serve
2558 2561 # them.
2559 2562
2560 2563 # if revlog format changes, client will have to check version
2561 2564 # and format flags on "stream" capability, and use
2562 2565 # uncompressed only if compatible.
2563 2566
2564 2567 if not stream:
2565 2568 # if the server explicitly prefers to stream (for fast LANs)
2566 2569 stream = remote.capable('stream-preferred')
2567 2570
2568 2571 if stream and not heads:
2569 2572 # 'stream' means remote revlog format is revlogv1 only
2570 2573 if remote.capable('stream'):
2571 2574 return self.stream_in(remote, set(('revlogv1',)))
2572 2575 # otherwise, 'streamreqs' contains the remote revlog format
2573 2576 streamreqs = remote.capable('streamreqs')
2574 2577 if streamreqs:
2575 2578 streamreqs = set(streamreqs.split(','))
2576 2579 # if we support it, stream in and adjust our requirements
2577 2580 if not streamreqs - self.supportedformats:
2578 2581 return self.stream_in(remote, streamreqs)
2579 2582 return self.pull(remote, heads)
2580 2583
2581 2584 def pushkey(self, namespace, key, old, new):
2582 2585 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2583 2586 old=old, new=new)
2584 2587 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2585 2588 ret = pushkey.push(self, namespace, key, old, new)
2586 2589 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2587 2590 ret=ret)
2588 2591 return ret
2589 2592
2590 2593 def listkeys(self, namespace):
2591 2594 self.hook('prelistkeys', throw=True, namespace=namespace)
2592 2595 self.ui.debug('listing keys for "%s"\n' % namespace)
2593 2596 values = pushkey.list(self, namespace)
2594 2597 self.hook('listkeys', namespace=namespace, values=values)
2595 2598 return values
2596 2599
2597 2600 def debugwireargs(self, one, two, three=None, four=None, five=None):
2598 2601 '''used to test argument passing over the wire'''
2599 2602 return "%s %s %s %s %s" % (one, two, three, four, five)
2600 2603
2601 2604 def savecommitmessage(self, text):
2602 2605 fp = self.opener('last-message.txt', 'wb')
2603 2606 try:
2604 2607 fp.write(text)
2605 2608 finally:
2606 2609 fp.close()
2607 2610 return self.pathto(fp.name[len(self.root)+1:])
2608 2611
2609 2612 # used to avoid circular references so destructors work
2610 2613 def aftertrans(files):
2611 2614 renamefiles = [tuple(t) for t in files]
2612 2615 def a():
2613 2616 for src, dest in renamefiles:
2614 2617 try:
2615 2618 util.rename(src, dest)
2616 2619 except OSError: # journal file does not yet exist
2617 2620 pass
2618 2621 return a
2619 2622
2620 2623 def undoname(fn):
2621 2624 base, name = os.path.split(fn)
2622 2625 assert name.startswith('journal')
2623 2626 return os.path.join(base, name.replace('journal', 'undo', 1))
2624 2627
2625 2628 def instance(ui, path, create):
2626 2629 return localrepository(ui, util.urllocalpath(path), create)
2627 2630
2628 2631 def islocal(path):
2629 2632 return True
@@ -1,272 +1,273 b''
1 1 Check that the obsolete feature properly strips heads
2 2 $ cat > obs.py << EOF
3 3 > import mercurial.obsolete
4 4 > mercurial.obsolete._enabled = True
5 5 > EOF
6 6 $ cat >> $HGRCPATH << EOF
7 7 > [phases]
8 8 > # public changesets are not obsolete
9 9 > publish=false
10 10 > [ui]
11 11 > logtemplate='{node|short} ({phase}) {desc|firstline}\n'
12 12 > [extensions]
13 13 > graphlog=
14 14 > EOF
15 15 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
16 16 $ mkcommit() {
17 17 > echo "$1" > "$1"
18 18 > hg add "$1"
19 19 > hg ci -m "add $1"
20 20 > }
21 21 $ getid() {
22 22 > hg id --debug -ir "desc('$1')"
23 23 > }
24 24
25 25
26 26 $ hg init remote
27 27 $ cd remote
28 28 $ mkcommit base
29 29 $ hg phase --public .
30 30 $ cd ..
31 31 $ cp -r remote base
32 32 $ hg clone remote local
33 33 updating to branch default
34 34 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
35 35 $ cd local
36 36
37 37 New head replaces old head
38 38 ==========================
39 39
40 40 setup
41 (we add flag 1 to prevent a bumped error during the test)
41 42
42 43 $ mkcommit old
43 44 $ hg push
44 45 pushing to $TESTTMP/remote (glob)
45 46 searching for changes
46 47 adding changesets
47 48 adding manifests
48 49 adding file changes
49 50 added 1 changesets with 1 changes to 1 files
50 51 $ hg up -q '.^'
51 52 $ mkcommit new
52 53 created new head
53 $ hg debugobsolete `getid old` `getid new`
54 $ hg debugobsolete --flags 1 `getid old` `getid new`
54 55 $ hg glog --hidden
55 56 @ 71e3228bffe1 (draft) add new
56 57 |
57 58 | x c70b08862e08 (draft) add old
58 59 |/
59 60 o b4952fcf48cf (public) add base
60 61
61 62 $ cp -r ../remote ../backup1
62 63
63 64 old exists remotely as draft. It is obsoleted by new, which we now push.
64 65 Push should not warn about creating new head
65 66
66 67 $ hg push
67 68 pushing to $TESTTMP/remote (glob)
68 69 searching for changes
69 70 adding changesets
70 71 adding manifests
71 72 adding file changes
72 73 added 1 changesets with 1 changes to 1 files (+1 heads)
73 74
74 75 old head is now public (public local version)
75 76 =============================================
76 77
77 78 setup
78 79
79 80 $ rm -fr ../remote
80 81 $ cp -r ../backup1 ../remote
81 82 $ hg -R ../remote phase --public c70b08862e08
82 83 $ hg pull -v
83 84 pulling from $TESTTMP/remote (glob)
84 85 searching for changes
85 86 no changes found
86 87 $ hg glog --hidden
87 88 @ 71e3228bffe1 (draft) add new
88 89 |
89 90 | o c70b08862e08 (public) add old
90 91 |/
91 92 o b4952fcf48cf (public) add base
92 93
93 94
94 95 Abort: old will still be a head because it's public.
95 96
96 97 $ hg push
97 98 pushing to $TESTTMP/remote (glob)
98 99 searching for changes
99 100 abort: push creates new remote head 71e3228bffe1!
100 101 (did you forget to merge? use push -f to force)
101 102 [255]
102 103
103 104 old head is now public (public remote version)
104 105 ==============================================
105 106
106 107 TODO: Not implemented yet.
107 108
108 109 # setup
109 110 #
110 111 # $ rm -fr ../remote
111 112 # $ cp -r ../backup1 ../remote
112 113 # $ hg -R ../remote phase --public c70b08862e08
113 114 # $ hg phase --draft --force c70b08862e08
114 115 # $ hg glog --hidden
115 116 # @ 71e3228bffe1 (draft) add new
116 117 # |
117 118 # | x c70b08862e08 (draft) add old
118 119 # |/
119 120 # o b4952fcf48cf (public) add base
120 121 #
121 122 #
122 123 #
123 124 # Abort: old will still be a head because it's public.
124 125 #
125 126 # $ hg push
126 127 # pushing to $TESTTMP/remote
127 128 # searching for changes
128 129 # abort: push creates new remote head 71e3228bffe1!
129 130 # (did you forget to merge? use push -f to force)
130 131 # [255]
131 132
132 133 old head is obsolete but replacement is not pushed
133 134 ==================================================
134 135
135 136 setup
136 137
137 138 $ rm -fr ../remote
138 139 $ cp -r ../backup1 ../remote
139 140 $ hg phase --draft --force '(0::) - 0'
140 141 $ hg up -q '.^'
141 142 $ mkcommit other
142 143 created new head
143 144 $ hg glog --hidden
144 145 @ d7d41ccbd4de (draft) add other
145 146 |
146 147 | o 71e3228bffe1 (draft) add new
147 148 |/
148 149 | x c70b08862e08 (draft) add old
149 150 |/
150 151 o b4952fcf48cf (public) add base
151 152
152 153
153 154 old exists remotely as draft. It is obsoleted by new but we don't push new.
154 155 Push should abort on new head
155 156
156 157 $ hg push -r 'desc("other")'
157 158 pushing to $TESTTMP/remote (glob)
158 159 searching for changes
159 160 abort: push creates new remote head d7d41ccbd4de!
160 161 (did you forget to merge? use push -f to force)
161 162 [255]
162 163
163 164
164 165
165 166 Both precursors and successors are already known remotely. Descendant adds heads
166 167 ================================================================================
167 168
168 169 setup. (The obsolete marker is known locally only)
169 170
170 171 $ cd ..
171 172 $ rm -rf local
172 173 $ hg clone remote local
173 174 updating to branch default
174 175 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
175 176 $ cd local
176 177 $ mkcommit old
177 178 old already tracked!
178 179 nothing changed
179 180 [1]
180 181 $ hg up -q '.^'
181 182 $ mkcommit new
182 183 created new head
183 184 $ hg push -f
184 185 pushing to $TESTTMP/remote (glob)
185 186 searching for changes
186 187 adding changesets
187 188 adding manifests
188 189 adding file changes
189 190 added 1 changesets with 1 changes to 1 files (+1 heads)
190 191 $ mkcommit desc1
191 192 $ hg up -q '.^'
192 193 $ mkcommit desc2
193 194 created new head
194 195 $ hg debugobsolete `getid old` `getid new`
195 196 $ hg glog --hidden
196 197 @ 5fe37041cc2b (draft) add desc2
197 198 |
198 199 | o a3ef1d111c5f (draft) add desc1
199 200 |/
200 201 o 71e3228bffe1 (draft) add new
201 202 |
202 203 | x c70b08862e08 (draft) add old
203 204 |/
204 205 o b4952fcf48cf (public) add base
205 206
206 207 $ hg glog --hidden -R ../remote
207 208 o 71e3228bffe1 (draft) add new
208 209 |
209 210 | o c70b08862e08 (draft) add old
210 211 |/
211 212 @ b4952fcf48cf (public) add base
212 213
213 214 $ cp -r ../remote ../backup2
214 215
215 216 Push should not warn about adding new heads. We create one, but we'll delete
216 217 one anyway.
217 218
218 219 $ hg push
219 220 pushing to $TESTTMP/remote (glob)
220 221 searching for changes
221 222 adding changesets
222 223 adding manifests
223 224 adding file changes
224 225 added 2 changesets with 2 changes to 2 files (+1 heads)
225 226
226 227
227 228 Remote head is unknown but obsoleted by a local changeset
228 229 =========================================================
229 230
230 231 setup
231 232
232 233 $ rm -fr ../remote
233 234 $ cp -r ../backup1 ../remote
234 235 $ cd ..
235 236 $ rm -rf local
236 237 $ hg clone remote local -r 0
237 238 adding changesets
238 239 adding manifests
239 240 adding file changes
240 241 added 1 changesets with 1 changes to 1 files
241 242 updating to branch default
242 243 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
243 244 $ cd local
244 245 $ mkcommit new
245 246 $ hg -R ../remote id --debug -r tip
246 247 c70b08862e0838ea6d7c59c85da2f1ed6c8d67da tip
247 248 $ hg id --debug -r tip
248 249 71e3228bffe1886550777233d6c97bb5a6b2a650 tip
249 250 $ hg debugobsolete c70b08862e0838ea6d7c59c85da2f1ed6c8d67da 71e3228bffe1886550777233d6c97bb5a6b2a650
250 251 $ hg glog --hidden
251 252 @ 71e3228bffe1 (draft) add new
252 253 |
253 254 o b4952fcf48cf (public) add base
254 255
255 256 $ hg glog --hidden -R ../remote
256 257 o c70b08862e08 (draft) add old
257 258 |
258 259 @ b4952fcf48cf (public) add base
259 260
260 261
261 262 Push should not complain about new heads.
262 263
263 264 It should not complain about "unsynced remote changes!" either but that's not
264 265 handled yet.
265 266
266 267 $ hg push --traceback
267 268 pushing to $TESTTMP/remote (glob)
268 269 searching for changes
269 270 adding changesets
270 271 adding manifests
271 272 adding file changes
272 273 added 1 changesets with 1 changes to 1 files (+1 heads)
@@ -1,584 +1,592 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > [phases]
5 5 > # public changesets are not obsolete
6 6 > publish=false
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
14 14 > hg id --debug -ir "desc('$1')"
15 15 > }
16 16
17 17 $ cat > debugkeys.py <<EOF
18 18 > def reposetup(ui, repo):
19 19 > class debugkeysrepo(repo.__class__):
20 20 > def listkeys(self, namespace):
21 21 > ui.write('listkeys %s\n' % (namespace,))
22 22 > return super(debugkeysrepo, self).listkeys(namespace)
23 23 >
24 24 > if repo.local():
25 25 > repo.__class__ = debugkeysrepo
26 26 > EOF
27 27
28 28 $ hg init tmpa
29 29 $ cd tmpa
30 30 $ mkcommit kill_me
31 31
32 32 Checking that the feature is properly disabled
33 33
34 34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 35 abort: obsolete feature is not enabled on this repo
36 36 [255]
37 37
38 38 Enabling it
39 39
40 40 $ cat > ../obs.py << EOF
41 41 > import mercurial.obsolete
42 42 > mercurial.obsolete._enabled = True
43 43 > EOF
44 44 $ echo '[extensions]' >> $HGRCPATH
45 45 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
46 46
47 47 Killing a single changeset without replacement
48 48
49 49 $ hg debugobsolete 0
50 50 abort: changeset references must be full hexadecimal node identifiers
51 51 [255]
52 52 $ hg debugobsolete '00'
53 53 abort: changeset references must be full hexadecimal node identifiers
54 54 [255]
55 55 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
56 56 $ hg debugobsolete
57 57 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
58 58 $ cd ..
59 59
60 60 Killing a single changeset with replacement
61 61
62 62 $ hg init tmpb
63 63 $ cd tmpb
64 64 $ mkcommit a
65 65 $ mkcommit b
66 66 $ mkcommit original_c
67 67 $ hg up "desc('b')"
68 68 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
69 69 $ mkcommit new_c
70 70 created new head
71 71 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
72 72 $ hg debugobsolete --flag 12 `getid original_c` `getid new_c` -d '56 12'
73 73 $ hg log -r 'hidden()' --template '{rev}:{node|short} {desc}\n' --hidden
74 74 2:245bde4270cd add original_c
75 75 $ hg debugobsolete
76 76 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
77 77
78 78 do it again (it reads the obsstore before adding a new changeset)
79 79
80 80 $ hg up '.^'
81 81 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
82 82 $ mkcommit new_2_c
83 83 created new head
84 84 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
85 85 $ hg debugobsolete
86 86 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
87 87 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
88 88
89 89 Register two markers with a missing node
90 90
91 91 $ hg up '.^'
92 92 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
93 93 $ mkcommit new_3_c
94 94 created new head
95 95 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
96 96 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
97 97 $ hg debugobsolete
98 98 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
99 99 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
100 100 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
101 101 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
102 102
103 103 Refuse pathological nullid successors
104 104 $ hg debugobsolete -d '9001 0' 1337133713371337133713371337133713371337 0000000000000000000000000000000000000000
105 105 transaction abort!
106 106 rollback completed
107 107 abort: bad obsolescence marker detected: invalid successors nullid
108 108 [255]
109 109
110 110 Check that graphlog detects that a changeset is obsolete:
111 111
112 112 $ hg glog
113 113 @ changeset: 5:5601fb93a350
114 114 | tag: tip
115 115 | parent: 1:7c3bad9141dc
116 116 | user: test
117 117 | date: Thu Jan 01 00:00:00 1970 +0000
118 118 | summary: add new_3_c
119 119 |
120 120 o changeset: 1:7c3bad9141dc
121 121 | user: test
122 122 | date: Thu Jan 01 00:00:00 1970 +0000
123 123 | summary: add b
124 124 |
125 125 o changeset: 0:1f0dee641bb7
126 126 user: test
127 127 date: Thu Jan 01 00:00:00 1970 +0000
128 128 summary: add a
129 129
130 130
131 131 Check that public changesets are not accounted as obsolete:
132 132
133 133 $ hg phase --public 2
134 134 $ hg --config 'extensions.graphlog=' glog
135 135 @ changeset: 5:5601fb93a350
136 136 | tag: tip
137 137 | parent: 1:7c3bad9141dc
138 138 | user: test
139 139 | date: Thu Jan 01 00:00:00 1970 +0000
140 140 | summary: add new_3_c
141 141 |
142 142 | o changeset: 2:245bde4270cd
143 143 |/ user: test
144 144 | date: Thu Jan 01 00:00:00 1970 +0000
145 145 | summary: add original_c
146 146 |
147 147 o changeset: 1:7c3bad9141dc
148 148 | user: test
149 149 | date: Thu Jan 01 00:00:00 1970 +0000
150 150 | summary: add b
151 151 |
152 152 o changeset: 0:1f0dee641bb7
153 153 user: test
154 154 date: Thu Jan 01 00:00:00 1970 +0000
155 155 summary: add a
156 156
157 157
158 158 And that bumped changesets are detected
159 159 ---------------------------------------
160 160
161 161 If we didn't filter obsolete changesets out, 3 and 4 would show up too. Also
162 162 note that the bumped changeset (5:5601fb93a350) is not a direct successor of
163 163 the public changeset.
164 164
165 165 $ hg log --hidden -r 'bumped()'
166 166 changeset: 5:5601fb93a350
167 167 tag: tip
168 168 parent: 1:7c3bad9141dc
169 169 user: test
170 170 date: Thu Jan 01 00:00:00 1970 +0000
171 171 summary: add new_3_c
172 172
173 173
174 And that we can't push a bumped changeset
175
176 $ hg push ../tmpa
177 pushing to ../tmpa
178 searching for changes
179 abort: push includes bumped changeset: 5601fb93a350!
180 [255]
181
174 182 Fixing "bumped" situation
175 183 We need to create a clone of 5 and add a special marker with a flag
176 184
177 185 $ hg up '5^'
178 186 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
179 187 $ hg revert -ar 5
180 188 adding new_3_c
181 189 $ hg ci -m 'add n3w_3_c'
182 190 created new head
183 191 $ hg debugobsolete -d '1338 0' --flags 1 `getid new_3_c` `getid n3w_3_c`
184 192 $ hg log -r 'bumped()'
185 193 $ hg log -G
186 194 @ changeset: 6:6f9641995072
187 195 | tag: tip
188 196 | parent: 1:7c3bad9141dc
189 197 | user: test
190 198 | date: Thu Jan 01 00:00:00 1970 +0000
191 199 | summary: add n3w_3_c
192 200 |
193 201 | o changeset: 2:245bde4270cd
194 202 |/ user: test
195 203 | date: Thu Jan 01 00:00:00 1970 +0000
196 204 | summary: add original_c
197 205 |
198 206 o changeset: 1:7c3bad9141dc
199 207 | user: test
200 208 | date: Thu Jan 01 00:00:00 1970 +0000
201 209 | summary: add b
202 210 |
203 211 o changeset: 0:1f0dee641bb7
204 212 user: test
205 213 date: Thu Jan 01 00:00:00 1970 +0000
206 214 summary: add a
207 215
208 216
209 217
210 218
211 219 $ cd ..
212 220
213 221 Exchange Test
214 222 ============================
215 223
216 224 Destination repo does not have any data
217 225 ---------------------------------------
218 226
219 227 Try to pull markers
220 228 (extinct changesets are excluded but markers are pushed)
221 229
222 230 $ hg init tmpc
223 231 $ cd tmpc
224 232 $ hg pull ../tmpb
225 233 pulling from ../tmpb
226 234 requesting all changes
227 235 adding changesets
228 236 adding manifests
229 237 adding file changes
230 238 added 4 changesets with 4 changes to 4 files (+1 heads)
231 239 (run 'hg heads' to see heads, 'hg merge' to merge)
232 240 $ hg debugobsolete
233 241 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
234 242 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
235 243 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
236 244 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
237 245 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
238 246
239 247 Rollback/Transaction support
240 248
241 249 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
242 250 $ hg debugobsolete
243 251 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
244 252 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
245 253 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
246 254 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
247 255 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
248 256 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
249 257 $ hg rollback -n
250 258 repository tip rolled back to revision 3 (undo debugobsolete)
251 259 $ hg rollback
252 260 repository tip rolled back to revision 3 (undo debugobsolete)
253 261 $ hg debugobsolete
254 262 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
255 263 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
256 264 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
257 265 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
258 266 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
259 267
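The markers take part in the normal transaction machinery, which is why
`hg rollback` above removes the extra marker: the store's length is journaled
before the append, and rolling back truncates the file to that length. A toy
sketch of the idea (plain file handling, not Mercurial's transaction class):

    import os

    def append_with_journal(path, data):
        # journal the pre-write size so a rollback can truncate back to it
        offset = os.path.getsize(path) if os.path.exists(path) else 0
        with open(path, 'ab') as f:
            f.write(data)
        return offset

    def rollback(path, offset):
        with open(path, 'ab') as f:
            f.truncate(offset)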
260 268 $ cd ..
261 269
262 270 Try to push markers
263 271
264 272 $ hg init tmpd
265 273 $ hg -R tmpb push tmpd
266 274 pushing to tmpd
267 275 searching for changes
268 276 adding changesets
269 277 adding manifests
270 278 adding file changes
271 279 added 4 changesets with 4 changes to 4 files (+1 heads)
272 280 $ hg -R tmpd debugobsolete
273 281 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
274 282 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
275 283 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
276 284 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
277 285 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
278 286
279 287 Check that obsolete keys are exchanged only if the source has an obsolete store
280 288
281 289 $ hg init empty
282 290 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
283 291 pushing to tmpd
284 292 no changes found
285 293 listkeys phases
286 294 listkeys bookmarks
287 295 [1]
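Markers travel over the pushkey protocol, and the transcript above shows the
gating: the empty source repo queries only the phases and bookmarks
namespaces, because it has no obsolete store to advertise. A sketch of that
check (toy repo shape, not the real wire protocol):

    # Only offer the "obsolete" pushkey namespace when the source repo
    # actually has markers to send.
    def namespaces_to_exchange(repo):
        names = ['phases', 'bookmarks']
        if repo.get('obsstore'):  # non-empty marker store
            names.insert(0, 'obsolete')
        return names

    assert namespaces_to_exchange({'obsstore': []}) == ['phases', 'bookmarks']
    assert 'obsolete' in namespaces_to_exchange({'obsstore': ['m1']})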
288 296
289 297 clone support
290 298 (markers are copied and extinct changesets are included to allow hardlinks)
291 299
292 300 $ hg clone tmpb clone-dest
293 301 updating to branch default
294 302 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
295 303 $ hg -R clone-dest log -G --hidden
296 304 @ changeset: 6:6f9641995072
297 305 | tag: tip
298 306 | parent: 1:7c3bad9141dc
299 307 | user: test
300 308 | date: Thu Jan 01 00:00:00 1970 +0000
301 309 | summary: add n3w_3_c
302 310 |
303 311 | x changeset: 5:5601fb93a350
304 312 |/ parent: 1:7c3bad9141dc
305 313 | user: test
306 314 | date: Thu Jan 01 00:00:00 1970 +0000
307 315 | summary: add new_3_c
308 316 |
309 317 | x changeset: 4:ca819180edb9
310 318 |/ parent: 1:7c3bad9141dc
311 319 | user: test
312 320 | date: Thu Jan 01 00:00:00 1970 +0000
313 321 | summary: add new_2_c
314 322 |
315 323 | x changeset: 3:cdbce2fbb163
316 324 |/ parent: 1:7c3bad9141dc
317 325 | user: test
318 326 | date: Thu Jan 01 00:00:00 1970 +0000
319 327 | summary: add new_c
320 328 |
321 329 | o changeset: 2:245bde4270cd
322 330 |/ user: test
323 331 | date: Thu Jan 01 00:00:00 1970 +0000
324 332 | summary: add original_c
325 333 |
326 334 o changeset: 1:7c3bad9141dc
327 335 | user: test
328 336 | date: Thu Jan 01 00:00:00 1970 +0000
329 337 | summary: add b
330 338 |
331 339 o changeset: 0:1f0dee641bb7
332 340 user: test
333 341 date: Thu Jan 01 00:00:00 1970 +0000
334 342 summary: add a
335 343
336 344 $ hg -R clone-dest debugobsolete
337 345 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
338 346 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
339 347 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
340 348 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
341 349 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
342 350
343 351
344 352 Destination repo has existing data
345 353 ---------------------------------------
346 354
347 355 On pull
348 356
349 357 $ hg init tmpe
350 358 $ cd tmpe
351 359 $ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
352 360 $ hg pull ../tmpb
353 361 pulling from ../tmpb
354 362 requesting all changes
355 363 adding changesets
356 364 adding manifests
357 365 adding file changes
358 366 added 4 changesets with 4 changes to 4 files (+1 heads)
359 367 (run 'hg heads' to see heads, 'hg merge' to merge)
360 368 $ hg debugobsolete
361 369 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
362 370 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
363 371 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
364 372 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
365 373 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
366 374 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
367 375
368 376
369 377 On push
370 378
371 379 $ hg push ../tmpc
372 380 pushing to ../tmpc
373 381 searching for changes
374 382 no changes found
375 383 [1]
376 384 $ hg -R ../tmpc debugobsolete
377 385 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f C {'date': '56 12', 'user': 'test'}
378 386 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
379 387 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
380 388 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
381 389 5601fb93a350734d935195fee37f4054c529ff39 6f96419950729f3671185b847352890f074f7557 1 {'date': '1338 0', 'user': 'test'}
382 390 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
383 391
384 392 detect outgoing obsolete and unstable changesets
385 393 ---------------------------------------
386 394
387 395
388 396 $ hg glog
389 397 o changeset: 3:6f9641995072
390 398 | tag: tip
391 399 | parent: 1:7c3bad9141dc
392 400 | user: test
393 401 | date: Thu Jan 01 00:00:00 1970 +0000
394 402 | summary: add n3w_3_c
395 403 |
396 404 | o changeset: 2:245bde4270cd
397 405 |/ user: test
398 406 | date: Thu Jan 01 00:00:00 1970 +0000
399 407 | summary: add original_c
400 408 |
401 409 o changeset: 1:7c3bad9141dc
402 410 | user: test
403 411 | date: Thu Jan 01 00:00:00 1970 +0000
404 412 | summary: add b
405 413 |
406 414 o changeset: 0:1f0dee641bb7
407 415 user: test
408 416 date: Thu Jan 01 00:00:00 1970 +0000
409 417 summary: add a
410 418
411 419 $ hg up 'desc("n3w_3_c")'
412 420 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
413 421 $ mkcommit original_d
414 422 $ mkcommit original_e
415 423 $ hg debugobsolete `getid original_d` -d '0 0'
416 424 $ hg log -r 'obsolete()'
417 425 changeset: 4:94b33453f93b
418 426 user: test
419 427 date: Thu Jan 01 00:00:00 1970 +0000
420 428 summary: add original_d
421 429
422 430 $ hg glog -r '::unstable()'
423 431 @ changeset: 5:cda648ca50f5
424 432 | tag: tip
425 433 | user: test
426 434 | date: Thu Jan 01 00:00:00 1970 +0000
427 435 | summary: add original_e
428 436 |
429 437 x changeset: 4:94b33453f93b
430 438 | user: test
431 439 | date: Thu Jan 01 00:00:00 1970 +0000
432 440 | summary: add original_d
433 441 |
434 442 o changeset: 3:6f9641995072
435 443 | parent: 1:7c3bad9141dc
436 444 | user: test
437 445 | date: Thu Jan 01 00:00:00 1970 +0000
438 446 | summary: add n3w_3_c
439 447 |
440 448 o changeset: 1:7c3bad9141dc
441 449 | user: test
442 450 | date: Thu Jan 01 00:00:00 1970 +0000
443 451 | summary: add b
444 452 |
445 453 o changeset: 0:1f0dee641bb7
446 454 user: test
447 455 date: Thu Jan 01 00:00:00 1970 +0000
448 456 summary: add a
449 457
450 458
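A sketch of the instability rule shown above (toy graph, not Mercurial's
revset implementation): a changeset is unstable when it is not obsolete
itself but has an obsolete ancestor, as 5:cda648ca50f5 does atop the obsolete
4:94b33453f93b.

    # Toy model: parents maps node -> list of parent nodes.
    def unstable(parents, obsolete):
        result = set()
        for node in parents:
            if node in obsolete:
                continue
            stack, seen = list(parents.get(node, ())), set()
            while stack:
                anc = stack.pop()
                if anc in seen:
                    continue
                seen.add(anc)
                if anc in obsolete:
                    result.add(node)
                    break
                stack.extend(parents.get(anc, ()))
        return result

    parents = {'5': ['4'], '4': ['3'], '3': ['1'], '1': ['0'], '0': []}
    assert unstable(parents, obsolete={'4'}) == {'5'}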
451 459 refuse to push an obsolete changeset
452 460
453 461 $ hg push ../tmpc/ -r 'desc("original_d")'
454 462 pushing to ../tmpc/
455 463 searching for changes
456 464 abort: push includes obsolete changeset: 94b33453f93b!
457 465 [255]
458 466
459 467 refuse to push an unstable changeset
460 468
461 469 $ hg push ../tmpc/
462 470 pushing to ../tmpc/
463 471 searching for changes
464 472 abort: push includes unstable changeset: cda648ca50f5!
465 473 [255]
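Both aborts come from the same pre-push validation: scan the outgoing
changesets and refuse any troubled one. A minimal sketch mirroring the
messages above (toy node strings, not the real discovery code):

    class Abort(Exception):
        pass

    def checkpush(outgoing, obsolete, unstable):
        for node in outgoing:
            if node in obsolete:
                raise Abort('push includes obsolete changeset: %s!' % node)
            if node in unstable:
                raise Abort('push includes unstable changeset: %s!' % node)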
466 474
467 475 Test that extinct changesets are properly detected
468 476
469 477 $ hg log -r 'extinct()'
470 478
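Nothing is extinct yet because an obsolete changeset only becomes extinct
once all of its descendants are obsolete as well; here the obsolete rev 4 is
kept alive by its non-obsolete child 5. A toy sketch of that rule:

    # children maps node -> list of child nodes.
    def extinct(children, obsolete):
        def alldescobsolete(node):
            stack = list(children.get(node, ()))
            while stack:
                child = stack.pop()
                if child not in obsolete:
                    return False
                stack.extend(children.get(child, ()))
            return True
        return set(n for n in obsolete if alldescobsolete(n))

    children = {'3': ['4'], '4': ['5'], '5': []}
    assert extinct(children, obsolete={'4'}) == set()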
471 479 Don't try to push extinct changesets
472 480
473 481 $ hg init ../tmpf
474 482 $ hg out ../tmpf
475 483 comparing with ../tmpf
476 484 searching for changes
477 485 changeset: 0:1f0dee641bb7
478 486 user: test
479 487 date: Thu Jan 01 00:00:00 1970 +0000
480 488 summary: add a
481 489
482 490 changeset: 1:7c3bad9141dc
483 491 user: test
484 492 date: Thu Jan 01 00:00:00 1970 +0000
485 493 summary: add b
486 494
487 495 changeset: 2:245bde4270cd
488 496 user: test
489 497 date: Thu Jan 01 00:00:00 1970 +0000
490 498 summary: add original_c
491 499
492 500 changeset: 3:6f9641995072
493 501 parent: 1:7c3bad9141dc
494 502 user: test
495 503 date: Thu Jan 01 00:00:00 1970 +0000
496 504 summary: add n3w_3_c
497 505
498 506 changeset: 4:94b33453f93b
499 507 user: test
500 508 date: Thu Jan 01 00:00:00 1970 +0000
501 509 summary: add original_d
502 510
503 511 changeset: 5:cda648ca50f5
504 512 tag: tip
505 513 user: test
506 514 date: Thu Jan 01 00:00:00 1970 +0000
507 515 summary: add original_e
508 516
509 517 $ hg push ../tmpf -f # -f because we push unstable changesets too
510 518 pushing to ../tmpf
511 519 searching for changes
512 520 adding changesets
513 521 adding manifests
514 522 adding file changes
515 523 added 6 changesets with 6 changes to 6 files (+1 heads)
516 524
517 525 no warning displayed
518 526
519 527 $ hg push ../tmpf
520 528 pushing to ../tmpf
521 529 searching for changes
522 530 no changes found
523 531 [1]
524 532
525 533 Do not warn about new head when the new head is a successor of a remote one
526 534
527 535 $ hg glog
528 536 @ changeset: 5:cda648ca50f5
529 537 | tag: tip
530 538 | user: test
531 539 | date: Thu Jan 01 00:00:00 1970 +0000
532 540 | summary: add original_e
533 541 |
534 542 x changeset: 4:94b33453f93b
535 543 | user: test
536 544 | date: Thu Jan 01 00:00:00 1970 +0000
537 545 | summary: add original_d
538 546 |
539 547 o changeset: 3:6f9641995072
540 548 | parent: 1:7c3bad9141dc
541 549 | user: test
542 550 | date: Thu Jan 01 00:00:00 1970 +0000
543 551 | summary: add n3w_3_c
544 552 |
545 553 | o changeset: 2:245bde4270cd
546 554 |/ user: test
547 555 | date: Thu Jan 01 00:00:00 1970 +0000
548 556 | summary: add original_c
549 557 |
550 558 o changeset: 1:7c3bad9141dc
551 559 | user: test
552 560 | date: Thu Jan 01 00:00:00 1970 +0000
553 561 | summary: add b
554 562 |
555 563 o changeset: 0:1f0dee641bb7
556 564 user: test
557 565 date: Thu Jan 01 00:00:00 1970 +0000
558 566 summary: add a
559 567
560 568 $ hg up -q 'desc(n3w_3_c)'
561 569 $ mkcommit obsolete_e
562 570 created new head
563 571 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
564 572 $ hg push ../tmpf
565 573 pushing to ../tmpf
566 574 searching for changes
567 575 adding changesets
568 576 adding manifests
569 577 adding file changes
570 578 added 1 changesets with 1 changes to 1 files (+1 heads)
571 579
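The push above adds a new remote head without the usual "creates new remote
heads" abort because the new head 3de5eca88c00 is, through the marker just
added, a successor of the remote head cda648ca50f5. A sketch of that
exemption (toy precursor map, not the real discovery logic):

    # precursors maps node -> list of precursor nodes from the markers.
    def replacesremotehead(node, remoteheads, precursors):
        stack, seen = list(precursors.get(node, ())), set()
        while stack:
            prec = stack.pop()
            if prec in seen:
                continue
            seen.add(prec)
            if prec in remoteheads:
                return True
            stack.extend(precursors.get(prec, ()))
        return False

    precursors = {'3de5eca88c00': ['cda648ca50f5']}
    assert replacesremotehead('3de5eca88c00', {'cda648ca50f5'}, precursors)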
572 580 Checking the _enable=False warning when obsolete markers exist
573 581
574 582 $ echo '[extensions]' >> $HGRCPATH
575 583 $ echo "obs=!" >> $HGRCPATH
576 584 $ hg log -r tip
577 585 obsolete feature not enabled but 8 markers found!
578 586 changeset: 6:3de5eca88c00
579 587 tag: tip
580 588 parent: 3:6f9641995072
581 589 user: test
582 590 date: Thu Jan 01 00:00:00 1970 +0000
583 591 summary: add obsolete_e
584 592