obsolete: fix typos in comments introduced by 6955d69a52a4
Thomas Arendsen Hein
r17306:7d2967de stable
@@ -1,2602 +1,2602 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
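# A hypothetical usage sketch: a localpeer is a thin proxy, so the
# wire-style calls above resolve directly against the wrapped repo.
#
#     peer = repo.peer()                 # returns localpeer(repo)
#     peer.lookup('tip') == repo.lookup('tip')
#     peer.listkeys('bookmarks')         # proxied to repo.listkeys
#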
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wopener = scmutil.opener(path, expand=True)
121 121 self.wvfs = self.wopener
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.opener = scmutil.opener(self.path)
127 127 self.vfs = self.opener
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phases if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed roots.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
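        # A hypothetical example of such a callback, matching the shape
        # described above (func(repo, roots) --> processed roots):
        #
        #     def adjustphases(repo, roots):
        #         # inspect or seed the phase roots here
        #         return roots
        #     self._phasedefaults.append(adjustphases)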
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
186 186 self.spath = self.store.path
187 187 self.sopener = self.store.opener
188 188 self.svfs = self.sopener
189 189 self.sjoin = self.store.join
190 190 self.opener.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
262 262
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
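    # For example (hypothetical bookmarks): if both 'work' and
    # 'work@default' exist, bookmarkheads('work') returns the nodes of
    # both, since divergent bookmark variants share the name before the
    # '@' suffix.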
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 if store and not obsolete._enabled:
293 # message is rare enough to not be stranlated
293 # message is rare enough to not be translated
294 294 msg = 'obsolete feature not enabled but %i markers found!\n'
295 295 self.ui.warn(msg % len(list(store)))
296 296 return store
297 297
298 298 @propertycache
299 299 def hiddenrevs(self):
300 300 """hiddenrevs: revs that should be hidden by command and tools
301 301
302 302 This set is carried on the repo to ease initialisation and lazy
303 303 loading it'll probably move back to changelog for efficienty and
304 304 consistency reason
305 305
306 306 Note that the hiddenrevs will needs invalidations when
307 307 - a new changesets is added (possible unstable above extinct)
308 308 - a new obsolete marker is added (possible new extinct changeset)
309 309 """
310 310 hidden = set()
311 311 if self.obsstore:
312 312 ### hide extinct changesets that are not accessible by any means
313 313 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
314 314 hidden.update(self.revs(hiddenquery))
315 315 return hidden
316 316
317 317 @storecache('00changelog.i')
318 318 def changelog(self):
319 319 c = changelog.changelog(self.sopener)
320 320 if 'HG_PENDING' in os.environ:
321 321 p = os.environ['HG_PENDING']
322 322 if p.startswith(self.root):
323 323 c.readpending('00changelog.i.a')
324 324 return c
325 325
326 326 @storecache('00manifest.i')
327 327 def manifest(self):
328 328 return manifest.manifest(self.sopener)
329 329
330 330 @filecache('dirstate')
331 331 def dirstate(self):
332 332 warned = [0]
333 333 def validate(node):
334 334 try:
335 335 self.changelog.rev(node)
336 336 return node
337 337 except error.LookupError:
338 338 if not warned[0]:
339 339 warned[0] = True
340 340 self.ui.warn(_("warning: ignoring unknown"
341 341 " working parent %s!\n") % short(node))
342 342 return nullid
343 343
344 344 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
345 345
346 346 def __getitem__(self, changeid):
347 347 if changeid is None:
348 348 return context.workingctx(self)
349 349 return context.changectx(self, changeid)
350 350
351 351 def __contains__(self, changeid):
352 352 try:
353 353 return bool(self.lookup(changeid))
354 354 except error.RepoLookupError:
355 355 return False
356 356
357 357 def __nonzero__(self):
358 358 return True
359 359
360 360 def __len__(self):
361 361 return len(self.changelog)
362 362
363 363 def __iter__(self):
364 364 for i in xrange(len(self)):
365 365 yield i
366 366
367 367 def revs(self, expr, *args):
368 368 '''Return a list of revisions matching the given revset'''
369 369 expr = revset.formatspec(expr, *args)
370 370 m = revset.match(None, expr)
371 371 return [r for r in m(self, range(len(self)))]
372 372
373 373 def set(self, expr, *args):
374 374 '''
375 375 Yield a context for each matching revision, after doing arg
376 376 replacement via revset.formatspec
377 377 '''
378 378 for r in self.revs(expr, *args):
379 379 yield self[r]
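    # Usage sketch (hypothetical arguments), relying on the standard
    # revset.formatspec substitutions such as %d and %s:
    #
    #     repo.revs('%d::%d', startrev, endrev)     # list of rev numbers
    #     for ctx in repo.set('branch(%s)', 'default'):
    #         print ctx.hex()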
380 380
381 381 def url(self):
382 382 return 'file:' + self.root
383 383
384 384 def hook(self, name, throw=False, **args):
385 385 return hook.hook(self.ui, self, name, throw, **args)
386 386
387 387 tag_disallowed = ':\r\n'
388 388
389 389 def _tag(self, names, node, message, local, user, date, extra={}):
390 390 if isinstance(names, str):
391 391 allchars = names
392 392 names = (names,)
393 393 else:
394 394 allchars = ''.join(names)
395 395 for c in self.tag_disallowed:
396 396 if c in allchars:
397 397 raise util.Abort(_('%r cannot be used in a tag name') % c)
398 398
399 399 branches = self.branchmap()
400 400 for name in names:
401 401 self.hook('pretag', throw=True, node=hex(node), tag=name,
402 402 local=local)
403 403 if name in branches:
404 404 self.ui.warn(_("warning: tag %s conflicts with existing"
405 405 " branch name\n") % name)
406 406
407 407 def writetags(fp, names, munge, prevtags):
408 408 fp.seek(0, 2)
409 409 if prevtags and prevtags[-1] != '\n':
410 410 fp.write('\n')
411 411 for name in names:
412 412 m = munge and munge(name) or name
413 413 if (self._tagscache.tagtypes and
414 414 name in self._tagscache.tagtypes):
415 415 old = self.tags().get(name, nullid)
416 416 fp.write('%s %s\n' % (hex(old), m))
417 417 fp.write('%s %s\n' % (hex(node), m))
418 418 fp.close()
419 419
420 420 prevtags = ''
421 421 if local:
422 422 try:
423 423 fp = self.opener('localtags', 'r+')
424 424 except IOError:
425 425 fp = self.opener('localtags', 'a')
426 426 else:
427 427 prevtags = fp.read()
428 428
429 429 # local tags are stored in the current charset
430 430 writetags(fp, names, None, prevtags)
431 431 for name in names:
432 432 self.hook('tag', node=hex(node), tag=name, local=local)
433 433 return
434 434
435 435 try:
436 436 fp = self.wfile('.hgtags', 'rb+')
437 437 except IOError, e:
438 438 if e.errno != errno.ENOENT:
439 439 raise
440 440 fp = self.wfile('.hgtags', 'ab')
441 441 else:
442 442 prevtags = fp.read()
443 443
444 444 # committed tags are stored in UTF-8
445 445 writetags(fp, names, encoding.fromlocal, prevtags)
446 446
447 447 fp.close()
448 448
449 449 self.invalidatecaches()
450 450
451 451 if '.hgtags' not in self.dirstate:
452 452 self[None].add(['.hgtags'])
453 453
454 454 m = matchmod.exact(self.root, '', ['.hgtags'])
455 455 tagnode = self.commit(message, user, date, extra=extra, match=m)
456 456
457 457 for name in names:
458 458 self.hook('tag', node=hex(node), tag=name, local=local)
459 459
460 460 return tagnode
461 461
462 462 def tag(self, names, node, message, local, user, date):
463 463 '''tag a revision with one or more symbolic names.
464 464
465 465 names is a list of strings or, when adding a single tag, names may be a
466 466 string.
467 467
468 468 if local is True, the tags are stored in a per-repository file.
469 469 otherwise, they are stored in the .hgtags file, and a new
470 470 changeset is committed with the change.
471 471
472 472 keyword arguments:
473 473
474 474 local: whether to store tags in non-version-controlled file
475 475 (default False)
476 476
477 477 message: commit message to use if committing
478 478
479 479 user: name of user to use if committing
480 480
481 481 date: date tuple to use if committing'''
482 482
483 483 if not local:
484 484 for x in self.status()[:5]:
485 485 if '.hgtags' in x:
486 486 raise util.Abort(_('working copy of .hgtags is changed '
487 487 '(please commit .hgtags manually)'))
488 488
489 489 self.tags() # instantiate the cache
490 490 self._tag(names, node, message, local, user, date)
491 491
492 492 @propertycache
493 493 def _tagscache(self):
494 494 '''Returns a tagscache object that contains various tags related
495 495 caches.'''
496 496
497 497 # This simplifies its cache management by having one decorated
498 498 # function (this one) and the rest simply fetch things from it.
499 499 class tagscache(object):
500 500 def __init__(self):
501 501 # These two define the set of tags for this repository. tags
502 502 # maps tag name to node; tagtypes maps tag name to 'global' or
503 503 # 'local'. (Global tags are defined by .hgtags across all
504 504 # heads, and local tags are defined in .hg/localtags.)
505 505 # They constitute the in-memory cache of tags.
506 506 self.tags = self.tagtypes = None
507 507
508 508 self.nodetagscache = self.tagslist = None
509 509
510 510 cache = tagscache()
511 511 cache.tags, cache.tagtypes = self._findtags()
512 512
513 513 return cache
514 514
515 515 def tags(self):
516 516 '''return a mapping of tag to node'''
517 517 t = {}
518 518 for k, v in self._tagscache.tags.iteritems():
519 519 try:
520 520 # ignore tags to unknown nodes
521 521 self.changelog.rev(v)
522 522 t[k] = v
523 523 except (error.LookupError, ValueError):
524 524 pass
525 525 return t
526 526
527 527 def _findtags(self):
528 528 '''Do the hard work of finding tags. Return a pair of dicts
529 529 (tags, tagtypes) where tags maps tag name to node, and tagtypes
530 530 maps tag name to a string like \'global\' or \'local\'.
531 531 Subclasses or extensions are free to add their own tags, but
532 532 should be aware that the returned dicts will be retained for the
533 533 duration of the localrepo object.'''
534 534
535 535 # XXX what tagtype should subclasses/extensions use? Currently
536 536 # mq and bookmarks add tags, but do not set the tagtype at all.
537 537 # Should each extension invent its own tag type? Should there
538 538 # be one tagtype for all such "virtual" tags? Or is the status
539 539 # quo fine?
540 540
541 541 alltags = {} # map tag name to (node, hist)
542 542 tagtypes = {}
543 543
544 544 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
545 545 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
546 546
547 547 # Build the return dicts. Have to re-encode tag names because
548 548 # the tags module always uses UTF-8 (in order not to lose info
549 549 # writing to the cache), but the rest of Mercurial wants them in
550 550 # local encoding.
551 551 tags = {}
552 552 for (name, (node, hist)) in alltags.iteritems():
553 553 if node != nullid:
554 554 tags[encoding.tolocal(name)] = node
555 555 tags['tip'] = self.changelog.tip()
556 556 tagtypes = dict([(encoding.tolocal(name), value)
557 557 for (name, value) in tagtypes.iteritems()])
558 558 return (tags, tagtypes)
559 559
560 560 def tagtype(self, tagname):
561 561 '''
562 562 return the type of the given tag. result can be:
563 563
564 564 'local' : a local tag
565 565 'global' : a global tag
566 566 None : tag does not exist
567 567 '''
568 568
569 569 return self._tagscache.tagtypes.get(tagname)
570 570
571 571 def tagslist(self):
572 572 '''return a list of tags ordered by revision'''
573 573 if not self._tagscache.tagslist:
574 574 l = []
575 575 for t, n in self.tags().iteritems():
576 576 r = self.changelog.rev(n)
577 577 l.append((r, t, n))
578 578 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
579 579
580 580 return self._tagscache.tagslist
581 581
582 582 def nodetags(self, node):
583 583 '''return the tags associated with a node'''
584 584 if not self._tagscache.nodetagscache:
585 585 nodetagscache = {}
586 586 for t, n in self._tagscache.tags.iteritems():
587 587 nodetagscache.setdefault(n, []).append(t)
588 588 for tags in nodetagscache.itervalues():
589 589 tags.sort()
590 590 self._tagscache.nodetagscache = nodetagscache
591 591 return self._tagscache.nodetagscache.get(node, [])
592 592
593 593 def nodebookmarks(self, node):
594 594 marks = []
595 595 for bookmark, n in self._bookmarks.iteritems():
596 596 if n == node:
597 597 marks.append(bookmark)
598 598 return sorted(marks)
599 599
600 600 def _branchtags(self, partial, lrev):
601 601 # TODO: rename this function?
602 602 tiprev = len(self) - 1
603 603 if lrev != tiprev:
604 604 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
605 605 self._updatebranchcache(partial, ctxgen)
606 606 self._writebranchcache(partial, self.changelog.tip(), tiprev)
607 607
608 608 return partial
609 609
610 610 def updatebranchcache(self):
611 611 tip = self.changelog.tip()
612 612 if self._branchcache is not None and self._branchcachetip == tip:
613 613 return
614 614
615 615 oldtip = self._branchcachetip
616 616 self._branchcachetip = tip
617 617 if oldtip is None or oldtip not in self.changelog.nodemap:
618 618 partial, last, lrev = self._readbranchcache()
619 619 else:
620 620 lrev = self.changelog.rev(oldtip)
621 621 partial = self._branchcache
622 622
623 623 self._branchtags(partial, lrev)
624 624 # this private cache holds all heads (not just the branch tips)
625 625 self._branchcache = partial
626 626
627 627 def branchmap(self):
628 628 '''returns a dictionary {branch: [branchheads]}'''
629 629 self.updatebranchcache()
630 630 return self._branchcache
631 631
632 632 def _branchtip(self, heads):
633 633 '''return the tipmost branch head in heads'''
634 634 tip = heads[-1]
635 635 for h in reversed(heads):
636 636 if not self[h].closesbranch():
637 637 tip = h
638 638 break
639 639 return tip
640 640
641 641 def branchtip(self, branch):
642 642 '''return the tip node for a given branch'''
643 643 if branch not in self.branchmap():
644 644 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
645 645 return self._branchtip(self.branchmap()[branch])
646 646
647 647 def branchtags(self):
648 648 '''return a dict where branch names map to the tipmost head of
649 649 the branch; open heads come before closed'''
650 650 bt = {}
651 651 for bn, heads in self.branchmap().iteritems():
652 652 bt[bn] = self._branchtip(heads)
653 653 return bt
654 654
655 655 def _readbranchcache(self):
656 656 partial = {}
657 657 try:
658 658 f = self.opener("cache/branchheads")
659 659 lines = f.read().split('\n')
660 660 f.close()
661 661 except (IOError, OSError):
662 662 return {}, nullid, nullrev
663 663
664 664 try:
665 665 last, lrev = lines.pop(0).split(" ", 1)
666 666 last, lrev = bin(last), int(lrev)
667 667 if lrev >= len(self) or self[lrev].node() != last:
668 668 # invalidate the cache
669 669 raise ValueError('invalidating branch cache (tip differs)')
670 670 for l in lines:
671 671 if not l:
672 672 continue
673 673 node, label = l.split(" ", 1)
674 674 label = encoding.tolocal(label.strip())
675 675 if not node in self:
676 676 raise ValueError('invalidating branch cache because node '+
677 677 '%s does not exist' % node)
678 678 partial.setdefault(label, []).append(bin(node))
679 679 except KeyboardInterrupt:
680 680 raise
681 681 except Exception, inst:
682 682 if self.ui.debugflag:
683 683 self.ui.warn(str(inst), '\n')
684 684 partial, last, lrev = {}, nullid, nullrev
685 685 return partial, last, lrev
686 686
687 687 def _writebranchcache(self, branches, tip, tiprev):
688 688 try:
689 689 f = self.opener("cache/branchheads", "w", atomictemp=True)
690 690 f.write("%s %s\n" % (hex(tip), tiprev))
691 691 for label, nodes in branches.iteritems():
692 692 for node in nodes:
693 693 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
694 694 f.close()
695 695 except (IOError, OSError):
696 696 pass
697 697
698 698 def _updatebranchcache(self, partial, ctxgen):
699 699 """Given a branchhead cache, partial, that may have extra nodes or be
700 700 missing heads, and a generator of nodes that are at least a superset of
701 701 heads missing, this function updates partial to be correct.
702 702 """
703 703 # collect new branch entries
704 704 newbranches = {}
705 705 for c in ctxgen:
706 706 newbranches.setdefault(c.branch(), []).append(c.node())
707 707 # if older branchheads are reachable from new ones, they aren't
708 708 # really branchheads. Note checking parents is insufficient:
709 709 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
710 710 for branch, newnodes in newbranches.iteritems():
711 711 bheads = partial.setdefault(branch, [])
712 712 # Remove candidate heads that no longer are in the repo (e.g., as
713 713 # the result of a strip that just happened). Avoid using 'node in
714 714 # self' here because that dives down into branchcache code somewhat
715 715 # recursively.
716 716 bheadrevs = [self.changelog.rev(node) for node in bheads
717 717 if self.changelog.hasnode(node)]
718 718 newheadrevs = [self.changelog.rev(node) for node in newnodes
719 719 if self.changelog.hasnode(node)]
720 720 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
721 721 # Remove duplicates - nodes that are in newheadrevs and are already
722 722 # in bheadrevs. This can happen if you strip a node whose parent
723 723 # was already a head (because they're on different branches).
724 724 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
725 725
726 726 # Starting from tip means fewer passes over reachable. If we know
727 727 # the new candidates are not ancestors of existing heads, we don't
728 728 # have to examine ancestors of existing heads
729 729 if ctxisnew:
730 730 iterrevs = sorted(newheadrevs)
731 731 else:
732 732 iterrevs = list(bheadrevs)
733 733
734 734 # This loop prunes out two kinds of heads - heads that are
735 735 # superseded by a head in newheadrevs, and newheadrevs that are not
736 736 # heads because an existing head is their descendant.
737 737 while iterrevs:
738 738 latest = iterrevs.pop()
739 739 if latest not in bheadrevs:
740 740 continue
741 741 ancestors = set(self.changelog.ancestors([latest],
742 742 bheadrevs[0]))
743 743 if ancestors:
744 744 bheadrevs = [b for b in bheadrevs if b not in ancestors]
745 745 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
746 746
747 747 # There may be branches that cease to exist when the last commit in the
748 748 # branch was stripped. This code filters them out. Note that the
749 749 # branch that ceased to exist may not be in newbranches, because
750 750 # newbranches holds the candidate heads, and after stripping the last
751 751 # commit in a branch those heads belong to the parent branch.
752 752 for branch in partial.keys():
753 753 nodes = [head for head in partial[branch]
754 754 if self.changelog.hasnode(head)]
755 755 if not nodes:
756 756 del partial[branch]
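    # A worked example of the pruning above, using the hypothetical
    # history 1 (branch a) -> 2 (branch b) -> 3 (branch a): rev 1 is an
    # ancestor of new head 3, so it is dropped from branch a's head list
    # even though its only child lives on branch b.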
757 757
758 758 def lookup(self, key):
759 759 return self[key].node()
760 760
761 761 def lookupbranch(self, key, remote=None):
762 762 repo = remote or self
763 763 if key in repo.branchmap():
764 764 return key
765 765
766 766 repo = (remote and remote.local()) and remote or self
767 767 return repo[key].branch()
768 768
769 769 def known(self, nodes):
770 770 nm = self.changelog.nodemap
771 771 pc = self._phasecache
772 772 result = []
773 773 for n in nodes:
774 774 r = nm.get(n)
775 775 resp = not (r is None or pc.phase(self, r) >= phases.secret)
776 776 result.append(resp)
777 777 return result
778 778
779 779 def local(self):
780 780 return self
781 781
782 782 def cancopy(self):
783 783 return self.local() # so statichttprepo's override of local() works
784 784
785 785 def join(self, f):
786 786 return os.path.join(self.path, f)
787 787
788 788 def wjoin(self, f):
789 789 return os.path.join(self.root, f)
790 790
791 791 def file(self, f):
792 792 if f[0] == '/':
793 793 f = f[1:]
794 794 return filelog.filelog(self.sopener, f)
795 795
796 796 def changectx(self, changeid):
797 797 return self[changeid]
798 798
799 799 def parents(self, changeid=None):
800 800 '''get list of changectxs for parents of changeid'''
801 801 return self[changeid].parents()
802 802
803 803 def setparents(self, p1, p2=nullid):
804 804 copies = self.dirstate.setparents(p1, p2)
805 805 if copies:
806 806 # Adjust copy records; the dirstate cannot do it, as it
807 807 # requires access to the parents' manifests. Preserve them
808 808 # only for entries added to the first parent.
809 809 pctx = self[p1]
810 810 for f in copies:
811 811 if f not in pctx and copies[f] in pctx:
812 812 self.dirstate.copy(copies[f], f)
813 813
814 814 def filectx(self, path, changeid=None, fileid=None):
815 815 """changeid can be a changeset revision, node, or tag.
816 816 fileid can be a file revision or node."""
817 817 return context.filectx(self, path, changeid, fileid)
818 818
819 819 def getcwd(self):
820 820 return self.dirstate.getcwd()
821 821
822 822 def pathto(self, f, cwd=None):
823 823 return self.dirstate.pathto(f, cwd)
824 824
825 825 def wfile(self, f, mode='r'):
826 826 return self.wopener(f, mode)
827 827
828 828 def _link(self, f):
829 829 return os.path.islink(self.wjoin(f))
830 830
831 831 def _loadfilter(self, filter):
832 832 if filter not in self.filterpats:
833 833 l = []
834 834 for pat, cmd in self.ui.configitems(filter):
835 835 if cmd == '!':
836 836 continue
837 837 mf = matchmod.match(self.root, '', [pat])
838 838 fn = None
839 839 params = cmd
840 840 for name, filterfn in self._datafilters.iteritems():
841 841 if cmd.startswith(name):
842 842 fn = filterfn
843 843 params = cmd[len(name):].lstrip()
844 844 break
845 845 if not fn:
846 846 fn = lambda s, c, **kwargs: util.filter(s, c)
847 847 # Wrap old filters not supporting keyword arguments
848 848 if not inspect.getargspec(fn)[2]:
849 849 oldfn = fn
850 850 fn = lambda s, c, **kwargs: oldfn(s, c)
851 851 l.append((mf, fn, params))
852 852 self.filterpats[filter] = l
853 853 return self.filterpats[filter]
854 854
855 855 def _filter(self, filterpats, filename, data):
856 856 for mf, fn, cmd in filterpats:
857 857 if mf(filename):
858 858 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
859 859 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
860 860 break
861 861
862 862 return data
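    # Sketch of an hgrc section (hypothetical command) that
    # _loadfilter('encode') would compile into the (matcher, fn, params)
    # triples consumed by _filter() above:
    #
    #     [encode]
    #     *.txt = tempfile: unix2dos INFILE OUTFILE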
863 863
864 864 @propertycache
865 865 def _encodefilterpats(self):
866 866 return self._loadfilter('encode')
867 867
868 868 @propertycache
869 869 def _decodefilterpats(self):
870 870 return self._loadfilter('decode')
871 871
872 872 def adddatafilter(self, name, filter):
873 873 self._datafilters[name] = filter
874 874
875 875 def wread(self, filename):
876 876 if self._link(filename):
877 877 data = os.readlink(self.wjoin(filename))
878 878 else:
879 879 data = self.wopener.read(filename)
880 880 return self._filter(self._encodefilterpats, filename, data)
881 881
882 882 def wwrite(self, filename, data, flags):
883 883 data = self._filter(self._decodefilterpats, filename, data)
884 884 if 'l' in flags:
885 885 self.wopener.symlink(data, filename)
886 886 else:
887 887 self.wopener.write(filename, data)
888 888 if 'x' in flags:
889 889 util.setflags(self.wjoin(filename), False, True)
890 890
891 891 def wwritedata(self, filename, data):
892 892 return self._filter(self._decodefilterpats, filename, data)
893 893
894 894 def transaction(self, desc):
895 895 tr = self._transref and self._transref() or None
896 896 if tr and tr.running():
897 897 return tr.nest()
898 898
899 899 # abort here if the journal already exists
900 900 if os.path.exists(self.sjoin("journal")):
901 901 raise error.RepoError(
902 902 _("abandoned transaction found - run hg recover"))
903 903
904 904 self._writejournal(desc)
905 905 renames = [(x, undoname(x)) for x in self._journalfiles()]
906 906
907 907 tr = transaction.transaction(self.ui.warn, self.sopener,
908 908 self.sjoin("journal"),
909 909 aftertrans(renames),
910 910 self.store.createmode)
911 911 self._transref = weakref.ref(tr)
912 912 return tr
913 913
914 914 def _journalfiles(self):
915 915 return (self.sjoin('journal'), self.join('journal.dirstate'),
916 916 self.join('journal.branch'), self.join('journal.desc'),
917 917 self.join('journal.bookmarks'),
918 918 self.sjoin('journal.phaseroots'))
919 919
920 920 def undofiles(self):
921 921 return [undoname(x) for x in self._journalfiles()]
922 922
923 923 def _writejournal(self, desc):
924 924 self.opener.write("journal.dirstate",
925 925 self.opener.tryread("dirstate"))
926 926 self.opener.write("journal.branch",
927 927 encoding.fromlocal(self.dirstate.branch()))
928 928 self.opener.write("journal.desc",
929 929 "%d\n%s\n" % (len(self), desc))
930 930 self.opener.write("journal.bookmarks",
931 931 self.opener.tryread("bookmarks"))
932 932 self.sopener.write("journal.phaseroots",
933 933 self.sopener.tryread("phaseroots"))
934 934
935 935 def recover(self):
936 936 lock = self.lock()
937 937 try:
938 938 if os.path.exists(self.sjoin("journal")):
939 939 self.ui.status(_("rolling back interrupted transaction\n"))
940 940 transaction.rollback(self.sopener, self.sjoin("journal"),
941 941 self.ui.warn)
942 942 self.invalidate()
943 943 return True
944 944 else:
945 945 self.ui.warn(_("no interrupted transaction available\n"))
946 946 return False
947 947 finally:
948 948 lock.release()
949 949
950 950 def rollback(self, dryrun=False, force=False):
951 951 wlock = lock = None
952 952 try:
953 953 wlock = self.wlock()
954 954 lock = self.lock()
955 955 if os.path.exists(self.sjoin("undo")):
956 956 return self._rollback(dryrun, force)
957 957 else:
958 958 self.ui.warn(_("no rollback information available\n"))
959 959 return 1
960 960 finally:
961 961 release(lock, wlock)
962 962
963 963 def _rollback(self, dryrun, force):
964 964 ui = self.ui
965 965 try:
966 966 args = self.opener.read('undo.desc').splitlines()
967 967 (oldlen, desc, detail) = (int(args[0]), args[1], None)
968 968 if len(args) >= 3:
969 969 detail = args[2]
970 970 oldtip = oldlen - 1
971 971
972 972 if detail and ui.verbose:
973 973 msg = (_('repository tip rolled back to revision %s'
974 974 ' (undo %s: %s)\n')
975 975 % (oldtip, desc, detail))
976 976 else:
977 977 msg = (_('repository tip rolled back to revision %s'
978 978 ' (undo %s)\n')
979 979 % (oldtip, desc))
980 980 except IOError:
981 981 msg = _('rolling back unknown transaction\n')
982 982 desc = None
983 983
984 984 if not force and self['.'] != self['tip'] and desc == 'commit':
985 985 raise util.Abort(
986 986 _('rollback of last commit while not checked out '
987 987 'may lose data'), hint=_('use -f to force'))
988 988
989 989 ui.status(msg)
990 990 if dryrun:
991 991 return 0
992 992
993 993 parents = self.dirstate.parents()
994 994 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
995 995 if os.path.exists(self.join('undo.bookmarks')):
996 996 util.rename(self.join('undo.bookmarks'),
997 997 self.join('bookmarks'))
998 998 if os.path.exists(self.sjoin('undo.phaseroots')):
999 999 util.rename(self.sjoin('undo.phaseroots'),
1000 1000 self.sjoin('phaseroots'))
1001 1001 self.invalidate()
1002 1002
1003 1003 parentgone = (parents[0] not in self.changelog.nodemap or
1004 1004 parents[1] not in self.changelog.nodemap)
1005 1005 if parentgone:
1006 1006 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1007 1007 try:
1008 1008 branch = self.opener.read('undo.branch')
1009 1009 self.dirstate.setbranch(branch)
1010 1010 except IOError:
1011 1011 ui.warn(_('named branch could not be reset: '
1012 1012 'current branch is still \'%s\'\n')
1013 1013 % self.dirstate.branch())
1014 1014
1015 1015 self.dirstate.invalidate()
1016 1016 parents = tuple([p.rev() for p in self.parents()])
1017 1017 if len(parents) > 1:
1018 1018 ui.status(_('working directory now based on '
1019 1019 'revisions %d and %d\n') % parents)
1020 1020 else:
1021 1021 ui.status(_('working directory now based on '
1022 1022 'revision %d\n') % parents)
1023 1023 # TODO: if we know which new heads may result from this rollback, pass
1024 1024 # them to destroy(), which will prevent the branchhead cache from being
1025 1025 # invalidated.
1026 1026 self.destroyed()
1027 1027 return 0
1028 1028
1029 1029 def invalidatecaches(self):
1030 1030 def delcache(name):
1031 1031 try:
1032 1032 delattr(self, name)
1033 1033 except AttributeError:
1034 1034 pass
1035 1035
1036 1036 delcache('_tagscache')
1037 1037
1038 1038 self._branchcache = None # in UTF-8
1039 1039 self._branchcachetip = None
1040 1040
1041 1041 def invalidatedirstate(self):
1042 1042 '''Invalidates the dirstate, causing the next call to dirstate
1043 1043 to check if it was modified since the last time it was read,
1044 1044 rereading it if it has.
1045 1045
1046 1046 This is different from dirstate.invalidate() in that it doesn't always
1047 1047 reread the dirstate. Use dirstate.invalidate() if you want to
1048 1048 explicitly read the dirstate again (i.e. restoring it to a previous
1049 1049 known good state).'''
1050 1050 if 'dirstate' in self.__dict__:
1051 1051 for k in self.dirstate._filecache:
1052 1052 try:
1053 1053 delattr(self.dirstate, k)
1054 1054 except AttributeError:
1055 1055 pass
1056 1056 delattr(self, 'dirstate')
1057 1057
1058 1058 def invalidate(self):
1059 1059 for k in self._filecache:
1060 1060 # dirstate is invalidated separately in invalidatedirstate()
1061 1061 if k == 'dirstate':
1062 1062 continue
1063 1063
1064 1064 try:
1065 1065 delattr(self, k)
1066 1066 except AttributeError:
1067 1067 pass
1068 1068 self.invalidatecaches()
1069 1069
1070 1070 # Discard all cache entries to force reloading everything.
1071 1071 self._filecache.clear()
1072 1072
1073 1073 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1074 1074 try:
1075 1075 l = lock.lock(lockname, 0, releasefn, desc=desc)
1076 1076 except error.LockHeld, inst:
1077 1077 if not wait:
1078 1078 raise
1079 1079 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1080 1080 (desc, inst.locker))
1081 1081 # default to 600 seconds timeout
1082 1082 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1083 1083 releasefn, desc=desc)
1084 1084 if acquirefn:
1085 1085 acquirefn()
1086 1086 return l
1087 1087
1088 1088 def _afterlock(self, callback):
1089 1089 """add a callback to the current repository lock.
1090 1090
1091 1091 The callback will be executed on lock release."""
1092 1092 l = self._lockref and self._lockref()
1093 1093 if l:
1094 1094 l.postrelease.append(callback)
1095 1095 else:
1096 1096 callback()
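    # Usage sketch (hypothetical hook name): defer work until the store
    # lock is released, as commit() does with its commithook below.
    #
    #     def notify():
    #         repo.hook('mychange')
    #     repo._afterlock(notify)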
1097 1097
1098 1098 def lock(self, wait=True):
1099 1099 '''Lock the repository store (.hg/store) and return a weak reference
1100 1100 to the lock. Use this before modifying the store (e.g. committing or
1101 1101 stripping). If you are opening a transaction, get a lock as well.'''
1102 1102 l = self._lockref and self._lockref()
1103 1103 if l is not None and l.held:
1104 1104 l.lock()
1105 1105 return l
1106 1106
1107 1107 def unlock():
1108 1108 self.store.write()
1109 1109 if '_phasecache' in vars(self):
1110 1110 self._phasecache.write()
1111 1111 for k, ce in self._filecache.items():
1112 1112 if k == 'dirstate':
1113 1113 continue
1114 1114 ce.refresh()
1115 1115
1116 1116 l = self._lock(self.sjoin("lock"), wait, unlock,
1117 1117 self.invalidate, _('repository %s') % self.origroot)
1118 1118 self._lockref = weakref.ref(l)
1119 1119 return l
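    # Typical locking pattern (a sketch with a hypothetical description
    # string; compare commitctx() further down):
    #
    #     tr = None
    #     lock = repo.lock()
    #     try:
    #         tr = repo.transaction('my-operation')
    #         # ... modify the store ...
    #         tr.close()
    #     finally:
    #         if tr:
    #             tr.release()
    #         lock.release()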
1120 1120
1121 1121 def wlock(self, wait=True):
1122 1122 '''Lock the non-store parts of the repository (everything under
1123 1123 .hg except .hg/store) and return a weak reference to the lock.
1124 1124 Use this before modifying files in .hg.'''
1125 1125 l = self._wlockref and self._wlockref()
1126 1126 if l is not None and l.held:
1127 1127 l.lock()
1128 1128 return l
1129 1129
1130 1130 def unlock():
1131 1131 self.dirstate.write()
1132 1132 ce = self._filecache.get('dirstate')
1133 1133 if ce:
1134 1134 ce.refresh()
1135 1135
1136 1136 l = self._lock(self.join("wlock"), wait, unlock,
1137 1137 self.invalidatedirstate, _('working directory of %s') %
1138 1138 self.origroot)
1139 1139 self._wlockref = weakref.ref(l)
1140 1140 return l
1141 1141
1142 1142 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1143 1143 """
1144 1144 commit an individual file as part of a larger transaction
1145 1145 """
1146 1146
1147 1147 fname = fctx.path()
1148 1148 text = fctx.data()
1149 1149 flog = self.file(fname)
1150 1150 fparent1 = manifest1.get(fname, nullid)
1151 1151 fparent2 = fparent2o = manifest2.get(fname, nullid)
1152 1152
1153 1153 meta = {}
1154 1154 copy = fctx.renamed()
1155 1155 if copy and copy[0] != fname:
1156 1156 # Mark the new revision of this file as a copy of another
1157 1157 # file. This copy data will effectively act as a parent
1158 1158 # of this new revision. If this is a merge, the first
1159 1159 # parent will be the nullid (meaning "look up the copy data")
1160 1160 # and the second one will be the other parent. For example:
1161 1161 #
1162 1162 # 0 --- 1 --- 3 rev1 changes file foo
1163 1163 # \ / rev2 renames foo to bar and changes it
1164 1164 # \- 2 -/ rev3 should have bar with all changes and
1165 1165 # should record that bar descends from
1166 1166 # bar in rev2 and foo in rev1
1167 1167 #
1168 1168 # this allows this merge to succeed:
1169 1169 #
1170 1170 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1171 1171 # \ / merging rev3 and rev4 should use bar@rev2
1172 1172 # \- 2 --- 4 as the merge base
1173 1173 #
1174 1174
1175 1175 cfname = copy[0]
1176 1176 crev = manifest1.get(cfname)
1177 1177 newfparent = fparent2
1178 1178
1179 1179 if manifest2: # branch merge
1180 1180 if fparent2 == nullid or crev is None: # copied on remote side
1181 1181 if cfname in manifest2:
1182 1182 crev = manifest2[cfname]
1183 1183 newfparent = fparent1
1184 1184
1185 1185 # find source in nearest ancestor if we've lost track
1186 1186 if not crev:
1187 1187 self.ui.debug(" %s: searching for copy revision for %s\n" %
1188 1188 (fname, cfname))
1189 1189 for ancestor in self[None].ancestors():
1190 1190 if cfname in ancestor:
1191 1191 crev = ancestor[cfname].filenode()
1192 1192 break
1193 1193
1194 1194 if crev:
1195 1195 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1196 1196 meta["copy"] = cfname
1197 1197 meta["copyrev"] = hex(crev)
1198 1198 fparent1, fparent2 = nullid, newfparent
1199 1199 else:
1200 1200 self.ui.warn(_("warning: can't find ancestor for '%s' "
1201 1201 "copied from '%s'!\n") % (fname, cfname))
1202 1202
1203 1203 elif fparent2 != nullid:
1204 1204 # is one parent an ancestor of the other?
1205 1205 fparentancestor = flog.ancestor(fparent1, fparent2)
1206 1206 if fparentancestor == fparent1:
1207 1207 fparent1, fparent2 = fparent2, nullid
1208 1208 elif fparentancestor == fparent2:
1209 1209 fparent2 = nullid
1210 1210
1211 1211 # is the file changed?
1212 1212 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1213 1213 changelist.append(fname)
1214 1214 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1215 1215
1216 1216 # are just the flags changed during merge?
1217 1217 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1218 1218 changelist.append(fname)
1219 1219
1220 1220 return fparent1
1221 1221
1222 1222 def commit(self, text="", user=None, date=None, match=None, force=False,
1223 1223 editor=False, extra={}):
1224 1224 """Add a new revision to current repository.
1225 1225
1226 1226 Revision information is gathered from the working directory,
1227 1227 match can be used to filter the committed files. If editor is
1228 1228 supplied, it is called to get a commit message.
1229 1229 """
1230 1230
1231 1231 def fail(f, msg):
1232 1232 raise util.Abort('%s: %s' % (f, msg))
1233 1233
1234 1234 if not match:
1235 1235 match = matchmod.always(self.root, '')
1236 1236
1237 1237 if not force:
1238 1238 vdirs = []
1239 1239 match.dir = vdirs.append
1240 1240 match.bad = fail
1241 1241
1242 1242 wlock = self.wlock()
1243 1243 try:
1244 1244 wctx = self[None]
1245 1245 merge = len(wctx.parents()) > 1
1246 1246
1247 1247 if (not force and merge and match and
1248 1248 (match.files() or match.anypats())):
1249 1249 raise util.Abort(_('cannot partially commit a merge '
1250 1250 '(do not specify files or patterns)'))
1251 1251
1252 1252 changes = self.status(match=match, clean=force)
1253 1253 if force:
1254 1254 changes[0].extend(changes[6]) # mq may commit unchanged files
1255 1255
1256 1256 # check subrepos
1257 1257 subs = []
1258 1258 commitsubs = set()
1259 1259 newstate = wctx.substate.copy()
1260 1260 # only manage subrepos and .hgsubstate if .hgsub is present
1261 1261 if '.hgsub' in wctx:
1262 1262 # we'll decide whether to track this ourselves, thanks
1263 1263 if '.hgsubstate' in changes[0]:
1264 1264 changes[0].remove('.hgsubstate')
1265 1265 if '.hgsubstate' in changes[2]:
1266 1266 changes[2].remove('.hgsubstate')
1267 1267
1268 1268 # compare current state to last committed state
1269 1269 # build new substate based on last committed state
1270 1270 oldstate = wctx.p1().substate
1271 1271 for s in sorted(newstate.keys()):
1272 1272 if not match(s):
1273 1273 # ignore working copy, use old state if present
1274 1274 if s in oldstate:
1275 1275 newstate[s] = oldstate[s]
1276 1276 continue
1277 1277 if not force:
1278 1278 raise util.Abort(
1279 1279 _("commit with new subrepo %s excluded") % s)
1280 1280 if wctx.sub(s).dirty(True):
1281 1281 if not self.ui.configbool('ui', 'commitsubrepos'):
1282 1282 raise util.Abort(
1283 1283 _("uncommitted changes in subrepo %s") % s,
1284 1284 hint=_("use --subrepos for recursive commit"))
1285 1285 subs.append(s)
1286 1286 commitsubs.add(s)
1287 1287 else:
1288 1288 bs = wctx.sub(s).basestate()
1289 1289 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1290 1290 if oldstate.get(s, (None, None, None))[1] != bs:
1291 1291 subs.append(s)
1292 1292
1293 1293 # check for removed subrepos
1294 1294 for p in wctx.parents():
1295 1295 r = [s for s in p.substate if s not in newstate]
1296 1296 subs += [s for s in r if match(s)]
1297 1297 if subs:
1298 1298 if (not match('.hgsub') and
1299 1299 '.hgsub' in (wctx.modified() + wctx.added())):
1300 1300 raise util.Abort(
1301 1301 _("can't commit subrepos without .hgsub"))
1302 1302 changes[0].insert(0, '.hgsubstate')
1303 1303
1304 1304 elif '.hgsub' in changes[2]:
1305 1305 # clean up .hgsubstate when .hgsub is removed
1306 1306 if ('.hgsubstate' in wctx and
1307 1307 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1308 1308 changes[2].insert(0, '.hgsubstate')
1309 1309
1310 1310 # make sure all explicit patterns are matched
1311 1311 if not force and match.files():
1312 1312 matched = set(changes[0] + changes[1] + changes[2])
1313 1313
1314 1314 for f in match.files():
1315 1315 if f == '.' or f in matched or f in wctx.substate:
1316 1316 continue
1317 1317 if f in changes[3]: # missing
1318 1318 fail(f, _('file not found!'))
1319 1319 if f in vdirs: # visited directory
1320 1320 d = f + '/'
1321 1321 for mf in matched:
1322 1322 if mf.startswith(d):
1323 1323 break
1324 1324 else:
1325 1325 fail(f, _("no match under directory!"))
1326 1326 elif f not in self.dirstate:
1327 1327 fail(f, _("file not tracked!"))
1328 1328
1329 1329 if (not force and not extra.get("close") and not merge
1330 1330 and not (changes[0] or changes[1] or changes[2])
1331 1331 and wctx.branch() == wctx.p1().branch()):
1332 1332 return None
1333 1333
1334 1334 if merge and changes[3]:
1335 1335 raise util.Abort(_("cannot commit merge with missing files"))
1336 1336
1337 1337 ms = mergemod.mergestate(self)
1338 1338 for f in changes[0]:
1339 1339 if f in ms and ms[f] == 'u':
1340 1340 raise util.Abort(_("unresolved merge conflicts "
1341 1341 "(see hg help resolve)"))
1342 1342
1343 1343 cctx = context.workingctx(self, text, user, date, extra, changes)
1344 1344 if editor:
1345 1345 cctx._text = editor(self, cctx, subs)
1346 1346 edited = (text != cctx._text)
1347 1347
1348 1348 # commit subs and write new state
1349 1349 if subs:
1350 1350 for s in sorted(commitsubs):
1351 1351 sub = wctx.sub(s)
1352 1352 self.ui.status(_('committing subrepository %s\n') %
1353 1353 subrepo.subrelpath(sub))
1354 1354 sr = sub.commit(cctx._text, user, date)
1355 1355 newstate[s] = (newstate[s][0], sr)
1356 1356 subrepo.writestate(self, newstate)
1357 1357
1358 1358 # Save commit message in case this transaction gets rolled back
1359 1359 # (e.g. by a pretxncommit hook). Leave the content alone on
1360 1360 # the assumption that the user will use the same editor again.
1361 1361 msgfn = self.savecommitmessage(cctx._text)
1362 1362
1363 1363 p1, p2 = self.dirstate.parents()
1364 1364 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1365 1365 try:
1366 1366 self.hook("precommit", throw=True, parent1=hookp1,
1367 1367 parent2=hookp2)
1368 1368 ret = self.commitctx(cctx, True)
1369 1369 except: # re-raises
1370 1370 if edited:
1371 1371 self.ui.write(
1372 1372 _('note: commit message saved in %s\n') % msgfn)
1373 1373 raise
1374 1374
1375 1375 # update bookmarks, dirstate and mergestate
1376 1376 bookmarks.update(self, [p1, p2], ret)
1377 1377 for f in changes[0] + changes[1]:
1378 1378 self.dirstate.normal(f)
1379 1379 for f in changes[2]:
1380 1380 self.dirstate.drop(f)
1381 1381 self.dirstate.setparents(ret)
1382 1382 ms.reset()
1383 1383 finally:
1384 1384 wlock.release()
1385 1385
1386 1386 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1387 1387 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1388 1388 self._afterlock(commithook)
1389 1389 return ret
1390 1390
1391 1391 def commitctx(self, ctx, error=False):
1392 1392 """Add a new revision to current repository.
1393 1393 Revision information is passed via the context argument.
1394 1394 """
1395 1395
1396 1396 tr = lock = None
1397 1397 removed = list(ctx.removed())
1398 1398 p1, p2 = ctx.p1(), ctx.p2()
1399 1399 user = ctx.user()
1400 1400
1401 1401 lock = self.lock()
1402 1402 try:
1403 1403 tr = self.transaction("commit")
1404 1404 trp = weakref.proxy(tr)
1405 1405
1406 1406 if ctx.files():
1407 1407 m1 = p1.manifest().copy()
1408 1408 m2 = p2.manifest()
1409 1409
1410 1410 # check in files
1411 1411 new = {}
1412 1412 changed = []
1413 1413 linkrev = len(self)
1414 1414 for f in sorted(ctx.modified() + ctx.added()):
1415 1415 self.ui.note(f + "\n")
1416 1416 try:
1417 1417 fctx = ctx[f]
1418 1418 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1419 1419 changed)
1420 1420 m1.set(f, fctx.flags())
1421 1421 except OSError, inst:
1422 1422 self.ui.warn(_("trouble committing %s!\n") % f)
1423 1423 raise
1424 1424 except IOError, inst:
1425 1425 errcode = getattr(inst, 'errno', errno.ENOENT)
1426 1426 if error or errcode and errcode != errno.ENOENT:
1427 1427 self.ui.warn(_("trouble committing %s!\n") % f)
1428 1428 raise
1429 1429 else:
1430 1430 removed.append(f)
1431 1431
1432 1432 # update manifest
1433 1433 m1.update(new)
1434 1434 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1435 1435 drop = [f for f in removed if f in m1]
1436 1436 for f in drop:
1437 1437 del m1[f]
1438 1438 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1439 1439 p2.manifestnode(), (new, drop))
1440 1440 files = changed + removed
1441 1441 else:
1442 1442 mn = p1.manifestnode()
1443 1443 files = []
1444 1444
1445 1445 # update changelog
1446 1446 self.changelog.delayupdate()
1447 1447 n = self.changelog.add(mn, files, ctx.description(),
1448 1448 trp, p1.node(), p2.node(),
1449 1449 user, ctx.date(), ctx.extra().copy())
1450 1450 p = lambda: self.changelog.writepending() and self.root or ""
1451 1451 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1452 1452 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1453 1453 parent2=xp2, pending=p)
1454 1454 self.changelog.finalize(trp)
1455 1455 # set the new commit in its proper phase
1456 1456 targetphase = phases.newcommitphase(self.ui)
1457 1457 if targetphase:
1458 1458 # retracting the boundary does not alter parent changesets.
1459 1459 # if a parent has a higher phase, the resulting phase will
1460 1460 # be compliant anyway
1461 1461 #
1462 1462 # if the minimal phase was 0 we don't need to retract anything
1463 1463 phases.retractboundary(self, targetphase, [n])
1464 1464 tr.close()
1465 1465 self.updatebranchcache()
1466 1466 return n
1467 1467 finally:
1468 1468 if tr:
1469 1469 tr.release()
1470 1470 lock.release()
1471 1471
1472 1472 def destroyed(self, newheadnodes=None):
1473 1473 '''Inform the repository that nodes have been destroyed.
1474 1474 Intended for use by strip and rollback, so there's a common
1475 1475 place for anything that has to be done after destroying history.
1476 1476
1477 1477 If you know the branchhead cache was up to date before nodes were removed
1478 1478 and you also know the set of candidate new heads that may have resulted
1479 1479 from the destruction, you can set newheadnodes. This will enable the
1480 1480 code to update the branchheads cache, rather than having future code
1481 1481 decide it's invalid and regenerate it from scratch.
1482 1482 '''
1483 1483 # If we have info, newheadnodes, on how to update the branch cache, do
1484 1484 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1485 1485 # will be caught the next time it is read.
1486 1486 if newheadnodes:
1487 1487 tiprev = len(self) - 1
1488 1488 ctxgen = (self[node] for node in newheadnodes
1489 1489 if self.changelog.hasnode(node))
1490 1490 self._updatebranchcache(self._branchcache, ctxgen)
1491 1491 self._writebranchcache(self._branchcache, self.changelog.tip(),
1492 1492 tiprev)
1493 1493
1494 1494 # Ensure the persistent tag cache is updated. Doing it now
1495 1495 # means that the tag cache only has to worry about destroyed
1496 1496 # heads immediately after a strip/rollback. That in turn
1497 1497 # guarantees that "cachetip == currenttip" (comparing both rev
1498 1498 # and node) always means no nodes have been added or destroyed.
1499 1499
1500 1500 # XXX this is suboptimal when qrefresh'ing: we strip the current
1501 1501 # head, refresh the tag cache, then immediately add a new head.
1502 1502 # But I think doing it this way is necessary for the "instant
1503 1503 # tag cache retrieval" case to work.
1504 1504 self.invalidatecaches()
1505 1505
1506 1506 def walk(self, match, node=None):
1507 1507 '''
1508 1508 walk recursively through the directory tree or a given
1509 1509 changeset, finding all files matched by the match
1510 1510 function
1511 1511 '''
1512 1512 return self[node].walk(match)
1513 1513
1514 1514 def status(self, node1='.', node2=None, match=None,
1515 1515 ignored=False, clean=False, unknown=False,
1516 1516 listsubrepos=False):
1517 1517 """return status of files between two nodes or node and working
1518 1518 directory.
1519 1519
1520 1520 If node1 is None, use the first dirstate parent instead.
1521 1521 If node2 is None, compare node1 with working directory.
1522 1522 """
1523 1523
1524 1524 def mfmatches(ctx):
1525 1525 mf = ctx.manifest().copy()
1526 1526 if match.always():
1527 1527 return mf
1528 1528 for fn in mf.keys():
1529 1529 if not match(fn):
1530 1530 del mf[fn]
1531 1531 return mf
1532 1532
1533 1533 if isinstance(node1, context.changectx):
1534 1534 ctx1 = node1
1535 1535 else:
1536 1536 ctx1 = self[node1]
1537 1537 if isinstance(node2, context.changectx):
1538 1538 ctx2 = node2
1539 1539 else:
1540 1540 ctx2 = self[node2]
1541 1541
1542 1542 working = ctx2.rev() is None
1543 1543 parentworking = working and ctx1 == self['.']
1544 1544 match = match or matchmod.always(self.root, self.getcwd())
1545 1545 listignored, listclean, listunknown = ignored, clean, unknown
1546 1546
1547 1547 # load earliest manifest first for caching reasons
1548 1548 if not working and ctx2.rev() < ctx1.rev():
1549 1549 ctx2.manifest()
1550 1550
1551 1551 if not parentworking:
1552 1552 def bad(f, msg):
1553 1553 # 'f' may be a directory pattern from 'match.files()',
1554 1554 # so 'f not in ctx1' is not enough
1555 1555 if f not in ctx1 and f not in ctx1.dirs():
1556 1556 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1557 1557 match.bad = bad
1558 1558
1559 1559 if working: # we need to scan the working dir
1560 1560 subrepos = []
1561 1561 if '.hgsub' in self.dirstate:
1562 1562 subrepos = ctx2.substate.keys()
1563 1563 s = self.dirstate.status(match, subrepos, listignored,
1564 1564 listclean, listunknown)
1565 1565 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1566 1566
1567 1567 # check for any possibly clean files
1568 1568 if parentworking and cmp:
1569 1569 fixup = []
1570 1570 # do a full compare of any files that might have changed
1571 1571 for f in sorted(cmp):
1572 1572 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1573 1573 or ctx1[f].cmp(ctx2[f])):
1574 1574 modified.append(f)
1575 1575 else:
1576 1576 fixup.append(f)
1577 1577
1578 1578 # update dirstate for files that are actually clean
1579 1579 if fixup:
1580 1580 if listclean:
1581 1581 clean += fixup
1582 1582
1583 1583 try:
1584 1584 # updating the dirstate is optional
1585 1585 # so we don't wait on the lock
1586 1586 wlock = self.wlock(False)
1587 1587 try:
1588 1588 for f in fixup:
1589 1589 self.dirstate.normal(f)
1590 1590 finally:
1591 1591 wlock.release()
1592 1592 except error.LockError:
1593 1593 pass
1594 1594
1595 1595 if not parentworking:
1596 1596 mf1 = mfmatches(ctx1)
1597 1597 if working:
1598 1598 # we are comparing working dir against non-parent
1599 1599 # generate a pseudo-manifest for the working dir
1600 1600 mf2 = mfmatches(self['.'])
1601 1601 for f in cmp + modified + added:
1602 1602 mf2[f] = None
1603 1603 mf2.set(f, ctx2.flags(f))
1604 1604 for f in removed:
1605 1605 if f in mf2:
1606 1606 del mf2[f]
1607 1607 else:
1608 1608 # we are comparing two revisions
1609 1609 deleted, unknown, ignored = [], [], []
1610 1610 mf2 = mfmatches(ctx2)
1611 1611
1612 1612 modified, added, clean = [], [], []
1613 1613 withflags = mf1.withflags() | mf2.withflags()
1614 1614 for fn in mf2:
1615 1615 if fn in mf1:
1616 1616 if (fn not in deleted and
1617 1617 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1618 1618 (mf1[fn] != mf2[fn] and
1619 1619 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1620 1620 modified.append(fn)
1621 1621 elif listclean:
1622 1622 clean.append(fn)
1623 1623 del mf1[fn]
1624 1624 elif fn not in deleted:
1625 1625 added.append(fn)
1626 1626 removed = mf1.keys()
1627 1627
1628 1628 if working and modified and not self.dirstate._checklink:
1629 1629 # Symlink placeholders may get non-symlink-like contents
1630 1630 # via user error or dereferencing by NFS or Samba servers,
1631 1631 # so we filter out any placeholders that don't look like a
1632 1632 # symlink
1633 1633 sane = []
1634 1634 for f in modified:
1635 1635 if ctx2.flags(f) == 'l':
1636 1636 d = ctx2[f].data()
1637 1637 if len(d) >= 1024 or '\n' in d or util.binary(d):
1638 1638 self.ui.debug('ignoring suspect symlink placeholder'
1639 1639 ' "%s"\n' % f)
1640 1640 continue
1641 1641 sane.append(f)
1642 1642 modified = sane
1643 1643
1644 1644 r = modified, added, removed, deleted, unknown, ignored, clean
1645 1645
1646 1646 if listsubrepos:
1647 1647 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1648 1648 if working:
1649 1649 rev2 = None
1650 1650 else:
1651 1651 rev2 = ctx2.substate[subpath][1]
1652 1652 try:
1653 1653 submatch = matchmod.narrowmatcher(subpath, match)
1654 1654 s = sub.status(rev2, match=submatch, ignored=listignored,
1655 1655 clean=listclean, unknown=listunknown,
1656 1656 listsubrepos=True)
1657 1657 for rfiles, sfiles in zip(r, s):
1658 1658 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1659 1659 except error.LookupError:
1660 1660 self.ui.status(_("skipping missing subrepository: %s\n")
1661 1661 % subpath)
1662 1662
1663 1663 for l in r:
1664 1664 l.sort()
1665 1665 return r
1666 1666
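# For illustration only (not from localrepo.py): status() returns its
# seven lists in a fixed order, so callers typically unpack them as:
#
#   st = repo.status(listsubrepos=True)   # 'repo' is hypothetical
#   modified, added, removed, deleted, unknown, ignored, clean = st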
1667 1667 def heads(self, start=None):
1668 1668 heads = self.changelog.heads(start)
1669 1669 # sort the output in rev descending order
1670 1670 return sorted(heads, key=self.changelog.rev, reverse=True)
1671 1671
1672 1672 def branchheads(self, branch=None, start=None, closed=False):
1673 1673 '''return a (possibly filtered) list of heads for the given branch
1674 1674
1675 1675 Heads are returned in topological order, from newest to oldest.
1676 1676 If branch is None, use the dirstate branch.
1677 1677 If start is not None, return only heads reachable from start.
1678 1678 If closed is True, return heads that are marked as closed as well.
1679 1679 '''
1680 1680 if branch is None:
1681 1681 branch = self[None].branch()
1682 1682 branches = self.branchmap()
1683 1683 if branch not in branches:
1684 1684 return []
1685 1685 # the cache returns heads ordered lowest to highest
1686 1686 bheads = list(reversed(branches[branch]))
1687 1687 if start is not None:
1688 1688 # filter out the heads that cannot be reached from startrev
1689 1689 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1690 1690 bheads = [h for h in bheads if h in fbheads]
1691 1691 if not closed:
1692 1692 bheads = [h for h in bheads if not self[h].closesbranch()]
1693 1693 return bheads
1694 1694
1695 1695 def branches(self, nodes):
1696 1696 if not nodes:
1697 1697 nodes = [self.changelog.tip()]
1698 1698 b = []
1699 1699 for n in nodes:
1700 1700 t = n
1701 1701 while True:
1702 1702 p = self.changelog.parents(n)
1703 1703 if p[1] != nullid or p[0] == nullid:
1704 1704 b.append((t, n, p[0], p[1]))
1705 1705 break
1706 1706 n = p[0]
1707 1707 return b
1708 1708
1709 1709 def between(self, pairs):
1710 1710 r = []
1711 1711
1712 1712 for top, bottom in pairs:
1713 1713 n, l, i = top, [], 0
1714 1714 f = 1
1715 1715
1716 1716 while n != bottom and n != nullid:
1717 1717 p = self.changelog.parents(n)[0]
1718 1718 if i == f:
1719 1719 l.append(n)
1720 1720 f = f * 2
1721 1721 n = p
1722 1722 i += 1
1723 1723
1724 1724 r.append(l)
1725 1725
1726 1726 return r
1727 1727
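# For illustration only (not from localrepo.py): between() samples the
# first-parent ancestors of each top node at exponentially growing
# distances (1, 2, 4, 8, ...), stopping at bottom or nullid. On a linear
# history n0 <- n1 <- ... <- n8, between([(n8, n0)]) returns
# [[n7, n6, n4]] -- the nodes 1, 2 and 4 steps below the top -- which is
# the shape the legacy "between" wire command expects.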
1728 1728 def pull(self, remote, heads=None, force=False):
1729 1729 # don't open a transaction for nothing or you break future useful
1730 1730 # rollback calls
1731 1731 tr = None
1732 1732 trname = 'pull\n' + util.hidepassword(remote.url())
1733 1733 lock = self.lock()
1734 1734 try:
1735 1735 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1736 1736 force=force)
1737 1737 common, fetch, rheads = tmp
1738 1738 if not fetch:
1739 1739 self.ui.status(_("no changes found\n"))
1740 1740 added = []
1741 1741 result = 0
1742 1742 else:
1743 1743 tr = self.transaction(trname)
1744 1744 if heads is None and list(common) == [nullid]:
1745 1745 self.ui.status(_("requesting all changes\n"))
1746 1746 elif heads is None and remote.capable('changegroupsubset'):
1747 1747 # issue1320, avoid a race if remote changed after discovery
1748 1748 heads = rheads
1749 1749
1750 1750 if remote.capable('getbundle'):
1751 1751 cg = remote.getbundle('pull', common=common,
1752 1752 heads=heads or rheads)
1753 1753 elif heads is None:
1754 1754 cg = remote.changegroup(fetch, 'pull')
1755 1755 elif not remote.capable('changegroupsubset'):
1756 1756 raise util.Abort(_("partial pull cannot be done because "
1757 1757 "other repository doesn't support "
1758 1758 "changegroupsubset."))
1759 1759 else:
1760 1760 cg = remote.changegroupsubset(fetch, heads, 'pull')
1761 1761 clstart = len(self.changelog)
1762 1762 result = self.addchangegroup(cg, 'pull', remote.url())
1763 1763 clend = len(self.changelog)
1764 1764 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1765 1765
1766 1766 # compute target subset
1767 1767 if heads is None:
1768 1768 # We pulled everything possible
1769 1769 # sync on everything common
1770 1770 subset = common + added
1771 1771 else:
1772 1772 # We pulled a specific subset
1773 1773 # sync on this subset
1774 1774 subset = heads
1775 1775
1776 1776 # Get remote phases data from remote
1777 1777 remotephases = remote.listkeys('phases')
1778 1778 publishing = bool(remotephases.get('publishing', False))
1779 1779 if remotephases and not publishing:
1780 1780 # remote is new and non-publishing
1781 1781 pheads, _dr = phases.analyzeremotephases(self, subset,
1782 1782 remotephases)
1783 1783 phases.advanceboundary(self, phases.public, pheads)
1784 1784 phases.advanceboundary(self, phases.draft, subset)
1785 1785 else:
1786 1786 # Remote is old or publishing; all common changesets
1787 1787 # should be seen as public
1788 1788 phases.advanceboundary(self, phases.public, subset)
1789 1789
1790 1790 if obsolete._enabled:
1791 1791 self.ui.debug('fetching remote obsolete markers\n')
1792 1792 remoteobs = remote.listkeys('obsolete')
1793 1793 if 'dump0' in remoteobs:
1794 1794 if tr is None:
1795 1795 tr = self.transaction(trname)
1796 1796 for key in sorted(remoteobs, reverse=True):
1797 1797 if key.startswith('dump'):
1798 1798 data = base85.b85decode(remoteobs[key])
1799 1799 self.obsstore.mergemarkers(tr, data)
1800 1800 if tr is not None:
1801 1801 tr.close()
1802 1802 finally:
1803 1803 if tr is not None:
1804 1804 tr.release()
1805 1805 lock.release()
1806 1806
1807 1807 return result
1808 1808
1809 1809 def checkpush(self, force, revs):
1810 1810 """Extensions can override this function if additional checks have
1811 1811 to be performed before pushing, or call it if they override push
1812 1812 command.
1813 1813 """
1814 1814 pass
1815 1815
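# For illustration only (not from localrepo.py): an extension could hook
# checkpush() much like the debugkeys.py reposetup pattern used in the
# tests below. All names here are hypothetical.
#
#   def reposetup(ui, repo):
#       class policyrepo(repo.__class__):
#           def checkpush(self, force, revs):
#               super(policyrepo, self).checkpush(force, revs)
#               if not force and ui.configbool('policy', 'nopush'):
#                   raise util.Abort('push forbidden by local policy')
#       if repo.local():
#           repo.__class__ = policyrepo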
1816 1816 def push(self, remote, force=False, revs=None, newbranch=False):
1817 1817 '''Push outgoing changesets (limited by revs) from the current
1818 1818 repository to remote. Return an integer:
1819 1819 - None means nothing to push
1820 1820 - 0 means HTTP error
1821 1821 - 1 means we pushed and remote head count is unchanged *or*
1822 1822 we have outgoing changesets but refused to push
1823 1823 - other values as described by addchangegroup()
1824 1824 '''
1825 1825 # there are two ways to push to remote repo:
1826 1826 #
1827 1827 # addchangegroup assumes local user can lock remote
1828 1828 # repo (local filesystem, old ssh servers).
1829 1829 #
1830 1830 # unbundle assumes local user cannot lock remote repo (new ssh
1831 1831 # servers, http servers).
1832 1832
1833 1833 if not remote.canpush():
1834 1834 raise util.Abort(_("destination does not support push"))
1835 1835 # get local lock as we might write phase data
1836 1836 locallock = self.lock()
1837 1837 try:
1838 1838 self.checkpush(force, revs)
1839 1839 lock = None
1840 1840 unbundle = remote.capable('unbundle')
1841 1841 if not unbundle:
1842 1842 lock = remote.lock()
1843 1843 try:
1844 1844 # discovery
1845 1845 fci = discovery.findcommonincoming
1846 1846 commoninc = fci(self, remote, force=force)
1847 1847 common, inc, remoteheads = commoninc
1848 1848 fco = discovery.findcommonoutgoing
1849 1849 outgoing = fco(self, remote, onlyheads=revs,
1850 1850 commoninc=commoninc, force=force)
1851 1851
1852 1852
1853 1853 if not outgoing.missing:
1854 1854 # nothing to push
1855 1855 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1856 1856 ret = None
1857 1857 else:
1858 1858 # something to push
1859 1859 if not force:
1860 1860 # if self.obsstore is empty --> no obsolete markers,
1861 1861 # so we can skip the iteration entirely
1862 1862 if self.obsstore:
1863 1863 # these messages are here because of the 80-char line limit
1864 1864 mso = _("push includes an obsolete changeset: %s!")
1865 1865 msu = _("push includes an unstable changeset: %s!")
1866 1866 # If there is at least one obsolete or unstable
1867 1867 # changeset in missing, at least one of the missing
1868 1868 # heads will be obsolete or unstable. So checking
1869 1869 # heads only is ok
1870 1870 for node in outgoing.missingheads:
1871 1871 ctx = self[node]
1872 1872 if ctx.obsolete():
1873 1873 raise util.Abort(_(mso) % ctx)
1874 1874 elif ctx.unstable():
1875 1875 raise util.Abort(_(msu) % ctx)
1876 1876 discovery.checkheads(self, remote, outgoing,
1877 1877 remoteheads, newbranch,
1878 1878 bool(inc))
1879 1879
1880 1880 # create a changegroup from local
1881 1881 if revs is None and not outgoing.excluded:
1882 1882 # push everything,
1883 1883 # use the fast path, no race possible on push
1884 1884 cg = self._changegroup(outgoing.missing, 'push')
1885 1885 else:
1886 1886 cg = self.getlocalbundle('push', outgoing)
1887 1887
1888 1888 # apply changegroup to remote
1889 1889 if unbundle:
1890 1890 # local repo finds heads on server, finds out what
1891 1891 # revs it must push. Once revs are transferred, if the server
1892 1892 # finds it has different heads (someone else won
1893 1893 # commit/push race), server aborts.
1894 1894 if force:
1895 1895 remoteheads = ['force']
1896 1896 # ssh: return remote's addchangegroup()
1897 1897 # http: return remote's addchangegroup() or 0 for error
1898 1898 ret = remote.unbundle(cg, remoteheads, 'push')
1899 1899 else:
1900 1900 # we return an integer indicating remote head count
1901 1901 # change
1902 1902 ret = remote.addchangegroup(cg, 'push', self.url())
1903 1903
1904 1904 if ret:
1905 1905 # push succeeded, synchronize the target of the push
1906 1906 cheads = outgoing.missingheads
1907 1907 elif revs is None:
1908 1908 # All-out push failed. Synchronize all common.
1909 1909 cheads = outgoing.commonheads
1910 1910 else:
1911 1911 # I want cheads = heads(::missingheads and ::commonheads)
1912 1912 # (missingheads is revs with secret changeset filtered out)
1913 1913 #
1914 1914 # This can be expressed as:
1915 1915 # cheads = ( (missingheads and ::commonheads)
1916 1916 # + (commonheads and ::missingheads)
1917 1917 # )
1918 1918 #
1919 1919 # while trying to push we already computed the following:
1920 1920 # common = (::commonheads)
1921 1921 # missing = ((commonheads::missingheads) - commonheads)
1922 1922 #
1923 1923 # We can pick:
1924 1924 # * missingheads part of common (::commonheads)
1925 1925 common = set(outgoing.common)
1926 1926 cheads = [node for node in revs if node in common]
1927 1927 # and
1928 1928 # * commonheads parents on missing
1929 1929 revset = self.set('%ln and parents(roots(%ln))',
1930 1930 outgoing.commonheads,
1931 1931 outgoing.missing)
1932 1932 cheads.extend(c.node() for c in revset)
1933 1933 # even when we don't push, exchanging phase data is useful
1934 1934 remotephases = remote.listkeys('phases')
1935 1935 if not remotephases: # old server or public only repo
1936 1936 phases.advanceboundary(self, phases.public, cheads)
1937 1937 # don't push any phase data as there is nothing to push
1938 1938 else:
1939 1939 ana = phases.analyzeremotephases(self, cheads, remotephases)
1940 1940 pheads, droots = ana
1941 1941 ### Apply remote phase on local
1942 1942 if remotephases.get('publishing', False):
1943 1943 phases.advanceboundary(self, phases.public, cheads)
1944 1944 else: # publish = False
1945 1945 phases.advanceboundary(self, phases.public, pheads)
1946 1946 phases.advanceboundary(self, phases.draft, cheads)
1947 1947 ### Apply local phase on remote
1948 1948
1949 1949 # Get the list of all revs draft on remote but public here.
1950 1950 # XXX Beware that the revset breaks if droots is not strictly
1951 1951 # XXX roots. We may want to ensure it is, but that is costly.
1952 1952 outdated = self.set('heads((%ln::%ln) and public())',
1953 1953 droots, cheads)
1954 1954 for newremotehead in outdated:
1955 1955 r = remote.pushkey('phases',
1956 1956 newremotehead.hex(),
1957 1957 str(phases.draft),
1958 1958 str(phases.public))
1959 1959 if not r:
1960 1960 self.ui.warn(_('updating %s to public failed!\n')
1961 1961 % newremotehead)
1962 1962 self.ui.debug('try to push obsolete markers to remote\n')
1963 1963 if (obsolete._enabled and self.obsstore and
1964 1964 'obsolete' in remote.listkeys('namespaces')):
1965 1965 rslts = []
1966 1966 remotedata = self.listkeys('obsolete')
1967 1967 for key in sorted(remotedata, reverse=True):
1968 1968 # reverse sort to ensure we end with dump0
1969 1969 data = remotedata[key]
1970 1970 rslts.append(remote.pushkey('obsolete', key, '', data))
1971 1971 if [r for r in rslts if not r]:
1972 1972 msg = _('failed to push some obsolete markers!\n')
1973 1973 self.ui.warn(msg)
1974 1974 finally:
1975 1975 if lock is not None:
1976 1976 lock.release()
1977 1977 finally:
1978 1978 locallock.release()
1979 1979
1980 1980 self.ui.debug("checking for updated bookmarks\n")
1981 1981 rb = remote.listkeys('bookmarks')
1982 1982 for k in rb.keys():
1983 1983 if k in self._bookmarks:
1984 1984 nr, nl = rb[k], hex(self._bookmarks[k])
1985 1985 if nr in self:
1986 1986 cr = self[nr]
1987 1987 cl = self[nl]
1988 1988 if cl in cr.descendants():
1989 1989 r = remote.pushkey('bookmarks', k, nr, nl)
1990 1990 if r:
1991 1991 self.ui.status(_("updating bookmark %s\n") % k)
1992 1992 else:
1993 1993 self.ui.warn(_('updating bookmark %s'
1994 1994 ' failed!\n') % k)
1995 1995
1996 1996 return ret
1997 1997
1998 1998 def changegroupinfo(self, nodes, source):
1999 1999 if self.ui.verbose or source == 'bundle':
2000 2000 self.ui.status(_("%d changesets found\n") % len(nodes))
2001 2001 if self.ui.debugflag:
2002 2002 self.ui.debug("list of changesets:\n")
2003 2003 for node in nodes:
2004 2004 self.ui.debug("%s\n" % hex(node))
2005 2005
2006 2006 def changegroupsubset(self, bases, heads, source):
2007 2007 """Compute a changegroup consisting of all the nodes that are
2008 2008 descendants of any of the bases and ancestors of any of the heads.
2009 2009 Return a chunkbuffer object whose read() method will return
2010 2010 successive changegroup chunks.
2011 2011
2012 2012 It is fairly complex as determining which filenodes and which
2013 2013 manifest nodes need to be included for the changeset to be complete
2014 2014 is non-trivial.
2015 2015
2016 2016 Another wrinkle is doing the reverse, figuring out which changeset in
2017 2017 the changegroup a particular filenode or manifestnode belongs to.
2018 2018 """
2019 2019 cl = self.changelog
2020 2020 if not bases:
2021 2021 bases = [nullid]
2022 2022 csets, bases, heads = cl.nodesbetween(bases, heads)
2023 2023 # We assume that all ancestors of bases are known
2024 2024 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2025 2025 return self._changegroupsubset(common, csets, heads, source)
2026 2026
2027 2027 def getlocalbundle(self, source, outgoing):
2028 2028 """Like getbundle, but taking a discovery.outgoing as an argument.
2029 2029
2030 2030 This is only implemented for local repos and reuses potentially
2031 2031 precomputed sets in outgoing."""
2032 2032 if not outgoing.missing:
2033 2033 return None
2034 2034 return self._changegroupsubset(outgoing.common,
2035 2035 outgoing.missing,
2036 2036 outgoing.missingheads,
2037 2037 source)
2038 2038
2039 2039 def getbundle(self, source, heads=None, common=None):
2040 2040 """Like changegroupsubset, but returns the set difference between the
2041 2041 ancestors of heads and the ancestors of common.
2042 2042
2043 2043 If heads is None, use the local heads. If common is None, use [nullid].
2044 2044
2045 2045 The nodes in common might not all be known locally due to the way the
2046 2046 current discovery protocol works.
2047 2047 """
2048 2048 cl = self.changelog
2049 2049 if common:
2050 2050 nm = cl.nodemap
2051 2051 common = [n for n in common if n in nm]
2052 2052 else:
2053 2053 common = [nullid]
2054 2054 if not heads:
2055 2055 heads = cl.heads()
2056 2056 return self.getlocalbundle(source,
2057 2057 discovery.outgoing(cl, common, heads))
2058 2058
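# For illustration only (not from localrepo.py): conceptually,
#
#   getbundle(source, heads=H, common=C)
#
# bundles the changesets in ancestors(H) - ancestors(C), where H defaults
# to the local heads and C to [nullid] (i.e. everything), after dropping
# any nodes in C that are not known locally.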
2059 2059 def _changegroupsubset(self, commonrevs, csets, heads, source):
2060 2060
2061 2061 cl = self.changelog
2062 2062 mf = self.manifest
2063 2063 mfs = {} # needed manifests
2064 2064 fnodes = {} # needed file nodes
2065 2065 changedfiles = set()
2066 2066 fstate = ['', {}]
2067 2067 count = [0, 0]
2068 2068
2069 2069 # can we go through the fast path?
2070 2070 heads.sort()
2071 2071 if heads == sorted(self.heads()):
2072 2072 return self._changegroup(csets, source)
2073 2073
2074 2074 # slow path
2075 2075 self.hook('preoutgoing', throw=True, source=source)
2076 2076 self.changegroupinfo(csets, source)
2077 2077
2078 2078 # filter any nodes that claim to be part of the known set
2079 2079 def prune(revlog, missing):
2080 2080 rr, rl = revlog.rev, revlog.linkrev
2081 2081 return [n for n in missing
2082 2082 if rl(rr(n)) not in commonrevs]
2083 2083
2084 2084 progress = self.ui.progress
2085 2085 _bundling = _('bundling')
2086 2086 _changesets = _('changesets')
2087 2087 _manifests = _('manifests')
2088 2088 _files = _('files')
2089 2089
2090 2090 def lookup(revlog, x):
2091 2091 if revlog == cl:
2092 2092 c = cl.read(x)
2093 2093 changedfiles.update(c[3])
2094 2094 mfs.setdefault(c[0], x)
2095 2095 count[0] += 1
2096 2096 progress(_bundling, count[0],
2097 2097 unit=_changesets, total=count[1])
2098 2098 return x
2099 2099 elif revlog == mf:
2100 2100 clnode = mfs[x]
2101 2101 mdata = mf.readfast(x)
2102 2102 for f, n in mdata.iteritems():
2103 2103 if f in changedfiles:
2104 2104 fnodes[f].setdefault(n, clnode)
2105 2105 count[0] += 1
2106 2106 progress(_bundling, count[0],
2107 2107 unit=_manifests, total=count[1])
2108 2108 return clnode
2109 2109 else:
2110 2110 progress(_bundling, count[0], item=fstate[0],
2111 2111 unit=_files, total=count[1])
2112 2112 return fstate[1][x]
2113 2113
2114 2114 bundler = changegroup.bundle10(lookup)
2115 2115 reorder = self.ui.config('bundle', 'reorder', 'auto')
2116 2116 if reorder == 'auto':
2117 2117 reorder = None
2118 2118 else:
2119 2119 reorder = util.parsebool(reorder)
2120 2120
2121 2121 def gengroup():
2122 2122 # Create a changenode group generator that will call our functions
2123 2123 # back to lookup the owning changenode and collect information.
2124 2124 count[:] = [0, len(csets)]
2125 2125 for chunk in cl.group(csets, bundler, reorder=reorder):
2126 2126 yield chunk
2127 2127 progress(_bundling, None)
2128 2128
2129 2129 # Create a generator for the manifestnodes that calls our lookup
2130 2130 # and data collection functions back.
2131 2131 for f in changedfiles:
2132 2132 fnodes[f] = {}
2133 2133 count[:] = [0, len(mfs)]
2134 2134 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2135 2135 yield chunk
2136 2136 progress(_bundling, None)
2137 2137
2138 2138 mfs.clear()
2139 2139
2140 2140 # Go through all our files in order sorted by name.
2141 2141 count[:] = [0, len(changedfiles)]
2142 2142 for fname in sorted(changedfiles):
2143 2143 filerevlog = self.file(fname)
2144 2144 if not len(filerevlog):
2145 2145 raise util.Abort(_("empty or missing revlog for %s")
2146 2146 % fname)
2147 2147 fstate[0] = fname
2148 2148 fstate[1] = fnodes.pop(fname, {})
2149 2149
2150 2150 nodelist = prune(filerevlog, fstate[1])
2151 2151 if nodelist:
2152 2152 count[0] += 1
2153 2153 yield bundler.fileheader(fname)
2154 2154 for chunk in filerevlog.group(nodelist, bundler, reorder):
2155 2155 yield chunk
2156 2156
2157 2157 # Signal that no more groups are left.
2158 2158 yield bundler.close()
2159 2159 progress(_bundling, None)
2160 2160
2161 2161 if csets:
2162 2162 self.hook('outgoing', node=hex(csets[0]), source=source)
2163 2163
2164 2164 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2165 2165
2166 2166 def changegroup(self, basenodes, source):
2167 2167 # to avoid a race we use changegroupsubset() (issue1320)
2168 2168 return self.changegroupsubset(basenodes, self.heads(), source)
2169 2169
2170 2170 def _changegroup(self, nodes, source):
2171 2171 """Compute the changegroup of all nodes that we have that a recipient
2172 2172 doesn't. Return a chunkbuffer object whose read() method will return
2173 2173 successive changegroup chunks.
2174 2174
2175 2175 This is much easier than the previous function as we can assume that
2176 2176 the recipient has any changenode we aren't sending them.
2177 2177
2178 2178 nodes is the set of nodes to send"""
2179 2179
2180 2180 cl = self.changelog
2181 2181 mf = self.manifest
2182 2182 mfs = {}
2183 2183 changedfiles = set()
2184 2184 fstate = ['']
2185 2185 count = [0, 0]
2186 2186
2187 2187 self.hook('preoutgoing', throw=True, source=source)
2188 2188 self.changegroupinfo(nodes, source)
2189 2189
2190 2190 revset = set([cl.rev(n) for n in nodes])
2191 2191
2192 2192 def gennodelst(log):
2193 2193 ln, llr = log.node, log.linkrev
2194 2194 return [ln(r) for r in log if llr(r) in revset]
2195 2195
2196 2196 progress = self.ui.progress
2197 2197 _bundling = _('bundling')
2198 2198 _changesets = _('changesets')
2199 2199 _manifests = _('manifests')
2200 2200 _files = _('files')
2201 2201
2202 2202 def lookup(revlog, x):
2203 2203 if revlog == cl:
2204 2204 c = cl.read(x)
2205 2205 changedfiles.update(c[3])
2206 2206 mfs.setdefault(c[0], x)
2207 2207 count[0] += 1
2208 2208 progress(_bundling, count[0],
2209 2209 unit=_changesets, total=count[1])
2210 2210 return x
2211 2211 elif revlog == mf:
2212 2212 count[0] += 1
2213 2213 progress(_bundling, count[0],
2214 2214 unit=_manifests, total=count[1])
2215 2215 return cl.node(revlog.linkrev(revlog.rev(x)))
2216 2216 else:
2217 2217 progress(_bundling, count[0], item=fstate[0],
2218 2218 total=count[1], unit=_files)
2219 2219 return cl.node(revlog.linkrev(revlog.rev(x)))
2220 2220
2221 2221 bundler = changegroup.bundle10(lookup)
2222 2222 reorder = self.ui.config('bundle', 'reorder', 'auto')
2223 2223 if reorder == 'auto':
2224 2224 reorder = None
2225 2225 else:
2226 2226 reorder = util.parsebool(reorder)
2227 2227
2228 2228 def gengroup():
2229 2229 '''yield a sequence of changegroup chunks (strings)'''
2230 2230 # construct a list of all changed files
2231 2231
2232 2232 count[:] = [0, len(nodes)]
2233 2233 for chunk in cl.group(nodes, bundler, reorder=reorder):
2234 2234 yield chunk
2235 2235 progress(_bundling, None)
2236 2236
2237 2237 count[:] = [0, len(mfs)]
2238 2238 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2239 2239 yield chunk
2240 2240 progress(_bundling, None)
2241 2241
2242 2242 count[:] = [0, len(changedfiles)]
2243 2243 for fname in sorted(changedfiles):
2244 2244 filerevlog = self.file(fname)
2245 2245 if not len(filerevlog):
2246 2246 raise util.Abort(_("empty or missing revlog for %s")
2247 2247 % fname)
2248 2248 fstate[0] = fname
2249 2249 nodelist = gennodelst(filerevlog)
2250 2250 if nodelist:
2251 2251 count[0] += 1
2252 2252 yield bundler.fileheader(fname)
2253 2253 for chunk in filerevlog.group(nodelist, bundler, reorder):
2254 2254 yield chunk
2255 2255 yield bundler.close()
2256 2256 progress(_bundling, None)
2257 2257
2258 2258 if nodes:
2259 2259 self.hook('outgoing', node=hex(nodes[0]), source=source)
2260 2260
2261 2261 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2262 2262
2263 2263 def addchangegroup(self, source, srctype, url, emptyok=False):
2264 2264 """Add the changegroup returned by source.read() to this repo.
2265 2265 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2266 2266 the URL of the repo where this changegroup is coming from.
2267 2267
2268 2268 Return an integer summarizing the change to this repo:
2269 2269 - nothing changed or no source: 0
2270 2270 - more heads than before: 1+added heads (2..n)
2271 2271 - fewer heads than before: -1-removed heads (-2..-n)
2272 2272 - number of heads stays the same: 1
2273 2273 """
2274 2274 def csmap(x):
2275 2275 self.ui.debug("add changeset %s\n" % short(x))
2276 2276 return len(cl)
2277 2277
2278 2278 def revmap(x):
2279 2279 return cl.rev(x)
2280 2280
2281 2281 if not source:
2282 2282 return 0
2283 2283
2284 2284 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2285 2285
2286 2286 changesets = files = revisions = 0
2287 2287 efiles = set()
2288 2288
2289 2289 # write changelog data to temp files so concurrent readers will not see
2290 2290 # an inconsistent view
2291 2291 cl = self.changelog
2292 2292 cl.delayupdate()
2293 2293 oldheads = cl.heads()
2294 2294
2295 2295 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2296 2296 try:
2297 2297 trp = weakref.proxy(tr)
2298 2298 # pull off the changeset group
2299 2299 self.ui.status(_("adding changesets\n"))
2300 2300 clstart = len(cl)
2301 2301 class prog(object):
2302 2302 step = _('changesets')
2303 2303 count = 1
2304 2304 ui = self.ui
2305 2305 total = None
2306 2306 def __call__(self):
2307 2307 self.ui.progress(self.step, self.count, unit=_('chunks'),
2308 2308 total=self.total)
2309 2309 self.count += 1
2310 2310 pr = prog()
2311 2311 source.callback = pr
2312 2312
2313 2313 source.changelogheader()
2314 2314 srccontent = cl.addgroup(source, csmap, trp)
2315 2315 if not (srccontent or emptyok):
2316 2316 raise util.Abort(_("received changelog group is empty"))
2317 2317 clend = len(cl)
2318 2318 changesets = clend - clstart
2319 2319 for c in xrange(clstart, clend):
2320 2320 efiles.update(self[c].files())
2321 2321 efiles = len(efiles)
2322 2322 self.ui.progress(_('changesets'), None)
2323 2323
2324 2324 # pull off the manifest group
2325 2325 self.ui.status(_("adding manifests\n"))
2326 2326 pr.step = _('manifests')
2327 2327 pr.count = 1
2328 2328 pr.total = changesets # manifests <= changesets
2329 2329 # no need to check for empty manifest group here:
2330 2330 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2331 2331 # no new manifest will be created and the manifest group will
2332 2332 # be empty during the pull
2333 2333 source.manifestheader()
2334 2334 self.manifest.addgroup(source, revmap, trp)
2335 2335 self.ui.progress(_('manifests'), None)
2336 2336
2337 2337 needfiles = {}
2338 2338 if self.ui.configbool('server', 'validate', default=False):
2339 2339 # validate incoming csets have their manifests
2340 2340 for cset in xrange(clstart, clend):
2341 2341 mfest = self.changelog.read(self.changelog.node(cset))[0]
2342 2342 mfest = self.manifest.readdelta(mfest)
2343 2343 # store file nodes we must see
2344 2344 for f, n in mfest.iteritems():
2345 2345 needfiles.setdefault(f, set()).add(n)
2346 2346
2347 2347 # process the files
2348 2348 self.ui.status(_("adding file changes\n"))
2349 2349 pr.step = _('files')
2350 2350 pr.count = 1
2351 2351 pr.total = efiles
2352 2352 source.callback = None
2353 2353
2354 2354 while True:
2355 2355 chunkdata = source.filelogheader()
2356 2356 if not chunkdata:
2357 2357 break
2358 2358 f = chunkdata["filename"]
2359 2359 self.ui.debug("adding %s revisions\n" % f)
2360 2360 pr()
2361 2361 fl = self.file(f)
2362 2362 o = len(fl)
2363 2363 if not fl.addgroup(source, revmap, trp):
2364 2364 raise util.Abort(_("received file revlog group is empty"))
2365 2365 revisions += len(fl) - o
2366 2366 files += 1
2367 2367 if f in needfiles:
2368 2368 needs = needfiles[f]
2369 2369 for new in xrange(o, len(fl)):
2370 2370 n = fl.node(new)
2371 2371 if n in needs:
2372 2372 needs.remove(n)
2373 2373 if not needs:
2374 2374 del needfiles[f]
2375 2375 self.ui.progress(_('files'), None)
2376 2376
2377 2377 for f, needs in needfiles.iteritems():
2378 2378 fl = self.file(f)
2379 2379 for n in needs:
2380 2380 try:
2381 2381 fl.rev(n)
2382 2382 except error.LookupError:
2383 2383 raise util.Abort(
2384 2384 _('missing file data for %s:%s - run hg verify') %
2385 2385 (f, hex(n)))
2386 2386
2387 2387 dh = 0
2388 2388 if oldheads:
2389 2389 heads = cl.heads()
2390 2390 dh = len(heads) - len(oldheads)
2391 2391 for h in heads:
2392 2392 if h not in oldheads and self[h].closesbranch():
2393 2393 dh -= 1
2394 2394 htext = ""
2395 2395 if dh:
2396 2396 htext = _(" (%+d heads)") % dh
2397 2397
2398 2398 self.ui.status(_("added %d changesets"
2399 2399 " with %d changes to %d files%s\n")
2400 2400 % (changesets, revisions, files, htext))
2401 2401
2402 2402 if changesets > 0:
2403 2403 p = lambda: cl.writepending() and self.root or ""
2404 2404 self.hook('pretxnchangegroup', throw=True,
2405 2405 node=hex(cl.node(clstart)), source=srctype,
2406 2406 url=url, pending=p)
2407 2407
2408 2408 added = [cl.node(r) for r in xrange(clstart, clend)]
2409 2409 publishing = self.ui.configbool('phases', 'publish', True)
2410 2410 if srctype == 'push':
2411 2411 # Old servers cannot push the boundary themselves.
2412 2412 # New servers won't push the boundary if the changeset
2413 2413 # already existed locally as secret
2414 2414 #
2415 2415 # We should not use added here but the list of all changes in
2416 2416 # the bundle
2417 2417 if publishing:
2418 2418 phases.advanceboundary(self, phases.public, srccontent)
2419 2419 else:
2420 2420 phases.advanceboundary(self, phases.draft, srccontent)
2421 2421 phases.retractboundary(self, phases.draft, added)
2422 2422 elif srctype != 'strip':
2423 2423 # publishing only alters behavior during push
2424 2424 #
2425 2425 # strip should not touch boundary at all
2426 2426 phases.retractboundary(self, phases.draft, added)
2427 2427
2428 2428 # make changelog see real files again
2429 2429 cl.finalize(trp)
2430 2430
2431 2431 tr.close()
2432 2432
2433 2433 if changesets > 0:
2434 2434 def runhooks():
2435 2435 # forcefully update the on-disk branch cache
2436 2436 self.ui.debug("updating the branch cache\n")
2437 2437 self.updatebranchcache()
2438 2438 self.hook("changegroup", node=hex(cl.node(clstart)),
2439 2439 source=srctype, url=url)
2440 2440
2441 2441 for n in added:
2442 2442 self.hook("incoming", node=hex(n), source=srctype,
2443 2443 url=url)
2444 2444 self._afterlock(runhooks)
2445 2445
2446 2446 finally:
2447 2447 tr.release()
2448 2448 # never return 0 here:
2449 2449 if dh < 0:
2450 2450 return dh - 1
2451 2451 else:
2452 2452 return dh + 1
2453 2453
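# For illustration only (not from localrepo.py): the return value above
# encodes the head-count delta dh while keeping 0 free for "nothing
# changed / no source" (and, at push call sites, HTTP errors):
#
#   dh > 0   ->  dh + 1   (2..n,   heads were added)
#   dh == 0  ->  1        (head count unchanged)
#   dh < 0   ->  dh - 1   (-2..-n, heads were removed)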
2454 2454 def stream_in(self, remote, requirements):
2455 2455 lock = self.lock()
2456 2456 try:
2457 2457 fp = remote.stream_out()
2458 2458 l = fp.readline()
2459 2459 try:
2460 2460 resp = int(l)
2461 2461 except ValueError:
2462 2462 raise error.ResponseError(
2463 2463 _('unexpected response from remote server:'), l)
2464 2464 if resp == 1:
2465 2465 raise util.Abort(_('operation forbidden by server'))
2466 2466 elif resp == 2:
2467 2467 raise util.Abort(_('locking the remote repository failed'))
2468 2468 elif resp != 0:
2469 2469 raise util.Abort(_('the server sent an unknown error code'))
2470 2470 self.ui.status(_('streaming all changes\n'))
2471 2471 l = fp.readline()
2472 2472 try:
2473 2473 total_files, total_bytes = map(int, l.split(' ', 1))
2474 2474 except (ValueError, TypeError):
2475 2475 raise error.ResponseError(
2476 2476 _('unexpected response from remote server:'), l)
2477 2477 self.ui.status(_('%d files to transfer, %s of data\n') %
2478 2478 (total_files, util.bytecount(total_bytes)))
2479 2479 handled_bytes = 0
2480 2480 self.ui.progress(_('clone'), 0, total=total_bytes)
2481 2481 start = time.time()
2482 2482 for i in xrange(total_files):
2483 2483 # XXX doesn't support '\n' or '\r' in filenames
2484 2484 l = fp.readline()
2485 2485 try:
2486 2486 name, size = l.split('\0', 1)
2487 2487 size = int(size)
2488 2488 except (ValueError, TypeError):
2489 2489 raise error.ResponseError(
2490 2490 _('unexpected response from remote server:'), l)
2491 2491 if self.ui.debugflag:
2492 2492 self.ui.debug('adding %s (%s)\n' %
2493 2493 (name, util.bytecount(size)))
2494 2494 # for backwards compat, name was partially encoded
2495 2495 ofp = self.sopener(store.decodedir(name), 'w')
2496 2496 for chunk in util.filechunkiter(fp, limit=size):
2497 2497 handled_bytes += len(chunk)
2498 2498 self.ui.progress(_('clone'), handled_bytes,
2499 2499 total=total_bytes)
2500 2500 ofp.write(chunk)
2501 2501 ofp.close()
2502 2502 elapsed = time.time() - start
2503 2503 if elapsed <= 0:
2504 2504 elapsed = 0.001
2505 2505 self.ui.progress(_('clone'), None)
2506 2506 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2507 2507 (util.bytecount(total_bytes), elapsed,
2508 2508 util.bytecount(total_bytes / elapsed)))
2509 2509
2510 2510 # new requirements = old non-format requirements +
2511 2511 # new format-related requirements
2512 2512 # from the streamed-in repository
2513 2513 requirements.update(set(self.requirements) - self.supportedformats)
2514 2514 self._applyrequirements(requirements)
2515 2515 self._writerequirements()
2516 2516
2517 2517 self.invalidate()
2518 2518 return len(self.heads()) + 1
2519 2519 finally:
2520 2520 lock.release()
2521 2521
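# For illustration only (not from localrepo.py): the stream_out response
# parsed above has this shape, reconstructed from the reads in
# stream_in():
#
#   <resp>\n                       0 = ok, 1 = forbidden, 2 = lock failed
#   <total files> <total bytes>\n
#   then, for each file:
#   <store path>\0<size>\n         followed by exactly <size> raw bytes
#
# Filenames may not contain '\n' or '\r' (see the XXX comment above).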
2522 2522 def clone(self, remote, heads=[], stream=False):
2523 2523 '''clone remote repository.
2524 2524
2525 2525 keyword arguments:
2526 2526 heads: list of revs to clone (forces use of pull)
2527 2527 stream: use streaming clone if possible'''
2528 2528
2529 2529 # now, all clients that can request uncompressed clones can
2530 2530 # read repo formats supported by all servers that can serve
2531 2531 # them.
2532 2532
2533 2533 # if revlog format changes, client will have to check version
2534 2534 # and format flags on "stream" capability, and use
2535 2535 # uncompressed only if compatible.
2536 2536
2537 2537 if not stream:
2538 2538 # if the server explicitly prefers to stream (for fast LANs)
2539 2539 stream = remote.capable('stream-preferred')
2540 2540
2541 2541 if stream and not heads:
2542 2542 # 'stream' means remote revlog format is revlogv1 only
2543 2543 if remote.capable('stream'):
2544 2544 return self.stream_in(remote, set(('revlogv1',)))
2545 2545 # otherwise, 'streamreqs' contains the remote revlog format
2546 2546 streamreqs = remote.capable('streamreqs')
2547 2547 if streamreqs:
2548 2548 streamreqs = set(streamreqs.split(','))
2549 2549 # if we support it, stream in and adjust our requirements
2550 2550 if not streamreqs - self.supportedformats:
2551 2551 return self.stream_in(remote, streamreqs)
2552 2552 return self.pull(remote, heads)
2553 2553
2554 2554 def pushkey(self, namespace, key, old, new):
2555 2555 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2556 2556 old=old, new=new)
2557 2557 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2558 2558 ret = pushkey.push(self, namespace, key, old, new)
2559 2559 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2560 2560 ret=ret)
2561 2561 return ret
2562 2562
2563 2563 def listkeys(self, namespace):
2564 2564 self.hook('prelistkeys', throw=True, namespace=namespace)
2565 2565 self.ui.debug('listing keys for "%s"\n' % namespace)
2566 2566 values = pushkey.list(self, namespace)
2567 2567 self.hook('listkeys', namespace=namespace, values=values)
2568 2568 return values
2569 2569
2570 2570 def debugwireargs(self, one, two, three=None, four=None, five=None):
2571 2571 '''used to test argument passing over the wire'''
2572 2572 return "%s %s %s %s %s" % (one, two, three, four, five)
2573 2573
2574 2574 def savecommitmessage(self, text):
2575 2575 fp = self.opener('last-message.txt', 'wb')
2576 2576 try:
2577 2577 fp.write(text)
2578 2578 finally:
2579 2579 fp.close()
2580 2580 return self.pathto(fp.name[len(self.root)+1:])
2581 2581
2582 2582 # used to avoid circular references so destructors work
2583 2583 def aftertrans(files):
2584 2584 renamefiles = [tuple(t) for t in files]
2585 2585 def a():
2586 2586 for src, dest in renamefiles:
2587 2587 try:
2588 2588 util.rename(src, dest)
2589 2589 except OSError: # journal file does not yet exist
2590 2590 pass
2591 2591 return a
2592 2592
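# For illustration only (not from localrepo.py): aftertrans() returns a
# closure over plain (src, dest) tuples rather than a bound method, so a
# transaction holding it keeps no reference back to the repository and
# __del__-based cleanup can still run. localrepo.transaction() passes it
# along the lines of (sketch, argument order may differ):
#
#   transaction.transaction(self.ui.warn, self.sopener,
#                           self.sjoin("journal"), aftertrans(renames))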
2593 2593 def undoname(fn):
2594 2594 base, name = os.path.split(fn)
2595 2595 assert name.startswith('journal')
2596 2596 return os.path.join(base, name.replace('journal', 'undo', 1))
2597 2597
2598 2598 def instance(ui, path, create):
2599 2599 return localrepository(ui, util.urllocalpath(path), create)
2600 2600
2601 2601 def islocal(path):
2602 2602 return True
@@ -1,508 +1,508 b''
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > [phases]
5 5 > # public changeset are not obsolete
6 6 > publish=false
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
14 14 > hg id --debug -ir "desc('$1')"
15 15 > }
16 16
17 17 $ cat > debugkeys.py <<EOF
18 18 > def reposetup(ui, repo):
19 19 > class debugkeysrepo(repo.__class__):
20 20 > def listkeys(self, namespace):
21 21 > ui.write('listkeys %s\n' % (namespace,))
22 22 > return super(debugkeysrepo, self).listkeys(namespace)
23 23 >
24 24 > if repo.local():
25 25 > repo.__class__ = debugkeysrepo
26 26 > EOF
27 27
28 28 $ hg init tmpa
29 29 $ cd tmpa
30 30 $ mkcommit kill_me
31 31
32 32 Checking that the feature is properly disabled
33 33
34 34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
35 35 abort: obsolete feature is not enabled on this repo
36 36 [255]
37 37
38 38 Enabling it
39 39
40 40 $ cat > ../obs.py << EOF
41 41 > import mercurial.obsolete
42 42 > mercurial.obsolete._enabled = True
43 43 > EOF
44 44 $ echo '[extensions]' >> $HGRCPATH
45 45 $ echo "obs=${TESTTMP}/obs.py" >> $HGRCPATH
46 46
47 47 Killing a single changeset without replacement
48 48
49 49 $ hg debugobsolete 0
50 50 abort: changeset references must be full hexadecimal node identifiers
51 51 [255]
52 52 $ hg debugobsolete '00'
53 53 abort: changeset references must be full hexadecimal node identifiers
54 54 [255]
55 55 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
56 56 $ hg debugobsolete
57 57 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
58 58 $ cd ..
59 59
60 60 Killing a single changeset with replacement
61 61
62 62 $ hg init tmpb
63 63 $ cd tmpb
64 64 $ mkcommit a
65 65 $ mkcommit b
66 66 $ mkcommit original_c
67 67 $ hg up "desc('b')"
68 68 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
69 69 $ mkcommit new_c
70 70 created new head
71 71 $ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
72 72 $ hg debugobsolete
73 73 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
74 74
75 75 do it again (it reads the obsstore before adding a new changeset)
76 76
77 77 $ hg up '.^'
78 78 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
79 79 $ mkcommit new_2_c
80 80 created new head
81 81 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
82 82 $ hg debugobsolete
83 83 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
84 84 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
85 85
86 86 Register two markers with a missing node
87 87
88 88 $ hg up '.^'
89 89 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
90 90 $ mkcommit new_3_c
91 91 created new head
92 92 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
93 93 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
94 94 $ hg debugobsolete
95 95 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
96 96 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
97 97 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
98 98 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
99 99
100 100 Check that graphlog detects that a changeset is obsolete:
101 101
102 102 $ hg glog
103 103 @ changeset: 5:5601fb93a350
104 104 | tag: tip
105 105 | parent: 1:7c3bad9141dc
106 106 | user: test
107 107 | date: Thu Jan 01 00:00:00 1970 +0000
108 108 | summary: add new_3_c
109 109 |
110 110 o changeset: 1:7c3bad9141dc
111 111 | user: test
112 112 | date: Thu Jan 01 00:00:00 1970 +0000
113 113 | summary: add b
114 114 |
115 115 o changeset: 0:1f0dee641bb7
116 116 user: test
117 117 date: Thu Jan 01 00:00:00 1970 +0000
118 118 summary: add a
119 119
120 120
121 121 Check that public changesets are not accounted as obsolete:
122 122
123 123 $ hg phase --public 2
124 124 $ hg --config 'extensions.graphlog=' glog
125 125 @ changeset: 5:5601fb93a350
126 126 | tag: tip
127 127 | parent: 1:7c3bad9141dc
128 128 | user: test
129 129 | date: Thu Jan 01 00:00:00 1970 +0000
130 130 | summary: add new_3_c
131 131 |
132 132 | o changeset: 2:245bde4270cd
133 133 |/ user: test
134 134 | date: Thu Jan 01 00:00:00 1970 +0000
135 135 | summary: add original_c
136 136 |
137 137 o changeset: 1:7c3bad9141dc
138 138 | user: test
139 139 | date: Thu Jan 01 00:00:00 1970 +0000
140 140 | summary: add b
141 141 |
142 142 o changeset: 0:1f0dee641bb7
143 143 user: test
144 144 date: Thu Jan 01 00:00:00 1970 +0000
145 145 summary: add a
146 146
147 147
148 148 $ cd ..
149 149
150 150 Exchange Test
151 151 ============================
152 152
153 153 Destination repo does not have any data
154 154 ---------------------------------------
155 155
156 156 Try to pull markers
157 157 (extinct changesets are excluded but markers are pushed)
158 158
159 159 $ hg init tmpc
160 160 $ cd tmpc
161 161 $ hg pull ../tmpb
162 162 pulling from ../tmpb
163 163 requesting all changes
164 164 adding changesets
165 165 adding manifests
166 166 adding file changes
167 167 added 4 changesets with 4 changes to 4 files (+1 heads)
168 168 (run 'hg heads' to see heads, 'hg merge' to merge)
169 169 $ hg debugobsolete
170 170 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
171 171 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
172 172 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
173 173 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
174 174
175 175 Rollback//Transaction support
176 176
177 177 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
178 178 $ hg debugobsolete
179 179 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
180 180 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
181 181 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
182 182 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
183 183 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
184 184 $ hg rollback -n
185 185 repository tip rolled back to revision 3 (undo debugobsolete)
186 186 $ hg rollback
187 187 repository tip rolled back to revision 3 (undo debugobsolete)
188 188 $ hg debugobsolete
189 189 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
190 190 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
191 191 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
192 192 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
193 193
194 194 $ cd ..
195 195
196 196 Try to pull markers
197 197
198 198 $ hg init tmpd
199 199 $ hg -R tmpb push tmpd
200 200 pushing to tmpd
201 201 searching for changes
202 202 adding changesets
203 203 adding manifests
204 204 adding file changes
205 205 added 4 changesets with 4 changes to 4 files (+1 heads)
206 206 $ hg -R tmpd debugobsolete
207 207 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
208 208 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
209 209 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
210 210 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
211 211
212 212 Check obsolete keys are exchanged only if source has an obsolete store
213 213
214 214 $ hg init empty
215 215 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
216 216 pushing to tmpd
217 217 no changes found
218 218 listkeys phases
219 219 listkeys bookmarks
220 220 [1]
221 221
222 222 clone support
223 223 (markers are copied and extinct changesets are included to allow hardlinks)
224 224
225 225 $ hg clone tmpb clone-dest
226 226 updating to branch default
227 227 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
228 228 $ hg -R clone-dest log -G --hidden
229 229 @ changeset: 5:5601fb93a350
230 230 | tag: tip
231 231 | parent: 1:7c3bad9141dc
232 232 | user: test
233 233 | date: Thu Jan 01 00:00:00 1970 +0000
234 234 | summary: add new_3_c
235 235 |
236 236 | x changeset: 4:ca819180edb9
237 237 |/ parent: 1:7c3bad9141dc
238 238 | user: test
239 239 | date: Thu Jan 01 00:00:00 1970 +0000
240 240 | summary: add new_2_c
241 241 |
242 242 | x changeset: 3:cdbce2fbb163
243 243 |/ parent: 1:7c3bad9141dc
244 244 | user: test
245 245 | date: Thu Jan 01 00:00:00 1970 +0000
246 246 | summary: add new_c
247 247 |
248 248 | o changeset: 2:245bde4270cd
249 249 |/ user: test
250 250 | date: Thu Jan 01 00:00:00 1970 +0000
251 251 | summary: add original_c
252 252 |
253 253 o changeset: 1:7c3bad9141dc
254 254 | user: test
255 255 | date: Thu Jan 01 00:00:00 1970 +0000
256 256 | summary: add b
257 257 |
258 258 o changeset: 0:1f0dee641bb7
259 259 user: test
260 260 date: Thu Jan 01 00:00:00 1970 +0000
261 261 summary: add a
262 262
263 263 $ hg -R clone-dest debugobsolete
264 264 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
265 265 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
266 266 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
267 267 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
268 268
269 269
270 270 Destination repo has existing data
271 271 ---------------------------------------
272 272
273 273 On pull
274 274
275 275 $ hg init tmpe
276 276 $ cd tmpe
277 277 $ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
278 278 $ hg pull ../tmpb
279 279 pulling from ../tmpb
280 280 requesting all changes
281 281 adding changesets
282 282 adding manifests
283 283 adding file changes
284 284 added 4 changesets with 4 changes to 4 files (+1 heads)
285 285 (run 'hg heads' to see heads, 'hg merge' to merge)
286 286 $ hg debugobsolete
287 287 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
288 288 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
289 289 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
290 290 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
291 291 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
292 292
293 293
294 294 On push
295 295
296 296 $ hg push ../tmpc
297 297 pushing to ../tmpc
298 298 searching for changes
299 299 no changes found
300 300 [1]
301 301 $ hg -R ../tmpc debugobsolete
302 302 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
303 303 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
304 304 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
305 305 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
306 306 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
307 307
308 308 detect outgoing obsolete and unstable
309 309 ---------------------------------------
310 310
311 311
312 312 $ hg glog
313 313 o changeset: 3:5601fb93a350
314 314 | tag: tip
315 315 | parent: 1:7c3bad9141dc
316 316 | user: test
317 317 | date: Thu Jan 01 00:00:00 1970 +0000
318 318 | summary: add new_3_c
319 319 |
320 320 | o changeset: 2:245bde4270cd
321 321 |/ user: test
322 322 | date: Thu Jan 01 00:00:00 1970 +0000
323 323 | summary: add original_c
324 324 |
325 325 o changeset: 1:7c3bad9141dc
326 326 | user: test
327 327 | date: Thu Jan 01 00:00:00 1970 +0000
328 328 | summary: add b
329 329 |
330 330 o changeset: 0:1f0dee641bb7
331 331 user: test
332 332 date: Thu Jan 01 00:00:00 1970 +0000
333 333 summary: add a
334 334
335 335 $ hg up 'desc("new_3_c")'
336 336 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
337 337 $ mkcommit original_d
338 338 $ mkcommit original_e
339 339 $ hg debugobsolete `getid original_d` -d '0 0'
340 340 $ hg log -r 'obsolete()'
341 341 changeset: 4:7c694bff0650
342 342 user: test
343 343 date: Thu Jan 01 00:00:00 1970 +0000
344 344 summary: add original_d
345 345
346 346 $ hg glog -r '::unstable()'
347 347 @ changeset: 5:6e572121998e
348 348 | tag: tip
349 349 | user: test
350 350 | date: Thu Jan 01 00:00:00 1970 +0000
351 351 | summary: add original_e
352 352 |
353 353 x changeset: 4:7c694bff0650
354 354 | user: test
355 355 | date: Thu Jan 01 00:00:00 1970 +0000
356 356 | summary: add original_d
357 357 |
358 358 o changeset: 3:5601fb93a350
359 359 | parent: 1:7c3bad9141dc
360 360 | user: test
361 361 | date: Thu Jan 01 00:00:00 1970 +0000
362 362 | summary: add new_3_c
363 363 |
364 364 o changeset: 1:7c3bad9141dc
365 365 | user: test
366 366 | date: Thu Jan 01 00:00:00 1970 +0000
367 367 | summary: add b
368 368 |
369 369 o changeset: 0:1f0dee641bb7
370 370 user: test
371 371 date: Thu Jan 01 00:00:00 1970 +0000
372 372 summary: add a
373 373
374 374
375 375 refuse to push obsolete changeset
376 376
377 377 $ hg push ../tmpc/ -r 'desc("original_d")'
378 378 pushing to ../tmpc/
379 379 searching for changes
380 380 abort: push includes an obsolete changeset: 7c694bff0650!
381 381 [255]
382 382
383 383 refuse to push unstable changeset
384 384
385 385 $ hg push ../tmpc/
386 386 pushing to ../tmpc/
387 387 searching for changes
388 388 abort: push includes an unstable changeset: 6e572121998e!
389 389 [255]
390 390
391 391 Test that extinct changesets are properly detected
392 392
393 393 $ hg log -r 'extinct()'
394 394
395 395 Don't try to push extinct changesets
396 396
397 397 $ hg init ../tmpf
398 398 $ hg out ../tmpf
399 399 comparing with ../tmpf
400 400 searching for changes
401 401 changeset: 0:1f0dee641bb7
402 402 user: test
403 403 date: Thu Jan 01 00:00:00 1970 +0000
404 404 summary: add a
405 405
406 406 changeset: 1:7c3bad9141dc
407 407 user: test
408 408 date: Thu Jan 01 00:00:00 1970 +0000
409 409 summary: add b
410 410
411 411 changeset: 2:245bde4270cd
412 412 user: test
413 413 date: Thu Jan 01 00:00:00 1970 +0000
414 414 summary: add original_c
415 415
416 416 changeset: 3:5601fb93a350
417 417 parent: 1:7c3bad9141dc
418 418 user: test
419 419 date: Thu Jan 01 00:00:00 1970 +0000
420 420 summary: add new_3_c
421 421
422 422 changeset: 4:7c694bff0650
423 423 user: test
424 424 date: Thu Jan 01 00:00:00 1970 +0000
425 425 summary: add original_d
426 426
427 427 changeset: 5:6e572121998e
428 428 tag: tip
429 429 user: test
430 430 date: Thu Jan 01 00:00:00 1970 +0000
431 431 summary: add original_e
432 432
433 433 $ hg push ../tmpf -f # -f because we push unstable too
434 434 pushing to ../tmpf
435 435 searching for changes
436 436 adding changesets
437 437 adding manifests
438 438 adding file changes
439 439 added 6 changesets with 6 changes to 6 files (+1 heads)
440 440
441 441 no warning displayed
442 442
443 443 $ hg push ../tmpf
444 444 pushing to ../tmpf
445 445 searching for changes
446 446 no changes found
447 447 [1]
448 448
449 449 Do not warn about a new head when the new head is a successor of a remote one
450 450
451 451 $ hg glog
452 452 @ changeset: 5:6e572121998e
453 453 | tag: tip
454 454 | user: test
455 455 | date: Thu Jan 01 00:00:00 1970 +0000
456 456 | summary: add original_e
457 457 |
458 458 x changeset: 4:7c694bff0650
459 459 | user: test
460 460 | date: Thu Jan 01 00:00:00 1970 +0000
461 461 | summary: add original_d
462 462 |
463 463 o changeset: 3:5601fb93a350
464 464 | parent: 1:7c3bad9141dc
465 465 | user: test
466 466 | date: Thu Jan 01 00:00:00 1970 +0000
467 467 | summary: add new_3_c
468 468 |
469 469 | o changeset: 2:245bde4270cd
470 470 |/ user: test
471 471 | date: Thu Jan 01 00:00:00 1970 +0000
472 472 | summary: add original_c
473 473 |
474 474 o changeset: 1:7c3bad9141dc
475 475 | user: test
476 476 | date: Thu Jan 01 00:00:00 1970 +0000
477 477 | summary: add b
478 478 |
479 479 o changeset: 0:1f0dee641bb7
480 480 user: test
481 481 date: Thu Jan 01 00:00:00 1970 +0000
482 482 summary: add a
483 483
484 484 $ hg up -q 'desc(new_3_c)'
485 485 $ mkcommit obsolete_e
486 486 created new head
487 487 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
488 488 $ hg push ../tmpf
489 489 pushing to ../tmpf
490 490 searching for changes
491 491 adding changesets
492 492 adding manifests
493 493 adding file changes
494 494 added 1 changesets with 1 changes to 1 files (+1 heads)
495 495
496 Checking _enable=False warning if obsolete marker exist
496 Checking _enable=False warning if obsolete marker exists
497 497
498 498 $ echo '[extensions]' >> $HGRCPATH
499 499 $ echo "obs=!" >> $HGRCPATH
500 500 $ hg log -r tip
501 501 obsolete feature not enabled but 7 markers found!
502 502 changeset: 6:d6a026544050
503 503 tag: tip
504 504 parent: 3:5601fb93a350
505 505 user: test
506 506 date: Thu Jan 01 00:00:00 1970 +0000
507 507 summary: add obsolete_e
508 508