push: do not try to push remote obsolete if local has none
Patrick Mezard
r17252:16fad732 stable
@@ -1,2586 +1,2586 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import bin, hex, nullid, nullrev, short
8 8 from i18n import _
9 9 import peer, changegroup, subrepo, discovery, pushkey, obsolete
10 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
11 11 import lock, transaction, store, encoding, base85
12 12 import scmutil, util, extensions, hook, error, revset
13 13 import match as matchmod
14 14 import merge as mergemod
15 15 import tags as tagsmod
16 16 from lock import release
17 17 import weakref, errno, os, time, inspect
18 18 propertycache = util.propertycache
19 19 filecache = scmutil.filecache
20 20
21 21 class storecache(filecache):
22 22 """filecache for files in the store"""
23 23 def join(self, obj, fname):
24 24 return obj.sjoin(fname)
25 25
26 26 MODERNCAPS = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle'))
27 27 LEGACYCAPS = MODERNCAPS.union(set(['changegroupsubset']))
28 28
29 29 class localpeer(peer.peerrepository):
30 30 '''peer for a local repo; reflects only the most recent API'''
31 31
32 32 def __init__(self, repo, caps=MODERNCAPS):
33 33 peer.peerrepository.__init__(self)
34 34 self._repo = repo
35 35 self.ui = repo.ui
36 36 self._caps = repo._restrictcapabilities(caps)
37 37 self.requirements = repo.requirements
38 38 self.supportedformats = repo.supportedformats
39 39
40 40 def close(self):
41 41 self._repo.close()
42 42
43 43 def _capabilities(self):
44 44 return self._caps
45 45
46 46 def local(self):
47 47 return self._repo
48 48
49 49 def canpush(self):
50 50 return True
51 51
52 52 def url(self):
53 53 return self._repo.url()
54 54
55 55 def lookup(self, key):
56 56 return self._repo.lookup(key)
57 57
58 58 def branchmap(self):
59 59 return discovery.visiblebranchmap(self._repo)
60 60
61 61 def heads(self):
62 62 return discovery.visibleheads(self._repo)
63 63
64 64 def known(self, nodes):
65 65 return self._repo.known(nodes)
66 66
67 67 def getbundle(self, source, heads=None, common=None):
68 68 return self._repo.getbundle(source, heads=heads, common=common)
69 69
70 70 # TODO We might want to move the next two calls into legacypeer and add
71 71 # unbundle instead.
72 72
73 73 def lock(self):
74 74 return self._repo.lock()
75 75
76 76 def addchangegroup(self, cg, source, url):
77 77 return self._repo.addchangegroup(cg, source, url)
78 78
79 79 def pushkey(self, namespace, key, old, new):
80 80 return self._repo.pushkey(namespace, key, old, new)
81 81
82 82 def listkeys(self, namespace):
83 83 return self._repo.listkeys(namespace)
84 84
85 85 def debugwireargs(self, one, two, three=None, four=None, five=None):
86 86 '''used to test argument passing over the wire'''
87 87 return "%s %s %s %s %s" % (one, two, three, four, five)
88 88
89 89 class locallegacypeer(localpeer):
90 90 '''peer extension which implements legacy methods too; used for tests with
91 91 restricted capabilities'''
92 92
93 93 def __init__(self, repo):
94 94 localpeer.__init__(self, repo, caps=LEGACYCAPS)
95 95
96 96 def branches(self, nodes):
97 97 return self._repo.branches(nodes)
98 98
99 99 def between(self, pairs):
100 100 return self._repo.between(pairs)
101 101
102 102 def changegroup(self, basenodes, source):
103 103 return self._repo.changegroup(basenodes, source)
104 104
105 105 def changegroupsubset(self, bases, heads, source):
106 106 return self._repo.changegroupsubset(bases, heads, source)
107 107
108 108 class localrepository(object):
109 109
110 110 supportedformats = set(('revlogv1', 'generaldelta'))
111 111 supported = supportedformats | set(('store', 'fncache', 'shared',
112 112 'dotencode'))
113 113 openerreqs = set(('revlogv1', 'generaldelta'))
114 114 requirements = ['revlogv1']
115 115
116 116 def _baserequirements(self, create):
117 117 return self.requirements[:]
118 118
119 119 def __init__(self, baseui, path=None, create=False):
120 120 self.wopener = scmutil.opener(path, expand=True)
121 121 self.wvfs = self.wopener
122 122 self.root = self.wvfs.base
123 123 self.path = self.wvfs.join(".hg")
124 124 self.origroot = path
125 125 self.auditor = scmutil.pathauditor(self.root, self._checknested)
126 126 self.opener = scmutil.opener(self.path)
127 127 self.vfs = self.opener
128 128 self.baseui = baseui
129 129 self.ui = baseui.copy()
130 130 # A list of callbacks to shape the phase if no data were found.
131 131 # Callbacks are in the form: func(repo, roots) --> processed root.
132 132 # This list is to be filled by extensions during repo setup.
133 133 self._phasedefaults = []
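# Illustrative sketch (not part of the original file): an extension
# could register such a callback during its setup; the callback name
# below is hypothetical.
#
#     def phasedefault(repo, roots):
#         # inspect or massage the default phase roots here
#         return roots
#
#     repo._phasedefaults.append(phasedefault)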
134 134 try:
135 135 self.ui.readconfig(self.join("hgrc"), self.root)
136 136 extensions.loadall(self.ui)
137 137 except IOError:
138 138 pass
139 139
140 140 if not self.vfs.isdir():
141 141 if create:
142 142 if not self.wvfs.exists():
143 143 self.wvfs.makedirs()
144 144 self.vfs.makedir(notindexed=True)
145 145 requirements = self._baserequirements(create)
146 146 if self.ui.configbool('format', 'usestore', True):
147 147 self.vfs.mkdir("store")
148 148 requirements.append("store")
149 149 if self.ui.configbool('format', 'usefncache', True):
150 150 requirements.append("fncache")
151 151 if self.ui.configbool('format', 'dotencode', True):
152 152 requirements.append('dotencode')
153 153 # create an invalid changelog
154 154 self.vfs.append(
155 155 "00changelog.i",
156 156 '\0\0\0\2' # represents revlogv2
157 157 ' dummy changelog to prevent using the old repo layout'
158 158 )
159 159 if self.ui.configbool('format', 'generaldelta', False):
160 160 requirements.append("generaldelta")
161 161 requirements = set(requirements)
162 162 else:
163 163 raise error.RepoError(_("repository %s not found") % path)
164 164 elif create:
165 165 raise error.RepoError(_("repository %s already exists") % path)
166 166 else:
167 167 try:
168 168 requirements = scmutil.readrequires(self.vfs, self.supported)
169 169 except IOError, inst:
170 170 if inst.errno != errno.ENOENT:
171 171 raise
172 172 requirements = set()
173 173
174 174 self.sharedpath = self.path
175 175 try:
176 176 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
177 177 if not os.path.exists(s):
178 178 raise error.RepoError(
179 179 _('.hg/sharedpath points to nonexistent directory %s') % s)
180 180 self.sharedpath = s
181 181 except IOError, inst:
182 182 if inst.errno != errno.ENOENT:
183 183 raise
184 184
185 185 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
186 186 self.spath = self.store.path
187 187 self.sopener = self.store.opener
188 188 self.svfs = self.sopener
189 189 self.sjoin = self.store.join
190 190 self.opener.createmode = self.store.createmode
191 191 self._applyrequirements(requirements)
192 192 if create:
193 193 self._writerequirements()
194 194
195 195
196 196 self._branchcache = None
197 197 self._branchcachetip = None
198 198 self.filterpats = {}
199 199 self._datafilters = {}
200 200 self._transref = self._lockref = self._wlockref = None
201 201
202 202 # A cache for various files under .hg/ that tracks file changes
203 203 # (used by the filecache decorator)
204 204 #
205 205 # Maps a property name to its util.filecacheentry
206 206 self._filecache = {}
207 207
208 208 def close(self):
209 209 pass
210 210
211 211 def _restrictcapabilities(self, caps):
212 212 return caps
213 213
214 214 def _applyrequirements(self, requirements):
215 215 self.requirements = requirements
216 216 self.sopener.options = dict((r, 1) for r in requirements
217 217 if r in self.openerreqs)
218 218
219 219 def _writerequirements(self):
220 220 reqfile = self.opener("requires", "w")
221 221 for r in self.requirements:
222 222 reqfile.write("%s\n" % r)
223 223 reqfile.close()
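# For reference, the .hg/requires file written above for a repository
# created with default settings would contain one entry per line, e.g.
# (generaldelta only appears if enabled in the configuration):
#
#     revlogv1
#     store
#     fncache
#     dotencode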
224 224
225 225 def _checknested(self, path):
226 226 """Determine if path is a legal nested repository."""
227 227 if not path.startswith(self.root):
228 228 return False
229 229 subpath = path[len(self.root) + 1:]
230 230 normsubpath = util.pconvert(subpath)
231 231
232 232 # XXX: Checking against the current working copy is wrong in
233 233 # the sense that it can reject things like
234 234 #
235 235 # $ hg cat -r 10 sub/x.txt
236 236 #
237 237 # if sub/ is no longer a subrepository in the working copy
238 238 # parent revision.
239 239 #
240 240 # However, it can of course also allow things that would have
241 241 # been rejected before, such as the above cat command if sub/
242 242 # is a subrepository now, but was a normal directory before.
243 243 # The old path auditor would have rejected by mistake since it
244 244 # panics when it sees sub/.hg/.
245 245 #
246 246 # All in all, checking against the working copy seems sensible
247 247 # since we want to prevent access to nested repositories on
248 248 # the filesystem *now*.
249 249 ctx = self[None]
250 250 parts = util.splitpath(subpath)
251 251 while parts:
252 252 prefix = '/'.join(parts)
253 253 if prefix in ctx.substate:
254 254 if prefix == normsubpath:
255 255 return True
256 256 else:
257 257 sub = ctx.sub(prefix)
258 258 return sub.checknested(subpath[len(prefix) + 1:])
259 259 else:
260 260 parts.pop()
261 261 return False
262 262
263 263 def peer(self):
264 264 return localpeer(self) # not cached to avoid reference cycle
265 265
266 266 @filecache('bookmarks')
267 267 def _bookmarks(self):
268 268 return bookmarks.read(self)
269 269
270 270 @filecache('bookmarks.current')
271 271 def _bookmarkcurrent(self):
272 272 return bookmarks.readcurrent(self)
273 273
274 274 def _writebookmarks(self, marks):
275 275 bookmarks.write(self)
276 276
277 277 def bookmarkheads(self, bookmark):
278 278 name = bookmark.split('@', 1)[0]
279 279 heads = []
280 280 for mark, n in self._bookmarks.iteritems():
281 281 if mark.split('@', 1)[0] == name:
282 282 heads.append(n)
283 283 return heads
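# Sketch: with a bookmark 'work' and a divergent bookmark
# 'work@default' pointing at different nodes, both nodes share the
# base name and are returned together:
#
#     heads = repo.bookmarkheads('work')  # nodes of 'work' and 'work@default'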
284 284
285 285 @storecache('phaseroots')
286 286 def _phasecache(self):
287 287 return phases.phasecache(self, self._phasedefaults)
288 288
289 289 @storecache('obsstore')
290 290 def obsstore(self):
291 291 store = obsolete.obsstore(self.sopener)
292 292 return store
293 293
294 294 @propertycache
295 295 def hiddenrevs(self):
296 296 """hiddenrevs: revs that should be hidden by command and tools
297 297
298 298 This set is carried on the repo to ease initialisation and lazy
299 299 loading it'll probably move back to changelog for efficienty and
300 300 consistency reason
301 301
302 302 Note that the hiddenrevs will needs invalidations when
303 303 - a new changesets is added (possible unstable above extinct)
304 304 - a new obsolete marker is added (possible new extinct changeset)
305 305 """
306 306 hidden = set()
307 307 if self.obsstore:
308 308 ### hide extinct changesets that are not accessible by any means
309 309 hiddenquery = 'extinct() - ::(. + bookmark() + tagged())'
310 310 hidden.update(self.revs(hiddenquery))
311 311 return hidden
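# The query above reads: take all extinct changesets, then subtract
# everything reachable from the working directory parent ('.'), from
# any bookmarked node and from any tagged node. Equivalently
# (illustrative use of the revs() helper defined below):
#
#     hidden = set(repo.revs('extinct() - ::(. + bookmark() + tagged())'))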
312 312
313 313 @storecache('00changelog.i')
314 314 def changelog(self):
315 315 c = changelog.changelog(self.sopener)
316 316 if 'HG_PENDING' in os.environ:
317 317 p = os.environ['HG_PENDING']
318 318 if p.startswith(self.root):
319 319 c.readpending('00changelog.i.a')
320 320 return c
321 321
322 322 @storecache('00manifest.i')
323 323 def manifest(self):
324 324 return manifest.manifest(self.sopener)
325 325
326 326 @filecache('dirstate')
327 327 def dirstate(self):
328 328 warned = [0]
329 329 def validate(node):
330 330 try:
331 331 self.changelog.rev(node)
332 332 return node
333 333 except error.LookupError:
334 334 if not warned[0]:
335 335 warned[0] = True
336 336 self.ui.warn(_("warning: ignoring unknown"
337 337 " working parent %s!\n") % short(node))
338 338 return nullid
339 339
340 340 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
341 341
342 342 def __getitem__(self, changeid):
343 343 if changeid is None:
344 344 return context.workingctx(self)
345 345 return context.changectx(self, changeid)
346 346
347 347 def __contains__(self, changeid):
348 348 try:
349 349 return bool(self.lookup(changeid))
350 350 except error.RepoLookupError:
351 351 return False
352 352
353 353 def __nonzero__(self):
354 354 return True
355 355
356 356 def __len__(self):
357 357 return len(self.changelog)
358 358
359 359 def __iter__(self):
360 360 for i in xrange(len(self)):
361 361 yield i
362 362
363 363 def revs(self, expr, *args):
364 364 '''Return a list of revisions matching the given revset'''
365 365 expr = revset.formatspec(expr, *args)
366 366 m = revset.match(None, expr)
367 367 return [r for r in m(self, range(len(self)))]
368 368
369 369 def set(self, expr, *args):
370 370 '''
371 371 Yield a context for each matching revision, after doing arg
372 372 replacement via revset.formatspec
373 373 '''
374 374 for r in self.revs(expr, *args):
375 375 yield self[r]
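# Sketch of typical callers: formatspec() quotes the arguments, so
# revsets can be built safely ('somerevs' and 'name' are placeholders):
#
#     for r in repo.revs('heads(%ld)', somerevs):   # %ld: list of revs
#         ...
#     for ctx in repo.set('branch(%s)', name):      # %s: a string
#         ...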
376 376
377 377 def url(self):
378 378 return 'file:' + self.root
379 379
380 380 def hook(self, name, throw=False, **args):
381 381 return hook.hook(self.ui, self, name, throw, **args)
382 382
383 383 tag_disallowed = ':\r\n'
384 384
385 385 def _tag(self, names, node, message, local, user, date, extra={}):
386 386 if isinstance(names, str):
387 387 allchars = names
388 388 names = (names,)
389 389 else:
390 390 allchars = ''.join(names)
391 391 for c in self.tag_disallowed:
392 392 if c in allchars:
393 393 raise util.Abort(_('%r cannot be used in a tag name') % c)
394 394
395 395 branches = self.branchmap()
396 396 for name in names:
397 397 self.hook('pretag', throw=True, node=hex(node), tag=name,
398 398 local=local)
399 399 if name in branches:
400 400 self.ui.warn(_("warning: tag %s conflicts with existing"
401 401 " branch name\n") % name)
402 402
403 403 def writetags(fp, names, munge, prevtags):
404 404 fp.seek(0, 2)
405 405 if prevtags and prevtags[-1] != '\n':
406 406 fp.write('\n')
407 407 for name in names:
408 408 m = munge and munge(name) or name
409 409 if (self._tagscache.tagtypes and
410 410 name in self._tagscache.tagtypes):
411 411 old = self.tags().get(name, nullid)
412 412 fp.write('%s %s\n' % (hex(old), m))
413 413 fp.write('%s %s\n' % (hex(node), m))
414 414 fp.close()
415 415
416 416 prevtags = ''
417 417 if local:
418 418 try:
419 419 fp = self.opener('localtags', 'r+')
420 420 except IOError:
421 421 fp = self.opener('localtags', 'a')
422 422 else:
423 423 prevtags = fp.read()
424 424
425 425 # local tags are stored in the current charset
426 426 writetags(fp, names, None, prevtags)
427 427 for name in names:
428 428 self.hook('tag', node=hex(node), tag=name, local=local)
429 429 return
430 430
431 431 try:
432 432 fp = self.wfile('.hgtags', 'rb+')
433 433 except IOError, e:
434 434 if e.errno != errno.ENOENT:
435 435 raise
436 436 fp = self.wfile('.hgtags', 'ab')
437 437 else:
438 438 prevtags = fp.read()
439 439
440 440 # committed tags are stored in UTF-8
441 441 writetags(fp, names, encoding.fromlocal, prevtags)
442 442
443 443 fp.close()
444 444
445 445 self.invalidatecaches()
446 446
447 447 if '.hgtags' not in self.dirstate:
448 448 self[None].add(['.hgtags'])
449 449
450 450 m = matchmod.exact(self.root, '', ['.hgtags'])
451 451 tagnode = self.commit(message, user, date, extra=extra, match=m)
452 452
453 453 for name in names:
454 454 self.hook('tag', node=hex(node), tag=name, local=local)
455 455
456 456 return tagnode
457 457
458 458 def tag(self, names, node, message, local, user, date):
459 459 '''tag a revision with one or more symbolic names.
460 460
461 461 names is a list of strings or, when adding a single tag, names may be a
462 462 string.
463 463
464 464 if local is True, the tags are stored in a per-repository file.
465 465 otherwise, they are stored in the .hgtags file, and a new
466 466 changeset is committed with the change.
467 467
468 468 keyword arguments:
469 469
470 470 local: whether to store tags in non-version-controlled file
471 471 (default False)
472 472
473 473 message: commit message to use if committing
474 474
475 475 user: name of user to use if committing
476 476
477 477 date: date tuple to use if committing'''
478 478
479 479 if not local:
480 480 for x in self.status()[:5]:
481 481 if '.hgtags' in x:
482 482 raise util.Abort(_('working copy of .hgtags is changed '
483 483 '(please commit .hgtags manually)'))
484 484
485 485 self.tags() # instantiate the cache
486 486 self._tag(names, node, message, local, user, date)
487 487
488 488 @propertycache
489 489 def _tagscache(self):
490 490 '''Returns a tagscache object that contains various tags related
491 491 caches.'''
492 492
493 493 # This simplifies its cache management by having one decorated
494 494 # function (this one), with the rest simply fetching things from it.
495 495 class tagscache(object):
496 496 def __init__(self):
497 497 # These two define the set of tags for this repository. tags
498 498 # maps tag name to node; tagtypes maps tag name to 'global' or
499 499 # 'local'. (Global tags are defined by .hgtags across all
500 500 # heads, and local tags are defined in .hg/localtags.)
501 501 # They constitute the in-memory cache of tags.
502 502 self.tags = self.tagtypes = None
503 503
504 504 self.nodetagscache = self.tagslist = None
505 505
506 506 cache = tagscache()
507 507 cache.tags, cache.tagtypes = self._findtags()
508 508
509 509 return cache
510 510
511 511 def tags(self):
512 512 '''return a mapping of tag to node'''
513 513 t = {}
514 514 for k, v in self._tagscache.tags.iteritems():
515 515 try:
516 516 # ignore tags to unknown nodes
517 517 self.changelog.rev(v)
518 518 t[k] = v
519 519 except (error.LookupError, ValueError):
520 520 pass
521 521 return t
522 522
523 523 def _findtags(self):
524 524 '''Do the hard work of finding tags. Return a pair of dicts
525 525 (tags, tagtypes) where tags maps tag name to node, and tagtypes
526 526 maps tag name to a string like \'global\' or \'local\'.
527 527 Subclasses or extensions are free to add their own tags, but
528 528 should be aware that the returned dicts will be retained for the
529 529 duration of the localrepo object.'''
530 530
531 531 # XXX what tagtype should subclasses/extensions use? Currently
532 532 # mq and bookmarks add tags, but do not set the tagtype at all.
533 533 # Should each extension invent its own tag type? Should there
534 534 # be one tagtype for all such "virtual" tags? Or is the status
535 535 # quo fine?
536 536
537 537 alltags = {} # map tag name to (node, hist)
538 538 tagtypes = {}
539 539
540 540 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
541 541 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
542 542
543 543 # Build the return dicts. Have to re-encode tag names because
544 544 # the tags module always uses UTF-8 (in order not to lose info
545 545 # writing to the cache), but the rest of Mercurial wants them in
546 546 # local encoding.
547 547 tags = {}
548 548 for (name, (node, hist)) in alltags.iteritems():
549 549 if node != nullid:
550 550 tags[encoding.tolocal(name)] = node
551 551 tags['tip'] = self.changelog.tip()
552 552 tagtypes = dict([(encoding.tolocal(name), value)
553 553 for (name, value) in tagtypes.iteritems()])
554 554 return (tags, tagtypes)
555 555
556 556 def tagtype(self, tagname):
557 557 '''
558 558 return the type of the given tag. result can be:
559 559
560 560 'local' : a local tag
561 561 'global' : a global tag
562 562 None : tag does not exist
563 563 '''
564 564
565 565 return self._tagscache.tagtypes.get(tagname)
566 566
567 567 def tagslist(self):
568 568 '''return a list of tags ordered by revision'''
569 569 if not self._tagscache.tagslist:
570 570 l = []
571 571 for t, n in self.tags().iteritems():
572 572 r = self.changelog.rev(n)
573 573 l.append((r, t, n))
574 574 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
575 575
576 576 return self._tagscache.tagslist
577 577
578 578 def nodetags(self, node):
579 579 '''return the tags associated with a node'''
580 580 if not self._tagscache.nodetagscache:
581 581 nodetagscache = {}
582 582 for t, n in self._tagscache.tags.iteritems():
583 583 nodetagscache.setdefault(n, []).append(t)
584 584 for tags in nodetagscache.itervalues():
585 585 tags.sort()
586 586 self._tagscache.nodetagscache = nodetagscache
587 587 return self._tagscache.nodetagscache.get(node, [])
588 588
589 589 def nodebookmarks(self, node):
590 590 marks = []
591 591 for bookmark, n in self._bookmarks.iteritems():
592 592 if n == node:
593 593 marks.append(bookmark)
594 594 return sorted(marks)
595 595
596 596 def _branchtags(self, partial, lrev):
597 597 # TODO: rename this function?
598 598 tiprev = len(self) - 1
599 599 if lrev != tiprev:
600 600 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
601 601 self._updatebranchcache(partial, ctxgen)
602 602 self._writebranchcache(partial, self.changelog.tip(), tiprev)
603 603
604 604 return partial
605 605
606 606 def updatebranchcache(self):
607 607 tip = self.changelog.tip()
608 608 if self._branchcache is not None and self._branchcachetip == tip:
609 609 return
610 610
611 611 oldtip = self._branchcachetip
612 612 self._branchcachetip = tip
613 613 if oldtip is None or oldtip not in self.changelog.nodemap:
614 614 partial, last, lrev = self._readbranchcache()
615 615 else:
616 616 lrev = self.changelog.rev(oldtip)
617 617 partial = self._branchcache
618 618
619 619 self._branchtags(partial, lrev)
620 620 # this private cache holds all heads (not just the branch tips)
621 621 self._branchcache = partial
622 622
623 623 def branchmap(self):
624 624 '''returns a dictionary {branch: [branchheads]}'''
625 625 self.updatebranchcache()
626 626 return self._branchcache
627 627
628 628 def _branchtip(self, heads):
629 629 '''return the tipmost branch head in heads'''
630 630 tip = heads[-1]
631 631 for h in reversed(heads):
632 632 if not self[h].closesbranch():
633 633 tip = h
634 634 break
635 635 return tip
636 636
637 637 def branchtip(self, branch):
638 638 '''return the tip node for a given branch'''
639 639 if branch not in self.branchmap():
640 640 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
641 641 return self._branchtip(self.branchmap()[branch])
642 642
643 643 def branchtags(self):
644 644 '''return a dict where branch names map to the tipmost head of
645 645 the branch, open heads come before closed'''
646 646 bt = {}
647 647 for bn, heads in self.branchmap().iteritems():
648 648 bt[bn] = self._branchtip(heads)
649 649 return bt
650 650
651 651 def _readbranchcache(self):
652 652 partial = {}
653 653 try:
654 654 f = self.opener("cache/branchheads")
655 655 lines = f.read().split('\n')
656 656 f.close()
657 657 except (IOError, OSError):
658 658 return {}, nullid, nullrev
659 659
660 660 try:
661 661 last, lrev = lines.pop(0).split(" ", 1)
662 662 last, lrev = bin(last), int(lrev)
663 663 if lrev >= len(self) or self[lrev].node() != last:
664 664 # invalidate the cache
665 665 raise ValueError('invalidating branch cache (tip differs)')
666 666 for l in lines:
667 667 if not l:
668 668 continue
669 669 node, label = l.split(" ", 1)
670 670 label = encoding.tolocal(label.strip())
671 671 if not node in self:
672 672 raise ValueError('invalidating branch cache because node '+
673 673 '%s does not exist' % node)
674 674 partial.setdefault(label, []).append(bin(node))
675 675 except KeyboardInterrupt:
676 676 raise
677 677 except Exception, inst:
678 678 if self.ui.debugflag:
679 679 self.ui.warn(str(inst), '\n')
680 680 partial, last, lrev = {}, nullid, nullrev
681 681 return partial, last, lrev
682 682
683 683 def _writebranchcache(self, branches, tip, tiprev):
684 684 try:
685 685 f = self.opener("cache/branchheads", "w", atomictemp=True)
686 686 f.write("%s %s\n" % (hex(tip), tiprev))
687 687 for label, nodes in branches.iteritems():
688 688 for node in nodes:
689 689 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
690 690 f.close()
691 691 except (IOError, OSError):
692 692 pass
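# The resulting .hg/cache/branchheads file thus has the layout parsed
# by _readbranchcache() above: a first line with the cache tip, then
# one line per branch head, e.g.
#
#     <tip hex node> <tip rev>
#     <head hex node> <branch name>
#     <head hex node> <branch name>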
693 693
694 694 def _updatebranchcache(self, partial, ctxgen):
695 695 """Given a branchhead cache, partial, that may have extra nodes or be
696 696 missing heads, and a generator of nodes that are at least a superset of
697 697 the missing heads, this function updates partial to be correct.
698 698 """
699 699 # collect new branch entries
700 700 newbranches = {}
701 701 for c in ctxgen:
702 702 newbranches.setdefault(c.branch(), []).append(c.node())
703 703 # if older branchheads are reachable from new ones, they aren't
704 704 # really branchheads. Note checking parents is insufficient:
705 705 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
706 706 for branch, newnodes in newbranches.iteritems():
707 707 bheads = partial.setdefault(branch, [])
708 708 # Remove candidate heads that are no longer in the repo (e.g., as
709 709 # the result of a strip that just happened). Avoid using 'node in
710 710 # self' here because that dives down into branchcache code somewhat
711 711 # recursively.
712 712 bheadrevs = [self.changelog.rev(node) for node in bheads
713 713 if self.changelog.hasnode(node)]
714 714 newheadrevs = [self.changelog.rev(node) for node in newnodes
715 715 if self.changelog.hasnode(node)]
716 716 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
717 717 # Remove duplicates - nodes that are in newheadrevs and are already
718 718 # in bheadrevs. This can happen if you strip a node whose parent
719 719 # was already a head (because they're on different branches).
720 720 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
721 721
722 722 # Starting from tip means fewer passes over reachable. If we know
723 723 # the new candidates are not ancestors of existing heads, we don't
724 724 # have to examine ancestors of existing heads
725 725 if ctxisnew:
726 726 iterrevs = sorted(newheadrevs)
727 727 else:
728 728 iterrevs = list(bheadrevs)
729 729
730 730 # This loop prunes out two kinds of heads - heads that are
731 731 # superseded by a head in newheadrevs, and newheadrevs that are not
732 732 # heads because an existing head is their descendant.
733 733 while iterrevs:
734 734 latest = iterrevs.pop()
735 735 if latest not in bheadrevs:
736 736 continue
737 737 ancestors = set(self.changelog.ancestors([latest],
738 738 bheadrevs[0]))
739 739 if ancestors:
740 740 bheadrevs = [b for b in bheadrevs if b not in ancestors]
741 741 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
742 742
743 743 # There may be branches that cease to exist when the last commit in the
744 744 # branch was stripped. This code filters them out. Note that the
745 745 # branch that ceased to exist may not be in newbranches because
746 746 # newbranches is the set of candidate heads, which, when you strip the
747 747 # last commit in a branch, will be on the parent branch.
748 748 for branch in partial.keys():
749 749 nodes = [head for head in partial[branch]
750 750 if self.changelog.hasnode(head)]
751 751 if not nodes:
752 752 del partial[branch]
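# Worked example of the pruning above: given the history from the
# comment at the top of this function,
#
#     1 (branch a) -> 2 (branch b) -> 3 (branch a)
#
# a stale cache may list node 1 as the head of 'a'. Processing node 3
# puts both 1 and 3 into bheadrevs; the ancestor scan then finds 1
# among the ancestors of 3 and drops it, leaving 3 as the only head.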
753 753
754 754 def lookup(self, key):
755 755 return self[key].node()
756 756
757 757 def lookupbranch(self, key, remote=None):
758 758 repo = remote or self
759 759 if key in repo.branchmap():
760 760 return key
761 761
762 762 repo = (remote and remote.local()) and remote or self
763 763 return repo[key].branch()
764 764
765 765 def known(self, nodes):
766 766 nm = self.changelog.nodemap
767 767 pc = self._phasecache
768 768 result = []
769 769 for n in nodes:
770 770 r = nm.get(n)
771 771 resp = not (r is None or pc.phase(self, r) >= phases.secret)
772 772 result.append(resp)
773 773 return result
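# Sketch: known() answers discovery queries with one boolean per node,
# treating secret changesets as unknown (node names are hypothetical):
#
#     repo.known([publicnode, secretnode, missingnode])
#     # -> [True, False, False]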
774 774
775 775 def local(self):
776 776 return self
777 777
778 778 def cancopy(self):
779 779 return self.local() # so statichttprepo's override of local() works
780 780
781 781 def join(self, f):
782 782 return os.path.join(self.path, f)
783 783
784 784 def wjoin(self, f):
785 785 return os.path.join(self.root, f)
786 786
787 787 def file(self, f):
788 788 if f[0] == '/':
789 789 f = f[1:]
790 790 return filelog.filelog(self.sopener, f)
791 791
792 792 def changectx(self, changeid):
793 793 return self[changeid]
794 794
795 795 def parents(self, changeid=None):
796 796 '''get list of changectxs for parents of changeid'''
797 797 return self[changeid].parents()
798 798
799 799 def setparents(self, p1, p2=nullid):
800 800 copies = self.dirstate.setparents(p1, p2)
801 801 if copies:
802 802 # Adjust copy records, the dirstate cannot do it, it
803 803 # requires access to parents manifests. Preserve them
804 804 # only for entries added to first parent.
805 805 pctx = self[p1]
806 806 for f in copies:
807 807 if f not in pctx and copies[f] in pctx:
808 808 self.dirstate.copy(copies[f], f)
809 809
810 810 def filectx(self, path, changeid=None, fileid=None):
811 811 """changeid can be a changeset revision, node, or tag.
812 812 fileid can be a file revision or node."""
813 813 return context.filectx(self, path, changeid, fileid)
814 814
815 815 def getcwd(self):
816 816 return self.dirstate.getcwd()
817 817
818 818 def pathto(self, f, cwd=None):
819 819 return self.dirstate.pathto(f, cwd)
820 820
821 821 def wfile(self, f, mode='r'):
822 822 return self.wopener(f, mode)
823 823
824 824 def _link(self, f):
825 825 return os.path.islink(self.wjoin(f))
826 826
827 827 def _loadfilter(self, filter):
828 828 if filter not in self.filterpats:
829 829 l = []
830 830 for pat, cmd in self.ui.configitems(filter):
831 831 if cmd == '!':
832 832 continue
833 833 mf = matchmod.match(self.root, '', [pat])
834 834 fn = None
835 835 params = cmd
836 836 for name, filterfn in self._datafilters.iteritems():
837 837 if cmd.startswith(name):
838 838 fn = filterfn
839 839 params = cmd[len(name):].lstrip()
840 840 break
841 841 if not fn:
842 842 fn = lambda s, c, **kwargs: util.filter(s, c)
843 843 # Wrap old filters not supporting keyword arguments
844 844 if not inspect.getargspec(fn)[2]:
845 845 oldfn = fn
846 846 fn = lambda s, c, **kwargs: oldfn(s, c)
847 847 l.append((mf, fn, params))
848 848 self.filterpats[filter] = l
849 849 return self.filterpats[filter]
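# Filters are configured in hgrc; a classic example from the Mercurial
# documentation stores gzip files uncompressed to improve deltas
# ('pipe:' and 'tempfile:' are the drivers handled by util.filter;
# further ones can be registered with adddatafilter below):
#
#     [encode]
#     *.gz = pipe: gunzip
#
#     [decode]
#     *.gz = pipe: gzip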
850 850
851 851 def _filter(self, filterpats, filename, data):
852 852 for mf, fn, cmd in filterpats:
853 853 if mf(filename):
854 854 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
855 855 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
856 856 break
857 857
858 858 return data
859 859
860 860 @propertycache
861 861 def _encodefilterpats(self):
862 862 return self._loadfilter('encode')
863 863
864 864 @propertycache
865 865 def _decodefilterpats(self):
866 866 return self._loadfilter('decode')
867 867
868 868 def adddatafilter(self, name, filter):
869 869 self._datafilters[name] = filter
870 870
871 871 def wread(self, filename):
872 872 if self._link(filename):
873 873 data = os.readlink(self.wjoin(filename))
874 874 else:
875 875 data = self.wopener.read(filename)
876 876 return self._filter(self._encodefilterpats, filename, data)
877 877
878 878 def wwrite(self, filename, data, flags):
879 879 data = self._filter(self._decodefilterpats, filename, data)
880 880 if 'l' in flags:
881 881 self.wopener.symlink(data, filename)
882 882 else:
883 883 self.wopener.write(filename, data)
884 884 if 'x' in flags:
885 885 util.setflags(self.wjoin(filename), False, True)
886 886
887 887 def wwritedata(self, filename, data):
888 888 return self._filter(self._decodefilterpats, filename, data)
889 889
890 890 def transaction(self, desc):
891 891 tr = self._transref and self._transref() or None
892 892 if tr and tr.running():
893 893 return tr.nest()
894 894
895 895 # abort here if the journal already exists
896 896 if os.path.exists(self.sjoin("journal")):
897 897 raise error.RepoError(
898 898 _("abandoned transaction found - run hg recover"))
899 899
900 900 self._writejournal(desc)
901 901 renames = [(x, undoname(x)) for x in self._journalfiles()]
902 902
903 903 tr = transaction.transaction(self.ui.warn, self.sopener,
904 904 self.sjoin("journal"),
905 905 aftertrans(renames),
906 906 self.store.createmode)
907 907 self._transref = weakref.ref(tr)
908 908 return tr
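# Typical usage pattern (compare commitctx() below): pair the
# transaction with the store lock and release it in a finally block,
# which rolls the journal back unless close() was reached:
#
#     lock = repo.lock()
#     try:
#         tr = repo.transaction('mychange')   # 'mychange' is illustrative
#         try:
#             ...                             # write to the store
#             tr.close()
#         finally:
#             tr.release()
#     finally:
#         lock.release()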
909 909
910 910 def _journalfiles(self):
911 911 return (self.sjoin('journal'), self.join('journal.dirstate'),
912 912 self.join('journal.branch'), self.join('journal.desc'),
913 913 self.join('journal.bookmarks'),
914 914 self.sjoin('journal.phaseroots'))
915 915
916 916 def undofiles(self):
917 917 return [undoname(x) for x in self._journalfiles()]
918 918
919 919 def _writejournal(self, desc):
920 920 self.opener.write("journal.dirstate",
921 921 self.opener.tryread("dirstate"))
922 922 self.opener.write("journal.branch",
923 923 encoding.fromlocal(self.dirstate.branch()))
924 924 self.opener.write("journal.desc",
925 925 "%d\n%s\n" % (len(self), desc))
926 926 self.opener.write("journal.bookmarks",
927 927 self.opener.tryread("bookmarks"))
928 928 self.sopener.write("journal.phaseroots",
929 929 self.sopener.tryread("phaseroots"))
930 930
931 931 def recover(self):
932 932 lock = self.lock()
933 933 try:
934 934 if os.path.exists(self.sjoin("journal")):
935 935 self.ui.status(_("rolling back interrupted transaction\n"))
936 936 transaction.rollback(self.sopener, self.sjoin("journal"),
937 937 self.ui.warn)
938 938 self.invalidate()
939 939 return True
940 940 else:
941 941 self.ui.warn(_("no interrupted transaction available\n"))
942 942 return False
943 943 finally:
944 944 lock.release()
945 945
946 946 def rollback(self, dryrun=False, force=False):
947 947 wlock = lock = None
948 948 try:
949 949 wlock = self.wlock()
950 950 lock = self.lock()
951 951 if os.path.exists(self.sjoin("undo")):
952 952 return self._rollback(dryrun, force)
953 953 else:
954 954 self.ui.warn(_("no rollback information available\n"))
955 955 return 1
956 956 finally:
957 957 release(lock, wlock)
958 958
959 959 def _rollback(self, dryrun, force):
960 960 ui = self.ui
961 961 try:
962 962 args = self.opener.read('undo.desc').splitlines()
963 963 (oldlen, desc, detail) = (int(args[0]), args[1], None)
964 964 if len(args) >= 3:
965 965 detail = args[2]
966 966 oldtip = oldlen - 1
967 967
968 968 if detail and ui.verbose:
969 969 msg = (_('repository tip rolled back to revision %s'
970 970 ' (undo %s: %s)\n')
971 971 % (oldtip, desc, detail))
972 972 else:
973 973 msg = (_('repository tip rolled back to revision %s'
974 974 ' (undo %s)\n')
975 975 % (oldtip, desc))
976 976 except IOError:
977 977 msg = _('rolling back unknown transaction\n')
978 978 desc = None
979 979
980 980 if not force and self['.'] != self['tip'] and desc == 'commit':
981 981 raise util.Abort(
982 982 _('rollback of last commit while not checked out '
983 983 'may lose data'), hint=_('use -f to force'))
984 984
985 985 ui.status(msg)
986 986 if dryrun:
987 987 return 0
988 988
989 989 parents = self.dirstate.parents()
990 990 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
991 991 if os.path.exists(self.join('undo.bookmarks')):
992 992 util.rename(self.join('undo.bookmarks'),
993 993 self.join('bookmarks'))
994 994 if os.path.exists(self.sjoin('undo.phaseroots')):
995 995 util.rename(self.sjoin('undo.phaseroots'),
996 996 self.sjoin('phaseroots'))
997 997 self.invalidate()
998 998
999 999 parentgone = (parents[0] not in self.changelog.nodemap or
1000 1000 parents[1] not in self.changelog.nodemap)
1001 1001 if parentgone:
1002 1002 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
1003 1003 try:
1004 1004 branch = self.opener.read('undo.branch')
1005 1005 self.dirstate.setbranch(branch)
1006 1006 except IOError:
1007 1007 ui.warn(_('named branch could not be reset: '
1008 1008 'current branch is still \'%s\'\n')
1009 1009 % self.dirstate.branch())
1010 1010
1011 1011 self.dirstate.invalidate()
1012 1012 parents = tuple([p.rev() for p in self.parents()])
1013 1013 if len(parents) > 1:
1014 1014 ui.status(_('working directory now based on '
1015 1015 'revisions %d and %d\n') % parents)
1016 1016 else:
1017 1017 ui.status(_('working directory now based on '
1018 1018 'revision %d\n') % parents)
1019 1019 # TODO: if we know which new heads may result from this rollback, pass
1020 1020 # them to destroyed(), which will prevent the branchhead cache from
1021 1021 # being invalidated.
1022 1022 self.destroyed()
1023 1023 return 0
1024 1024
1025 1025 def invalidatecaches(self):
1026 1026 def delcache(name):
1027 1027 try:
1028 1028 delattr(self, name)
1029 1029 except AttributeError:
1030 1030 pass
1031 1031
1032 1032 delcache('_tagscache')
1033 1033
1034 1034 self._branchcache = None # in UTF-8
1035 1035 self._branchcachetip = None
1036 1036
1037 1037 def invalidatedirstate(self):
1038 1038 '''Invalidates the dirstate, causing the next call to dirstate
1039 1039 to check if it was modified since the last time it was read,
1040 1040 rereading it if it has.
1041 1041
1042 1042 This differs from dirstate.invalidate() in that it doesn't always
1043 1043 reread the dirstate. Use dirstate.invalidate() if you want to
1044 1044 explicitly read the dirstate again (i.e. restore it to a previous
1045 1045 known good state).'''
1046 1046 if 'dirstate' in self.__dict__:
1047 1047 for k in self.dirstate._filecache:
1048 1048 try:
1049 1049 delattr(self.dirstate, k)
1050 1050 except AttributeError:
1051 1051 pass
1052 1052 delattr(self, 'dirstate')
1053 1053
1054 1054 def invalidate(self):
1055 1055 for k in self._filecache:
1056 1056 # dirstate is invalidated separately in invalidatedirstate()
1057 1057 if k == 'dirstate':
1058 1058 continue
1059 1059
1060 1060 try:
1061 1061 delattr(self, k)
1062 1062 except AttributeError:
1063 1063 pass
1064 1064 self.invalidatecaches()
1065 1065
1066 1066 # Discard all cache entries to force reloading everything.
1067 1067 self._filecache.clear()
1068 1068
1069 1069 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
1070 1070 try:
1071 1071 l = lock.lock(lockname, 0, releasefn, desc=desc)
1072 1072 except error.LockHeld, inst:
1073 1073 if not wait:
1074 1074 raise
1075 1075 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1076 1076 (desc, inst.locker))
1077 1077 # default to 600 seconds timeout
1078 1078 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
1079 1079 releasefn, desc=desc)
1080 1080 if acquirefn:
1081 1081 acquirefn()
1082 1082 return l
1083 1083
1084 1084 def _afterlock(self, callback):
1085 1085 """add a callback to the current repository lock.
1086 1086
1087 1087 The callback will be executed on lock release."""
1088 1088 l = self._lockref and self._lockref()
1089 1089 if l:
1090 1090 l.postrelease.append(callback)
1091 1091 else:
1092 1092 callback()
1093 1093
1094 1094 def lock(self, wait=True):
1095 1095 '''Lock the repository store (.hg/store) and return a weak reference
1096 1096 to the lock. Use this before modifying the store (e.g. committing or
1097 1097 stripping). If you are opening a transaction, get a lock as well.'''
1098 1098 l = self._lockref and self._lockref()
1099 1099 if l is not None and l.held:
1100 1100 l.lock()
1101 1101 return l
1102 1102
1103 1103 def unlock():
1104 1104 self.store.write()
1105 1105 if '_phasecache' in vars(self):
1106 1106 self._phasecache.write()
1107 1107 for k, ce in self._filecache.items():
1108 1108 if k == 'dirstate':
1109 1109 continue
1110 1110 ce.refresh()
1111 1111
1112 1112 l = self._lock(self.sjoin("lock"), wait, unlock,
1113 1113 self.invalidate, _('repository %s') % self.origroot)
1114 1114 self._lockref = weakref.ref(l)
1115 1115 return l
1116 1116
1117 1117 def wlock(self, wait=True):
1118 1118 '''Lock the non-store parts of the repository (everything under
1119 1119 .hg except .hg/store) and return a weak reference to the lock.
1120 1120 Use this before modifying files in .hg.'''
1121 1121 l = self._wlockref and self._wlockref()
1122 1122 if l is not None and l.held:
1123 1123 l.lock()
1124 1124 return l
1125 1125
1126 1126 def unlock():
1127 1127 self.dirstate.write()
1128 1128 ce = self._filecache.get('dirstate')
1129 1129 if ce:
1130 1130 ce.refresh()
1131 1131
1132 1132 l = self._lock(self.join("wlock"), wait, unlock,
1133 1133 self.invalidatedirstate, _('working directory of %s') %
1134 1134 self.origroot)
1135 1135 self._wlockref = weakref.ref(l)
1136 1136 return l
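# Lock-ordering sketch: callers needing both locks take the wlock
# first and the store lock second, as rollback() above does:
#
#     wlock = repo.wlock()
#     try:
#         lock = repo.lock()
#         try:
#             ...
#         finally:
#             lock.release()
#     finally:
#         wlock.release()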
1137 1137
1138 1138 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1139 1139 """
1140 1140 commit an individual file as part of a larger transaction
1141 1141 """
1142 1142
1143 1143 fname = fctx.path()
1144 1144 text = fctx.data()
1145 1145 flog = self.file(fname)
1146 1146 fparent1 = manifest1.get(fname, nullid)
1147 1147 fparent2 = fparent2o = manifest2.get(fname, nullid)
1148 1148
1149 1149 meta = {}
1150 1150 copy = fctx.renamed()
1151 1151 if copy and copy[0] != fname:
1152 1152 # Mark the new revision of this file as a copy of another
1153 1153 # file. This copy data will effectively act as a parent
1154 1154 # of this new revision. If this is a merge, the first
1155 1155 # parent will be the nullid (meaning "look up the copy data")
1156 1156 # and the second one will be the other parent. For example:
1157 1157 #
1158 1158 # 0 --- 1 --- 3 rev1 changes file foo
1159 1159 # \ / rev2 renames foo to bar and changes it
1160 1160 # \- 2 -/ rev3 should have bar with all changes and
1161 1161 # should record that bar descends from
1162 1162 # bar in rev2 and foo in rev1
1163 1163 #
1164 1164 # this allows this merge to succeed:
1165 1165 #
1166 1166 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1167 1167 # \ / merging rev3 and rev4 should use bar@rev2
1168 1168 # \- 2 --- 4 as the merge base
1169 1169 #
1170 1170
1171 1171 cfname = copy[0]
1172 1172 crev = manifest1.get(cfname)
1173 1173 newfparent = fparent2
1174 1174
1175 1175 if manifest2: # branch merge
1176 1176 if fparent2 == nullid or crev is None: # copied on remote side
1177 1177 if cfname in manifest2:
1178 1178 crev = manifest2[cfname]
1179 1179 newfparent = fparent1
1180 1180
1181 1181 # find source in nearest ancestor if we've lost track
1182 1182 if not crev:
1183 1183 self.ui.debug(" %s: searching for copy revision for %s\n" %
1184 1184 (fname, cfname))
1185 1185 for ancestor in self[None].ancestors():
1186 1186 if cfname in ancestor:
1187 1187 crev = ancestor[cfname].filenode()
1188 1188 break
1189 1189
1190 1190 if crev:
1191 1191 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1192 1192 meta["copy"] = cfname
1193 1193 meta["copyrev"] = hex(crev)
1194 1194 fparent1, fparent2 = nullid, newfparent
1195 1195 else:
1196 1196 self.ui.warn(_("warning: can't find ancestor for '%s' "
1197 1197 "copied from '%s'!\n") % (fname, cfname))
1198 1198
1199 1199 elif fparent2 != nullid:
1200 1200 # is one parent an ancestor of the other?
1201 1201 fparentancestor = flog.ancestor(fparent1, fparent2)
1202 1202 if fparentancestor == fparent1:
1203 1203 fparent1, fparent2 = fparent2, nullid
1204 1204 elif fparentancestor == fparent2:
1205 1205 fparent2 = nullid
1206 1206
1207 1207 # is the file changed?
1208 1208 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1209 1209 changelist.append(fname)
1210 1210 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1211 1211
1212 1212 # are just the flags changed during merge?
1213 1213 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1214 1214 changelist.append(fname)
1215 1215
1216 1216 return fparent1
1217 1217
1218 1218 def commit(self, text="", user=None, date=None, match=None, force=False,
1219 1219 editor=False, extra={}):
1220 1220 """Add a new revision to current repository.
1221 1221
1222 1222 Revision information is gathered from the working directory,
1223 1223 match can be used to filter the committed files. If editor is
1224 1224 supplied, it is called to get a commit message.
1225 1225 """
1226 1226
1227 1227 def fail(f, msg):
1228 1228 raise util.Abort('%s: %s' % (f, msg))
1229 1229
1230 1230 if not match:
1231 1231 match = matchmod.always(self.root, '')
1232 1232
1233 1233 if not force:
1234 1234 vdirs = []
1235 1235 match.dir = vdirs.append
1236 1236 match.bad = fail
1237 1237
1238 1238 wlock = self.wlock()
1239 1239 try:
1240 1240 wctx = self[None]
1241 1241 merge = len(wctx.parents()) > 1
1242 1242
1243 1243 if (not force and merge and match and
1244 1244 (match.files() or match.anypats())):
1245 1245 raise util.Abort(_('cannot partially commit a merge '
1246 1246 '(do not specify files or patterns)'))
1247 1247
1248 1248 changes = self.status(match=match, clean=force)
1249 1249 if force:
1250 1250 changes[0].extend(changes[6]) # mq may commit unchanged files
1251 1251
1252 1252 # check subrepos
1253 1253 subs = []
1254 1254 commitsubs = set()
1255 1255 newstate = wctx.substate.copy()
1256 1256 # only manage subrepos and .hgsubstate if .hgsub is present
1257 1257 if '.hgsub' in wctx:
1258 1258 # we'll decide whether to track this ourselves, thanks
1259 1259 if '.hgsubstate' in changes[0]:
1260 1260 changes[0].remove('.hgsubstate')
1261 1261 if '.hgsubstate' in changes[2]:
1262 1262 changes[2].remove('.hgsubstate')
1263 1263
1264 1264 # compare current state to last committed state
1265 1265 # build new substate based on last committed state
1266 1266 oldstate = wctx.p1().substate
1267 1267 for s in sorted(newstate.keys()):
1268 1268 if not match(s):
1269 1269 # ignore working copy, use old state if present
1270 1270 if s in oldstate:
1271 1271 newstate[s] = oldstate[s]
1272 1272 continue
1273 1273 if not force:
1274 1274 raise util.Abort(
1275 1275 _("commit with new subrepo %s excluded") % s)
1276 1276 if wctx.sub(s).dirty(True):
1277 1277 if not self.ui.configbool('ui', 'commitsubrepos'):
1278 1278 raise util.Abort(
1279 1279 _("uncommitted changes in subrepo %s") % s,
1280 1280 hint=_("use --subrepos for recursive commit"))
1281 1281 subs.append(s)
1282 1282 commitsubs.add(s)
1283 1283 else:
1284 1284 bs = wctx.sub(s).basestate()
1285 1285 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1286 1286 if oldstate.get(s, (None, None, None))[1] != bs:
1287 1287 subs.append(s)
1288 1288
1289 1289 # check for removed subrepos
1290 1290 for p in wctx.parents():
1291 1291 r = [s for s in p.substate if s not in newstate]
1292 1292 subs += [s for s in r if match(s)]
1293 1293 if subs:
1294 1294 if (not match('.hgsub') and
1295 1295 '.hgsub' in (wctx.modified() + wctx.added())):
1296 1296 raise util.Abort(
1297 1297 _("can't commit subrepos without .hgsub"))
1298 1298 changes[0].insert(0, '.hgsubstate')
1299 1299
1300 1300 elif '.hgsub' in changes[2]:
1301 1301 # clean up .hgsubstate when .hgsub is removed
1302 1302 if ('.hgsubstate' in wctx and
1303 1303 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1304 1304 changes[2].insert(0, '.hgsubstate')
1305 1305
1306 1306 # make sure all explicit patterns are matched
1307 1307 if not force and match.files():
1308 1308 matched = set(changes[0] + changes[1] + changes[2])
1309 1309
1310 1310 for f in match.files():
1311 1311 if f == '.' or f in matched or f in wctx.substate:
1312 1312 continue
1313 1313 if f in changes[3]: # missing
1314 1314 fail(f, _('file not found!'))
1315 1315 if f in vdirs: # visited directory
1316 1316 d = f + '/'
1317 1317 for mf in matched:
1318 1318 if mf.startswith(d):
1319 1319 break
1320 1320 else:
1321 1321 fail(f, _("no match under directory!"))
1322 1322 elif f not in self.dirstate:
1323 1323 fail(f, _("file not tracked!"))
1324 1324
1325 1325 if (not force and not extra.get("close") and not merge
1326 1326 and not (changes[0] or changes[1] or changes[2])
1327 1327 and wctx.branch() == wctx.p1().branch()):
1328 1328 return None
1329 1329
1330 1330 if merge and changes[3]:
1331 1331 raise util.Abort(_("cannot commit merge with missing files"))
1332 1332
1333 1333 ms = mergemod.mergestate(self)
1334 1334 for f in changes[0]:
1335 1335 if f in ms and ms[f] == 'u':
1336 1336 raise util.Abort(_("unresolved merge conflicts "
1337 1337 "(see hg help resolve)"))
1338 1338
1339 1339 cctx = context.workingctx(self, text, user, date, extra, changes)
1340 1340 if editor:
1341 1341 cctx._text = editor(self, cctx, subs)
1342 1342 edited = (text != cctx._text)
1343 1343
1344 1344 # commit subs and write new state
1345 1345 if subs:
1346 1346 for s in sorted(commitsubs):
1347 1347 sub = wctx.sub(s)
1348 1348 self.ui.status(_('committing subrepository %s\n') %
1349 1349 subrepo.subrelpath(sub))
1350 1350 sr = sub.commit(cctx._text, user, date)
1351 1351 newstate[s] = (newstate[s][0], sr)
1352 1352 subrepo.writestate(self, newstate)
1353 1353
1354 1354 # Save commit message in case this transaction gets rolled back
1355 1355 # (e.g. by a pretxncommit hook). Leave the content alone on
1356 1356 # the assumption that the user will use the same editor again.
1357 1357 msgfn = self.savecommitmessage(cctx._text)
1358 1358
1359 1359 p1, p2 = self.dirstate.parents()
1360 1360 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1361 1361 try:
1362 1362 self.hook("precommit", throw=True, parent1=hookp1,
1363 1363 parent2=hookp2)
1364 1364 ret = self.commitctx(cctx, True)
1365 1365 except: # re-raises
1366 1366 if edited:
1367 1367 self.ui.write(
1368 1368 _('note: commit message saved in %s\n') % msgfn)
1369 1369 raise
1370 1370
1371 1371 # update bookmarks, dirstate and mergestate
1372 1372 bookmarks.update(self, [p1, p2], ret)
1373 1373 for f in changes[0] + changes[1]:
1374 1374 self.dirstate.normal(f)
1375 1375 for f in changes[2]:
1376 1376 self.dirstate.drop(f)
1377 1377 self.dirstate.setparents(ret)
1378 1378 ms.reset()
1379 1379 finally:
1380 1380 wlock.release()
1381 1381
1382 1382 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1383 1383 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1384 1384 self._afterlock(commithook)
1385 1385 return ret
1386 1386
1387 1387 def commitctx(self, ctx, error=False):
1388 1388 """Add a new revision to current repository.
1389 1389 Revision information is passed via the context argument.
1390 1390 """
1391 1391
1392 1392 tr = lock = None
1393 1393 removed = list(ctx.removed())
1394 1394 p1, p2 = ctx.p1(), ctx.p2()
1395 1395 user = ctx.user()
1396 1396
1397 1397 lock = self.lock()
1398 1398 try:
1399 1399 tr = self.transaction("commit")
1400 1400 trp = weakref.proxy(tr)
1401 1401
1402 1402 if ctx.files():
1403 1403 m1 = p1.manifest().copy()
1404 1404 m2 = p2.manifest()
1405 1405
1406 1406 # check in files
1407 1407 new = {}
1408 1408 changed = []
1409 1409 linkrev = len(self)
1410 1410 for f in sorted(ctx.modified() + ctx.added()):
1411 1411 self.ui.note(f + "\n")
1412 1412 try:
1413 1413 fctx = ctx[f]
1414 1414 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1415 1415 changed)
1416 1416 m1.set(f, fctx.flags())
1417 1417 except OSError, inst:
1418 1418 self.ui.warn(_("trouble committing %s!\n") % f)
1419 1419 raise
1420 1420 except IOError, inst:
1421 1421 errcode = getattr(inst, 'errno', errno.ENOENT)
1422 1422 if error or errcode and errcode != errno.ENOENT:
1423 1423 self.ui.warn(_("trouble committing %s!\n") % f)
1424 1424 raise
1425 1425 else:
1426 1426 removed.append(f)
1427 1427
1428 1428 # update manifest
1429 1429 m1.update(new)
1430 1430 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1431 1431 drop = [f for f in removed if f in m1]
1432 1432 for f in drop:
1433 1433 del m1[f]
1434 1434 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1435 1435 p2.manifestnode(), (new, drop))
1436 1436 files = changed + removed
1437 1437 else:
1438 1438 mn = p1.manifestnode()
1439 1439 files = []
1440 1440
1441 1441 # update changelog
1442 1442 self.changelog.delayupdate()
1443 1443 n = self.changelog.add(mn, files, ctx.description(),
1444 1444 trp, p1.node(), p2.node(),
1445 1445 user, ctx.date(), ctx.extra().copy())
1446 1446 p = lambda: self.changelog.writepending() and self.root or ""
1447 1447 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1448 1448 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1449 1449 parent2=xp2, pending=p)
1450 1450 self.changelog.finalize(trp)
1451 1451 # set the new commit in its proper phase
1452 1452 targetphase = phases.newcommitphase(self.ui)
1453 1453 if targetphase:
1454 1454 # retracting the boundary does not alter parent changesets.
1455 1455 # if a parent has a higher phase, the resulting phase will
1456 1456 # be compliant anyway
1457 1457 #
1458 1458 # if the minimal phase was 0 we don't need to retract anything
1459 1459 phases.retractboundary(self, targetphase, [n])
1460 1460 tr.close()
1461 1461 self.updatebranchcache()
1462 1462 return n
1463 1463 finally:
1464 1464 if tr:
1465 1465 tr.release()
1466 1466 lock.release()
1467 1467
1468 1468 def destroyed(self, newheadnodes=None):
1469 1469 '''Inform the repository that nodes have been destroyed.
1470 1470 Intended for use by strip and rollback, so there's a common
1471 1471 place for anything that has to be done after destroying history.
1472 1472
1473 1473 If you know the branchhead cache was up to date before nodes were
1474 1474 removed and you also know the set of candidate new heads that may have
1475 1475 resulted from the destruction, you can set newheadnodes. This will
1476 1476 enable the code to update the branchheads cache, rather than having
1477 1477 future code decide it's invalid and regenerate it from scratch.
1478 1478 '''
1479 1479 # If we have info (newheadnodes) on how to update the branch cache, do
1480 1480 # it. Otherwise, since nodes were destroyed, the cache is stale and this
1481 1481 # will be caught the next time it is read.
1482 1482 if newheadnodes:
1483 1483 tiprev = len(self) - 1
1484 1484 ctxgen = (self[node] for node in newheadnodes
1485 1485 if self.changelog.hasnode(node))
1486 1486 self._updatebranchcache(self._branchcache, ctxgen)
1487 1487 self._writebranchcache(self._branchcache, self.changelog.tip(),
1488 1488 tiprev)
1489 1489
1490 1490 # Ensure the persistent tag cache is updated. Doing it now
1491 1491 # means that the tag cache only has to worry about destroyed
1492 1492 # heads immediately after a strip/rollback. That in turn
1493 1493 # guarantees that "cachetip == currenttip" (comparing both rev
1494 1494 # and node) always means no nodes have been added or destroyed.
1495 1495
1496 1496 # XXX this is suboptimal when qrefresh'ing: we strip the current
1497 1497 # head, refresh the tag cache, then immediately add a new head.
1498 1498 # But I think doing it this way is necessary for the "instant
1499 1499 # tag cache retrieval" case to work.
1500 1500 self.invalidatecaches()
1501 1501
1502 1502 def walk(self, match, node=None):
1503 1503 '''
1504 1504 walk recursively through the directory tree or a given
1505 1505 changeset, finding all files matched by the match
1506 1506 function
1507 1507 '''
1508 1508 return self[node].walk(match)
1509 1509
1510 1510 def status(self, node1='.', node2=None, match=None,
1511 1511 ignored=False, clean=False, unknown=False,
1512 1512 listsubrepos=False):
1513 1513 """return status of files between two nodes or node and working
1514 1514 directory.
1515 1515
1516 1516 If node1 is None, use the first dirstate parent instead.
1517 1517 If node2 is None, compare node1 with working directory.
1518 1518 """
1519 1519
1520 1520 def mfmatches(ctx):
1521 1521 mf = ctx.manifest().copy()
1522 1522 if match.always():
1523 1523 return mf
1524 1524 for fn in mf.keys():
1525 1525 if not match(fn):
1526 1526 del mf[fn]
1527 1527 return mf
1528 1528
1529 1529 if isinstance(node1, context.changectx):
1530 1530 ctx1 = node1
1531 1531 else:
1532 1532 ctx1 = self[node1]
1533 1533 if isinstance(node2, context.changectx):
1534 1534 ctx2 = node2
1535 1535 else:
1536 1536 ctx2 = self[node2]
1537 1537
1538 1538 working = ctx2.rev() is None
1539 1539 parentworking = working and ctx1 == self['.']
1540 1540 match = match or matchmod.always(self.root, self.getcwd())
1541 1541 listignored, listclean, listunknown = ignored, clean, unknown
1542 1542
1543 1543 # load earliest manifest first for caching reasons
1544 1544 if not working and ctx2.rev() < ctx1.rev():
1545 1545 ctx2.manifest()
1546 1546
1547 1547 if not parentworking:
1548 1548 def bad(f, msg):
1549 1549 # 'f' may be a directory pattern from 'match.files()',
1550 1550 # so 'f not in ctx1' is not enough
1551 1551 if f not in ctx1 and f not in ctx1.dirs():
1552 1552 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1553 1553 match.bad = bad
1554 1554
1555 1555 if working: # we need to scan the working dir
1556 1556 subrepos = []
1557 1557 if '.hgsub' in self.dirstate:
1558 1558 subrepos = ctx2.substate.keys()
1559 1559 s = self.dirstate.status(match, subrepos, listignored,
1560 1560 listclean, listunknown)
1561 1561 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1562 1562
1563 1563 # check for any possibly clean files
1564 1564 if parentworking and cmp:
1565 1565 fixup = []
1566 1566 # do a full compare of any files that might have changed
1567 1567 for f in sorted(cmp):
1568 1568 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1569 1569 or ctx1[f].cmp(ctx2[f])):
1570 1570 modified.append(f)
1571 1571 else:
1572 1572 fixup.append(f)
1573 1573
1574 1574 # update dirstate for files that are actually clean
1575 1575 if fixup:
1576 1576 if listclean:
1577 1577 clean += fixup
1578 1578
1579 1579 try:
1580 1580 # updating the dirstate is optional
1581 1581 # so we don't wait on the lock
1582 1582 wlock = self.wlock(False)
1583 1583 try:
1584 1584 for f in fixup:
1585 1585 self.dirstate.normal(f)
1586 1586 finally:
1587 1587 wlock.release()
1588 1588 except error.LockError:
1589 1589 pass
1590 1590
1591 1591 if not parentworking:
1592 1592 mf1 = mfmatches(ctx1)
1593 1593 if working:
1594 1594 # we are comparing working dir against non-parent
1595 1595 # generate a pseudo-manifest for the working dir
1596 1596 mf2 = mfmatches(self['.'])
1597 1597 for f in cmp + modified + added:
1598 1598 mf2[f] = None
1599 1599 mf2.set(f, ctx2.flags(f))
1600 1600 for f in removed:
1601 1601 if f in mf2:
1602 1602 del mf2[f]
1603 1603 else:
1604 1604 # we are comparing two revisions
1605 1605 deleted, unknown, ignored = [], [], []
1606 1606 mf2 = mfmatches(ctx2)
1607 1607
1608 1608 modified, added, clean = [], [], []
1609 1609 withflags = mf1.withflags() | mf2.withflags()
1610 1610 for fn in mf2:
1611 1611 if fn in mf1:
1612 1612 if (fn not in deleted and
1613 1613 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1614 1614 (mf1[fn] != mf2[fn] and
1615 1615 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1616 1616 modified.append(fn)
1617 1617 elif listclean:
1618 1618 clean.append(fn)
1619 1619 del mf1[fn]
1620 1620 elif fn not in deleted:
1621 1621 added.append(fn)
1622 1622 removed = mf1.keys()
1623 1623
1624 1624 if working and modified and not self.dirstate._checklink:
1625 1625 # Symlink placeholders may get non-symlink-like contents
1626 1626 # via user error or dereferencing by NFS or Samba servers,
1627 1627 # so we filter out any placeholders that don't look like a
1628 1628 # symlink
1629 1629 sane = []
1630 1630 for f in modified:
1631 1631 if ctx2.flags(f) == 'l':
1632 1632 d = ctx2[f].data()
1633 1633 if len(d) >= 1024 or '\n' in d or util.binary(d):
1634 1634 self.ui.debug('ignoring suspect symlink placeholder'
1635 1635 ' "%s"\n' % f)
1636 1636 continue
1637 1637 sane.append(f)
1638 1638 modified = sane
1639 1639
1640 1640 r = modified, added, removed, deleted, unknown, ignored, clean
1641 1641
1642 1642 if listsubrepos:
1643 1643 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1644 1644 if working:
1645 1645 rev2 = None
1646 1646 else:
1647 1647 rev2 = ctx2.substate[subpath][1]
1648 1648 try:
1649 1649 submatch = matchmod.narrowmatcher(subpath, match)
1650 1650 s = sub.status(rev2, match=submatch, ignored=listignored,
1651 1651 clean=listclean, unknown=listunknown,
1652 1652 listsubrepos=True)
1653 1653 for rfiles, sfiles in zip(r, s):
1654 1654 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1655 1655 except error.LookupError:
1656 1656 self.ui.status(_("skipping missing subrepository: %s\n")
1657 1657 % subpath)
1658 1658
1659 1659 for l in r:
1660 1660 l.sort()
1661 1661 return r
1662 1662
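# A small usage sketch for status() (illustrative):
#
#     # working directory against its first parent, all categories
#     modified, added, removed, deleted, unknown, ignored, clean = \
#         repo.status(unknown=True, ignored=True, clean=True)
#     # or compare two changesets
#     st = repo.status(node1='1.0', node2='tip')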
1663 1663 def heads(self, start=None):
1664 1664 heads = self.changelog.heads(start)
1665 1665 # sort the output in rev descending order
1666 1666 return sorted(heads, key=self.changelog.rev, reverse=True)
1667 1667
1668 1668 def branchheads(self, branch=None, start=None, closed=False):
1669 1669 '''return a (possibly filtered) list of heads for the given branch
1670 1670
1671 1671 Heads are returned in topological order, from newest to oldest.
1672 1672 If branch is None, use the dirstate branch.
1673 1673 If start is not None, return only heads reachable from start.
1674 1674 If closed is True, return heads that are marked as closed as well.
1675 1675 '''
1676 1676 if branch is None:
1677 1677 branch = self[None].branch()
1678 1678 branches = self.branchmap()
1679 1679 if branch not in branches:
1680 1680 return []
1681 1681 # the cache returns heads ordered lowest to highest
1682 1682 bheads = list(reversed(branches[branch]))
1683 1683 if start is not None:
1684 1684 # filter out the heads that cannot be reached from startrev
1685 1685 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1686 1686 bheads = [h for h in bheads if h in fbheads]
1687 1687 if not closed:
1688 1688 bheads = [h for h in bheads if not self[h].closesbranch()]
1689 1689 return bheads
1690 1690
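# Illustrative branchheads() call: newest head first, closed heads
# filtered out unless requested:
#
#     heads = repo.branchheads('default', closed=False)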
1691 1691 def branches(self, nodes):
1692 1692 if not nodes:
1693 1693 nodes = [self.changelog.tip()]
1694 1694 b = []
1695 1695 for n in nodes:
1696 1696 t = n
1697 1697 while True:
1698 1698 p = self.changelog.parents(n)
1699 1699 if p[1] != nullid or p[0] == nullid:
1700 1700 b.append((t, n, p[0], p[1]))
1701 1701 break
1702 1702 n = p[0]
1703 1703 return b
1704 1704
1705 1705 def between(self, pairs):
1706 1706 r = []
1707 1707
1708 1708 for top, bottom in pairs:
1709 1709 n, l, i = top, [], 0
1710 1710 f = 1
1711 1711
1712 1712 while n != bottom and n != nullid:
1713 1713 p = self.changelog.parents(n)[0]
1714 1714 if i == f:
1715 1715 l.append(n)
1716 1716 f = f * 2
1717 1717 n = p
1718 1718 i += 1
1719 1719
1720 1720 r.append(l)
1721 1721
1722 1722 return r
1723 1723
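# between() samples the first-parent chain at exponentially growing
# distances from top: with top = c9 and parents c9 -> c8 -> ... -> c0,
# the appended nodes sit at distances 1, 2, 4, 8 (c8, c7, c5, c1),
# which the old discovery protocol uses for binary-search-style
# narrowing. Illustrative call:
#
#     samples = repo.between([(tipnode, rootnode)])[0]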
1724 1724 def pull(self, remote, heads=None, force=False):
1725 1725 # don't open a transaction for nothing or you break future useful
1726 1726 # rollback calls
1727 1727 tr = None
1728 1728 trname = 'pull\n' + util.hidepassword(remote.url())
1729 1729 lock = self.lock()
1730 1730 try:
1731 1731 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1732 1732 force=force)
1733 1733 common, fetch, rheads = tmp
1734 1734 if not fetch:
1735 1735 self.ui.status(_("no changes found\n"))
1736 1736 added = []
1737 1737 result = 0
1738 1738 else:
1739 1739 tr = self.transaction(trname)
1740 1740 if heads is None and list(common) == [nullid]:
1741 1741 self.ui.status(_("requesting all changes\n"))
1742 1742 elif heads is None and remote.capable('changegroupsubset'):
1743 1743 # issue1320, avoid a race if remote changed after discovery
1744 1744 heads = rheads
1745 1745
1746 1746 if remote.capable('getbundle'):
1747 1747 cg = remote.getbundle('pull', common=common,
1748 1748 heads=heads or rheads)
1749 1749 elif heads is None:
1750 1750 cg = remote.changegroup(fetch, 'pull')
1751 1751 elif not remote.capable('changegroupsubset'):
1752 1752 raise util.Abort(_("partial pull cannot be done because "
1753 1753 "other repository doesn't support "
1754 1754 "changegroupsubset."))
1755 1755 else:
1756 1756 cg = remote.changegroupsubset(fetch, heads, 'pull')
1757 1757 clstart = len(self.changelog)
1758 1758 result = self.addchangegroup(cg, 'pull', remote.url())
1759 1759 clend = len(self.changelog)
1760 1760 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1761 1761
1762 1762 # compute target subset
1763 1763 if heads is None:
1764 1764 # We pulled everything possible
1765 1765 # sync on everything common
1766 1766 subset = common + added
1767 1767 else:
1768 1768 # We pulled a specific subset
1769 1769 # sync on this subset
1770 1770 subset = heads
1771 1771
1772 1772 # Get remote phases data from remote
1773 1773 remotephases = remote.listkeys('phases')
1774 1774 publishing = bool(remotephases.get('publishing', False))
1775 1775 if remotephases and not publishing:
1776 1776 # remote is new and non-publishing
1777 1777 pheads, _dr = phases.analyzeremotephases(self, subset,
1778 1778 remotephases)
1779 1779 phases.advanceboundary(self, phases.public, pheads)
1780 1780 phases.advanceboundary(self, phases.draft, subset)
1781 1781 else:
1782 1782 # Remote is old or publishing; all common changesets
1783 1783 # should be seen as public
1784 1784 phases.advanceboundary(self, phases.public, subset)
1785 1785
1786 1786 remoteobs = remote.listkeys('obsolete')
1787 1787 if 'dump' in remoteobs:
1788 1788 if tr is None:
1789 1789 tr = self.transaction(trname)
1790 1790 data = base85.b85decode(remoteobs['dump'])
1791 1791 self.obsstore.mergemarkers(tr, data)
1792 1792 if tr is not None:
1793 1793 tr.close()
1794 1794 finally:
1795 1795 if tr is not None:
1796 1796 tr.release()
1797 1797 lock.release()
1798 1798
1799 1799 return result
1800 1800
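# Illustrative pull() usage (a sketch; assumes `other` is a peer for the
# source repository):
#
#     result = repo.pull(other)             # pull everything
#     result = repo.pull(other, heads=[h])  # pull one head only
#     # result is 0 when no changes were found, otherwise the
#     # addchangegroup() value documented further below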
1801 1801 def checkpush(self, force, revs):
1802 1802 """Extensions can override this function if additional checks have
1803 1803 to be performed before pushing, or call it if they override push
1804 1804 command.
1805 1805 """
1806 1806 pass
1807 1807
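# A sketch of an extension overriding checkpush() via the usual
# reposetup class-swap idiom (names illustrative):
#
#     def reposetup(ui, repo):
#         class vetorepo(repo.__class__):
#             def checkpush(self, force, revs):
#                 super(vetorepo, self).checkpush(force, revs)
#                 if not force:
#                     raise util.Abort('push disabled by local policy')
#         repo.__class__ = vetorepo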
1808 1808 def push(self, remote, force=False, revs=None, newbranch=False):
1809 1809 '''Push outgoing changesets (limited by revs) from the current
1810 1810 repository to remote. Return an integer:
1811 1811 - None means nothing to push
1812 1812 - 0 means HTTP error
1813 1813 - 1 means we pushed and remote head count is unchanged *or*
1814 1814 we have outgoing changesets but refused to push
1815 1815 - other values as described by addchangegroup()
1816 1816 '''
1817 1817 # there are two ways to push to remote repo:
1818 1818 #
1819 1819 # addchangegroup assumes local user can lock remote
1820 1820 # repo (local filesystem, old ssh servers).
1821 1821 #
1822 1822 # unbundle assumes local user cannot lock remote repo (new ssh
1823 1823 # servers, http servers).
1824 1824
1825 1825 if not remote.canpush():
1826 1826 raise util.Abort(_("destination does not support push"))
1827 1827 # get local lock as we might write phase data
1828 1828 locallock = self.lock()
1829 1829 try:
1830 1830 self.checkpush(force, revs)
1831 1831 lock = None
1832 1832 unbundle = remote.capable('unbundle')
1833 1833 if not unbundle:
1834 1834 lock = remote.lock()
1835 1835 try:
1836 1836 # discovery
1837 1837 fci = discovery.findcommonincoming
1838 1838 commoninc = fci(self, remote, force=force)
1839 1839 common, inc, remoteheads = commoninc
1840 1840 fco = discovery.findcommonoutgoing
1841 1841 outgoing = fco(self, remote, onlyheads=revs,
1842 1842 commoninc=commoninc, force=force)
1843 1843
1844 1844
1845 1845 if not outgoing.missing:
1846 1846 # nothing to push
1847 1847 scmutil.nochangesfound(self.ui, self, outgoing.excluded)
1848 1848 ret = None
1849 1849 else:
1850 1850 # something to push
1851 1851 if not force:
1852 1852 # if self.obsstore is false --> no obsolete markers,
1853 1853 # so we can spare the iteration entirely
1854 1854 if self.obsstore:
1855 1855 # these messages are defined here to respect the 80-char limit
1856 1856 mso = _("push includes an obsolete changeset: %s!")
1857 1857 msu = _("push includes an unstable changeset: %s!")
1858 1858 # If there is at least one obsolete or unstable
1859 1859 # changeset in missing, then at least one of the
1860 1860 # missing heads will be obsolete or unstable too.
1861 1861 # So checking the heads only is enough.
1862 1862 for node in outgoing.missingheads:
1863 1863 ctx = self[node]
1864 1864 if ctx.obsolete():
1865 1865 raise util.Abort(_(mso) % ctx)
1866 1866 elif ctx.unstable():
1867 1867 raise util.Abort(_(msu) % ctx)
1868 1868 discovery.checkheads(self, remote, outgoing,
1869 1869 remoteheads, newbranch,
1870 1870 bool(inc))
1871 1871
1872 1872 # create a changegroup from local
1873 1873 if revs is None and not outgoing.excluded:
1874 1874 # push everything,
1875 1875 # use the fast path, no race possible on push
1876 1876 cg = self._changegroup(outgoing.missing, 'push')
1877 1877 else:
1878 1878 cg = self.getlocalbundle('push', outgoing)
1879 1879
1880 1880 # apply changegroup to remote
1881 1881 if unbundle:
1882 1882 # local repo finds heads on server, finds out what
1883 1883 # revs it must push. once revs transferred, if server
1884 1884 # finds it has different heads (someone else won
1885 1885 # commit/push race), server aborts.
1886 1886 if force:
1887 1887 remoteheads = ['force']
1888 1888 # ssh: return remote's addchangegroup()
1889 1889 # http: return remote's addchangegroup() or 0 for error
1890 1890 ret = remote.unbundle(cg, remoteheads, 'push')
1891 1891 else:
1892 1892 # we return an integer indicating remote head count
1893 1893 # change
1894 1894 ret = remote.addchangegroup(cg, 'push', self.url())
1895 1895
1896 1896 if ret:
1897 1897 # push succeeded, synchronize the target of the push
1898 1898 cheads = outgoing.missingheads
1899 1899 elif revs is None:
1900 1900 # All-out push failed; synchronize all common
1901 1901 cheads = outgoing.commonheads
1902 1902 else:
1903 1903 # I want cheads = heads(::missingheads and ::commonheads)
1904 1904 # (missingheads is revs with secret changeset filtered out)
1905 1905 #
1906 1906 # This can be expressed as:
1907 1907 # cheads = ( (missingheads and ::commonheads)
1908 1908 # + (commonheads and ::missingheads)
1909 1909 # )
1910 1910 #
1911 1911 # while trying to push we already computed the following:
1912 1912 # common = (::commonheads)
1913 1913 # missing = ((commonheads::missingheads) - commonheads)
1914 1914 #
1915 1915 # We can pick:
1916 1916 # * missingheads part of common (::commonheads)
1917 1917 common = set(outgoing.common)
1918 1918 cheads = [node for node in revs if node in common]
1919 1919 # and
1920 1920 # * commonheads parents on missing
1921 1921 revset = self.set('%ln and parents(roots(%ln))',
1922 1922 outgoing.commonheads,
1923 1923 outgoing.missing)
1924 1924 cheads.extend(c.node() for c in revset)
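# Worked example (illustrative): on a linear graph A-B-C-D with
# commonheads = {B} (so common = {A, B}) and pushed revs = [D],
# the first pick finds no rev of revs inside common, and the
# revset picks B, a commonhead that is a parent of the missing
# root C; hence cheads = [B].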
1925 1925 # even when we don't push, exchanging phase data is useful
1926 1926 remotephases = remote.listkeys('phases')
1927 1927 if not remotephases: # old server or public only repo
1928 1928 phases.advanceboundary(self, phases.public, cheads)
1929 1929 # don't push any phase data as there is nothing to push
1930 1930 else:
1931 1931 ana = phases.analyzeremotephases(self, cheads, remotephases)
1932 1932 pheads, droots = ana
1933 1933 ### Apply remote phase on local
1934 1934 if remotephases.get('publishing', False):
1935 1935 phases.advanceboundary(self, phases.public, cheads)
1936 1936 else: # publish = False
1937 1937 phases.advanceboundary(self, phases.public, pheads)
1938 1938 phases.advanceboundary(self, phases.draft, cheads)
1939 1939 ### Apply local phase on remote
1940 1940
1941 1941 # Get the list of all revs draft on remote by public here.
1942 1942 # XXX Beware that the revset breaks if droots is not strictly
1943 1943 # XXX roots; we may want to ensure it is, but that is costly
1944 1944 outdated = self.set('heads((%ln::%ln) and public())',
1945 1945 droots, cheads)
1946 1946 for newremotehead in outdated:
1947 1947 r = remote.pushkey('phases',
1948 1948 newremotehead.hex(),
1949 1949 str(phases.draft),
1950 1950 str(phases.public))
1951 1951 if not r:
1952 1952 self.ui.warn(_('updating %s to public failed!\n')
1953 1953 % newremotehead)
1954 if ('obsolete' in remote.listkeys('namespaces')
1955 and self.obsstore):
1954 if (self.obsstore and
1955 'obsolete' in remote.listkeys('namespaces')):
1956 1956 data = self.listkeys('obsolete')['dump']
1957 1957 r = remote.pushkey('obsolete', 'dump', '', data)
1958 1958 if not r:
1959 1959 self.ui.warn(_('failed to push obsolete markers!\n'))
1960 1960 finally:
1961 1961 if lock is not None:
1962 1962 lock.release()
1963 1963 finally:
1964 1964 locallock.release()
1965 1965
1966 1966 self.ui.debug("checking for updated bookmarks\n")
1967 1967 rb = remote.listkeys('bookmarks')
1968 1968 for k in rb.keys():
1969 1969 if k in self._bookmarks:
1970 1970 nr, nl = rb[k], hex(self._bookmarks[k])
1971 1971 if nr in self:
1972 1972 cr = self[nr]
1973 1973 cl = self[nl]
1974 1974 if cl in cr.descendants():
1975 1975 r = remote.pushkey('bookmarks', k, nr, nl)
1976 1976 if r:
1977 1977 self.ui.status(_("updating bookmark %s\n") % k)
1978 1978 else:
1979 1979 self.ui.warn(_('updating bookmark %s'
1980 1980 ' failed!\n') % k)
1981 1981
1982 1982 return ret
1983 1983
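# Interpreting push() return values, per the docstring above
# (illustrative):
#
#     ret = repo.push(other, newbranch=True)
#     if ret is None:
#         pass    # nothing to push
#     elif ret == 0:
#         pass    # HTTP error
#     else:
#         pass    # pushed; value encoded as in addchangegroup()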
1984 1984 def changegroupinfo(self, nodes, source):
1985 1985 if self.ui.verbose or source == 'bundle':
1986 1986 self.ui.status(_("%d changesets found\n") % len(nodes))
1987 1987 if self.ui.debugflag:
1988 1988 self.ui.debug("list of changesets:\n")
1989 1989 for node in nodes:
1990 1990 self.ui.debug("%s\n" % hex(node))
1991 1991
1992 1992 def changegroupsubset(self, bases, heads, source):
1993 1993 """Compute a changegroup consisting of all the nodes that are
1994 1994 descendants of any of the bases and ancestors of any of the heads.
1995 1995 Return a chunkbuffer object whose read() method will return
1996 1996 successive changegroup chunks.
1997 1997
1998 1998 It is fairly complex as determining which filenodes and which
1999 1999 manifest nodes need to be included for the changeset to be complete
2000 2000 is non-trivial.
2001 2001
2002 2002 Another wrinkle is doing the reverse, figuring out which changeset in
2003 2003 the changegroup a particular filenode or manifestnode belongs to.
2004 2004 """
2005 2005 cl = self.changelog
2006 2006 if not bases:
2007 2007 bases = [nullid]
2008 2008 csets, bases, heads = cl.nodesbetween(bases, heads)
2009 2009 # We assume that all ancestors of bases are known
2010 2010 common = set(cl.ancestors([cl.rev(n) for n in bases]))
2011 2011 return self._changegroupsubset(common, csets, heads, source)
2012 2012
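# Illustrative changegroupsubset() call:
#
#     cg = repo.changegroupsubset([basenode], [headnode], 'pull')
#     chunk = cg.read(4096)    # successive changegroup chunks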
2013 2013 def getlocalbundle(self, source, outgoing):
2014 2014 """Like getbundle, but taking a discovery.outgoing as an argument.
2015 2015
2016 2016 This is only implemented for local repos and reuses potentially
2017 2017 precomputed sets in outgoing."""
2018 2018 if not outgoing.missing:
2019 2019 return None
2020 2020 return self._changegroupsubset(outgoing.common,
2021 2021 outgoing.missing,
2022 2022 outgoing.missingheads,
2023 2023 source)
2024 2024
2025 2025 def getbundle(self, source, heads=None, common=None):
2026 2026 """Like changegroupsubset, but returns the set difference between the
2027 2027 ancestors of heads and the ancestors of common.
2028 2028
2029 2029 If heads is None, use the local heads. If common is None, use [nullid].
2030 2030
2031 2031 The nodes in common might not all be known locally due to the way the
2032 2032 current discovery protocol works.
2033 2033 """
2034 2034 cl = self.changelog
2035 2035 if common:
2036 2036 nm = cl.nodemap
2037 2037 common = [n for n in common if n in nm]
2038 2038 else:
2039 2039 common = [nullid]
2040 2040 if not heads:
2041 2041 heads = cl.heads()
2042 2042 return self.getlocalbundle(source,
2043 2043 discovery.outgoing(cl, common, heads))
2044 2044
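# Illustrative getbundle() call; None heads/common fall back to the
# local heads and [nullid] as described above:
#
#     cg = repo.getbundle('pull', heads=None, common=None)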
2045 2045 def _changegroupsubset(self, commonrevs, csets, heads, source):
2046 2046
2047 2047 cl = self.changelog
2048 2048 mf = self.manifest
2049 2049 mfs = {} # needed manifests
2050 2050 fnodes = {} # needed file nodes
2051 2051 changedfiles = set()
2052 2052 fstate = ['', {}]
2053 2053 count = [0, 0]
2054 2054
2055 2055 # can we go through the fast path?
2056 2056 heads.sort()
2057 2057 if heads == sorted(self.heads()):
2058 2058 return self._changegroup(csets, source)
2059 2059
2060 2060 # slow path
2061 2061 self.hook('preoutgoing', throw=True, source=source)
2062 2062 self.changegroupinfo(csets, source)
2063 2063
2064 2064 # filter any nodes that claim to be part of the known set
2065 2065 def prune(revlog, missing):
2066 2066 rr, rl = revlog.rev, revlog.linkrev
2067 2067 return [n for n in missing
2068 2068 if rl(rr(n)) not in commonrevs]
2069 2069
2070 2070 progress = self.ui.progress
2071 2071 _bundling = _('bundling')
2072 2072 _changesets = _('changesets')
2073 2073 _manifests = _('manifests')
2074 2074 _files = _('files')
2075 2075
2076 2076 def lookup(revlog, x):
2077 2077 if revlog == cl:
2078 2078 c = cl.read(x)
2079 2079 changedfiles.update(c[3])
2080 2080 mfs.setdefault(c[0], x)
2081 2081 count[0] += 1
2082 2082 progress(_bundling, count[0],
2083 2083 unit=_changesets, total=count[1])
2084 2084 return x
2085 2085 elif revlog == mf:
2086 2086 clnode = mfs[x]
2087 2087 mdata = mf.readfast(x)
2088 2088 for f, n in mdata.iteritems():
2089 2089 if f in changedfiles:
2090 2090 fnodes[f].setdefault(n, clnode)
2091 2091 count[0] += 1
2092 2092 progress(_bundling, count[0],
2093 2093 unit=_manifests, total=count[1])
2094 2094 return clnode
2095 2095 else:
2096 2096 progress(_bundling, count[0], item=fstate[0],
2097 2097 unit=_files, total=count[1])
2098 2098 return fstate[1][x]
2099 2099
2100 2100 bundler = changegroup.bundle10(lookup)
2101 2101 reorder = self.ui.config('bundle', 'reorder', 'auto')
2102 2102 if reorder == 'auto':
2103 2103 reorder = None
2104 2104 else:
2105 2105 reorder = util.parsebool(reorder)
2106 2106
2107 2107 def gengroup():
2108 2108 # Create a changenode group generator that will call our functions
2109 2109 # back to lookup the owning changenode and collect information.
2110 2110 count[:] = [0, len(csets)]
2111 2111 for chunk in cl.group(csets, bundler, reorder=reorder):
2112 2112 yield chunk
2113 2113 progress(_bundling, None)
2114 2114
2115 2115 # Create a generator for the manifestnodes that calls our lookup
2116 2116 # and data collection functions back.
2117 2117 for f in changedfiles:
2118 2118 fnodes[f] = {}
2119 2119 count[:] = [0, len(mfs)]
2120 2120 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
2121 2121 yield chunk
2122 2122 progress(_bundling, None)
2123 2123
2124 2124 mfs.clear()
2125 2125
2126 2126 # Go through all our files in order sorted by name.
2127 2127 count[:] = [0, len(changedfiles)]
2128 2128 for fname in sorted(changedfiles):
2129 2129 filerevlog = self.file(fname)
2130 2130 if not len(filerevlog):
2131 2131 raise util.Abort(_("empty or missing revlog for %s")
2132 2132 % fname)
2133 2133 fstate[0] = fname
2134 2134 fstate[1] = fnodes.pop(fname, {})
2135 2135
2136 2136 nodelist = prune(filerevlog, fstate[1])
2137 2137 if nodelist:
2138 2138 count[0] += 1
2139 2139 yield bundler.fileheader(fname)
2140 2140 for chunk in filerevlog.group(nodelist, bundler, reorder):
2141 2141 yield chunk
2142 2142
2143 2143 # Signal that no more groups are left.
2144 2144 yield bundler.close()
2145 2145 progress(_bundling, None)
2146 2146
2147 2147 if csets:
2148 2148 self.hook('outgoing', node=hex(csets[0]), source=source)
2149 2149
2150 2150 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2151 2151
2152 2152 def changegroup(self, basenodes, source):
2153 2153 # to avoid a race we use changegroupsubset() (issue1320)
2154 2154 return self.changegroupsubset(basenodes, self.heads(), source)
2155 2155
2156 2156 def _changegroup(self, nodes, source):
2157 2157 """Compute the changegroup of all nodes that we have that a recipient
2158 2158 doesn't. Return a chunkbuffer object whose read() method will return
2159 2159 successive changegroup chunks.
2160 2160
2161 2161 This is much easier than the previous function as we can assume that
2162 2162 the recipient has any changenode we aren't sending them.
2163 2163
2164 2164 nodes is the set of nodes to send"""
2165 2165
2166 2166 cl = self.changelog
2167 2167 mf = self.manifest
2168 2168 mfs = {}
2169 2169 changedfiles = set()
2170 2170 fstate = ['']
2171 2171 count = [0, 0]
2172 2172
2173 2173 self.hook('preoutgoing', throw=True, source=source)
2174 2174 self.changegroupinfo(nodes, source)
2175 2175
2176 2176 revset = set([cl.rev(n) for n in nodes])
2177 2177
2178 2178 def gennodelst(log):
2179 2179 ln, llr = log.node, log.linkrev
2180 2180 return [ln(r) for r in log if llr(r) in revset]
2181 2181
2182 2182 progress = self.ui.progress
2183 2183 _bundling = _('bundling')
2184 2184 _changesets = _('changesets')
2185 2185 _manifests = _('manifests')
2186 2186 _files = _('files')
2187 2187
2188 2188 def lookup(revlog, x):
2189 2189 if revlog == cl:
2190 2190 c = cl.read(x)
2191 2191 changedfiles.update(c[3])
2192 2192 mfs.setdefault(c[0], x)
2193 2193 count[0] += 1
2194 2194 progress(_bundling, count[0],
2195 2195 unit=_changesets, total=count[1])
2196 2196 return x
2197 2197 elif revlog == mf:
2198 2198 count[0] += 1
2199 2199 progress(_bundling, count[0],
2200 2200 unit=_manifests, total=count[1])
2201 2201 return cl.node(revlog.linkrev(revlog.rev(x)))
2202 2202 else:
2203 2203 progress(_bundling, count[0], item=fstate[0],
2204 2204 total=count[1], unit=_files)
2205 2205 return cl.node(revlog.linkrev(revlog.rev(x)))
2206 2206
2207 2207 bundler = changegroup.bundle10(lookup)
2208 2208 reorder = self.ui.config('bundle', 'reorder', 'auto')
2209 2209 if reorder == 'auto':
2210 2210 reorder = None
2211 2211 else:
2212 2212 reorder = util.parsebool(reorder)
2213 2213
2214 2214 def gengroup():
2215 2215 '''yield a sequence of changegroup chunks (strings)'''
2216 2216 # construct a list of all changed files
2217 2217
2218 2218 count[:] = [0, len(nodes)]
2219 2219 for chunk in cl.group(nodes, bundler, reorder=reorder):
2220 2220 yield chunk
2221 2221 progress(_bundling, None)
2222 2222
2223 2223 count[:] = [0, len(mfs)]
2224 2224 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2225 2225 yield chunk
2226 2226 progress(_bundling, None)
2227 2227
2228 2228 count[:] = [0, len(changedfiles)]
2229 2229 for fname in sorted(changedfiles):
2230 2230 filerevlog = self.file(fname)
2231 2231 if not len(filerevlog):
2232 2232 raise util.Abort(_("empty or missing revlog for %s")
2233 2233 % fname)
2234 2234 fstate[0] = fname
2235 2235 nodelist = gennodelst(filerevlog)
2236 2236 if nodelist:
2237 2237 count[0] += 1
2238 2238 yield bundler.fileheader(fname)
2239 2239 for chunk in filerevlog.group(nodelist, bundler, reorder):
2240 2240 yield chunk
2241 2241 yield bundler.close()
2242 2242 progress(_bundling, None)
2243 2243
2244 2244 if nodes:
2245 2245 self.hook('outgoing', node=hex(nodes[0]), source=source)
2246 2246
2247 2247 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2248 2248
2249 2249 def addchangegroup(self, source, srctype, url, emptyok=False):
2250 2250 """Add the changegroup returned by source.read() to this repo.
2251 2251 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2252 2252 the URL of the repo where this changegroup is coming from.
2253 2253
2254 2254 Return an integer summarizing the change to this repo:
2255 2255 - nothing changed or no source: 0
2256 2256 - more heads than before: 1+added heads (2..n)
2257 2257 - fewer heads than before: -1-removed heads (-2..-n)
2258 2258 - number of heads stays the same: 1
2259 2259 """
2260 2260 def csmap(x):
2261 2261 self.ui.debug("add changeset %s\n" % short(x))
2262 2262 return len(cl)
2263 2263
2264 2264 def revmap(x):
2265 2265 return cl.rev(x)
2266 2266
2267 2267 if not source:
2268 2268 return 0
2269 2269
2270 2270 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2271 2271
2272 2272 changesets = files = revisions = 0
2273 2273 efiles = set()
2274 2274
2275 2275 # write changelog data to temp files so concurrent readers will not see
2276 2276 # an inconsistent view
2277 2277 cl = self.changelog
2278 2278 cl.delayupdate()
2279 2279 oldheads = cl.heads()
2280 2280
2281 2281 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2282 2282 try:
2283 2283 trp = weakref.proxy(tr)
2284 2284 # pull off the changeset group
2285 2285 self.ui.status(_("adding changesets\n"))
2286 2286 clstart = len(cl)
2287 2287 class prog(object):
2288 2288 step = _('changesets')
2289 2289 count = 1
2290 2290 ui = self.ui
2291 2291 total = None
2292 2292 def __call__(self):
2293 2293 self.ui.progress(self.step, self.count, unit=_('chunks'),
2294 2294 total=self.total)
2295 2295 self.count += 1
2296 2296 pr = prog()
2297 2297 source.callback = pr
2298 2298
2299 2299 source.changelogheader()
2300 2300 srccontent = cl.addgroup(source, csmap, trp)
2301 2301 if not (srccontent or emptyok):
2302 2302 raise util.Abort(_("received changelog group is empty"))
2303 2303 clend = len(cl)
2304 2304 changesets = clend - clstart
2305 2305 for c in xrange(clstart, clend):
2306 2306 efiles.update(self[c].files())
2307 2307 efiles = len(efiles)
2308 2308 self.ui.progress(_('changesets'), None)
2309 2309
2310 2310 # pull off the manifest group
2311 2311 self.ui.status(_("adding manifests\n"))
2312 2312 pr.step = _('manifests')
2313 2313 pr.count = 1
2314 2314 pr.total = changesets # manifests <= changesets
2315 2315 # no need to check for empty manifest group here:
2316 2316 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2317 2317 # no new manifest will be created and the manifest group will
2318 2318 # be empty during the pull
2319 2319 source.manifestheader()
2320 2320 self.manifest.addgroup(source, revmap, trp)
2321 2321 self.ui.progress(_('manifests'), None)
2322 2322
2323 2323 needfiles = {}
2324 2324 if self.ui.configbool('server', 'validate', default=False):
2325 2325 # validate incoming csets have their manifests
2326 2326 for cset in xrange(clstart, clend):
2327 2327 mfest = self.changelog.read(self.changelog.node(cset))[0]
2328 2328 mfest = self.manifest.readdelta(mfest)
2329 2329 # store file nodes we must see
2330 2330 for f, n in mfest.iteritems():
2331 2331 needfiles.setdefault(f, set()).add(n)
2332 2332
2333 2333 # process the files
2334 2334 self.ui.status(_("adding file changes\n"))
2335 2335 pr.step = _('files')
2336 2336 pr.count = 1
2337 2337 pr.total = efiles
2338 2338 source.callback = None
2339 2339
2340 2340 while True:
2341 2341 chunkdata = source.filelogheader()
2342 2342 if not chunkdata:
2343 2343 break
2344 2344 f = chunkdata["filename"]
2345 2345 self.ui.debug("adding %s revisions\n" % f)
2346 2346 pr()
2347 2347 fl = self.file(f)
2348 2348 o = len(fl)
2349 2349 if not fl.addgroup(source, revmap, trp):
2350 2350 raise util.Abort(_("received file revlog group is empty"))
2351 2351 revisions += len(fl) - o
2352 2352 files += 1
2353 2353 if f in needfiles:
2354 2354 needs = needfiles[f]
2355 2355 for new in xrange(o, len(fl)):
2356 2356 n = fl.node(new)
2357 2357 if n in needs:
2358 2358 needs.remove(n)
2359 2359 if not needs:
2360 2360 del needfiles[f]
2361 2361 self.ui.progress(_('files'), None)
2362 2362
2363 2363 for f, needs in needfiles.iteritems():
2364 2364 fl = self.file(f)
2365 2365 for n in needs:
2366 2366 try:
2367 2367 fl.rev(n)
2368 2368 except error.LookupError:
2369 2369 raise util.Abort(
2370 2370 _('missing file data for %s:%s - run hg verify') %
2371 2371 (f, hex(n)))
2372 2372
2373 2373 dh = 0
2374 2374 if oldheads:
2375 2375 heads = cl.heads()
2376 2376 dh = len(heads) - len(oldheads)
2377 2377 for h in heads:
2378 2378 if h not in oldheads and self[h].closesbranch():
2379 2379 dh -= 1
2380 2380 htext = ""
2381 2381 if dh:
2382 2382 htext = _(" (%+d heads)") % dh
2383 2383
2384 2384 self.ui.status(_("added %d changesets"
2385 2385 " with %d changes to %d files%s\n")
2386 2386 % (changesets, revisions, files, htext))
2387 2387
2388 2388 if changesets > 0:
2389 2389 p = lambda: cl.writepending() and self.root or ""
2390 2390 self.hook('pretxnchangegroup', throw=True,
2391 2391 node=hex(cl.node(clstart)), source=srctype,
2392 2392 url=url, pending=p)
2393 2393
2394 2394 added = [cl.node(r) for r in xrange(clstart, clend)]
2395 2395 publishing = self.ui.configbool('phases', 'publish', True)
2396 2396 if srctype == 'push':
2397 2397 # Old servers cannot push the boundary themselves.
2398 2398 # New servers won't push the boundary if the changeset
2399 2399 # already existed locally as secret.
2400 2400 #
2401 2401 # We should not use added here but the list of all changes
2402 2402 # in the bundle
2403 2403 if publishing:
2404 2404 phases.advanceboundary(self, phases.public, srccontent)
2405 2405 else:
2406 2406 phases.advanceboundary(self, phases.draft, srccontent)
2407 2407 phases.retractboundary(self, phases.draft, added)
2408 2408 elif srctype != 'strip':
2409 2409 # publishing only alters behavior during push
2410 2410 #
2411 2411 # strip should not touch boundary at all
2412 2412 phases.retractboundary(self, phases.draft, added)
2413 2413
2414 2414 # make changelog see real files again
2415 2415 cl.finalize(trp)
2416 2416
2417 2417 tr.close()
2418 2418
2419 2419 if changesets > 0:
2420 2420 def runhooks():
2421 2421 # forcefully update the on-disk branch cache
2422 2422 self.ui.debug("updating the branch cache\n")
2423 2423 self.updatebranchcache()
2424 2424 self.hook("changegroup", node=hex(cl.node(clstart)),
2425 2425 source=srctype, url=url)
2426 2426
2427 2427 for n in added:
2428 2428 self.hook("incoming", node=hex(n), source=srctype,
2429 2429 url=url)
2430 2430 self._afterlock(runhooks)
2431 2431
2432 2432 finally:
2433 2433 tr.release()
2434 2434 # never return 0 here:
2435 2435 if dh < 0:
2436 2436 return dh - 1
2437 2437 else:
2438 2438 return dh + 1
2439 2439
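# Worked examples of the return encoding above (illustrative): heads
# going 2 -> 2 gives dh = 0, returning 1; 2 -> 4 gives dh = 2,
# returning 3; 2 -> 1 gives dh = -1, returning -2.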
2440 2440 def stream_in(self, remote, requirements):
2441 2441 lock = self.lock()
2442 2442 try:
2443 2443 fp = remote.stream_out()
2444 2444 l = fp.readline()
2445 2445 try:
2446 2446 resp = int(l)
2447 2447 except ValueError:
2448 2448 raise error.ResponseError(
2449 2449 _('unexpected response from remote server:'), l)
2450 2450 if resp == 1:
2451 2451 raise util.Abort(_('operation forbidden by server'))
2452 2452 elif resp == 2:
2453 2453 raise util.Abort(_('locking the remote repository failed'))
2454 2454 elif resp != 0:
2455 2455 raise util.Abort(_('the server sent an unknown error code'))
2456 2456 self.ui.status(_('streaming all changes\n'))
2457 2457 l = fp.readline()
2458 2458 try:
2459 2459 total_files, total_bytes = map(int, l.split(' ', 1))
2460 2460 except (ValueError, TypeError):
2461 2461 raise error.ResponseError(
2462 2462 _('unexpected response from remote server:'), l)
2463 2463 self.ui.status(_('%d files to transfer, %s of data\n') %
2464 2464 (total_files, util.bytecount(total_bytes)))
2465 2465 handled_bytes = 0
2466 2466 self.ui.progress(_('clone'), 0, total=total_bytes)
2467 2467 start = time.time()
2468 2468 for i in xrange(total_files):
2469 2469 # XXX doesn't support '\n' or '\r' in filenames
2470 2470 l = fp.readline()
2471 2471 try:
2472 2472 name, size = l.split('\0', 1)
2473 2473 size = int(size)
2474 2474 except (ValueError, TypeError):
2475 2475 raise error.ResponseError(
2476 2476 _('unexpected response from remote server:'), l)
2477 2477 if self.ui.debugflag:
2478 2478 self.ui.debug('adding %s (%s)\n' %
2479 2479 (name, util.bytecount(size)))
2480 2480 # for backwards compat, name was partially encoded
2481 2481 ofp = self.sopener(store.decodedir(name), 'w')
2482 2482 for chunk in util.filechunkiter(fp, limit=size):
2483 2483 handled_bytes += len(chunk)
2484 2484 self.ui.progress(_('clone'), handled_bytes,
2485 2485 total=total_bytes)
2486 2486 ofp.write(chunk)
2487 2487 ofp.close()
2488 2488 elapsed = time.time() - start
2489 2489 if elapsed <= 0:
2490 2490 elapsed = 0.001
2491 2491 self.ui.progress(_('clone'), None)
2492 2492 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2493 2493 (util.bytecount(total_bytes), elapsed,
2494 2494 util.bytecount(total_bytes / elapsed)))
2495 2495
2496 2496 # new requirements = old non-format requirements +
2497 2497 # new format-related
2498 2498 # requirements from the streamed-in repository
2499 2499 requirements.update(set(self.requirements) - self.supportedformats)
2500 2500 self._applyrequirements(requirements)
2501 2501 self._writerequirements()
2502 2502
2503 2503 self.invalidate()
2504 2504 return len(self.heads()) + 1
2505 2505 finally:
2506 2506 lock.release()
2507 2507
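# The stream_out wire format parsed above, sketched (illustrative):
#
#     0\n                           # status; 1 forbidden, 2 lock failed
#     <total_files> <total_bytes>\n
#     <name>\0<size>\n              # repeated total_files times, each
#     <size raw bytes>              # line followed by the file data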
2508 2508 def clone(self, remote, heads=[], stream=False):
2509 2509 '''clone remote repository.
2510 2510
2511 2511 keyword arguments:
2512 2512 heads: list of revs to clone (forces use of pull)
2513 2513 stream: use streaming clone if possible'''
2514 2514
2515 2515 # now, all clients that can request uncompressed clones can
2516 2516 # read repo formats supported by all servers that can serve
2517 2517 # them.
2518 2518
2519 2519 # if revlog format changes, client will have to check version
2520 2520 # and format flags on "stream" capability, and use
2521 2521 # uncompressed only if compatible.
2522 2522
2523 2523 if not stream:
2524 2524 # if the server explicitly prefers to stream (for fast LANs)
2525 2525 stream = remote.capable('stream-preferred')
2526 2526
2527 2527 if stream and not heads:
2528 2528 # 'stream' means remote revlog format is revlogv1 only
2529 2529 if remote.capable('stream'):
2530 2530 return self.stream_in(remote, set(('revlogv1',)))
2531 2531 # otherwise, 'streamreqs' contains the remote revlog format
2532 2532 streamreqs = remote.capable('streamreqs')
2533 2533 if streamreqs:
2534 2534 streamreqs = set(streamreqs.split(','))
2535 2535 # if we support it, stream in and adjust our requirements
2536 2536 if not streamreqs - self.supportedformats:
2537 2537 return self.stream_in(remote, streamreqs)
2538 2538 return self.pull(remote, heads)
2539 2539
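# Illustrative clone() calls:
#
#     repo.clone(other, stream=True)   # stream if the server allows it
#     repo.clone(other, heads=[h])     # explicit heads force a pull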
2540 2540 def pushkey(self, namespace, key, old, new):
2541 2541 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2542 2542 old=old, new=new)
2543 2543 ret = pushkey.push(self, namespace, key, old, new)
2544 2544 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2545 2545 ret=ret)
2546 2546 return ret
2547 2547
2548 2548 def listkeys(self, namespace):
2549 2549 self.hook('prelistkeys', throw=True, namespace=namespace)
2550 2550 values = pushkey.list(self, namespace)
2551 2551 self.hook('listkeys', namespace=namespace, values=values)
2552 2552 return values
2553 2553
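# Illustrative pushkey/listkeys usage, mirroring the phase push above:
#
#     ok = repo.pushkey('phases', ctx.hex(),
#                       str(phases.draft), str(phases.public))
#     marks = repo.listkeys('bookmarks')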
2554 2554 def debugwireargs(self, one, two, three=None, four=None, five=None):
2555 2555 '''used to test argument passing over the wire'''
2556 2556 return "%s %s %s %s %s" % (one, two, three, four, five)
2557 2557
2558 2558 def savecommitmessage(self, text):
2559 2559 fp = self.opener('last-message.txt', 'wb')
2560 2560 try:
2561 2561 fp.write(text)
2562 2562 finally:
2563 2563 fp.close()
2564 2564 return self.pathto(fp.name[len(self.root)+1:])
2565 2565
2566 2566 # used to avoid circular references so destructors work
2567 2567 def aftertrans(files):
2568 2568 renamefiles = [tuple(t) for t in files]
2569 2569 def a():
2570 2570 for src, dest in renamefiles:
2571 2571 try:
2572 2572 util.rename(src, dest)
2573 2573 except OSError: # journal file does not yet exist
2574 2574 pass
2575 2575 return a
2576 2576
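# Illustrative aftertrans() usage: queue journal -> undo renames and
# run them once the transaction releases (a sketch):
#
#     onrelease = aftertrans([('journal', 'undo')])
#     onrelease()    # performs the renames, ignoring missing files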
2577 2577 def undoname(fn):
2578 2578 base, name = os.path.split(fn)
2579 2579 assert name.startswith('journal')
2580 2580 return os.path.join(base, name.replace('journal', 'undo', 1))
2581 2581
2582 2582 def instance(ui, path, create):
2583 2583 return localrepository(ui, util.urllocalpath(path), create)
2584 2584
2585 2585 def islocal(path):
2586 2586 return True
@@ -1,646 +1,645
1 1 commit hooks can see env vars
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ cat > .hg/hgrc <<EOF
6 6 > [hooks]
7 7 > commit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit"
8 8 > commit.b = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit.b"
9 9 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= python \"$TESTDIR/printenv.py\" precommit"
10 10 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxncommit"
11 11 > pretxncommit.tip = hg -q tip
12 12 > pre-identify = python "$TESTDIR/printenv.py" pre-identify 1
13 13 > pre-cat = python "$TESTDIR/printenv.py" pre-cat
14 14 > post-cat = python "$TESTDIR/printenv.py" post-cat
15 15 > EOF
16 16 $ echo a > a
17 17 $ hg add a
18 18 $ hg commit -m a
19 19 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
20 20 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
21 21 0:cb9a9f314b8b
22 22 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
23 23 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
24 24
25 25 $ hg clone . ../b
26 26 updating to branch default
27 27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 28 $ cd ../b
29 29
30 30 changegroup hooks can see env vars
31 31
32 32 $ cat > .hg/hgrc <<EOF
33 33 > [hooks]
34 34 > prechangegroup = python "$TESTDIR/printenv.py" prechangegroup
35 35 > changegroup = python "$TESTDIR/printenv.py" changegroup
36 36 > incoming = python "$TESTDIR/printenv.py" incoming
37 37 > EOF
38 38
39 39 pretxncommit and commit hooks can see both parents of merge
40 40
41 41 $ cd ../a
42 42 $ echo b >> a
43 43 $ hg commit -m a1 -d "1 0"
44 44 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
45 45 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
46 46 1:ab228980c14d
47 47 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
48 48 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
49 49 $ hg update -C 0
50 50 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 51 $ echo b > b
52 52 $ hg add b
53 53 $ hg commit -m b -d '1 0'
54 54 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
55 55 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
56 56 2:ee9deb46ab31
57 57 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
58 58 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
59 59 created new head
60 60 $ hg merge 1
61 61 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 62 (branch merge, don't forget to commit)
63 63 $ hg commit -m merge -d '2 0'
64 64 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
65 65 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
66 66 3:07f3376c1e65
67 67 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
68 68 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
69 69
70 70 test generic hooks
71 71
72 72 $ hg id
73 73 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
74 74 warning: pre-identify hook exited with status 1
75 75 [1]
76 76 $ hg cat b
77 77 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
78 78 b
79 79 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
80 80
81 81 $ cd ../b
82 82 $ hg pull ../a
83 83 pulling from ../a
84 84 searching for changes
85 85 prechangegroup hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 3 changesets with 2 changes to 2 files
90 90 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
91 91 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
92 92 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
93 93 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
94 94 (run 'hg update' to get a working copy)
95 95
96 96 tag hooks can see env vars
97 97
98 98 $ cd ../a
99 99 $ cat >> .hg/hgrc <<EOF
100 100 > pretag = python "$TESTDIR/printenv.py" pretag
101 101 > tag = sh -c "HG_PARENT1= HG_PARENT2= python \"$TESTDIR/printenv.py\" tag"
102 102 > EOF
103 103 $ hg tag -d '3 0' a
104 104 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
105 105 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
106 106 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
107 107 4:539e4b31b6dc
108 108 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
109 109 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
110 110 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
111 111 $ hg tag -l la
112 112 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
113 113 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
114 114
115 115 pretag hook can forbid tagging
116 116
117 117 $ echo "pretag.forbid = python \"$TESTDIR/printenv.py\" pretag.forbid 1" >> .hg/hgrc
118 118 $ hg tag -d '4 0' fa
119 119 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
120 120 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
121 121 abort: pretag.forbid hook exited with status 1
122 122 [255]
123 123 $ hg tag -l fla
124 124 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
125 125 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
126 126 abort: pretag.forbid hook exited with status 1
127 127 [255]
128 128
129 129 pretxncommit hook can see changeset, can roll back txn; the
130 130 changeset is no longer there afterwards
131 131
132 132 $ echo "pretxncommit.forbid0 = hg tip -q" >> .hg/hgrc
133 133 $ echo "pretxncommit.forbid1 = python \"$TESTDIR/printenv.py\" pretxncommit.forbid 1" >> .hg/hgrc
134 134 $ echo z > z
135 135 $ hg add z
136 136 $ hg -q tip
137 137 4:539e4b31b6dc
138 138 $ hg commit -m 'fail' -d '4 0'
139 139 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
140 140 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
141 141 5:6f611f8018c1
142 142 5:6f611f8018c1
143 143 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
144 144 transaction abort!
145 145 rollback completed
146 146 abort: pretxncommit.forbid1 hook exited with status 1
147 147 [255]
148 148 $ hg -q tip
149 149 4:539e4b31b6dc
150 150
151 151 precommit hook can prevent commit
152 152
153 153 $ echo "precommit.forbid = python \"$TESTDIR/printenv.py\" precommit.forbid 1" >> .hg/hgrc
154 154 $ hg commit -m 'fail' -d '4 0'
155 155 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
156 156 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
157 157 abort: precommit.forbid hook exited with status 1
158 158 [255]
159 159 $ hg -q tip
160 160 4:539e4b31b6dc
161 161
162 162 preupdate hook can prevent update
163 163
164 164 $ echo "preupdate = python \"$TESTDIR/printenv.py\" preupdate" >> .hg/hgrc
165 165 $ hg update 1
166 166 preupdate hook: HG_PARENT1=ab228980c14d
167 167 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
168 168
169 169 update hook
170 170
171 171 $ echo "update = python \"$TESTDIR/printenv.py\" update" >> .hg/hgrc
172 172 $ hg update
173 173 preupdate hook: HG_PARENT1=539e4b31b6dc
174 174 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
175 175 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
176 176
177 177 pushkey hook
178 178
179 179 $ echo "pushkey = python \"$TESTDIR/printenv.py\" pushkey" >> .hg/hgrc
180 180 $ cd ../b
181 181 $ hg bookmark -r null foo
182 182 $ hg push -B foo ../a
183 183 pushing to ../a
184 184 searching for changes
185 185 no changes found
186 186 exporting bookmark foo
187 187 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
188 188 [1]
189 189 $ cd ../a
190 190
191 191 listkeys hook
192 192
193 193 $ echo "listkeys = python \"$TESTDIR/printenv.py\" listkeys" >> .hg/hgrc
194 194 $ hg bookmark -r null bar
195 195 $ cd ../b
196 196 $ hg pull -B bar ../a
197 197 pulling from ../a
198 198 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
199 199 no changes found
200 200 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
201 201 listkeys hook: HG_NAMESPACE=obsolete HG_VALUES={}
202 202 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
203 203 adding remote bookmark bar
204 204 importing bookmark bar
205 205 $ cd ../a
206 206
207 207 test that prepushkey can prevent incoming keys
208 208
209 209 $ echo "prepushkey = python \"$TESTDIR/printenv.py\" prepushkey.forbid 1" >> .hg/hgrc
210 210 $ cd ../b
211 211 $ hg bookmark -r null baz
212 212 $ hg push -B baz ../a
213 213 pushing to ../a
214 214 searching for changes
215 215 no changes found
216 216 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
217 listkeys hook: HG_NAMESPACE=namespaces HG_VALUES={'bookmarks': '', 'namespaces': '', 'obsolete': '', 'phases': ''}
218 217 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
219 218 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
220 219 exporting bookmark baz
221 220 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
222 221 abort: prepushkey hook exited with status 1
223 222 [255]
224 223 $ cd ../a
225 224
226 225 test that prelistkeys can prevent listing keys
227 226
228 227 $ echo "prelistkeys = python \"$TESTDIR/printenv.py\" prelistkeys.forbid 1" >> .hg/hgrc
229 228 $ hg bookmark -r null quux
230 229 $ cd ../b
231 230 $ hg pull -B quux ../a
232 231 pulling from ../a
233 232 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
234 233 abort: prelistkeys hook exited with status 1
235 234 [255]
236 235 $ cd ../a
237 236
238 237 prechangegroup hook can prevent incoming changes
239 238
240 239 $ cd ../b
241 240 $ hg -q tip
242 241 3:07f3376c1e65
243 242 $ cat > .hg/hgrc <<EOF
244 243 > [hooks]
245 244 > prechangegroup.forbid = python "$TESTDIR/printenv.py" prechangegroup.forbid 1
246 245 > EOF
247 246 $ hg pull ../a
248 247 pulling from ../a
249 248 searching for changes
250 249 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
251 250 abort: prechangegroup.forbid hook exited with status 1
252 251 [255]
253 252
254 253 pretxnchangegroup hook can see incoming changes, can roll back txn;
255 254 the incoming changes are no longer there afterwards
256 255
257 256 $ cat > .hg/hgrc <<EOF
258 257 > [hooks]
259 258 > pretxnchangegroup.forbid0 = hg tip -q
260 259 > pretxnchangegroup.forbid1 = python "$TESTDIR/printenv.py" pretxnchangegroup.forbid 1
261 260 > EOF
262 261 $ hg pull ../a
263 262 pulling from ../a
264 263 searching for changes
265 264 adding changesets
266 265 adding manifests
267 266 adding file changes
268 267 added 1 changesets with 1 changes to 1 files
269 268 4:539e4b31b6dc
270 269 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_URL=file:$TESTTMP/a
271 270 transaction abort!
272 271 rollback completed
273 272 abort: pretxnchangegroup.forbid1 hook exited with status 1
274 273 [255]
275 274 $ hg -q tip
276 275 3:07f3376c1e65
277 276
278 277 outgoing hooks can see env vars
279 278
280 279 $ rm .hg/hgrc
281 280 $ cat > ../a/.hg/hgrc <<EOF
282 281 > [hooks]
283 282 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
284 283 > outgoing = python "$TESTDIR/printenv.py" outgoing
285 284 > EOF
286 285 $ hg pull ../a
287 286 pulling from ../a
288 287 searching for changes
289 288 preoutgoing hook: HG_SOURCE=pull
290 289 adding changesets
291 290 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
292 291 adding manifests
293 292 adding file changes
294 293 added 1 changesets with 1 changes to 1 files
295 294 adding remote bookmark quux
296 295 (run 'hg update' to get a working copy)
297 296 $ hg rollback
298 297 repository tip rolled back to revision 3 (undo pull)
299 298
300 299 preoutgoing hook can prevent outgoing changes
301 300
302 301 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> ../a/.hg/hgrc
303 302 $ hg pull ../a
304 303 pulling from ../a
305 304 searching for changes
306 305 preoutgoing hook: HG_SOURCE=pull
307 306 preoutgoing.forbid hook: HG_SOURCE=pull
308 307 abort: preoutgoing.forbid hook exited with status 1
309 308 [255]
310 309
311 310 outgoing hooks work for local clones
312 311
313 312 $ cd ..
314 313 $ cat > a/.hg/hgrc <<EOF
315 314 > [hooks]
316 315 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
317 316 > outgoing = python "$TESTDIR/printenv.py" outgoing
318 317 > EOF
319 318 $ hg clone a c
320 319 preoutgoing hook: HG_SOURCE=clone
321 320 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
322 321 updating to branch default
323 322 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
324 323 $ rm -rf c
325 324
326 325 preoutgoing hook can prevent outgoing changes for local clones
327 326
328 327 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> a/.hg/hgrc
329 328 $ hg clone a zzz
330 329 preoutgoing hook: HG_SOURCE=clone
331 330 preoutgoing.forbid hook: HG_SOURCE=clone
332 331 abort: preoutgoing.forbid hook exited with status 1
333 332 [255]
334 333
335 334 $ cd "$TESTTMP/b"
336 335
337 336 $ cat > hooktests.py <<EOF
338 337 > from mercurial import util
339 338 >
340 339 > uncallable = 0
341 340 >
342 341 > def printargs(args):
343 342 > args.pop('ui', None)
344 343 > args.pop('repo', None)
345 344 > a = list(args.items())
346 345 > a.sort()
347 346 > print 'hook args:'
348 347 > for k, v in a:
349 348 > print ' ', k, v
350 349 >
351 350 > def passhook(**args):
352 351 > printargs(args)
353 352 >
354 353 > def failhook(**args):
355 354 > printargs(args)
356 355 > return True
357 356 >
358 357 > class LocalException(Exception):
359 358 > pass
360 359 >
361 360 > def raisehook(**args):
362 361 > raise LocalException('exception from hook')
363 362 >
364 363 > def aborthook(**args):
365 364 > raise util.Abort('raise abort from hook')
366 365 >
367 366 > def brokenhook(**args):
368 367 > return 1 + {}
369 368 >
370 369 > def verbosehook(ui, **args):
371 370 > ui.note('verbose output from hook\n')
372 371 >
373 372 > def printtags(ui, repo, **args):
374 373 > print repo.tags().keys()
375 374 >
376 375 > class container:
377 376 > unreachable = 1
378 377 > EOF
379 378
380 379 test python hooks
381 380
382 381 #if windows
383 382 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
384 383 #else
385 384 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
386 385 #endif
387 386 $ export PYTHONPATH
388 387
389 388 $ echo '[hooks]' > ../a/.hg/hgrc
390 389 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
391 390 $ hg pull ../a 2>&1 | grep 'raised an exception'
392 391 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
393 392
394 393 $ echo '[hooks]' > ../a/.hg/hgrc
395 394 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
396 395 $ hg pull ../a 2>&1 | grep 'raised an exception'
397 396 error: preoutgoing.raise hook raised an exception: exception from hook
398 397
399 398 $ echo '[hooks]' > ../a/.hg/hgrc
400 399 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
401 400 $ hg pull ../a
402 401 pulling from ../a
403 402 searching for changes
404 403 error: preoutgoing.abort hook failed: raise abort from hook
405 404 abort: raise abort from hook
406 405 [255]
407 406
408 407 $ echo '[hooks]' > ../a/.hg/hgrc
409 408 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
410 409 $ hg pull ../a
411 410 pulling from ../a
412 411 searching for changes
413 412 hook args:
414 413 hooktype preoutgoing
415 414 source pull
416 415 abort: preoutgoing.fail hook failed
417 416 [255]
418 417
419 418 $ echo '[hooks]' > ../a/.hg/hgrc
420 419 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
421 420 $ hg pull ../a
422 421 pulling from ../a
423 422 searching for changes
424 423 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
425 424 [255]
426 425
427 426 $ echo '[hooks]' > ../a/.hg/hgrc
428 427 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
429 428 $ hg pull ../a
430 429 pulling from ../a
431 430 searching for changes
432 431 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
433 432 [255]
434 433
435 434 $ echo '[hooks]' > ../a/.hg/hgrc
436 435 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
437 436 $ hg pull ../a
438 437 pulling from ../a
439 438 searching for changes
440 439 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
441 440 [255]
442 441
443 442 $ echo '[hooks]' > ../a/.hg/hgrc
444 443 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
445 444 $ hg pull ../a
446 445 pulling from ../a
447 446 searching for changes
448 447 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
449 448 [255]
450 449
451 450 $ echo '[hooks]' > ../a/.hg/hgrc
452 451 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
453 452 $ hg pull ../a
454 453 pulling from ../a
455 454 searching for changes
456 455 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
457 456 [255]
458 457
459 458 $ echo '[hooks]' > ../a/.hg/hgrc
460 459 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
461 460 $ hg pull ../a
462 461 pulling from ../a
463 462 searching for changes
464 463 hook args:
465 464 hooktype preoutgoing
466 465 source pull
467 466 adding changesets
468 467 adding manifests
469 468 adding file changes
470 469 added 1 changesets with 1 changes to 1 files
471 470 adding remote bookmark quux
472 471 (run 'hg update' to get a working copy)
473 472
474 473 make sure --traceback works
475 474
476 475 $ echo '[hooks]' > .hg/hgrc
477 476 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
478 477
479 478 $ echo aa > a
480 479 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
481 480 Traceback (most recent call last):
482 481
483 482 $ cd ..
484 483 $ hg init c
485 484 $ cd c
486 485
487 486 $ cat > hookext.py <<EOF
488 487 > def autohook(**args):
489 488 > print "Automatically installed hook"
490 489 >
491 490 > def reposetup(ui, repo):
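> # reposetup runs per repo; registering the callable here installs the hook automatically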
492 491 > repo.ui.setconfig("hooks", "commit.auto", autohook)
493 492 > EOF
494 493 $ echo '[extensions]' >> .hg/hgrc
495 494 $ echo 'hookext = hookext.py' >> .hg/hgrc
496 495
497 496 $ touch foo
498 497 $ hg add foo
499 498 $ hg ci -d '0 0' -m 'add foo'
500 499 Automatically installed hook
501 500 $ echo >> foo
502 501 $ hg ci --debug -d '0 0' -m 'change foo'
503 502 foo
504 503 calling hook commit.auto: <function autohook at *> (glob)
505 504 Automatically installed hook
506 505 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
507 506
508 507 $ hg showconfig hooks
509 508 hooks.commit.auto=<function autohook at *> (glob)
510 509
511 510 test python hook configured with python:[file]:[hook] syntax
512 511
513 512 $ cd ..
514 513 $ mkdir d
515 514 $ cd d
516 515 $ hg init repo
517 516 $ mkdir hooks
518 517
519 518 $ cd hooks
520 519 $ cat > testhooks.py <<EOF
521 520 > def testhook(**args):
522 521 > print 'hook works'
523 522 > EOF
524 523 $ echo '[hooks]' > ../repo/.hg/hgrc
525 524 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
526 525
527 526 $ cd ../repo
528 527 $ hg commit -d '0 0'
529 528 hook works
530 529 nothing changed
531 530 [1]
532 531
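Both spellings of an in-process hook can coexist; a combined configuration
might look like this sketch (paths and hook names are illustrative):

    [hooks]
    # module importable via PYTHONPATH, dotted path to the callable
    pre-commit.frommodule = python:hooktests.passhook
    # explicit file path, with the callable named after the second colon
    pre-commit.fromfile = python:/path/to/testhooks.py:testhook
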
533 532 $ echo '[hooks]' > .hg/hgrc
534 533 $ echo "update.ne = python:`pwd`/nonexisting.py:testhook" >> .hg/hgrc
535 534 $ echo "pre-identify.npmd = python:`pwd`/:no_python_module_dir" >> .hg/hgrc
536 535
537 536 $ hg up null
538 537 loading update.ne hook failed:
539 538 abort: No such file or directory: $TESTTMP/d/repo/nonexisting.py
540 539 [255]
541 540
542 541 $ hg id
543 542 loading pre-identify.npmd hook failed:
544 543 abort: No module named repo!
545 544 [255]
546 545
547 546 $ cd ../../b
548 547
549 548 make sure --traceback works on hook import failure (the module is tried under its own name first, then as hgext_importfail)
550 549
551 550 $ cat > importfail.py <<EOF
552 551 > import somebogusmodule
553 552 > # dereference something in the module to force demandimport to load it
554 553 > somebogusmodule.whatever
555 554 > EOF
556 555
557 556 $ echo '[hooks]' > .hg/hgrc
558 557 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
559 558
560 559 $ echo a >> a
561 560 $ hg --traceback commit -ma 2>&1 | egrep -v '^( +File| {4}[a-zA-Z(])'
562 561 exception from first failed import attempt:
563 562 Traceback (most recent call last):
564 563 ImportError: No module named somebogusmodule
565 564 exception from second failed import attempt:
566 565 Traceback (most recent call last):
567 566 ImportError: No module named hgext_importfail
568 567 Traceback (most recent call last):
569 568 Abort: precommit.importfail hook is invalid (import of "importfail" failed)
570 569 abort: precommit.importfail hook is invalid (import of "importfail" failed)
571 570
572 571 Issue1827: Hooks Update & Commit not completely post operation
573 572
574 573 commit and update hooks should run after command completion
575 574
576 575 $ echo '[hooks]' > .hg/hgrc
577 576 $ echo 'commit = hg id' >> .hg/hgrc
578 577 $ echo 'update = hg id' >> .hg/hgrc
579 578 $ echo bb > a
580 579 $ hg ci -ma
581 580 223eafe2750c tip
582 581 $ hg up 0
583 582 cb9a9f314b8b
584 583 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
585 584
586 585 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
587 586 that is passed to pre/post hooks
588 587
589 588 $ echo '[hooks]' > .hg/hgrc
590 589 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
591 590 $ hg id
592 591 cb9a9f314b8b
593 592 $ hg id --verbose
594 593 calling hook pre-identify: hooktests.verbosehook
595 594 verbose output from hook
596 595 cb9a9f314b8b
597 596
598 597 Ensure hooks can be prioritized
599 598
600 599 $ echo '[hooks]' > .hg/hgrc
601 600 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
602 601 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
603 602 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
604 603 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
605 604 $ hg id --verbose
606 605 calling hook pre-identify.b: hooktests.verbosehook
607 606 verbose output from hook
608 607 calling hook pre-identify.a: hooktests.verbosehook
609 608 verbose output from hook
610 609 calling hook pre-identify.c: hooktests.verbosehook
611 610 verbose output from hook
612 611 cb9a9f314b8b
613 612
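The ordering above is driven by priority.<hook> keys in the same [hooks]
section: higher values run first, and hooks without an explicit priority
default to 0, which is why b precedes a and c. A sketch with hypothetical
hook names:

    [hooks]
    pre-push.audit = python:hooktests.verbosehook
    pre-push.veto = python:hooktests.verbosehook
    # veto runs before audit: priority 1 beats the default of 0
    priority.pre-push.veto = 1
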
614 613 new tags must be visible in pretxncommit (issue3210)
615 614
616 615 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
617 616 $ hg tag -f foo
618 617 ['a', 'foo', 'tip']
619 618
620 619 new commits must be visible in pretxnchangegroup (issue3428)
621 620
622 621 $ cd ..
623 622 $ hg init to
624 623 $ echo '[hooks]' >> to/.hg/hgrc
625 624 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
626 625 $ echo a >> to/a
627 626 $ hg --cwd to ci -Ama
628 627 adding a
629 628 $ hg clone to from
630 629 updating to branch default
631 630 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
632 631 $ echo aa >> from/a
633 632 $ hg --cwd from ci -mb
634 633 $ hg --cwd from push
635 634 pushing to $TESTTMP/to (glob)
636 635 searching for changes
637 636 adding changesets
638 637 adding manifests
639 638 adding file changes
640 639 added 1 changesets with 1 changes to 1 files
641 640 changeset: 1:9836a07b9b9d
642 641 tag: tip
643 642 user: test
644 643 date: Thu Jan 01 00:00:00 1970 +0000
645 644 summary: b
646 645
@@ -1,453 +1,473 @@
1 1 $ cat >> $HGRCPATH << EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > [phases]
5 5 > # public changesets are not obsolete
6 6 > publish=false
7 7 > EOF
8 8 $ mkcommit() {
9 9 > echo "$1" > "$1"
10 10 > hg add "$1"
11 11 > hg ci -m "add $1"
12 12 > }
13 13 $ getid() {
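> # resolve a commit by description to its full node id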
14 14 > hg id --debug -ir "desc('$1')"
15 15 > }
16 16
17 $ cat > debugkeys.py <<EOF
18 > def reposetup(ui, repo):
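> # wrap the repo class so every listkeys call prints its namespace first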
19 > class debugkeysrepo(repo.__class__):
20 > def listkeys(self, namespace):
21 > ui.write('listkeys %s\n' % (namespace,))
22 > return super(debugkeysrepo, self).listkeys(namespace)
23 >
24 > if repo.local():
25 > repo.__class__ = debugkeysrepo
26 > EOF
17 27
18 28 $ hg init tmpa
19 29 $ cd tmpa
20 30
21 31 Killing a single changeset without replacement
22 32
23 33 $ mkcommit kill_me
24 34 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
25 35 $ hg debugobsolete
26 36 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
27 37 $ cd ..
28 38
29 39 Killing a single changeset with replacement
30 40
31 41 $ hg init tmpb
32 42 $ cd tmpb
33 43 $ mkcommit a
34 44 $ mkcommit b
35 45 $ mkcommit original_c
36 46 $ hg up "desc('b')"
37 47 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
38 48 $ mkcommit new_c
39 49 created new head
40 50 $ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
41 51 $ hg debugobsolete
42 52 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
43 53
44 54 do it again (it reads the obsstore before adding new changesets)
45 55
46 56 $ hg up '.^'
47 57 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
48 58 $ mkcommit new_2_c
49 59 created new head
50 60 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
51 61 $ hg debugobsolete
52 62 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
53 63 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
54 64
55 65 Register two markers with a missing node
56 66
57 67 $ hg up '.^'
58 68 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
59 69 $ mkcommit new_3_c
60 70 created new head
61 71 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
62 72 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
63 73 $ hg debugobsolete
64 74 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
65 75 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
66 76 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
67 77 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
68 78
69 79 Check that graphlog detects that a changeset is obsolete:
70 80
71 81 $ hg glog
72 82 @ changeset: 5:5601fb93a350
73 83 | tag: tip
74 84 | parent: 1:7c3bad9141dc
75 85 | user: test
76 86 | date: Thu Jan 01 00:00:00 1970 +0000
77 87 | summary: add new_3_c
78 88 |
79 89 o changeset: 1:7c3bad9141dc
80 90 | user: test
81 91 | date: Thu Jan 01 00:00:00 1970 +0000
82 92 | summary: add b
83 93 |
84 94 o changeset: 0:1f0dee641bb7
85 95 user: test
86 96 date: Thu Jan 01 00:00:00 1970 +0000
87 97 summary: add a
88 98
89 99
90 100 Check that public changesets are not treated as obsolete:
91 101
92 102 $ hg phase --public 2
93 103 $ hg --config 'extensions.graphlog=' glog
94 104 @ changeset: 5:5601fb93a350
95 105 | tag: tip
96 106 | parent: 1:7c3bad9141dc
97 107 | user: test
98 108 | date: Thu Jan 01 00:00:00 1970 +0000
99 109 | summary: add new_3_c
100 110 |
101 111 | o changeset: 2:245bde4270cd
102 112 |/ user: test
103 113 | date: Thu Jan 01 00:00:00 1970 +0000
104 114 | summary: add original_c
105 115 |
106 116 o changeset: 1:7c3bad9141dc
107 117 | user: test
108 118 | date: Thu Jan 01 00:00:00 1970 +0000
109 119 | summary: add b
110 120 |
111 121 o changeset: 0:1f0dee641bb7
112 122 user: test
113 123 date: Thu Jan 01 00:00:00 1970 +0000
114 124 summary: add a
115 125
116 126
117 127 $ cd ..
118 128
119 129 Exchange Test
120 130 ============================
121 131
122 132 Destination repo does not have any data
123 133 ---------------------------------------
124 134
125 135 Try to pull markers
126 136 (extinct changesets are excluded but markers are pulled)
127 137
128 138 $ hg init tmpc
129 139 $ cd tmpc
130 140 $ hg pull ../tmpb
131 141 pulling from ../tmpb
132 142 requesting all changes
133 143 adding changesets
134 144 adding manifests
135 145 adding file changes
136 146 added 4 changesets with 4 changes to 4 files (+1 heads)
137 147 (run 'hg heads' to see heads, 'hg merge' to merge)
138 148 $ hg debugobsolete
139 149 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
140 150 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
141 151 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
142 152 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
143 153
144 154 Rollback/transaction support
145 155
146 156 $ hg debugobsolete -d '1340 0' aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
147 157 $ hg debugobsolete
148 158 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
149 159 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
150 160 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
151 161 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
152 162 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb 0 {'date': '1340 0', 'user': 'test'}
153 163 $ hg rollback -n
154 164 repository tip rolled back to revision 3 (undo debugobsolete)
155 165 $ hg rollback
156 166 repository tip rolled back to revision 3 (undo debugobsolete)
157 167 $ hg debugobsolete
158 168 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
159 169 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
160 170 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
161 171 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
162 172
163 173 $ cd ..
164 174
165 175 Try to push markers
166 176
167 177 $ hg init tmpd
168 178 $ hg -R tmpb push tmpd
169 179 pushing to tmpd
170 180 searching for changes
171 181 adding changesets
172 182 adding manifests
173 183 adding file changes
174 184 added 4 changesets with 4 changes to 4 files (+1 heads)
175 185 $ hg -R tmpd debugobsolete
176 186 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
177 187 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
178 188 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
179 189 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
180 190
191 Check that obsolete keys are exchanged only if the source has an obsstore
192
193 $ hg init empty
194 $ hg --config extensions.debugkeys=debugkeys.py -R empty push tmpd
195 pushing to tmpd
196 no changes found
197 listkeys phases
198 listkeys bookmarks
199 [1]
200
181 201 clone support
182 202 (markers are copied and extinct changesets are included to allow hardlinks)
183 203
184 204 $ hg clone tmpb clone-dest
185 205 updating to branch default
186 206 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
187 207 $ hg -R clone-dest log -G --hidden
188 208 @ changeset: 5:5601fb93a350
189 209 | tag: tip
190 210 | parent: 1:7c3bad9141dc
191 211 | user: test
192 212 | date: Thu Jan 01 00:00:00 1970 +0000
193 213 | summary: add new_3_c
194 214 |
195 215 | x changeset: 4:ca819180edb9
196 216 |/ parent: 1:7c3bad9141dc
197 217 | user: test
198 218 | date: Thu Jan 01 00:00:00 1970 +0000
199 219 | summary: add new_2_c
200 220 |
201 221 | x changeset: 3:cdbce2fbb163
202 222 |/ parent: 1:7c3bad9141dc
203 223 | user: test
204 224 | date: Thu Jan 01 00:00:00 1970 +0000
205 225 | summary: add new_c
206 226 |
207 227 | o changeset: 2:245bde4270cd
208 228 |/ user: test
209 229 | date: Thu Jan 01 00:00:00 1970 +0000
210 230 | summary: add original_c
211 231 |
212 232 o changeset: 1:7c3bad9141dc
213 233 | user: test
214 234 | date: Thu Jan 01 00:00:00 1970 +0000
215 235 | summary: add b
216 236 |
217 237 o changeset: 0:1f0dee641bb7
218 238 user: test
219 239 date: Thu Jan 01 00:00:00 1970 +0000
220 240 summary: add a
221 241
222 242 $ hg -R clone-dest debugobsolete
223 243 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
224 244 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
225 245 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
226 246 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
227 247
228 248
229 249 Destination repo has existing data
230 250 ---------------------------------------
231 251
232 252 On pull
233 253
234 254 $ hg init tmpe
235 255 $ cd tmpe
236 256 $ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
237 257 $ hg pull ../tmpb
238 258 pulling from ../tmpb
239 259 requesting all changes
240 260 adding changesets
241 261 adding manifests
242 262 adding file changes
243 263 added 4 changesets with 4 changes to 4 files (+1 heads)
244 264 (run 'hg heads' to see heads, 'hg merge' to merge)
245 265 $ hg debugobsolete
246 266 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
247 267 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
248 268 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
249 269 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
250 270 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
251 271
252 272
253 273 On push
254 274
255 275 $ hg push ../tmpc
256 276 pushing to ../tmpc
257 277 searching for changes
258 278 no changes found
259 279 [1]
260 280 $ hg -R ../tmpc debugobsolete
261 281 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
262 282 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
263 283 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
264 284 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
265 285 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
266 286
267 287 detect outgoing obsolete and unstable changesets
268 288 ---------------------------------------
269 289
270 290
271 291 $ hg glog
272 292 o changeset: 3:5601fb93a350
273 293 | tag: tip
274 294 | parent: 1:7c3bad9141dc
275 295 | user: test
276 296 | date: Thu Jan 01 00:00:00 1970 +0000
277 297 | summary: add new_3_c
278 298 |
279 299 | o changeset: 2:245bde4270cd
280 300 |/ user: test
281 301 | date: Thu Jan 01 00:00:00 1970 +0000
282 302 | summary: add original_c
283 303 |
284 304 o changeset: 1:7c3bad9141dc
285 305 | user: test
286 306 | date: Thu Jan 01 00:00:00 1970 +0000
287 307 | summary: add b
288 308 |
289 309 o changeset: 0:1f0dee641bb7
290 310 user: test
291 311 date: Thu Jan 01 00:00:00 1970 +0000
292 312 summary: add a
293 313
294 314 $ hg up 'desc("new_3_c")'
295 315 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
296 316 $ mkcommit original_d
297 317 $ mkcommit original_e
298 318 $ hg debugobsolete `getid original_d` -d '0 0'
299 319 $ hg log -r 'obsolete()'
300 320 changeset: 4:7c694bff0650
301 321 user: test
302 322 date: Thu Jan 01 00:00:00 1970 +0000
303 323 summary: add original_d
304 324
305 325 $ hg glog -r '::unstable()'
306 326 @ changeset: 5:6e572121998e
307 327 | tag: tip
308 328 | user: test
309 329 | date: Thu Jan 01 00:00:00 1970 +0000
310 330 | summary: add original_e
311 331 |
312 332 x changeset: 4:7c694bff0650
313 333 | user: test
314 334 | date: Thu Jan 01 00:00:00 1970 +0000
315 335 | summary: add original_d
316 336 |
317 337 o changeset: 3:5601fb93a350
318 338 | parent: 1:7c3bad9141dc
319 339 | user: test
320 340 | date: Thu Jan 01 00:00:00 1970 +0000
321 341 | summary: add new_3_c
322 342 |
323 343 o changeset: 1:7c3bad9141dc
324 344 | user: test
325 345 | date: Thu Jan 01 00:00:00 1970 +0000
326 346 | summary: add b
327 347 |
328 348 o changeset: 0:1f0dee641bb7
329 349 user: test
330 350 date: Thu Jan 01 00:00:00 1970 +0000
331 351 summary: add a
332 352
333 353
334 354 refuse to push an obsolete changeset
335 355
336 356 $ hg push ../tmpc/ -r 'desc("original_d")'
337 357 pushing to ../tmpc/
338 358 searching for changes
339 359 abort: push includes an obsolete changeset: 7c694bff0650!
340 360 [255]
341 361
342 362 refuse to push an unstable changeset
343 363
344 364 $ hg push ../tmpc/
345 365 pushing to ../tmpc/
346 366 searching for changes
347 367 abort: push includes an unstable changeset: 6e572121998e!
348 368 [255]
349 369
350 370 Test that extinct changesets are properly detected (the set is empty here: obsolete original_d still has the non-obsolete descendant original_e)
351 371
352 372 $ hg log -r 'extinct()'
353 373
354 374 Don't try to push extinct changesets
355 375
356 376 $ hg init ../tmpf
357 377 $ hg out ../tmpf
358 378 comparing with ../tmpf
359 379 searching for changes
360 380 changeset: 0:1f0dee641bb7
361 381 user: test
362 382 date: Thu Jan 01 00:00:00 1970 +0000
363 383 summary: add a
364 384
365 385 changeset: 1:7c3bad9141dc
366 386 user: test
367 387 date: Thu Jan 01 00:00:00 1970 +0000
368 388 summary: add b
369 389
370 390 changeset: 2:245bde4270cd
371 391 user: test
372 392 date: Thu Jan 01 00:00:00 1970 +0000
373 393 summary: add original_c
374 394
375 395 changeset: 3:5601fb93a350
376 396 parent: 1:7c3bad9141dc
377 397 user: test
378 398 date: Thu Jan 01 00:00:00 1970 +0000
379 399 summary: add new_3_c
380 400
381 401 changeset: 4:7c694bff0650
382 402 user: test
383 403 date: Thu Jan 01 00:00:00 1970 +0000
384 404 summary: add original_d
385 405
386 406 changeset: 5:6e572121998e
387 407 tag: tip
388 408 user: test
389 409 date: Thu Jan 01 00:00:00 1970 +0000
390 410 summary: add original_e
391 411
392 412 $ hg push ../tmpf -f # -f because we push unstable changesets too
393 413 pushing to ../tmpf
394 414 searching for changes
395 415 adding changesets
396 416 adding manifests
397 417 adding file changes
398 418 added 6 changesets with 6 changes to 6 files (+1 heads)
399 419
400 420 no warning displayed
401 421
402 422 $ hg push ../tmpf
403 423 pushing to ../tmpf
404 424 searching for changes
405 425 no changes found
406 426 [1]
407 427
408 428 Do not warn about a new head when the new head is a successor of a remote one
409 429
410 430 $ hg glog
411 431 @ changeset: 5:6e572121998e
412 432 | tag: tip
413 433 | user: test
414 434 | date: Thu Jan 01 00:00:00 1970 +0000
415 435 | summary: add original_e
416 436 |
417 437 x changeset: 4:7c694bff0650
418 438 | user: test
419 439 | date: Thu Jan 01 00:00:00 1970 +0000
420 440 | summary: add original_d
421 441 |
422 442 o changeset: 3:5601fb93a350
423 443 | parent: 1:7c3bad9141dc
424 444 | user: test
425 445 | date: Thu Jan 01 00:00:00 1970 +0000
426 446 | summary: add new_3_c
427 447 |
428 448 | o changeset: 2:245bde4270cd
429 449 |/ user: test
430 450 | date: Thu Jan 01 00:00:00 1970 +0000
431 451 | summary: add original_c
432 452 |
433 453 o changeset: 1:7c3bad9141dc
434 454 | user: test
435 455 | date: Thu Jan 01 00:00:00 1970 +0000
436 456 | summary: add b
437 457 |
438 458 o changeset: 0:1f0dee641bb7
439 459 user: test
440 460 date: Thu Jan 01 00:00:00 1970 +0000
441 461 summary: add a
442 462
443 463 $ hg up -q 'desc(new_3_c)'
444 464 $ mkcommit obsolete_e
445 465 created new head
446 466 $ hg debugobsolete `getid 'original_e'` `getid 'obsolete_e'`
447 467 $ hg push ../tmpf
448 468 pushing to ../tmpf
449 469 searching for changes
450 470 adding changesets
451 471 adding manifests
452 472 adding file changes
453 473 added 1 changesets with 1 changes to 1 files (+1 heads)