##// END OF EJS Templates
push: prevent moving phases outside of the pushed subset
Pierre-Yves David -
r15956:5653f2d1 stable
parent child Browse files
Show More
@@ -1,2281 +1,2313 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39 self._dirtyphases = False
40 40 # A list of callback to shape the phase if no data were found.
41 41 # Callback are in the form: func(repo, roots) --> processed root.
42 42 # This list it to be filled by extension during repo setup
43 43 self._phasedefaults = []
44 44
45 45 try:
46 46 self.ui.readconfig(self.join("hgrc"), self.root)
47 47 extensions.loadall(self.ui)
48 48 except IOError:
49 49 pass
50 50
51 51 if not os.path.isdir(self.path):
52 52 if create:
53 53 if not os.path.exists(path):
54 54 util.makedirs(path)
55 55 util.makedir(self.path, notindexed=True)
56 56 requirements = ["revlogv1"]
57 57 if self.ui.configbool('format', 'usestore', True):
58 58 os.mkdir(os.path.join(self.path, "store"))
59 59 requirements.append("store")
60 60 if self.ui.configbool('format', 'usefncache', True):
61 61 requirements.append("fncache")
62 62 if self.ui.configbool('format', 'dotencode', True):
63 63 requirements.append('dotencode')
64 64 # create an invalid changelog
65 65 self.opener.append(
66 66 "00changelog.i",
67 67 '\0\0\0\2' # represents revlogv2
68 68 ' dummy changelog to prevent using the old repo layout'
69 69 )
70 70 if self.ui.configbool('format', 'generaldelta', False):
71 71 requirements.append("generaldelta")
72 72 requirements = set(requirements)
73 73 else:
74 74 raise error.RepoError(_("repository %s not found") % path)
75 75 elif create:
76 76 raise error.RepoError(_("repository %s already exists") % path)
77 77 else:
78 78 try:
79 79 requirements = scmutil.readrequires(self.opener, self.supported)
80 80 except IOError, inst:
81 81 if inst.errno != errno.ENOENT:
82 82 raise
83 83 requirements = set()
84 84
85 85 self.sharedpath = self.path
86 86 try:
87 87 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
88 88 if not os.path.exists(s):
89 89 raise error.RepoError(
90 90 _('.hg/sharedpath points to nonexistent directory %s') % s)
91 91 self.sharedpath = s
92 92 except IOError, inst:
93 93 if inst.errno != errno.ENOENT:
94 94 raise
95 95
96 96 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
97 97 self.spath = self.store.path
98 98 self.sopener = self.store.opener
99 99 self.sjoin = self.store.join
100 100 self.opener.createmode = self.store.createmode
101 101 self._applyrequirements(requirements)
102 102 if create:
103 103 self._writerequirements()
104 104
105 105
106 106 self._branchcache = None
107 107 self._branchcachetip = None
108 108 self.filterpats = {}
109 109 self._datafilters = {}
110 110 self._transref = self._lockref = self._wlockref = None
111 111
112 112 # A cache for various files under .hg/ that tracks file changes,
113 113 # (used by the filecache decorator)
114 114 #
115 115 # Maps a property name to its util.filecacheentry
116 116 self._filecache = {}
117 117
118 118 def _applyrequirements(self, requirements):
119 119 self.requirements = requirements
120 120 openerreqs = set(('revlogv1', 'generaldelta'))
121 121 self.sopener.options = dict((r, 1) for r in requirements
122 122 if r in openerreqs)
123 123
124 124 def _writerequirements(self):
125 125 reqfile = self.opener("requires", "w")
126 126 for r in self.requirements:
127 127 reqfile.write("%s\n" % r)
128 128 reqfile.close()
129 129
130 130 def _checknested(self, path):
131 131 """Determine if path is a legal nested repository."""
132 132 if not path.startswith(self.root):
133 133 return False
134 134 subpath = path[len(self.root) + 1:]
135 135 normsubpath = util.pconvert(subpath)
136 136
137 137 # XXX: Checking against the current working copy is wrong in
138 138 # the sense that it can reject things like
139 139 #
140 140 # $ hg cat -r 10 sub/x.txt
141 141 #
142 142 # if sub/ is no longer a subrepository in the working copy
143 143 # parent revision.
144 144 #
145 145 # However, it can of course also allow things that would have
146 146 # been rejected before, such as the above cat command if sub/
147 147 # is a subrepository now, but was a normal directory before.
148 148 # The old path auditor would have rejected by mistake since it
149 149 # panics when it sees sub/.hg/.
150 150 #
151 151 # All in all, checking against the working copy seems sensible
152 152 # since we want to prevent access to nested repositories on
153 153 # the filesystem *now*.
154 154 ctx = self[None]
155 155 parts = util.splitpath(subpath)
156 156 while parts:
157 157 prefix = '/'.join(parts)
158 158 if prefix in ctx.substate:
159 159 if prefix == normsubpath:
160 160 return True
161 161 else:
162 162 sub = ctx.sub(prefix)
163 163 return sub.checknested(subpath[len(prefix) + 1:])
164 164 else:
165 165 parts.pop()
166 166 return False
167 167
168 168 @filecache('bookmarks')
169 169 def _bookmarks(self):
170 170 return bookmarks.read(self)
171 171
172 172 @filecache('bookmarks.current')
173 173 def _bookmarkcurrent(self):
174 174 return bookmarks.readcurrent(self)
175 175
176 176 def _writebookmarks(self, marks):
177 177 bookmarks.write(self)
178 178
179 179 @filecache('phaseroots')
180 180 def _phaseroots(self):
181 181 self._dirtyphases = False
182 182 phaseroots = phases.readroots(self)
183 183 phases.filterunknown(self, phaseroots)
184 184 return phaseroots
185 185
186 186 @propertycache
187 187 def _phaserev(self):
188 188 cache = [phases.public] * len(self)
189 189 for phase in phases.trackedphases:
190 190 roots = map(self.changelog.rev, self._phaseroots[phase])
191 191 if roots:
192 192 for rev in roots:
193 193 cache[rev] = phase
194 194 for rev in self.changelog.descendants(*roots):
195 195 cache[rev] = phase
196 196 return cache
197 197
198 198 @filecache('00changelog.i', True)
199 199 def changelog(self):
200 200 c = changelog.changelog(self.sopener)
201 201 if 'HG_PENDING' in os.environ:
202 202 p = os.environ['HG_PENDING']
203 203 if p.startswith(self.root):
204 204 c.readpending('00changelog.i.a')
205 205 return c
206 206
207 207 @filecache('00manifest.i', True)
208 208 def manifest(self):
209 209 return manifest.manifest(self.sopener)
210 210
211 211 @filecache('dirstate')
212 212 def dirstate(self):
213 213 warned = [0]
214 214 def validate(node):
215 215 try:
216 216 self.changelog.rev(node)
217 217 return node
218 218 except error.LookupError:
219 219 if not warned[0]:
220 220 warned[0] = True
221 221 self.ui.warn(_("warning: ignoring unknown"
222 222 " working parent %s!\n") % short(node))
223 223 return nullid
224 224
225 225 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
226 226
227 227 def __getitem__(self, changeid):
228 228 if changeid is None:
229 229 return context.workingctx(self)
230 230 return context.changectx(self, changeid)
231 231
232 232 def __contains__(self, changeid):
233 233 try:
234 234 return bool(self.lookup(changeid))
235 235 except error.RepoLookupError:
236 236 return False
237 237
238 238 def __nonzero__(self):
239 239 return True
240 240
241 241 def __len__(self):
242 242 return len(self.changelog)
243 243
244 244 def __iter__(self):
245 245 for i in xrange(len(self)):
246 246 yield i
247 247
248 248 def revs(self, expr, *args):
249 249 '''Return a list of revisions matching the given revset'''
250 250 expr = revset.formatspec(expr, *args)
251 251 m = revset.match(None, expr)
252 252 return [r for r in m(self, range(len(self)))]
253 253
254 254 def set(self, expr, *args):
255 255 '''
256 256 Yield a context for each matching revision, after doing arg
257 257 replacement via revset.formatspec
258 258 '''
259 259 for r in self.revs(expr, *args):
260 260 yield self[r]
261 261
262 262 def url(self):
263 263 return 'file:' + self.root
264 264
265 265 def hook(self, name, throw=False, **args):
266 266 return hook.hook(self.ui, self, name, throw, **args)
267 267
268 268 tag_disallowed = ':\r\n'
269 269
270 270 def _tag(self, names, node, message, local, user, date, extra={}):
271 271 if isinstance(names, str):
272 272 allchars = names
273 273 names = (names,)
274 274 else:
275 275 allchars = ''.join(names)
276 276 for c in self.tag_disallowed:
277 277 if c in allchars:
278 278 raise util.Abort(_('%r cannot be used in a tag name') % c)
279 279
280 280 branches = self.branchmap()
281 281 for name in names:
282 282 self.hook('pretag', throw=True, node=hex(node), tag=name,
283 283 local=local)
284 284 if name in branches:
285 285 self.ui.warn(_("warning: tag %s conflicts with existing"
286 286 " branch name\n") % name)
287 287
288 288 def writetags(fp, names, munge, prevtags):
289 289 fp.seek(0, 2)
290 290 if prevtags and prevtags[-1] != '\n':
291 291 fp.write('\n')
292 292 for name in names:
293 293 m = munge and munge(name) or name
294 294 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
295 295 old = self.tags().get(name, nullid)
296 296 fp.write('%s %s\n' % (hex(old), m))
297 297 fp.write('%s %s\n' % (hex(node), m))
298 298 fp.close()
299 299
300 300 prevtags = ''
301 301 if local:
302 302 try:
303 303 fp = self.opener('localtags', 'r+')
304 304 except IOError:
305 305 fp = self.opener('localtags', 'a')
306 306 else:
307 307 prevtags = fp.read()
308 308
309 309 # local tags are stored in the current charset
310 310 writetags(fp, names, None, prevtags)
311 311 for name in names:
312 312 self.hook('tag', node=hex(node), tag=name, local=local)
313 313 return
314 314
315 315 try:
316 316 fp = self.wfile('.hgtags', 'rb+')
317 317 except IOError, e:
318 318 if e.errno != errno.ENOENT:
319 319 raise
320 320 fp = self.wfile('.hgtags', 'ab')
321 321 else:
322 322 prevtags = fp.read()
323 323
324 324 # committed tags are stored in UTF-8
325 325 writetags(fp, names, encoding.fromlocal, prevtags)
326 326
327 327 fp.close()
328 328
329 329 self.invalidatecaches()
330 330
331 331 if '.hgtags' not in self.dirstate:
332 332 self[None].add(['.hgtags'])
333 333
334 334 m = matchmod.exact(self.root, '', ['.hgtags'])
335 335 tagnode = self.commit(message, user, date, extra=extra, match=m)
336 336
337 337 for name in names:
338 338 self.hook('tag', node=hex(node), tag=name, local=local)
339 339
340 340 return tagnode
341 341
342 342 def tag(self, names, node, message, local, user, date):
343 343 '''tag a revision with one or more symbolic names.
344 344
345 345 names is a list of strings or, when adding a single tag, names may be a
346 346 string.
347 347
348 348 if local is True, the tags are stored in a per-repository file.
349 349 otherwise, they are stored in the .hgtags file, and a new
350 350 changeset is committed with the change.
351 351
352 352 keyword arguments:
353 353
354 354 local: whether to store tags in non-version-controlled file
355 355 (default False)
356 356
357 357 message: commit message to use if committing
358 358
359 359 user: name of user to use if committing
360 360
361 361 date: date tuple to use if committing'''
362 362
363 363 if not local:
364 364 for x in self.status()[:5]:
365 365 if '.hgtags' in x:
366 366 raise util.Abort(_('working copy of .hgtags is changed '
367 367 '(please commit .hgtags manually)'))
368 368
369 369 self.tags() # instantiate the cache
370 370 self._tag(names, node, message, local, user, date)
371 371
372 372 @propertycache
373 373 def _tagscache(self):
374 374 '''Returns a tagscache object that contains various tags related caches.'''
375 375
376 376 # This simplifies its cache management by having one decorated
377 377 # function (this one) and the rest simply fetch things from it.
378 378 class tagscache(object):
379 379 def __init__(self):
380 380 # These two define the set of tags for this repository. tags
381 381 # maps tag name to node; tagtypes maps tag name to 'global' or
382 382 # 'local'. (Global tags are defined by .hgtags across all
383 383 # heads, and local tags are defined in .hg/localtags.)
384 384 # They constitute the in-memory cache of tags.
385 385 self.tags = self.tagtypes = None
386 386
387 387 self.nodetagscache = self.tagslist = None
388 388
389 389 cache = tagscache()
390 390 cache.tags, cache.tagtypes = self._findtags()
391 391
392 392 return cache
393 393
394 394 def tags(self):
395 395 '''return a mapping of tag to node'''
396 396 return self._tagscache.tags
397 397
398 398 def _findtags(self):
399 399 '''Do the hard work of finding tags. Return a pair of dicts
400 400 (tags, tagtypes) where tags maps tag name to node, and tagtypes
401 401 maps tag name to a string like \'global\' or \'local\'.
402 402 Subclasses or extensions are free to add their own tags, but
403 403 should be aware that the returned dicts will be retained for the
404 404 duration of the localrepo object.'''
405 405
406 406 # XXX what tagtype should subclasses/extensions use? Currently
407 407 # mq and bookmarks add tags, but do not set the tagtype at all.
408 408 # Should each extension invent its own tag type? Should there
409 409 # be one tagtype for all such "virtual" tags? Or is the status
410 410 # quo fine?
411 411
412 412 alltags = {} # map tag name to (node, hist)
413 413 tagtypes = {}
414 414
415 415 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
416 416 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
417 417
418 418 # Build the return dicts. Have to re-encode tag names because
419 419 # the tags module always uses UTF-8 (in order not to lose info
420 420 # writing to the cache), but the rest of Mercurial wants them in
421 421 # local encoding.
422 422 tags = {}
423 423 for (name, (node, hist)) in alltags.iteritems():
424 424 if node != nullid:
425 425 try:
426 426 # ignore tags to unknown nodes
427 427 self.changelog.lookup(node)
428 428 tags[encoding.tolocal(name)] = node
429 429 except error.LookupError:
430 430 pass
431 431 tags['tip'] = self.changelog.tip()
432 432 tagtypes = dict([(encoding.tolocal(name), value)
433 433 for (name, value) in tagtypes.iteritems()])
434 434 return (tags, tagtypes)
435 435
436 436 def tagtype(self, tagname):
437 437 '''
438 438 return the type of the given tag. result can be:
439 439
440 440 'local' : a local tag
441 441 'global' : a global tag
442 442 None : tag does not exist
443 443 '''
444 444
445 445 return self._tagscache.tagtypes.get(tagname)
446 446
447 447 def tagslist(self):
448 448 '''return a list of tags ordered by revision'''
449 449 if not self._tagscache.tagslist:
450 450 l = []
451 451 for t, n in self.tags().iteritems():
452 452 r = self.changelog.rev(n)
453 453 l.append((r, t, n))
454 454 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
455 455
456 456 return self._tagscache.tagslist
457 457
458 458 def nodetags(self, node):
459 459 '''return the tags associated with a node'''
460 460 if not self._tagscache.nodetagscache:
461 461 nodetagscache = {}
462 462 for t, n in self.tags().iteritems():
463 463 nodetagscache.setdefault(n, []).append(t)
464 464 for tags in nodetagscache.itervalues():
465 465 tags.sort()
466 466 self._tagscache.nodetagscache = nodetagscache
467 467 return self._tagscache.nodetagscache.get(node, [])
468 468
469 469 def nodebookmarks(self, node):
470 470 marks = []
471 471 for bookmark, n in self._bookmarks.iteritems():
472 472 if n == node:
473 473 marks.append(bookmark)
474 474 return sorted(marks)
475 475
476 476 def _branchtags(self, partial, lrev):
477 477 # TODO: rename this function?
478 478 tiprev = len(self) - 1
479 479 if lrev != tiprev:
480 480 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
481 481 self._updatebranchcache(partial, ctxgen)
482 482 self._writebranchcache(partial, self.changelog.tip(), tiprev)
483 483
484 484 return partial
485 485
486 486 def updatebranchcache(self):
487 487 tip = self.changelog.tip()
488 488 if self._branchcache is not None and self._branchcachetip == tip:
489 489 return
490 490
491 491 oldtip = self._branchcachetip
492 492 self._branchcachetip = tip
493 493 if oldtip is None or oldtip not in self.changelog.nodemap:
494 494 partial, last, lrev = self._readbranchcache()
495 495 else:
496 496 lrev = self.changelog.rev(oldtip)
497 497 partial = self._branchcache
498 498
499 499 self._branchtags(partial, lrev)
500 500 # this private cache holds all heads (not just tips)
501 501 self._branchcache = partial
502 502
503 503 def branchmap(self):
504 504 '''returns a dictionary {branch: [branchheads]}'''
505 505 self.updatebranchcache()
506 506 return self._branchcache
507 507
508 508 def branchtags(self):
509 509 '''return a dict where branch names map to the tipmost head of
510 510 the branch, open heads come before closed'''
511 511 bt = {}
512 512 for bn, heads in self.branchmap().iteritems():
513 513 tip = heads[-1]
514 514 for h in reversed(heads):
515 515 if 'close' not in self.changelog.read(h)[5]:
516 516 tip = h
517 517 break
518 518 bt[bn] = tip
519 519 return bt
520 520
521 521 def _readbranchcache(self):
522 522 partial = {}
523 523 try:
524 524 f = self.opener("cache/branchheads")
525 525 lines = f.read().split('\n')
526 526 f.close()
527 527 except (IOError, OSError):
528 528 return {}, nullid, nullrev
529 529
530 530 try:
531 531 last, lrev = lines.pop(0).split(" ", 1)
532 532 last, lrev = bin(last), int(lrev)
533 533 if lrev >= len(self) or self[lrev].node() != last:
534 534 # invalidate the cache
535 535 raise ValueError('invalidating branch cache (tip differs)')
536 536 for l in lines:
537 537 if not l:
538 538 continue
539 539 node, label = l.split(" ", 1)
540 540 label = encoding.tolocal(label.strip())
541 541 partial.setdefault(label, []).append(bin(node))
542 542 except KeyboardInterrupt:
543 543 raise
544 544 except Exception, inst:
545 545 if self.ui.debugflag:
546 546 self.ui.warn(str(inst), '\n')
547 547 partial, last, lrev = {}, nullid, nullrev
548 548 return partial, last, lrev
549 549
550 550 def _writebranchcache(self, branches, tip, tiprev):
551 551 try:
552 552 f = self.opener("cache/branchheads", "w", atomictemp=True)
553 553 f.write("%s %s\n" % (hex(tip), tiprev))
554 554 for label, nodes in branches.iteritems():
555 555 for node in nodes:
556 556 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
557 557 f.close()
558 558 except (IOError, OSError):
559 559 pass
560 560
561 561 def _updatebranchcache(self, partial, ctxgen):
562 562 # collect new branch entries
563 563 newbranches = {}
564 564 for c in ctxgen:
565 565 newbranches.setdefault(c.branch(), []).append(c.node())
566 566 # if older branchheads are reachable from new ones, they aren't
567 567 # really branchheads. Note checking parents is insufficient:
568 568 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
569 569 for branch, newnodes in newbranches.iteritems():
570 570 bheads = partial.setdefault(branch, [])
571 571 bheads.extend(newnodes)
572 572 if len(bheads) <= 1:
573 573 continue
574 574 bheads = sorted(bheads, key=lambda x: self[x].rev())
575 575 # starting from tip means fewer passes over reachable
576 576 while newnodes:
577 577 latest = newnodes.pop()
578 578 if latest not in bheads:
579 579 continue
580 580 minbhrev = self[bheads[0]].node()
581 581 reachable = self.changelog.reachable(latest, minbhrev)
582 582 reachable.remove(latest)
583 583 if reachable:
584 584 bheads = [b for b in bheads if b not in reachable]
585 585 partial[branch] = bheads
586 586
587 587 def lookup(self, key):
588 588 if isinstance(key, int):
589 589 return self.changelog.node(key)
590 590 elif key == '.':
591 591 return self.dirstate.p1()
592 592 elif key == 'null':
593 593 return nullid
594 594 elif key == 'tip':
595 595 return self.changelog.tip()
596 596 n = self.changelog._match(key)
597 597 if n:
598 598 return n
599 599 if key in self._bookmarks:
600 600 return self._bookmarks[key]
601 601 if key in self.tags():
602 602 return self.tags()[key]
603 603 if key in self.branchtags():
604 604 return self.branchtags()[key]
605 605 n = self.changelog._partialmatch(key)
606 606 if n:
607 607 return n
608 608
609 609 # can't find key, check if it might have come from damaged dirstate
610 610 if key in self.dirstate.parents():
611 611 raise error.Abort(_("working directory has unknown parent '%s'!")
612 612 % short(key))
613 613 try:
614 614 if len(key) == 20:
615 615 key = hex(key)
616 616 except TypeError:
617 617 pass
618 618 raise error.RepoLookupError(_("unknown revision '%s'") % key)
619 619
620 620 def lookupbranch(self, key, remote=None):
621 621 repo = remote or self
622 622 if key in repo.branchmap():
623 623 return key
624 624
625 625 repo = (remote and remote.local()) and remote or self
626 626 return repo[key].branch()
627 627
628 628 def known(self, nodes):
629 629 nm = self.changelog.nodemap
630 630 result = []
631 631 for n in nodes:
632 632 r = nm.get(n)
633 633 resp = not (r is None or self._phaserev[r] >= phases.secret)
634 634 result.append(resp)
635 635 return result
636 636
637 637 def local(self):
638 638 return self
639 639
640 640 def cancopy(self):
641 641 return (repo.repository.cancopy(self)
642 642 and not self._phaseroots[phases.secret])
643 643
644 644 def join(self, f):
645 645 return os.path.join(self.path, f)
646 646
647 647 def wjoin(self, f):
648 648 return os.path.join(self.root, f)
649 649
650 650 def file(self, f):
651 651 if f[0] == '/':
652 652 f = f[1:]
653 653 return filelog.filelog(self.sopener, f)
654 654
655 655 def changectx(self, changeid):
656 656 return self[changeid]
657 657
658 658 def parents(self, changeid=None):
659 659 '''get list of changectxs for parents of changeid'''
660 660 return self[changeid].parents()
661 661
662 662 def filectx(self, path, changeid=None, fileid=None):
663 663 """changeid can be a changeset revision, node, or tag.
664 664 fileid can be a file revision or node."""
665 665 return context.filectx(self, path, changeid, fileid)
666 666
667 667 def getcwd(self):
668 668 return self.dirstate.getcwd()
669 669
670 670 def pathto(self, f, cwd=None):
671 671 return self.dirstate.pathto(f, cwd)
672 672
673 673 def wfile(self, f, mode='r'):
674 674 return self.wopener(f, mode)
675 675
676 676 def _link(self, f):
677 677 return os.path.islink(self.wjoin(f))
678 678
679 679 def _loadfilter(self, filter):
680 680 if filter not in self.filterpats:
681 681 l = []
682 682 for pat, cmd in self.ui.configitems(filter):
683 683 if cmd == '!':
684 684 continue
685 685 mf = matchmod.match(self.root, '', [pat])
686 686 fn = None
687 687 params = cmd
688 688 for name, filterfn in self._datafilters.iteritems():
689 689 if cmd.startswith(name):
690 690 fn = filterfn
691 691 params = cmd[len(name):].lstrip()
692 692 break
693 693 if not fn:
694 694 fn = lambda s, c, **kwargs: util.filter(s, c)
695 695 # Wrap old filters not supporting keyword arguments
696 696 if not inspect.getargspec(fn)[2]:
697 697 oldfn = fn
698 698 fn = lambda s, c, **kwargs: oldfn(s, c)
699 699 l.append((mf, fn, params))
700 700 self.filterpats[filter] = l
701 701 return self.filterpats[filter]
702 702
703 703 def _filter(self, filterpats, filename, data):
704 704 for mf, fn, cmd in filterpats:
705 705 if mf(filename):
706 706 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
707 707 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
708 708 break
709 709
710 710 return data
711 711
712 712 @propertycache
713 713 def _encodefilterpats(self):
714 714 return self._loadfilter('encode')
715 715
716 716 @propertycache
717 717 def _decodefilterpats(self):
718 718 return self._loadfilter('decode')
719 719
720 720 def adddatafilter(self, name, filter):
721 721 self._datafilters[name] = filter
722 722
723 723 def wread(self, filename):
724 724 if self._link(filename):
725 725 data = os.readlink(self.wjoin(filename))
726 726 else:
727 727 data = self.wopener.read(filename)
728 728 return self._filter(self._encodefilterpats, filename, data)
729 729
730 730 def wwrite(self, filename, data, flags):
731 731 data = self._filter(self._decodefilterpats, filename, data)
732 732 if 'l' in flags:
733 733 self.wopener.symlink(data, filename)
734 734 else:
735 735 self.wopener.write(filename, data)
736 736 if 'x' in flags:
737 737 util.setflags(self.wjoin(filename), False, True)
738 738
739 739 def wwritedata(self, filename, data):
740 740 return self._filter(self._decodefilterpats, filename, data)
741 741
742 742 def transaction(self, desc):
743 743 tr = self._transref and self._transref() or None
744 744 if tr and tr.running():
745 745 return tr.nest()
746 746
747 747 # abort here if the journal already exists
748 748 if os.path.exists(self.sjoin("journal")):
749 749 raise error.RepoError(
750 750 _("abandoned transaction found - run hg recover"))
751 751
752 752 journalfiles = self._writejournal(desc)
753 753 renames = [(x, undoname(x)) for x in journalfiles]
754 754
755 755 tr = transaction.transaction(self.ui.warn, self.sopener,
756 756 self.sjoin("journal"),
757 757 aftertrans(renames),
758 758 self.store.createmode)
759 759 self._transref = weakref.ref(tr)
760 760 return tr
761 761
762 762 def _writejournal(self, desc):
763 763 # save dirstate for rollback
764 764 try:
765 765 ds = self.opener.read("dirstate")
766 766 except IOError:
767 767 ds = ""
768 768 self.opener.write("journal.dirstate", ds)
769 769 self.opener.write("journal.branch",
770 770 encoding.fromlocal(self.dirstate.branch()))
771 771 self.opener.write("journal.desc",
772 772 "%d\n%s\n" % (len(self), desc))
773 773
774 774 bkname = self.join('bookmarks')
775 775 if os.path.exists(bkname):
776 776 util.copyfile(bkname, self.join('journal.bookmarks'))
777 777 else:
778 778 self.opener.write('journal.bookmarks', '')
779 779 phasesname = self.sjoin('phaseroots')
780 780 if os.path.exists(phasesname):
781 781 util.copyfile(phasesname, self.sjoin('journal.phaseroots'))
782 782 else:
783 783 self.sopener.write('journal.phaseroots', '')
784 784
785 785 return (self.sjoin('journal'), self.join('journal.dirstate'),
786 786 self.join('journal.branch'), self.join('journal.desc'),
787 787 self.join('journal.bookmarks'),
788 788 self.sjoin('journal.phaseroots'))
789 789
790 790 def recover(self):
791 791 lock = self.lock()
792 792 try:
793 793 if os.path.exists(self.sjoin("journal")):
794 794 self.ui.status(_("rolling back interrupted transaction\n"))
795 795 transaction.rollback(self.sopener, self.sjoin("journal"),
796 796 self.ui.warn)
797 797 self.invalidate()
798 798 return True
799 799 else:
800 800 self.ui.warn(_("no interrupted transaction available\n"))
801 801 return False
802 802 finally:
803 803 lock.release()
804 804
805 805 def rollback(self, dryrun=False, force=False):
806 806 wlock = lock = None
807 807 try:
808 808 wlock = self.wlock()
809 809 lock = self.lock()
810 810 if os.path.exists(self.sjoin("undo")):
811 811 return self._rollback(dryrun, force)
812 812 else:
813 813 self.ui.warn(_("no rollback information available\n"))
814 814 return 1
815 815 finally:
816 816 release(lock, wlock)
817 817
818 818 def _rollback(self, dryrun, force):
819 819 ui = self.ui
820 820 try:
821 821 args = self.opener.read('undo.desc').splitlines()
822 822 (oldlen, desc, detail) = (int(args[0]), args[1], None)
823 823 if len(args) >= 3:
824 824 detail = args[2]
825 825 oldtip = oldlen - 1
826 826
827 827 if detail and ui.verbose:
828 828 msg = (_('repository tip rolled back to revision %s'
829 829 ' (undo %s: %s)\n')
830 830 % (oldtip, desc, detail))
831 831 else:
832 832 msg = (_('repository tip rolled back to revision %s'
833 833 ' (undo %s)\n')
834 834 % (oldtip, desc))
835 835 except IOError:
836 836 msg = _('rolling back unknown transaction\n')
837 837 desc = None
838 838
839 839 if not force and self['.'] != self['tip'] and desc == 'commit':
840 840 raise util.Abort(
841 841 _('rollback of last commit while not checked out '
842 842 'may lose data'), hint=_('use -f to force'))
843 843
844 844 ui.status(msg)
845 845 if dryrun:
846 846 return 0
847 847
848 848 parents = self.dirstate.parents()
849 849 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
850 850 if os.path.exists(self.join('undo.bookmarks')):
851 851 util.rename(self.join('undo.bookmarks'),
852 852 self.join('bookmarks'))
853 853 if os.path.exists(self.sjoin('undo.phaseroots')):
854 854 util.rename(self.sjoin('undo.phaseroots'),
855 855 self.sjoin('phaseroots'))
856 856 self.invalidate()
857 857
858 858 parentgone = (parents[0] not in self.changelog.nodemap or
859 859 parents[1] not in self.changelog.nodemap)
860 860 if parentgone:
861 861 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
862 862 try:
863 863 branch = self.opener.read('undo.branch')
864 864 self.dirstate.setbranch(branch)
865 865 except IOError:
866 866 ui.warn(_('named branch could not be reset: '
867 867 'current branch is still \'%s\'\n')
868 868 % self.dirstate.branch())
869 869
870 870 self.dirstate.invalidate()
871 871 parents = tuple([p.rev() for p in self.parents()])
872 872 if len(parents) > 1:
873 873 ui.status(_('working directory now based on '
874 874 'revisions %d and %d\n') % parents)
875 875 else:
876 876 ui.status(_('working directory now based on '
877 877 'revision %d\n') % parents)
878 878 self.destroyed()
879 879 return 0
880 880
881 881 def invalidatecaches(self):
882 882 try:
883 883 delattr(self, '_tagscache')
884 884 except AttributeError:
885 885 pass
886 886
887 887 self._branchcache = None # in UTF-8
888 888 self._branchcachetip = None
889 889
890 890 def invalidatedirstate(self):
891 891 '''Invalidates the dirstate, causing the next call to dirstate
892 892 to check if it was modified since the last time it was read,
893 893 rereading it if it has.
894 894
895 895 This is different to dirstate.invalidate() that it doesn't always
896 896 rereads the dirstate. Use dirstate.invalidate() if you want to
897 897 explicitly read the dirstate again (i.e. restoring it to a previous
898 898 known good state).'''
899 899 try:
900 900 delattr(self, 'dirstate')
901 901 except AttributeError:
902 902 pass
903 903
904 904 def invalidate(self):
905 905 for k in self._filecache:
906 906 # dirstate is invalidated separately in invalidatedirstate()
907 907 if k == 'dirstate':
908 908 continue
909 909
910 910 try:
911 911 delattr(self, k)
912 912 except AttributeError:
913 913 pass
914 914 self.invalidatecaches()
915 915
916 916 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
917 917 try:
918 918 l = lock.lock(lockname, 0, releasefn, desc=desc)
919 919 except error.LockHeld, inst:
920 920 if not wait:
921 921 raise
922 922 self.ui.warn(_("waiting for lock on %s held by %r\n") %
923 923 (desc, inst.locker))
924 924 # default to 600 seconds timeout
925 925 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
926 926 releasefn, desc=desc)
927 927 if acquirefn:
928 928 acquirefn()
929 929 return l
930 930
931 931 def _afterlock(self, callback):
932 932 """add a callback to the current repository lock.
933 933
934 934 The callback will be executed on lock release."""
935 935 l = self._lockref and self._lockref()
936 936 if l:
937 937 l.postrelease.append(callback)
938 938
939 939 def lock(self, wait=True):
940 940 '''Lock the repository store (.hg/store) and return a weak reference
941 941 to the lock. Use this before modifying the store (e.g. committing or
942 942 stripping). If you are opening a transaction, get a lock as well.)'''
943 943 l = self._lockref and self._lockref()
944 944 if l is not None and l.held:
945 945 l.lock()
946 946 return l
947 947
948 948 def unlock():
949 949 self.store.write()
950 950 if self._dirtyphases:
951 951 phases.writeroots(self)
952 952 for k, ce in self._filecache.items():
953 953 if k == 'dirstate':
954 954 continue
955 955 ce.refresh()
956 956
957 957 l = self._lock(self.sjoin("lock"), wait, unlock,
958 958 self.invalidate, _('repository %s') % self.origroot)
959 959 self._lockref = weakref.ref(l)
960 960 return l
961 961
962 962 def wlock(self, wait=True):
963 963 '''Lock the non-store parts of the repository (everything under
964 964 .hg except .hg/store) and return a weak reference to the lock.
965 965 Use this before modifying files in .hg.'''
966 966 l = self._wlockref and self._wlockref()
967 967 if l is not None and l.held:
968 968 l.lock()
969 969 return l
970 970
971 971 def unlock():
972 972 self.dirstate.write()
973 973 ce = self._filecache.get('dirstate')
974 974 if ce:
975 975 ce.refresh()
976 976
977 977 l = self._lock(self.join("wlock"), wait, unlock,
978 978 self.invalidatedirstate, _('working directory of %s') %
979 979 self.origroot)
980 980 self._wlockref = weakref.ref(l)
981 981 return l
982 982
983 983 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
984 984 """
985 985 commit an individual file as part of a larger transaction
986 986 """
987 987
988 988 fname = fctx.path()
989 989 text = fctx.data()
990 990 flog = self.file(fname)
991 991 fparent1 = manifest1.get(fname, nullid)
992 992 fparent2 = fparent2o = manifest2.get(fname, nullid)
993 993
994 994 meta = {}
995 995 copy = fctx.renamed()
996 996 if copy and copy[0] != fname:
997 997 # Mark the new revision of this file as a copy of another
998 998 # file. This copy data will effectively act as a parent
999 999 # of this new revision. If this is a merge, the first
1000 1000 # parent will be the nullid (meaning "look up the copy data")
1001 1001 # and the second one will be the other parent. For example:
1002 1002 #
1003 1003 # 0 --- 1 --- 3 rev1 changes file foo
1004 1004 # \ / rev2 renames foo to bar and changes it
1005 1005 # \- 2 -/ rev3 should have bar with all changes and
1006 1006 # should record that bar descends from
1007 1007 # bar in rev2 and foo in rev1
1008 1008 #
1009 1009 # this allows this merge to succeed:
1010 1010 #
1011 1011 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1012 1012 # \ / merging rev3 and rev4 should use bar@rev2
1013 1013 # \- 2 --- 4 as the merge base
1014 1014 #
1015 1015
1016 1016 cfname = copy[0]
1017 1017 crev = manifest1.get(cfname)
1018 1018 newfparent = fparent2
1019 1019
1020 1020 if manifest2: # branch merge
1021 1021 if fparent2 == nullid or crev is None: # copied on remote side
1022 1022 if cfname in manifest2:
1023 1023 crev = manifest2[cfname]
1024 1024 newfparent = fparent1
1025 1025
1026 1026 # find source in nearest ancestor if we've lost track
1027 1027 if not crev:
1028 1028 self.ui.debug(" %s: searching for copy revision for %s\n" %
1029 1029 (fname, cfname))
1030 1030 for ancestor in self[None].ancestors():
1031 1031 if cfname in ancestor:
1032 1032 crev = ancestor[cfname].filenode()
1033 1033 break
1034 1034
1035 1035 if crev:
1036 1036 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1037 1037 meta["copy"] = cfname
1038 1038 meta["copyrev"] = hex(crev)
1039 1039 fparent1, fparent2 = nullid, newfparent
1040 1040 else:
1041 1041 self.ui.warn(_("warning: can't find ancestor for '%s' "
1042 1042 "copied from '%s'!\n") % (fname, cfname))
1043 1043
1044 1044 elif fparent2 != nullid:
1045 1045 # is one parent an ancestor of the other?
1046 1046 fparentancestor = flog.ancestor(fparent1, fparent2)
1047 1047 if fparentancestor == fparent1:
1048 1048 fparent1, fparent2 = fparent2, nullid
1049 1049 elif fparentancestor == fparent2:
1050 1050 fparent2 = nullid
1051 1051
1052 1052 # is the file changed?
1053 1053 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1054 1054 changelist.append(fname)
1055 1055 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1056 1056
1057 1057 # are just the flags changed during merge?
1058 1058 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1059 1059 changelist.append(fname)
1060 1060
1061 1061 return fparent1
1062 1062
1063 1063 def commit(self, text="", user=None, date=None, match=None, force=False,
1064 1064 editor=False, extra={}):
1065 1065 """Add a new revision to current repository.
1066 1066
1067 1067 Revision information is gathered from the working directory,
1068 1068 match can be used to filter the committed files. If editor is
1069 1069 supplied, it is called to get a commit message.
1070 1070 """
1071 1071
1072 1072 def fail(f, msg):
1073 1073 raise util.Abort('%s: %s' % (f, msg))
1074 1074
1075 1075 if not match:
1076 1076 match = matchmod.always(self.root, '')
1077 1077
1078 1078 if not force:
1079 1079 vdirs = []
1080 1080 match.dir = vdirs.append
1081 1081 match.bad = fail
1082 1082
1083 1083 wlock = self.wlock()
1084 1084 try:
1085 1085 wctx = self[None]
1086 1086 merge = len(wctx.parents()) > 1
1087 1087
1088 1088 if (not force and merge and match and
1089 1089 (match.files() or match.anypats())):
1090 1090 raise util.Abort(_('cannot partially commit a merge '
1091 1091 '(do not specify files or patterns)'))
1092 1092
1093 1093 changes = self.status(match=match, clean=force)
1094 1094 if force:
1095 1095 changes[0].extend(changes[6]) # mq may commit unchanged files
1096 1096
1097 1097 # check subrepos
1098 1098 subs = []
1099 1099 removedsubs = set()
1100 1100 if '.hgsub' in wctx:
1101 1101 # only manage subrepos and .hgsubstate if .hgsub is present
1102 1102 for p in wctx.parents():
1103 1103 removedsubs.update(s for s in p.substate if match(s))
1104 1104 for s in wctx.substate:
1105 1105 removedsubs.discard(s)
1106 1106 if match(s) and wctx.sub(s).dirty():
1107 1107 subs.append(s)
1108 1108 if (subs or removedsubs):
1109 1109 if (not match('.hgsub') and
1110 1110 '.hgsub' in (wctx.modified() + wctx.added())):
1111 1111 raise util.Abort(
1112 1112 _("can't commit subrepos without .hgsub"))
1113 1113 if '.hgsubstate' not in changes[0]:
1114 1114 changes[0].insert(0, '.hgsubstate')
1115 1115 if '.hgsubstate' in changes[2]:
1116 1116 changes[2].remove('.hgsubstate')
1117 1117 elif '.hgsub' in changes[2]:
1118 1118 # clean up .hgsubstate when .hgsub is removed
1119 1119 if ('.hgsubstate' in wctx and
1120 1120 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1121 1121 changes[2].insert(0, '.hgsubstate')
1122 1122
1123 1123 if subs and not self.ui.configbool('ui', 'commitsubrepos', False):
1124 1124 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1125 1125 if changedsubs:
1126 1126 raise util.Abort(_("uncommitted changes in subrepo %s")
1127 1127 % changedsubs[0],
1128 1128 hint=_("use --subrepos for recursive commit"))
1129 1129
1130 1130 # make sure all explicit patterns are matched
1131 1131 if not force and match.files():
1132 1132 matched = set(changes[0] + changes[1] + changes[2])
1133 1133
1134 1134 for f in match.files():
1135 1135 if f == '.' or f in matched or f in wctx.substate:
1136 1136 continue
1137 1137 if f in changes[3]: # missing
1138 1138 fail(f, _('file not found!'))
1139 1139 if f in vdirs: # visited directory
1140 1140 d = f + '/'
1141 1141 for mf in matched:
1142 1142 if mf.startswith(d):
1143 1143 break
1144 1144 else:
1145 1145 fail(f, _("no match under directory!"))
1146 1146 elif f not in self.dirstate:
1147 1147 fail(f, _("file not tracked!"))
1148 1148
1149 1149 if (not force and not extra.get("close") and not merge
1150 1150 and not (changes[0] or changes[1] or changes[2])
1151 1151 and wctx.branch() == wctx.p1().branch()):
1152 1152 return None
1153 1153
1154 1154 ms = mergemod.mergestate(self)
1155 1155 for f in changes[0]:
1156 1156 if f in ms and ms[f] == 'u':
1157 1157 raise util.Abort(_("unresolved merge conflicts "
1158 1158 "(see hg help resolve)"))
1159 1159
1160 1160 cctx = context.workingctx(self, text, user, date, extra, changes)
1161 1161 if editor:
1162 1162 cctx._text = editor(self, cctx, subs)
1163 1163 edited = (text != cctx._text)
1164 1164
1165 1165 # commit subs
1166 1166 if subs or removedsubs:
1167 1167 state = wctx.substate.copy()
1168 1168 for s in sorted(subs):
1169 1169 sub = wctx.sub(s)
1170 1170 self.ui.status(_('committing subrepository %s\n') %
1171 1171 subrepo.subrelpath(sub))
1172 1172 sr = sub.commit(cctx._text, user, date)
1173 1173 state[s] = (state[s][0], sr)
1174 1174 subrepo.writestate(self, state)
1175 1175
1176 1176 # Save commit message in case this transaction gets rolled back
1177 1177 # (e.g. by a pretxncommit hook). Leave the content alone on
1178 1178 # the assumption that the user will use the same editor again.
1179 1179 msgfn = self.savecommitmessage(cctx._text)
1180 1180
1181 1181 p1, p2 = self.dirstate.parents()
1182 1182 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1183 1183 try:
1184 1184 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1185 1185 ret = self.commitctx(cctx, True)
1186 1186 except:
1187 1187 if edited:
1188 1188 self.ui.write(
1189 1189 _('note: commit message saved in %s\n') % msgfn)
1190 1190 raise
1191 1191
1192 1192 # update bookmarks, dirstate and mergestate
1193 1193 bookmarks.update(self, p1, ret)
1194 1194 for f in changes[0] + changes[1]:
1195 1195 self.dirstate.normal(f)
1196 1196 for f in changes[2]:
1197 1197 self.dirstate.drop(f)
1198 1198 self.dirstate.setparents(ret)
1199 1199 ms.reset()
1200 1200 finally:
1201 1201 wlock.release()
1202 1202
1203 1203 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1204 1204 return ret
1205 1205
1206 1206 def commitctx(self, ctx, error=False):
1207 1207 """Add a new revision to current repository.
1208 1208 Revision information is passed via the context argument.
1209 1209 """
1210 1210
1211 1211 tr = lock = None
1212 1212 removed = list(ctx.removed())
1213 1213 p1, p2 = ctx.p1(), ctx.p2()
1214 1214 user = ctx.user()
1215 1215
1216 1216 lock = self.lock()
1217 1217 try:
1218 1218 tr = self.transaction("commit")
1219 1219 trp = weakref.proxy(tr)
1220 1220
1221 1221 if ctx.files():
1222 1222 m1 = p1.manifest().copy()
1223 1223 m2 = p2.manifest()
1224 1224
1225 1225 # check in files
1226 1226 new = {}
1227 1227 changed = []
1228 1228 linkrev = len(self)
1229 1229 for f in sorted(ctx.modified() + ctx.added()):
1230 1230 self.ui.note(f + "\n")
1231 1231 try:
1232 1232 fctx = ctx[f]
1233 1233 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1234 1234 changed)
1235 1235 m1.set(f, fctx.flags())
1236 1236 except OSError, inst:
1237 1237 self.ui.warn(_("trouble committing %s!\n") % f)
1238 1238 raise
1239 1239 except IOError, inst:
1240 1240 errcode = getattr(inst, 'errno', errno.ENOENT)
1241 1241 if error or errcode and errcode != errno.ENOENT:
1242 1242 self.ui.warn(_("trouble committing %s!\n") % f)
1243 1243 raise
1244 1244 else:
1245 1245 removed.append(f)
1246 1246
1247 1247 # update manifest
1248 1248 m1.update(new)
1249 1249 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1250 1250 drop = [f for f in removed if f in m1]
1251 1251 for f in drop:
1252 1252 del m1[f]
1253 1253 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1254 1254 p2.manifestnode(), (new, drop))
1255 1255 files = changed + removed
1256 1256 else:
1257 1257 mn = p1.manifestnode()
1258 1258 files = []
1259 1259
1260 1260 # update changelog
1261 1261 self.changelog.delayupdate()
1262 1262 n = self.changelog.add(mn, files, ctx.description(),
1263 1263 trp, p1.node(), p2.node(),
1264 1264 user, ctx.date(), ctx.extra().copy())
1265 1265 p = lambda: self.changelog.writepending() and self.root or ""
1266 1266 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1267 1267 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1268 1268 parent2=xp2, pending=p)
1269 1269 self.changelog.finalize(trp)
1270 1270 # set the new commit is proper phase
1271 1271 targetphase = self.ui.configint('phases', 'new-commit',
1272 1272 phases.draft)
1273 1273 if targetphase:
1274 1274 # retract boundary do not alter parent changeset.
1275 1275 # if a parent have higher the resulting phase will
1276 1276 # be compliant anyway
1277 1277 #
1278 1278 # if minimal phase was 0 we don't need to retract anything
1279 1279 phases.retractboundary(self, targetphase, [n])
1280 1280 tr.close()
1281 1281 self.updatebranchcache()
1282 1282 return n
1283 1283 finally:
1284 1284 if tr:
1285 1285 tr.release()
1286 1286 lock.release()
1287 1287
1288 1288 def destroyed(self):
1289 1289 '''Inform the repository that nodes have been destroyed.
1290 1290 Intended for use by strip and rollback, so there's a common
1291 1291 place for anything that has to be done after destroying history.'''
1292 1292 # XXX it might be nice if we could take the list of destroyed
1293 1293 # nodes, but I don't see an easy way for rollback() to do that
1294 1294
1295 1295 # Ensure the persistent tag cache is updated. Doing it now
1296 1296 # means that the tag cache only has to worry about destroyed
1297 1297 # heads immediately after a strip/rollback. That in turn
1298 1298 # guarantees that "cachetip == currenttip" (comparing both rev
1299 1299 # and node) always means no nodes have been added or destroyed.
1300 1300
1301 1301 # XXX this is suboptimal when qrefresh'ing: we strip the current
1302 1302 # head, refresh the tag cache, then immediately add a new head.
1303 1303 # But I think doing it this way is necessary for the "instant
1304 1304 # tag cache retrieval" case to work.
1305 1305 self.invalidatecaches()
1306 1306
1307 1307 def walk(self, match, node=None):
1308 1308 '''
1309 1309 walk recursively through the directory tree or a given
1310 1310 changeset, finding all files matched by the match
1311 1311 function
1312 1312 '''
1313 1313 return self[node].walk(match)
1314 1314
1315 1315 def status(self, node1='.', node2=None, match=None,
1316 1316 ignored=False, clean=False, unknown=False,
1317 1317 listsubrepos=False):
1318 1318 """return status of files between two nodes or node and working directory
1319 1319
1320 1320 If node1 is None, use the first dirstate parent instead.
1321 1321 If node2 is None, compare node1 with working directory.
1322 1322 """
1323 1323
1324 1324 def mfmatches(ctx):
1325 1325 mf = ctx.manifest().copy()
1326 1326 for fn in mf.keys():
1327 1327 if not match(fn):
1328 1328 del mf[fn]
1329 1329 return mf
1330 1330
1331 1331 if isinstance(node1, context.changectx):
1332 1332 ctx1 = node1
1333 1333 else:
1334 1334 ctx1 = self[node1]
1335 1335 if isinstance(node2, context.changectx):
1336 1336 ctx2 = node2
1337 1337 else:
1338 1338 ctx2 = self[node2]
1339 1339
1340 1340 working = ctx2.rev() is None
1341 1341 parentworking = working and ctx1 == self['.']
1342 1342 match = match or matchmod.always(self.root, self.getcwd())
1343 1343 listignored, listclean, listunknown = ignored, clean, unknown
1344 1344
1345 1345 # load earliest manifest first for caching reasons
1346 1346 if not working and ctx2.rev() < ctx1.rev():
1347 1347 ctx2.manifest()
1348 1348
1349 1349 if not parentworking:
1350 1350 def bad(f, msg):
1351 1351 if f not in ctx1:
1352 1352 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1353 1353 match.bad = bad
1354 1354
1355 1355 if working: # we need to scan the working dir
1356 1356 subrepos = []
1357 1357 if '.hgsub' in self.dirstate:
1358 1358 subrepos = ctx2.substate.keys()
1359 1359 s = self.dirstate.status(match, subrepos, listignored,
1360 1360 listclean, listunknown)
1361 1361 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1362 1362
1363 1363 # check for any possibly clean files
1364 1364 if parentworking and cmp:
1365 1365 fixup = []
1366 1366 # do a full compare of any files that might have changed
1367 1367 for f in sorted(cmp):
1368 1368 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1369 1369 or ctx1[f].cmp(ctx2[f])):
1370 1370 modified.append(f)
1371 1371 else:
1372 1372 fixup.append(f)
1373 1373
1374 1374 # update dirstate for files that are actually clean
1375 1375 if fixup:
1376 1376 if listclean:
1377 1377 clean += fixup
1378 1378
1379 1379 try:
1380 1380 # updating the dirstate is optional
1381 1381 # so we don't wait on the lock
1382 1382 wlock = self.wlock(False)
1383 1383 try:
1384 1384 for f in fixup:
1385 1385 self.dirstate.normal(f)
1386 1386 finally:
1387 1387 wlock.release()
1388 1388 except error.LockError:
1389 1389 pass
1390 1390
1391 1391 if not parentworking:
1392 1392 mf1 = mfmatches(ctx1)
1393 1393 if working:
1394 1394 # we are comparing working dir against non-parent
1395 1395 # generate a pseudo-manifest for the working dir
1396 1396 mf2 = mfmatches(self['.'])
1397 1397 for f in cmp + modified + added:
1398 1398 mf2[f] = None
1399 1399 mf2.set(f, ctx2.flags(f))
1400 1400 for f in removed:
1401 1401 if f in mf2:
1402 1402 del mf2[f]
1403 1403 else:
1404 1404 # we are comparing two revisions
1405 1405 deleted, unknown, ignored = [], [], []
1406 1406 mf2 = mfmatches(ctx2)
1407 1407
1408 1408 modified, added, clean = [], [], []
1409 1409 for fn in mf2:
1410 1410 if fn in mf1:
1411 1411 if (fn not in deleted and
1412 1412 (mf1.flags(fn) != mf2.flags(fn) or
1413 1413 (mf1[fn] != mf2[fn] and
1414 1414 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1415 1415 modified.append(fn)
1416 1416 elif listclean:
1417 1417 clean.append(fn)
1418 1418 del mf1[fn]
1419 1419 elif fn not in deleted:
1420 1420 added.append(fn)
1421 1421 removed = mf1.keys()
1422 1422
1423 1423 if working and modified and not self.dirstate._checklink:
1424 1424 # Symlink placeholders may get non-symlink-like contents
1425 1425 # via user error or dereferencing by NFS or Samba servers,
1426 1426 # so we filter out any placeholders that don't look like a
1427 1427 # symlink
1428 1428 sane = []
1429 1429 for f in modified:
1430 1430 if ctx2.flags(f) == 'l':
1431 1431 d = ctx2[f].data()
1432 1432 if len(d) >= 1024 or '\n' in d or util.binary(d):
1433 1433 self.ui.debug('ignoring suspect symlink placeholder'
1434 1434 ' "%s"\n' % f)
1435 1435 continue
1436 1436 sane.append(f)
1437 1437 modified = sane
1438 1438
1439 1439 r = modified, added, removed, deleted, unknown, ignored, clean
1440 1440
1441 1441 if listsubrepos:
1442 1442 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1443 1443 if working:
1444 1444 rev2 = None
1445 1445 else:
1446 1446 rev2 = ctx2.substate[subpath][1]
1447 1447 try:
1448 1448 submatch = matchmod.narrowmatcher(subpath, match)
1449 1449 s = sub.status(rev2, match=submatch, ignored=listignored,
1450 1450 clean=listclean, unknown=listunknown,
1451 1451 listsubrepos=True)
1452 1452 for rfiles, sfiles in zip(r, s):
1453 1453 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1454 1454 except error.LookupError:
1455 1455 self.ui.status(_("skipping missing subrepository: %s\n")
1456 1456 % subpath)
1457 1457
1458 1458 for l in r:
1459 1459 l.sort()
1460 1460 return r
1461 1461
1462 1462 def heads(self, start=None):
1463 1463 heads = self.changelog.heads(start)
1464 1464 # sort the output in rev descending order
1465 1465 return sorted(heads, key=self.changelog.rev, reverse=True)
1466 1466
1467 1467 def branchheads(self, branch=None, start=None, closed=False):
1468 1468 '''return a (possibly filtered) list of heads for the given branch
1469 1469
1470 1470 Heads are returned in topological order, from newest to oldest.
1471 1471 If branch is None, use the dirstate branch.
1472 1472 If start is not None, return only heads reachable from start.
1473 1473 If closed is True, return heads that are marked as closed as well.
1474 1474 '''
1475 1475 if branch is None:
1476 1476 branch = self[None].branch()
1477 1477 branches = self.branchmap()
1478 1478 if branch not in branches:
1479 1479 return []
1480 1480 # the cache returns heads ordered lowest to highest
1481 1481 bheads = list(reversed(branches[branch]))
1482 1482 if start is not None:
1483 1483 # filter out the heads that cannot be reached from startrev
1484 1484 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1485 1485 bheads = [h for h in bheads if h in fbheads]
1486 1486 if not closed:
1487 1487 bheads = [h for h in bheads if
1488 1488 ('close' not in self.changelog.read(h)[5])]
1489 1489 return bheads
1490 1490
1491 1491 def branches(self, nodes):
1492 1492 if not nodes:
1493 1493 nodes = [self.changelog.tip()]
1494 1494 b = []
1495 1495 for n in nodes:
1496 1496 t = n
1497 1497 while True:
1498 1498 p = self.changelog.parents(n)
1499 1499 if p[1] != nullid or p[0] == nullid:
1500 1500 b.append((t, n, p[0], p[1]))
1501 1501 break
1502 1502 n = p[0]
1503 1503 return b
1504 1504
1505 1505 def between(self, pairs):
1506 1506 r = []
1507 1507
1508 1508 for top, bottom in pairs:
1509 1509 n, l, i = top, [], 0
1510 1510 f = 1
1511 1511
1512 1512 while n != bottom and n != nullid:
1513 1513 p = self.changelog.parents(n)[0]
1514 1514 if i == f:
1515 1515 l.append(n)
1516 1516 f = f * 2
1517 1517 n = p
1518 1518 i += 1
1519 1519
1520 1520 r.append(l)
1521 1521
1522 1522 return r
1523 1523
1524 1524 def pull(self, remote, heads=None, force=False):
1525 1525 lock = self.lock()
1526 1526 try:
1527 1527 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1528 1528 force=force)
1529 1529 common, fetch, rheads = tmp
1530 1530 if not fetch:
1531 1531 self.ui.status(_("no changes found\n"))
1532 1532 added = []
1533 1533 result = 0
1534 1534 else:
1535 1535 if heads is None and list(common) == [nullid]:
1536 1536 self.ui.status(_("requesting all changes\n"))
1537 1537 elif heads is None and remote.capable('changegroupsubset'):
1538 1538 # issue1320, avoid a race if remote changed after discovery
1539 1539 heads = rheads
1540 1540
1541 1541 if remote.capable('getbundle'):
1542 1542 cg = remote.getbundle('pull', common=common,
1543 1543 heads=heads or rheads)
1544 1544 elif heads is None:
1545 1545 cg = remote.changegroup(fetch, 'pull')
1546 1546 elif not remote.capable('changegroupsubset'):
1547 1547 raise util.Abort(_("partial pull cannot be done because "
1548 1548 "other repository doesn't support "
1549 1549 "changegroupsubset."))
1550 1550 else:
1551 1551 cg = remote.changegroupsubset(fetch, heads, 'pull')
1552 1552 clstart = len(self.changelog)
1553 1553 result = self.addchangegroup(cg, 'pull', remote.url())
1554 1554 clend = len(self.changelog)
1555 1555 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1556 1556
1557 # compute target subset
1558 if heads is None:
1559 # We pulled every thing possible
1560 # sync on everything common
1561 subset = common + added
1562 else:
1563 # We pulled a specific subset
1564 # sync on this subset
1565 subset = heads
1557 1566
1558 1567 # Get remote phases data from remote
1559 1568 remotephases = remote.listkeys('phases')
1560 1569 publishing = bool(remotephases.get('publishing', False))
1561 1570 if remotephases and not publishing:
1562 1571 # remote is new and unpublishing
1563 subset = common + added
1564 1572 pheads, _dr = phases.analyzeremotephases(self, subset,
1565 1573 remotephases)
1566 1574 phases.advanceboundary(self, phases.public, pheads)
1567 phases.advanceboundary(self, phases.draft, common + added)
1575 phases.advanceboundary(self, phases.draft, subset)
1568 1576 else:
1569 1577 # Remote is old or publishing all common changesets
1570 1578 # should be seen as public
1571 phases.advanceboundary(self, phases.public, common + added)
1579 phases.advanceboundary(self, phases.public, subset)
1572 1580 finally:
1573 1581 lock.release()
1574 1582
1575 1583 return result
1576 1584
1577 1585 def checkpush(self, force, revs):
1578 1586 """Extensions can override this function if additional checks have
1579 1587 to be performed before pushing, or call it if they override push
1580 1588 command.
1581 1589 """
1582 1590 pass
1583 1591
1584 1592 def push(self, remote, force=False, revs=None, newbranch=False):
1585 1593 '''Push outgoing changesets (limited by revs) from the current
1586 1594 repository to remote. Return an integer:
1587 1595 - 0 means HTTP error *or* nothing to push
1588 1596 - 1 means we pushed and remote head count is unchanged *or*
1589 1597 we have outgoing changesets but refused to push
1590 1598 - other values as described by addchangegroup()
1591 1599 '''
1592 1600 # there are two ways to push to remote repo:
1593 1601 #
1594 1602 # addchangegroup assumes local user can lock remote
1595 1603 # repo (local filesystem, old ssh servers).
1596 1604 #
1597 1605 # unbundle assumes local user cannot lock remote repo (new ssh
1598 1606 # servers, http servers).
1599 1607
1600 1608 # get local lock as we might write phase data
1601 1609 locallock = self.lock()
1602 1610 try:
1603 1611 self.checkpush(force, revs)
1604 1612 lock = None
1605 1613 unbundle = remote.capable('unbundle')
1606 1614 if not unbundle:
1607 1615 lock = remote.lock()
1608 1616 try:
1609 1617 # discovery
1610 1618 fci = discovery.findcommonincoming
1611 1619 commoninc = fci(self, remote, force=force)
1612 1620 common, inc, remoteheads = commoninc
1613 1621 fco = discovery.findcommonoutgoing
1614 1622 outgoing = fco(self, remote, onlyheads=revs,
1615 1623 commoninc=commoninc, force=force)
1616 1624
1617 1625
1618 1626 if not outgoing.missing:
1619 1627 # nothing to push
1620 1628 if outgoing.excluded:
1621 1629 msg = "no changes to push but %i secret changesets\n"
1622 1630 self.ui.status(_(msg) % len(outgoing.excluded))
1623 1631 else:
1624 1632 self.ui.status(_("no changes found\n"))
1625 1633 ret = 1
1626 1634 else:
1627 1635 # something to push
1628 1636 if not force:
1629 1637 discovery.checkheads(self, remote, outgoing,
1630 1638 remoteheads, newbranch)
1631 1639
1632 1640 # create a changegroup from local
1633 1641 if revs is None and not outgoing.excluded:
1634 1642 # push everything,
1635 1643 # use the fast path, no race possible on push
1636 1644 cg = self._changegroup(outgoing.missing, 'push')
1637 1645 else:
1638 1646 cg = self.getlocalbundle('push', outgoing)
1639 1647
1640 1648 # apply changegroup to remote
1641 1649 if unbundle:
1642 1650 # local repo finds heads on server, finds out what
1643 1651 # revs it must push. once revs transferred, if server
1644 1652 # finds it has different heads (someone else won
1645 1653 # commit/push race), server aborts.
1646 1654 if force:
1647 1655 remoteheads = ['force']
1648 1656 # ssh: return remote's addchangegroup()
1649 1657 # http: return remote's addchangegroup() or 0 for error
1650 1658 ret = remote.unbundle(cg, remoteheads, 'push')
1651 1659 else:
1652 1660 # we return an integer indicating remote head count change
1653 1661 ret = remote.addchangegroup(cg, 'push', self.url())
1654 1662
1655 cheads = outgoing.commonheads[:]
1656 1663 if ret:
1657 # push succeed, synchonize common + pushed
1658 # this is a no-op if there was nothing to push
1659 cheads += outgoing.missingheads
1664 # push succeed, synchonize target of the push
1665 cheads = outgoing.missingheads
1666 elif revs is None:
1667 # All out push fails. synchronize all common
1668 cheads = outgoing.commonheads
1669 else:
1670 # I want cheads = heads(::missingheads and ::commonheads)
1671 # (missingheads is revs with secret changeset filtered out)
1672 #
1673 # This can be expressed as:
1674 # cheads = ( (missingheads and ::commonheads)
1675 # + (commonheads and ::missingheads))"
1676 # )
1677 #
1678 # while trying to push we already computed the following:
1679 # common = (::commonheads)
1680 # missing = ((commonheads::missingheads) - commonheads)
1681 #
1682 # We can pick:
1683 # * missingheads part of comon (::commonheads)
1684 common = set(outgoing.common)
1685 cheads = [n for node in revs if n in common]
1686 # and
1687 # * commonheads parents on missing
1688 rvset = repo.revset('%ln and parents(roots(%ln))',
1689 outgoing.commonheads,
1690 outgoing.missing)
1691 cheads.extend(c.node() for c in rvset)
1660 1692 # even when we don't push, exchanging phase data is useful
1661 1693 remotephases = remote.listkeys('phases')
1662 1694 if not remotephases: # old server or public only repo
1663 1695 phases.advanceboundary(self, phases.public, cheads)
1664 1696 # don't push any phase data as there is nothing to push
1665 1697 else:
1666 1698 ana = phases.analyzeremotephases(self, cheads, remotephases)
1667 1699 pheads, droots = ana
1668 1700 ### Apply remote phase on local
1669 1701 if remotephases.get('publishing', False):
1670 1702 phases.advanceboundary(self, phases.public, cheads)
1671 1703 else: # publish = False
1672 1704 phases.advanceboundary(self, phases.public, pheads)
1673 1705 phases.advanceboundary(self, phases.draft, cheads)
1674 1706 ### Apply local phase on remote
1675 1707
1676 1708 # Get the list of all revs draft on remote by public here.
1677 1709 # XXX Beware that revset break if droots is not strictly
1678 1710 # XXX root we may want to ensure it is but it is costly
1679 1711 outdated = self.set('heads((%ln::%ln) and public())',
1680 1712 droots, cheads)
1681 1713 for newremotehead in outdated:
1682 1714 r = remote.pushkey('phases',
1683 1715 newremotehead.hex(),
1684 1716 str(phases.draft),
1685 1717 str(phases.public))
1686 1718 if not r:
1687 1719 self.ui.warn(_('updating %s to public failed!\n')
1688 1720 % newremotehead)
1689 1721 finally:
1690 1722 if lock is not None:
1691 1723 lock.release()
1692 1724 finally:
1693 1725 locallock.release()
1694 1726
1695 1727 self.ui.debug("checking for updated bookmarks\n")
1696 1728 rb = remote.listkeys('bookmarks')
1697 1729 for k in rb.keys():
1698 1730 if k in self._bookmarks:
1699 1731 nr, nl = rb[k], hex(self._bookmarks[k])
1700 1732 if nr in self:
1701 1733 cr = self[nr]
1702 1734 cl = self[nl]
1703 1735 if cl in cr.descendants():
1704 1736 r = remote.pushkey('bookmarks', k, nr, nl)
1705 1737 if r:
1706 1738 self.ui.status(_("updating bookmark %s\n") % k)
1707 1739 else:
1708 1740 self.ui.warn(_('updating bookmark %s'
1709 1741 ' failed!\n') % k)
1710 1742
1711 1743 return ret
1712 1744
1713 1745 def changegroupinfo(self, nodes, source):
1714 1746 if self.ui.verbose or source == 'bundle':
1715 1747 self.ui.status(_("%d changesets found\n") % len(nodes))
1716 1748 if self.ui.debugflag:
1717 1749 self.ui.debug("list of changesets:\n")
1718 1750 for node in nodes:
1719 1751 self.ui.debug("%s\n" % hex(node))
1720 1752
1721 1753 def changegroupsubset(self, bases, heads, source):
1722 1754 """Compute a changegroup consisting of all the nodes that are
1723 1755 descendants of any of the bases and ancestors of any of the heads.
1724 1756 Return a chunkbuffer object whose read() method will return
1725 1757 successive changegroup chunks.
1726 1758
1727 1759 It is fairly complex as determining which filenodes and which
1728 1760 manifest nodes need to be included for the changeset to be complete
1729 1761 is non-trivial.
1730 1762
1731 1763 Another wrinkle is doing the reverse, figuring out which changeset in
1732 1764 the changegroup a particular filenode or manifestnode belongs to.
1733 1765 """
1734 1766 cl = self.changelog
1735 1767 if not bases:
1736 1768 bases = [nullid]
1737 1769 csets, bases, heads = cl.nodesbetween(bases, heads)
1738 1770 # We assume that all ancestors of bases are known
1739 1771 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1740 1772 return self._changegroupsubset(common, csets, heads, source)
1741 1773
1742 1774 def getlocalbundle(self, source, outgoing):
1743 1775 """Like getbundle, but taking a discovery.outgoing as an argument.
1744 1776
1745 1777 This is only implemented for local repos and reuses potentially
1746 1778 precomputed sets in outgoing."""
1747 1779 if not outgoing.missing:
1748 1780 return None
1749 1781 return self._changegroupsubset(outgoing.common,
1750 1782 outgoing.missing,
1751 1783 outgoing.missingheads,
1752 1784 source)
1753 1785
1754 1786 def getbundle(self, source, heads=None, common=None):
1755 1787 """Like changegroupsubset, but returns the set difference between the
1756 1788 ancestors of heads and the ancestors common.
1757 1789
1758 1790 If heads is None, use the local heads. If common is None, use [nullid].
1759 1791
1760 1792 The nodes in common might not all be known locally due to the way the
1761 1793 current discovery protocol works.
1762 1794 """
1763 1795 cl = self.changelog
1764 1796 if common:
1765 1797 nm = cl.nodemap
1766 1798 common = [n for n in common if n in nm]
1767 1799 else:
1768 1800 common = [nullid]
1769 1801 if not heads:
1770 1802 heads = cl.heads()
1771 1803 return self.getlocalbundle(source,
1772 1804 discovery.outgoing(cl, common, heads))
1773 1805
1774 1806 def _changegroupsubset(self, commonrevs, csets, heads, source):
1775 1807
1776 1808 cl = self.changelog
1777 1809 mf = self.manifest
1778 1810 mfs = {} # needed manifests
1779 1811 fnodes = {} # needed file nodes
1780 1812 changedfiles = set()
1781 1813 fstate = ['', {}]
1782 1814 count = [0]
1783 1815
1784 1816 # can we go through the fast path ?
1785 1817 heads.sort()
1786 1818 if heads == sorted(self.heads()):
1787 1819 return self._changegroup(csets, source)
1788 1820
1789 1821 # slow path
1790 1822 self.hook('preoutgoing', throw=True, source=source)
1791 1823 self.changegroupinfo(csets, source)
1792 1824
1793 1825 # filter any nodes that claim to be part of the known set
1794 1826 def prune(revlog, missing):
1795 1827 return [n for n in missing
1796 1828 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1797 1829
1798 1830 def lookup(revlog, x):
1799 1831 if revlog == cl:
1800 1832 c = cl.read(x)
1801 1833 changedfiles.update(c[3])
1802 1834 mfs.setdefault(c[0], x)
1803 1835 count[0] += 1
1804 1836 self.ui.progress(_('bundling'), count[0],
1805 1837 unit=_('changesets'), total=len(csets))
1806 1838 return x
1807 1839 elif revlog == mf:
1808 1840 clnode = mfs[x]
1809 1841 mdata = mf.readfast(x)
1810 1842 for f in changedfiles:
1811 1843 if f in mdata:
1812 1844 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1813 1845 count[0] += 1
1814 1846 self.ui.progress(_('bundling'), count[0],
1815 1847 unit=_('manifests'), total=len(mfs))
1816 1848 return mfs[x]
1817 1849 else:
1818 1850 self.ui.progress(
1819 1851 _('bundling'), count[0], item=fstate[0],
1820 1852 unit=_('files'), total=len(changedfiles))
1821 1853 return fstate[1][x]
1822 1854
1823 1855 bundler = changegroup.bundle10(lookup)
1824 1856 reorder = self.ui.config('bundle', 'reorder', 'auto')
1825 1857 if reorder == 'auto':
1826 1858 reorder = None
1827 1859 else:
1828 1860 reorder = util.parsebool(reorder)
1829 1861
1830 1862 def gengroup():
1831 1863 # Create a changenode group generator that will call our functions
1832 1864 # back to lookup the owning changenode and collect information.
1833 1865 for chunk in cl.group(csets, bundler, reorder=reorder):
1834 1866 yield chunk
1835 1867 self.ui.progress(_('bundling'), None)
1836 1868
1837 1869 # Create a generator for the manifestnodes that calls our lookup
1838 1870 # and data collection functions back.
1839 1871 count[0] = 0
1840 1872 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1841 1873 yield chunk
1842 1874 self.ui.progress(_('bundling'), None)
1843 1875
1844 1876 mfs.clear()
1845 1877
1846 1878 # Go through all our files in order sorted by name.
1847 1879 count[0] = 0
1848 1880 for fname in sorted(changedfiles):
1849 1881 filerevlog = self.file(fname)
1850 1882 if not len(filerevlog):
1851 1883 raise util.Abort(_("empty or missing revlog for %s") % fname)
1852 1884 fstate[0] = fname
1853 1885 fstate[1] = fnodes.pop(fname, {})
1854 1886
1855 1887 nodelist = prune(filerevlog, fstate[1])
1856 1888 if nodelist:
1857 1889 count[0] += 1
1858 1890 yield bundler.fileheader(fname)
1859 1891 for chunk in filerevlog.group(nodelist, bundler, reorder):
1860 1892 yield chunk
1861 1893
1862 1894 # Signal that no more groups are left.
1863 1895 yield bundler.close()
1864 1896 self.ui.progress(_('bundling'), None)
1865 1897
1866 1898 if csets:
1867 1899 self.hook('outgoing', node=hex(csets[0]), source=source)
1868 1900
1869 1901 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1870 1902
1871 1903 def changegroup(self, basenodes, source):
1872 1904 # to avoid a race we use changegroupsubset() (issue1320)
1873 1905 return self.changegroupsubset(basenodes, self.heads(), source)
1874 1906
1875 1907 def _changegroup(self, nodes, source):
1876 1908 """Compute the changegroup of all nodes that we have that a recipient
1877 1909 doesn't. Return a chunkbuffer object whose read() method will return
1878 1910 successive changegroup chunks.
1879 1911
1880 1912 This is much easier than the previous function as we can assume that
1881 1913 the recipient has any changenode we aren't sending them.
1882 1914
1883 1915 nodes is the set of nodes to send"""
1884 1916
1885 1917 cl = self.changelog
1886 1918 mf = self.manifest
1887 1919 mfs = {}
1888 1920 changedfiles = set()
1889 1921 fstate = ['']
1890 1922 count = [0]
1891 1923
1892 1924 self.hook('preoutgoing', throw=True, source=source)
1893 1925 self.changegroupinfo(nodes, source)
1894 1926
1895 1927 revset = set([cl.rev(n) for n in nodes])
1896 1928
1897 1929 def gennodelst(log):
1898 1930 return [log.node(r) for r in log if log.linkrev(r) in revset]
1899 1931
1900 1932 def lookup(revlog, x):
1901 1933 if revlog == cl:
1902 1934 c = cl.read(x)
1903 1935 changedfiles.update(c[3])
1904 1936 mfs.setdefault(c[0], x)
1905 1937 count[0] += 1
1906 1938 self.ui.progress(_('bundling'), count[0],
1907 1939 unit=_('changesets'), total=len(nodes))
1908 1940 return x
1909 1941 elif revlog == mf:
1910 1942 count[0] += 1
1911 1943 self.ui.progress(_('bundling'), count[0],
1912 1944 unit=_('manifests'), total=len(mfs))
1913 1945 return cl.node(revlog.linkrev(revlog.rev(x)))
1914 1946 else:
1915 1947 self.ui.progress(
1916 1948 _('bundling'), count[0], item=fstate[0],
1917 1949 total=len(changedfiles), unit=_('files'))
1918 1950 return cl.node(revlog.linkrev(revlog.rev(x)))
1919 1951
1920 1952 bundler = changegroup.bundle10(lookup)
1921 1953 reorder = self.ui.config('bundle', 'reorder', 'auto')
1922 1954 if reorder == 'auto':
1923 1955 reorder = None
1924 1956 else:
1925 1957 reorder = util.parsebool(reorder)
1926 1958
1927 1959 def gengroup():
1928 1960 '''yield a sequence of changegroup chunks (strings)'''
1929 1961 # construct a list of all changed files
1930 1962
1931 1963 for chunk in cl.group(nodes, bundler, reorder=reorder):
1932 1964 yield chunk
1933 1965 self.ui.progress(_('bundling'), None)
1934 1966
1935 1967 count[0] = 0
1936 1968 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1937 1969 yield chunk
1938 1970 self.ui.progress(_('bundling'), None)
1939 1971
1940 1972 count[0] = 0
1941 1973 for fname in sorted(changedfiles):
1942 1974 filerevlog = self.file(fname)
1943 1975 if not len(filerevlog):
1944 1976 raise util.Abort(_("empty or missing revlog for %s") % fname)
1945 1977 fstate[0] = fname
1946 1978 nodelist = gennodelst(filerevlog)
1947 1979 if nodelist:
1948 1980 count[0] += 1
1949 1981 yield bundler.fileheader(fname)
1950 1982 for chunk in filerevlog.group(nodelist, bundler, reorder):
1951 1983 yield chunk
1952 1984 yield bundler.close()
1953 1985 self.ui.progress(_('bundling'), None)
1954 1986
1955 1987 if nodes:
1956 1988 self.hook('outgoing', node=hex(nodes[0]), source=source)
1957 1989
1958 1990 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1959 1991
1960 1992 def addchangegroup(self, source, srctype, url, emptyok=False):
1961 1993 """Add the changegroup returned by source.read() to this repo.
1962 1994 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1963 1995 the URL of the repo where this changegroup is coming from.
1964 1996
1965 1997 Return an integer summarizing the change to this repo:
1966 1998 - nothing changed or no source: 0
1967 1999 - more heads than before: 1+added heads (2..n)
1968 2000 - fewer heads than before: -1-removed heads (-2..-n)
1969 2001 - number of heads stays the same: 1
1970 2002 """
1971 2003 def csmap(x):
1972 2004 self.ui.debug("add changeset %s\n" % short(x))
1973 2005 return len(cl)
1974 2006
1975 2007 def revmap(x):
1976 2008 return cl.rev(x)
1977 2009
1978 2010 if not source:
1979 2011 return 0
1980 2012
1981 2013 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1982 2014
1983 2015 changesets = files = revisions = 0
1984 2016 efiles = set()
1985 2017
1986 2018 # write changelog data to temp files so concurrent readers will not see
1987 2019 # inconsistent view
1988 2020 cl = self.changelog
1989 2021 cl.delayupdate()
1990 2022 oldheads = cl.heads()
1991 2023
1992 2024 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1993 2025 try:
1994 2026 trp = weakref.proxy(tr)
1995 2027 # pull off the changeset group
1996 2028 self.ui.status(_("adding changesets\n"))
1997 2029 clstart = len(cl)
1998 2030 class prog(object):
1999 2031 step = _('changesets')
2000 2032 count = 1
2001 2033 ui = self.ui
2002 2034 total = None
2003 2035 def __call__(self):
2004 2036 self.ui.progress(self.step, self.count, unit=_('chunks'),
2005 2037 total=self.total)
2006 2038 self.count += 1
2007 2039 pr = prog()
2008 2040 source.callback = pr
2009 2041
2010 2042 source.changelogheader()
2011 2043 srccontent = cl.addgroup(source, csmap, trp)
2012 2044 if not (srccontent or emptyok):
2013 2045 raise util.Abort(_("received changelog group is empty"))
2014 2046 clend = len(cl)
2015 2047 changesets = clend - clstart
2016 2048 for c in xrange(clstart, clend):
2017 2049 efiles.update(self[c].files())
2018 2050 efiles = len(efiles)
2019 2051 self.ui.progress(_('changesets'), None)
2020 2052
2021 2053 # pull off the manifest group
2022 2054 self.ui.status(_("adding manifests\n"))
2023 2055 pr.step = _('manifests')
2024 2056 pr.count = 1
2025 2057 pr.total = changesets # manifests <= changesets
2026 2058 # no need to check for empty manifest group here:
2027 2059 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2028 2060 # no new manifest will be created and the manifest group will
2029 2061 # be empty during the pull
2030 2062 source.manifestheader()
2031 2063 self.manifest.addgroup(source, revmap, trp)
2032 2064 self.ui.progress(_('manifests'), None)
2033 2065
2034 2066 needfiles = {}
2035 2067 if self.ui.configbool('server', 'validate', default=False):
2036 2068 # validate incoming csets have their manifests
2037 2069 for cset in xrange(clstart, clend):
2038 2070 mfest = self.changelog.read(self.changelog.node(cset))[0]
2039 2071 mfest = self.manifest.readdelta(mfest)
2040 2072 # store file nodes we must see
2041 2073 for f, n in mfest.iteritems():
2042 2074 needfiles.setdefault(f, set()).add(n)
2043 2075
2044 2076 # process the files
2045 2077 self.ui.status(_("adding file changes\n"))
2046 2078 pr.step = _('files')
2047 2079 pr.count = 1
2048 2080 pr.total = efiles
2049 2081 source.callback = None
2050 2082
2051 2083 while True:
2052 2084 chunkdata = source.filelogheader()
2053 2085 if not chunkdata:
2054 2086 break
2055 2087 f = chunkdata["filename"]
2056 2088 self.ui.debug("adding %s revisions\n" % f)
2057 2089 pr()
2058 2090 fl = self.file(f)
2059 2091 o = len(fl)
2060 2092 if not fl.addgroup(source, revmap, trp):
2061 2093 raise util.Abort(_("received file revlog group is empty"))
2062 2094 revisions += len(fl) - o
2063 2095 files += 1
2064 2096 if f in needfiles:
2065 2097 needs = needfiles[f]
2066 2098 for new in xrange(o, len(fl)):
2067 2099 n = fl.node(new)
2068 2100 if n in needs:
2069 2101 needs.remove(n)
2070 2102 if not needs:
2071 2103 del needfiles[f]
2072 2104 self.ui.progress(_('files'), None)
2073 2105
2074 2106 for f, needs in needfiles.iteritems():
2075 2107 fl = self.file(f)
2076 2108 for n in needs:
2077 2109 try:
2078 2110 fl.rev(n)
2079 2111 except error.LookupError:
2080 2112 raise util.Abort(
2081 2113 _('missing file data for %s:%s - run hg verify') %
2082 2114 (f, hex(n)))
2083 2115
2084 2116 dh = 0
2085 2117 if oldheads:
2086 2118 heads = cl.heads()
2087 2119 dh = len(heads) - len(oldheads)
2088 2120 for h in heads:
2089 2121 if h not in oldheads and 'close' in self[h].extra():
2090 2122 dh -= 1
2091 2123 htext = ""
2092 2124 if dh:
2093 2125 htext = _(" (%+d heads)") % dh
2094 2126
2095 2127 self.ui.status(_("added %d changesets"
2096 2128 " with %d changes to %d files%s\n")
2097 2129 % (changesets, revisions, files, htext))
2098 2130
2099 2131 if changesets > 0:
2100 2132 p = lambda: cl.writepending() and self.root or ""
2101 2133 self.hook('pretxnchangegroup', throw=True,
2102 2134 node=hex(cl.node(clstart)), source=srctype,
2103 2135 url=url, pending=p)
2104 2136
2105 2137 added = [cl.node(r) for r in xrange(clstart, clend)]
2106 2138 publishing = self.ui.configbool('phases', 'publish', True)
2107 2139 if srctype == 'push':
2108 2140 # Old server can not push the boundary themself.
2109 2141 # New server won't push the boundary if changeset already
2110 2142 # existed locally as secrete
2111 2143 #
2112 2144 # We should not use added here but the list of all change in
2113 2145 # the bundle
2114 2146 if publishing:
2115 2147 phases.advanceboundary(self, phases.public, srccontent)
2116 2148 else:
2117 2149 phases.advanceboundary(self, phases.draft, srccontent)
2118 2150 phases.retractboundary(self, phases.draft, added)
2119 2151 elif srctype != 'strip':
2120 2152 # publishing only alter behavior during push
2121 2153 #
2122 2154 # strip should not touch boundary at all
2123 2155 phases.retractboundary(self, phases.draft, added)
2124 2156
2125 2157 # make changelog see real files again
2126 2158 cl.finalize(trp)
2127 2159
2128 2160 tr.close()
2129 2161
2130 2162 if changesets > 0:
2131 2163 def runhooks():
2132 2164 # forcefully update the on-disk branch cache
2133 2165 self.ui.debug("updating the branch cache\n")
2134 2166 self.updatebranchcache()
2135 2167 self.hook("changegroup", node=hex(cl.node(clstart)),
2136 2168 source=srctype, url=url)
2137 2169
2138 2170 for n in added:
2139 2171 self.hook("incoming", node=hex(n), source=srctype,
2140 2172 url=url)
2141 2173 self._afterlock(runhooks)
2142 2174
2143 2175 finally:
2144 2176 tr.release()
2145 2177 # never return 0 here:
2146 2178 if dh < 0:
2147 2179 return dh - 1
2148 2180 else:
2149 2181 return dh + 1
2150 2182
2151 2183 def stream_in(self, remote, requirements):
2152 2184 lock = self.lock()
2153 2185 try:
2154 2186 fp = remote.stream_out()
2155 2187 l = fp.readline()
2156 2188 try:
2157 2189 resp = int(l)
2158 2190 except ValueError:
2159 2191 raise error.ResponseError(
2160 2192 _('Unexpected response from remote server:'), l)
2161 2193 if resp == 1:
2162 2194 raise util.Abort(_('operation forbidden by server'))
2163 2195 elif resp == 2:
2164 2196 raise util.Abort(_('locking the remote repository failed'))
2165 2197 elif resp != 0:
2166 2198 raise util.Abort(_('the server sent an unknown error code'))
2167 2199 self.ui.status(_('streaming all changes\n'))
2168 2200 l = fp.readline()
2169 2201 try:
2170 2202 total_files, total_bytes = map(int, l.split(' ', 1))
2171 2203 except (ValueError, TypeError):
2172 2204 raise error.ResponseError(
2173 2205 _('Unexpected response from remote server:'), l)
2174 2206 self.ui.status(_('%d files to transfer, %s of data\n') %
2175 2207 (total_files, util.bytecount(total_bytes)))
2176 2208 start = time.time()
2177 2209 for i in xrange(total_files):
2178 2210 # XXX doesn't support '\n' or '\r' in filenames
2179 2211 l = fp.readline()
2180 2212 try:
2181 2213 name, size = l.split('\0', 1)
2182 2214 size = int(size)
2183 2215 except (ValueError, TypeError):
2184 2216 raise error.ResponseError(
2185 2217 _('Unexpected response from remote server:'), l)
2186 2218 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2187 2219 # for backwards compat, name was partially encoded
2188 2220 ofp = self.sopener(store.decodedir(name), 'w')
2189 2221 for chunk in util.filechunkiter(fp, limit=size):
2190 2222 ofp.write(chunk)
2191 2223 ofp.close()
2192 2224 elapsed = time.time() - start
2193 2225 if elapsed <= 0:
2194 2226 elapsed = 0.001
2195 2227 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2196 2228 (util.bytecount(total_bytes), elapsed,
2197 2229 util.bytecount(total_bytes / elapsed)))
2198 2230
2199 2231 # new requirements = old non-format requirements + new format-related
2200 2232 # requirements from the streamed-in repository
2201 2233 requirements.update(set(self.requirements) - self.supportedformats)
2202 2234 self._applyrequirements(requirements)
2203 2235 self._writerequirements()
2204 2236
2205 2237 self.invalidate()
2206 2238 return len(self.heads()) + 1
2207 2239 finally:
2208 2240 lock.release()
2209 2241
2210 2242 def clone(self, remote, heads=[], stream=False):
2211 2243 '''clone remote repository.
2212 2244
2213 2245 keyword arguments:
2214 2246 heads: list of revs to clone (forces use of pull)
2215 2247 stream: use streaming clone if possible'''
2216 2248
2217 2249 # now, all clients that can request uncompressed clones can
2218 2250 # read repo formats supported by all servers that can serve
2219 2251 # them.
2220 2252
2221 2253 # if revlog format changes, client will have to check version
2222 2254 # and format flags on "stream" capability, and use
2223 2255 # uncompressed only if compatible.
2224 2256
2225 2257 if stream and not heads:
2226 2258 # 'stream' means remote revlog format is revlogv1 only
2227 2259 if remote.capable('stream'):
2228 2260 return self.stream_in(remote, set(('revlogv1',)))
2229 2261 # otherwise, 'streamreqs' contains the remote revlog format
2230 2262 streamreqs = remote.capable('streamreqs')
2231 2263 if streamreqs:
2232 2264 streamreqs = set(streamreqs.split(','))
2233 2265 # if we support it, stream in and adjust our requirements
2234 2266 if not streamreqs - self.supportedformats:
2235 2267 return self.stream_in(remote, streamreqs)
2236 2268 return self.pull(remote, heads)
2237 2269
2238 2270 def pushkey(self, namespace, key, old, new):
2239 2271 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2240 2272 old=old, new=new)
2241 2273 ret = pushkey.push(self, namespace, key, old, new)
2242 2274 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2243 2275 ret=ret)
2244 2276 return ret
2245 2277
2246 2278 def listkeys(self, namespace):
2247 2279 self.hook('prelistkeys', throw=True, namespace=namespace)
2248 2280 values = pushkey.list(self, namespace)
2249 2281 self.hook('listkeys', namespace=namespace, values=values)
2250 2282 return values
2251 2283
2252 2284 def debugwireargs(self, one, two, three=None, four=None, five=None):
2253 2285 '''used to test argument passing over the wire'''
2254 2286 return "%s %s %s %s %s" % (one, two, three, four, five)
2255 2287
2256 2288 def savecommitmessage(self, text):
2257 2289 fp = self.opener('last-message.txt', 'wb')
2258 2290 try:
2259 2291 fp.write(text)
2260 2292 finally:
2261 2293 fp.close()
2262 2294 return self.pathto(fp.name[len(self.root)+1:])
2263 2295
2264 2296 # used to avoid circular references so destructors work
2265 2297 def aftertrans(files):
2266 2298 renamefiles = [tuple(t) for t in files]
2267 2299 def a():
2268 2300 for src, dest in renamefiles:
2269 2301 util.rename(src, dest)
2270 2302 return a
2271 2303
2272 2304 def undoname(fn):
2273 2305 base, name = os.path.split(fn)
2274 2306 assert name.startswith('journal')
2275 2307 return os.path.join(base, name.replace('journal', 'undo', 1))
2276 2308
2277 2309 def instance(ui, path, create):
2278 2310 return localrepository(ui, util.urllocalpath(path), create)
2279 2311
2280 2312 def islocal(path):
2281 2313 return True
@@ -1,581 +1,580 b''
1 1 $ "$TESTDIR/hghave" system-sh || exit 80
2 2
3 3 commit hooks can see env vars
4 4
5 5 $ hg init a
6 6 $ cd a
7 7 $ echo "[hooks]" > .hg/hgrc
8 8 $ echo 'commit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit' >> .hg/hgrc
9 9 $ echo 'commit.b = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py commit.b' >> .hg/hgrc
10 10 $ echo 'precommit = unset HG_LOCAL HG_NODE HG_TAG; python "$TESTDIR"/printenv.py precommit' >> .hg/hgrc
11 11 $ echo 'pretxncommit = unset HG_LOCAL HG_TAG; python "$TESTDIR"/printenv.py pretxncommit' >> .hg/hgrc
12 12 $ echo 'pretxncommit.tip = hg -q tip' >> .hg/hgrc
13 13 $ echo 'pre-identify = python "$TESTDIR"/printenv.py pre-identify 1' >> .hg/hgrc
14 14 $ echo 'pre-cat = python "$TESTDIR"/printenv.py pre-cat' >> .hg/hgrc
15 15 $ echo 'post-cat = python "$TESTDIR"/printenv.py post-cat' >> .hg/hgrc
16 16 $ echo a > a
17 17 $ hg add a
18 18 $ hg commit -m a
19 19 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
20 20 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
21 21 0:cb9a9f314b8b
22 22 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
23 23 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
24 24
25 25 $ hg clone . ../b
26 26 updating to branch default
27 27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 28 $ cd ../b
29 29
30 30 changegroup hooks can see env vars
31 31
32 32 $ echo '[hooks]' > .hg/hgrc
33 33 $ echo 'prechangegroup = python "$TESTDIR"/printenv.py prechangegroup' >> .hg/hgrc
34 34 $ echo 'changegroup = python "$TESTDIR"/printenv.py changegroup' >> .hg/hgrc
35 35 $ echo 'incoming = python "$TESTDIR"/printenv.py incoming' >> .hg/hgrc
36 36
37 37 pretxncommit and commit hooks can see both parents of merge
38 38
39 39 $ cd ../a
40 40 $ echo b >> a
41 41 $ hg commit -m a1 -d "1 0"
42 42 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
43 43 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
44 44 1:ab228980c14d
45 45 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
46 46 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
47 47 $ hg update -C 0
48 48 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
49 49 $ echo b > b
50 50 $ hg add b
51 51 $ hg commit -m b -d '1 0'
52 52 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
53 53 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
54 54 2:ee9deb46ab31
55 55 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
56 56 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
57 57 created new head
58 58 $ hg merge 1
59 59 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
60 60 (branch merge, don't forget to commit)
61 61 $ hg commit -m merge -d '2 0'
62 62 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
63 63 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
64 64 3:07f3376c1e65
65 65 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
66 66 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
67 67
68 68 test generic hooks
69 69
70 70 $ hg id
71 71 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
72 72 warning: pre-identify hook exited with status 1
73 73 [1]
74 74 $ hg cat b
75 75 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
76 76 b
77 77 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
78 78
79 79 $ cd ../b
80 80 $ hg pull ../a
81 81 pulling from ../a
82 82 searching for changes
83 83 prechangegroup hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
84 84 adding changesets
85 85 adding manifests
86 86 adding file changes
87 87 added 3 changesets with 2 changes to 2 files
88 88 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
89 89 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
90 90 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
91 91 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
92 92 (run 'hg update' to get a working copy)
93 93
94 94 tag hooks can see env vars
95 95
96 96 $ cd ../a
97 97 $ echo 'pretag = python "$TESTDIR"/printenv.py pretag' >> .hg/hgrc
98 98 $ echo 'tag = unset HG_PARENT1 HG_PARENT2; python "$TESTDIR"/printenv.py tag' >> .hg/hgrc
99 99 $ hg tag -d '3 0' a
100 100 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
101 101 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
102 102 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
103 103 4:539e4b31b6dc
104 104 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
105 105 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
106 106 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
107 107 $ hg tag -l la
108 108 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
109 109 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
110 110
111 111 pretag hook can forbid tagging
112 112
113 113 $ echo 'pretag.forbid = python "$TESTDIR"/printenv.py pretag.forbid 1' >> .hg/hgrc
114 114 $ hg tag -d '4 0' fa
115 115 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
116 116 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
117 117 abort: pretag.forbid hook exited with status 1
118 118 [255]
119 119 $ hg tag -l fla
120 120 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
121 121 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
122 122 abort: pretag.forbid hook exited with status 1
123 123 [255]
124 124
125 125 pretxncommit hook can see changeset, can roll back txn, changeset no
126 126 more there after
127 127
128 128 $ echo 'pretxncommit.forbid0 = hg tip -q' >> .hg/hgrc
129 129 $ echo 'pretxncommit.forbid1 = python "$TESTDIR"/printenv.py pretxncommit.forbid 1' >> .hg/hgrc
130 130 $ echo z > z
131 131 $ hg add z
132 132 $ hg -q tip
133 133 4:539e4b31b6dc
134 134 $ hg commit -m 'fail' -d '4 0'
135 135 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
136 136 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
137 137 5:6f611f8018c1
138 138 5:6f611f8018c1
139 139 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
140 140 transaction abort!
141 141 rollback completed
142 142 abort: pretxncommit.forbid1 hook exited with status 1
143 143 [255]
144 144 $ hg -q tip
145 145 4:539e4b31b6dc
146 146
147 147 precommit hook can prevent commit
148 148
149 149 $ echo 'precommit.forbid = python "$TESTDIR"/printenv.py precommit.forbid 1' >> .hg/hgrc
150 150 $ hg commit -m 'fail' -d '4 0'
151 151 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
152 152 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
153 153 abort: precommit.forbid hook exited with status 1
154 154 [255]
155 155 $ hg -q tip
156 156 4:539e4b31b6dc
157 157
158 158 preupdate hook can prevent update
159 159
160 160 $ echo 'preupdate = python "$TESTDIR"/printenv.py preupdate' >> .hg/hgrc
161 161 $ hg update 1
162 162 preupdate hook: HG_PARENT1=ab228980c14d
163 163 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
164 164
165 165 update hook
166 166
167 167 $ echo 'update = python "$TESTDIR"/printenv.py update' >> .hg/hgrc
168 168 $ hg update
169 169 preupdate hook: HG_PARENT1=539e4b31b6dc
170 170 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
171 171 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
172 172
173 173 pushkey hook
174 174
175 175 $ echo 'pushkey = python "$TESTDIR"/printenv.py pushkey' >> .hg/hgrc
176 176 $ cd ../b
177 177 $ hg bookmark -r null foo
178 178 $ hg push -B foo ../a
179 179 pushing to ../a
180 180 searching for changes
181 181 no changes found
182 pushkey hook: HG_KEY=07f3376c1e655977439df2a814e3cc14b27abac2 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 HG_RET=1
183 182 exporting bookmark foo
184 183 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
185 184 $ cd ../a
186 185
187 186 listkeys hook
188 187
189 188 $ echo 'listkeys = python "$TESTDIR"/printenv.py listkeys' >> .hg/hgrc
190 189 $ hg bookmark -r null bar
191 190 $ cd ../b
192 191 $ hg pull -B bar ../a
193 192 pulling from ../a
194 193 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
195 194 no changes found
196 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10': '1', 'publishing': 'True'}
195 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
197 196 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
198 197 importing bookmark bar
199 198 $ cd ../a
200 199
201 200 test that prepushkey can prevent incoming keys
202 201
203 202 $ echo 'prepushkey = python "$TESTDIR"/printenv.py prepushkey.forbid 1' >> .hg/hgrc
204 203 $ cd ../b
205 204 $ hg bookmark -r null baz
206 205 $ hg push -B baz ../a
207 206 pushing to ../a
208 207 searching for changes
209 208 no changes found
210 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10': '1', 'publishing': 'True'}
209 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
211 210 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
212 211 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
213 212 exporting bookmark baz
214 213 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
215 214 abort: prepushkey hook exited with status 1
216 215 [255]
217 216 $ cd ../a
218 217
219 218 test that prelistkeys can prevent listing keys
220 219
221 220 $ echo 'prelistkeys = python "$TESTDIR"/printenv.py prelistkeys.forbid 1' >> .hg/hgrc
222 221 $ hg bookmark -r null quux
223 222 $ cd ../b
224 223 $ hg pull -B quux ../a
225 224 pulling from ../a
226 225 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
227 226 abort: prelistkeys hook exited with status 1
228 227 [255]
229 228 $ cd ../a
230 229
231 230 prechangegroup hook can prevent incoming changes
232 231
233 232 $ cd ../b
234 233 $ hg -q tip
235 234 3:07f3376c1e65
236 235 $ echo '[hooks]' > .hg/hgrc
237 236 $ echo 'prechangegroup.forbid = python "$TESTDIR"/printenv.py prechangegroup.forbid 1' >> .hg/hgrc
238 237 $ hg pull ../a
239 238 pulling from ../a
240 239 searching for changes
241 240 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
242 241 abort: prechangegroup.forbid hook exited with status 1
243 242 [255]
244 243
245 244 pretxnchangegroup hook can see incoming changes, can roll back txn,
246 245 incoming changes no longer there after
247 246
248 247 $ echo '[hooks]' > .hg/hgrc
249 248 $ echo 'pretxnchangegroup.forbid0 = hg tip -q' >> .hg/hgrc
250 249 $ echo 'pretxnchangegroup.forbid1 = python "$TESTDIR"/printenv.py pretxnchangegroup.forbid 1' >> .hg/hgrc
251 250 $ hg pull ../a
252 251 pulling from ../a
253 252 searching for changes
254 253 adding changesets
255 254 adding manifests
256 255 adding file changes
257 256 added 1 changesets with 1 changes to 1 files
258 257 4:539e4b31b6dc
259 258 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_URL=file:$TESTTMP/a
260 259 transaction abort!
261 260 rollback completed
262 261 abort: pretxnchangegroup.forbid1 hook exited with status 1
263 262 [255]
264 263 $ hg -q tip
265 264 3:07f3376c1e65
266 265
267 266 outgoing hooks can see env vars
268 267
269 268 $ rm .hg/hgrc
270 269 $ echo '[hooks]' > ../a/.hg/hgrc
271 270 $ echo 'preoutgoing = python "$TESTDIR"/printenv.py preoutgoing' >> ../a/.hg/hgrc
272 271 $ echo 'outgoing = python "$TESTDIR"/printenv.py outgoing' >> ../a/.hg/hgrc
273 272 $ hg pull ../a
274 273 pulling from ../a
275 274 searching for changes
276 275 preoutgoing hook: HG_SOURCE=pull
277 276 adding changesets
278 277 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
279 278 adding manifests
280 279 adding file changes
281 280 added 1 changesets with 1 changes to 1 files
282 281 (run 'hg update' to get a working copy)
283 282 $ hg rollback
284 283 repository tip rolled back to revision 3 (undo pull)
285 284
286 285 preoutgoing hook can prevent outgoing changes
287 286
288 287 $ echo 'preoutgoing.forbid = python "$TESTDIR"/printenv.py preoutgoing.forbid 1' >> ../a/.hg/hgrc
289 288 $ hg pull ../a
290 289 pulling from ../a
291 290 searching for changes
292 291 preoutgoing hook: HG_SOURCE=pull
293 292 preoutgoing.forbid hook: HG_SOURCE=pull
294 293 abort: preoutgoing.forbid hook exited with status 1
295 294 [255]
296 295
297 296 outgoing hooks work for local clones
298 297
299 298 $ cd ..
300 299 $ echo '[hooks]' > a/.hg/hgrc
301 300 $ echo 'preoutgoing = python "$TESTDIR"/printenv.py preoutgoing' >> a/.hg/hgrc
302 301 $ echo 'outgoing = python "$TESTDIR"/printenv.py outgoing' >> a/.hg/hgrc
303 302 $ hg clone a c
304 303 preoutgoing hook: HG_SOURCE=clone
305 304 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
306 305 updating to branch default
307 306 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
308 307 $ rm -rf c
309 308
310 309 preoutgoing hook can prevent outgoing changes for local clones
311 310
312 311 $ echo 'preoutgoing.forbid = python "$TESTDIR"/printenv.py preoutgoing.forbid 1' >> a/.hg/hgrc
313 312 $ hg clone a zzz
314 313 preoutgoing hook: HG_SOURCE=clone
315 314 preoutgoing.forbid hook: HG_SOURCE=clone
316 315 abort: preoutgoing.forbid hook exited with status 1
317 316 [255]
318 317 $ cd b
319 318
320 319 $ cat > hooktests.py <<EOF
321 320 > from mercurial import util
322 321 >
323 322 > uncallable = 0
324 323 >
325 324 > def printargs(args):
326 325 > args.pop('ui', None)
327 326 > args.pop('repo', None)
328 327 > a = list(args.items())
329 328 > a.sort()
330 329 > print 'hook args:'
331 330 > for k, v in a:
332 331 > print ' ', k, v
333 332 >
334 333 > def passhook(**args):
335 334 > printargs(args)
336 335 >
337 336 > def failhook(**args):
338 337 > printargs(args)
339 338 > return True
340 339 >
341 340 > class LocalException(Exception):
342 341 > pass
343 342 >
344 343 > def raisehook(**args):
345 344 > raise LocalException('exception from hook')
346 345 >
347 346 > def aborthook(**args):
348 347 > raise util.Abort('raise abort from hook')
349 348 >
350 349 > def brokenhook(**args):
351 350 > return 1 + {}
352 351 >
353 352 > def verbosehook(ui, **args):
354 353 > ui.note('verbose output from hook\n')
355 354 >
356 355 > def printtags(ui, repo, **args):
357 356 > print repo.tags().keys()
358 357 >
359 358 > class container:
360 359 > unreachable = 1
361 360 > EOF
362 361
363 362 test python hooks
364 363
365 364 $ PYTHONPATH="`pwd`:$PYTHONPATH"
366 365 $ export PYTHONPATH
367 366
368 367 $ echo '[hooks]' > ../a/.hg/hgrc
369 368 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
370 369 $ hg pull ../a 2>&1 | grep 'raised an exception'
371 370 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
372 371
373 372 $ echo '[hooks]' > ../a/.hg/hgrc
374 373 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
375 374 $ hg pull ../a 2>&1 | grep 'raised an exception'
376 375 error: preoutgoing.raise hook raised an exception: exception from hook
377 376
378 377 $ echo '[hooks]' > ../a/.hg/hgrc
379 378 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
380 379 $ hg pull ../a
381 380 pulling from ../a
382 381 searching for changes
383 382 error: preoutgoing.abort hook failed: raise abort from hook
384 383 abort: raise abort from hook
385 384 [255]
386 385
387 386 $ echo '[hooks]' > ../a/.hg/hgrc
388 387 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
389 388 $ hg pull ../a
390 389 pulling from ../a
391 390 searching for changes
392 391 hook args:
393 392 hooktype preoutgoing
394 393 source pull
395 394 abort: preoutgoing.fail hook failed
396 395 [255]
397 396
398 397 $ echo '[hooks]' > ../a/.hg/hgrc
399 398 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
400 399 $ hg pull ../a
401 400 pulling from ../a
402 401 searching for changes
403 402 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
404 403 [255]
405 404
406 405 $ echo '[hooks]' > ../a/.hg/hgrc
407 406 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
408 407 $ hg pull ../a
409 408 pulling from ../a
410 409 searching for changes
411 410 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
412 411 [255]
413 412
414 413 $ echo '[hooks]' > ../a/.hg/hgrc
415 414 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
416 415 $ hg pull ../a
417 416 pulling from ../a
418 417 searching for changes
419 418 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
420 419 [255]
421 420
422 421 $ echo '[hooks]' > ../a/.hg/hgrc
423 422 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
424 423 $ hg pull ../a
425 424 pulling from ../a
426 425 searching for changes
427 426 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
428 427 [255]
429 428
430 429 $ echo '[hooks]' > ../a/.hg/hgrc
431 430 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
432 431 $ hg pull ../a
433 432 pulling from ../a
434 433 searching for changes
435 434 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
436 435 [255]
437 436
438 437 $ echo '[hooks]' > ../a/.hg/hgrc
439 438 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
440 439 $ hg pull ../a
441 440 pulling from ../a
442 441 searching for changes
443 442 hook args:
444 443 hooktype preoutgoing
445 444 source pull
446 445 adding changesets
447 446 adding manifests
448 447 adding file changes
449 448 added 1 changesets with 1 changes to 1 files
450 449 (run 'hg update' to get a working copy)
451 450
452 451 make sure --traceback works
453 452
454 453 $ echo '[hooks]' > .hg/hgrc
455 454 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
456 455
457 456 $ echo aa > a
458 457 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
459 458 Traceback (most recent call last):
460 459
461 460 $ cd ..
462 461 $ hg init c
463 462 $ cd c
464 463
465 464 $ cat > hookext.py <<EOF
466 465 > def autohook(**args):
467 466 > print "Automatically installed hook"
468 467 >
469 468 > def reposetup(ui, repo):
470 469 > repo.ui.setconfig("hooks", "commit.auto", autohook)
471 470 > EOF
472 471 $ echo '[extensions]' >> .hg/hgrc
473 472 $ echo 'hookext = hookext.py' >> .hg/hgrc
474 473
475 474 $ touch foo
476 475 $ hg add foo
477 476 $ hg ci -d '0 0' -m 'add foo'
478 477 Automatically installed hook
479 478 $ echo >> foo
480 479 $ hg ci --debug -d '0 0' -m 'change foo'
481 480 foo
482 481 calling hook commit.auto: <function autohook at *> (glob)
483 482 Automatically installed hook
484 483 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
485 484
486 485 $ hg showconfig hooks
487 486 hooks.commit.auto=<function autohook at *> (glob)
488 487
489 488 test python hook configured with python:[file]:[hook] syntax
490 489
491 490 $ cd ..
492 491 $ mkdir d
493 492 $ cd d
494 493 $ hg init repo
495 494 $ mkdir hooks
496 495
497 496 $ cd hooks
498 497 $ cat > testhooks.py <<EOF
499 498 > def testhook(**args):
500 499 > print 'hook works'
501 500 > EOF
502 501 $ echo '[hooks]' > ../repo/.hg/hgrc
503 502 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
504 503
505 504 $ cd ../repo
506 505 $ hg commit -d '0 0'
507 506 hook works
508 507 nothing changed
509 508 [1]
510 509
511 510 $ cd ../../b
512 511
513 512 make sure --traceback works on hook import failure
514 513
515 514 $ cat > importfail.py <<EOF
516 515 > import somebogusmodule
517 516 > # dereference something in the module to force demandimport to load it
518 517 > somebogusmodule.whatever
519 518 > EOF
520 519
521 520 $ echo '[hooks]' > .hg/hgrc
522 521 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
523 522
524 523 $ echo a >> a
525 524 $ hg --traceback commit -ma 2>&1 | egrep '^(exception|Traceback|ImportError)'
526 525 exception from first failed import attempt:
527 526 Traceback (most recent call last):
528 527 ImportError: No module named somebogusmodule
529 528 exception from second failed import attempt:
530 529 Traceback (most recent call last):
531 530 ImportError: No module named hgext_importfail
532 531 Traceback (most recent call last):
533 532
534 533 Issue1827: Hooks Update & Commit not completely post operation
535 534
536 535 commit and update hooks should run after command completion
537 536
538 537 $ echo '[hooks]' > .hg/hgrc
539 538 $ echo 'commit = hg id' >> .hg/hgrc
540 539 $ echo 'update = hg id' >> .hg/hgrc
541 540 $ echo bb > a
542 541 $ hg ci -ma
543 542 223eafe2750c tip
544 543 $ hg up 0
545 544 cb9a9f314b8b
546 545 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
547 546
548 547 make sure --verbose (and --quiet/--debug etc.) are propogated to the local ui
549 548 that is passed to pre/post hooks
550 549
551 550 $ echo '[hooks]' > .hg/hgrc
552 551 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
553 552 $ hg id
554 553 cb9a9f314b8b
555 554 $ hg id --verbose
556 555 calling hook pre-identify: hooktests.verbosehook
557 556 verbose output from hook
558 557 cb9a9f314b8b
559 558
560 559 Ensure hooks can be prioritized
561 560
562 561 $ echo '[hooks]' > .hg/hgrc
563 562 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
564 563 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
565 564 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
566 565 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
567 566 $ hg id --verbose
568 567 calling hook pre-identify.b: hooktests.verbosehook
569 568 verbose output from hook
570 569 calling hook pre-identify.a: hooktests.verbosehook
571 570 verbose output from hook
572 571 calling hook pre-identify.c: hooktests.verbosehook
573 572 verbose output from hook
574 573 cb9a9f314b8b
575 574
576 575 new tags must be visible in pretxncommit (issue3210)
577 576
578 577 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
579 578 $ hg tag -f foo
580 579 ['a', 'foo', 'tip']
581 580
@@ -1,1016 +1,1018 b''
1 1 $ cat >> $HGRCPATH <<EOF
2 2 > [extensions]
3 3 > graphlog=
4 4 > EOF
5 5 $ alias hgph='hg log -G --template "{rev} {phase} {desc} - {node|short}\n"'
6 6
7 7 $ mkcommit() {
8 8 > echo "$1" > "$1"
9 9 > hg add "$1"
10 10 > message="$1"
11 11 > shift
12 12 > hg ci -m "$message" $*
13 13 > }
14 14
15 15 $ hg init alpha
16 16 $ cd alpha
17 17 $ mkcommit a-A
18 18 $ mkcommit a-B
19 19 $ mkcommit a-C
20 20 $ mkcommit a-D
21 21 $ hgph
22 22 @ 3 draft a-D - b555f63b6063
23 23 |
24 24 o 2 draft a-C - 54acac6f23ab
25 25 |
26 26 o 1 draft a-B - 548a3d25dbf0
27 27 |
28 28 o 0 draft a-A - 054250a37db4
29 29
30 30
31 31 $ hg init ../beta
32 32 $ hg push -r 1 ../beta
33 33 pushing to ../beta
34 34 searching for changes
35 35 adding changesets
36 36 adding manifests
37 37 adding file changes
38 38 added 2 changesets with 2 changes to 2 files
39 39 $ hgph
40 40 @ 3 draft a-D - b555f63b6063
41 41 |
42 42 o 2 draft a-C - 54acac6f23ab
43 43 |
44 44 o 1 public a-B - 548a3d25dbf0
45 45 |
46 46 o 0 public a-A - 054250a37db4
47 47
48 48
49 49 $ cd ../beta
50 50 $ hgph
51 51 o 1 public a-B - 548a3d25dbf0
52 52 |
53 53 o 0 public a-A - 054250a37db4
54 54
55 55 $ hg up -q
56 56 $ mkcommit b-A
57 57 $ hgph
58 58 @ 2 draft b-A - f54f1bb90ff3
59 59 |
60 60 o 1 public a-B - 548a3d25dbf0
61 61 |
62 62 o 0 public a-A - 054250a37db4
63 63
64 64 $ hg pull ../alpha
65 65 pulling from ../alpha
66 66 searching for changes
67 67 adding changesets
68 68 adding manifests
69 69 adding file changes
70 70 added 2 changesets with 2 changes to 2 files (+1 heads)
71 71 (run 'hg heads' to see heads, 'hg merge' to merge)
72 72 $ hgph
73 73 o 4 public a-D - b555f63b6063
74 74 |
75 75 o 3 public a-C - 54acac6f23ab
76 76 |
77 77 | @ 2 draft b-A - f54f1bb90ff3
78 78 |/
79 79 o 1 public a-B - 548a3d25dbf0
80 80 |
81 81 o 0 public a-A - 054250a37db4
82 82
83 83
84 84 pull did not updated ../alpha state.
85 85 push from alpha to beta should update phase even if nothing is transfered
86 86
87 87 $ cd ../alpha
88 88 $ hgph # not updated by remote pull
89 89 @ 3 draft a-D - b555f63b6063
90 90 |
91 91 o 2 draft a-C - 54acac6f23ab
92 92 |
93 93 o 1 public a-B - 548a3d25dbf0
94 94 |
95 95 o 0 public a-A - 054250a37db4
96 96
97 97 $ hg push ../beta
98 98 pushing to ../beta
99 99 searching for changes
100 100 no changes found
101 101 $ hgph
102 102 @ 3 public a-D - b555f63b6063
103 103 |
104 104 o 2 public a-C - 54acac6f23ab
105 105 |
106 106 o 1 public a-B - 548a3d25dbf0
107 107 |
108 108 o 0 public a-A - 054250a37db4
109 109
110 110
111 111 update must update phase of common changeset too
112 112
113 113 $ hg pull ../beta # getting b-A
114 114 pulling from ../beta
115 115 searching for changes
116 116 adding changesets
117 117 adding manifests
118 118 adding file changes
119 119 added 1 changesets with 1 changes to 1 files (+1 heads)
120 120 (run 'hg heads' to see heads, 'hg merge' to merge)
121 121
122 122 $ cd ../beta
123 123 $ hgph # not updated by remote pull
124 124 o 4 public a-D - b555f63b6063
125 125 |
126 126 o 3 public a-C - 54acac6f23ab
127 127 |
128 128 | @ 2 draft b-A - f54f1bb90ff3
129 129 |/
130 130 o 1 public a-B - 548a3d25dbf0
131 131 |
132 132 o 0 public a-A - 054250a37db4
133 133
134 134 $ hg pull ../alpha
135 135 pulling from ../alpha
136 136 searching for changes
137 137 no changes found
138 138 $ hgph
139 139 o 4 public a-D - b555f63b6063
140 140 |
141 141 o 3 public a-C - 54acac6f23ab
142 142 |
143 143 | @ 2 public b-A - f54f1bb90ff3
144 144 |/
145 145 o 1 public a-B - 548a3d25dbf0
146 146 |
147 147 o 0 public a-A - 054250a37db4
148 148
149 149
150 150 Publish configuration option
151 151 ----------------------------
152 152
153 153 Pull
154 154 ````
155 155
156 156 changegroup are added without phase movement
157 157
158 158 $ hg bundle -a ../base.bundle
159 159 5 changesets found
160 160 $ cd ..
161 161 $ hg init mu
162 162 $ cd mu
163 163 $ cat > .hg/hgrc << EOF
164 164 > [phases]
165 165 > publish=0
166 166 > EOF
167 167 $ hg unbundle ../base.bundle
168 168 adding changesets
169 169 adding manifests
170 170 adding file changes
171 171 added 5 changesets with 5 changes to 5 files (+1 heads)
172 172 (run 'hg heads' to see heads, 'hg merge' to merge)
173 173 $ hgph
174 174 o 4 draft a-D - b555f63b6063
175 175 |
176 176 o 3 draft a-C - 54acac6f23ab
177 177 |
178 178 | o 2 draft b-A - f54f1bb90ff3
179 179 |/
180 180 o 1 draft a-B - 548a3d25dbf0
181 181 |
182 182 o 0 draft a-A - 054250a37db4
183 183
184 184 $ cd ..
185 185
186 186 Pulling from publish=False to publish=False does not move boundary.
187 187
188 188 $ hg init nu
189 189 $ cd nu
190 190 $ cat > .hg/hgrc << EOF
191 191 > [phases]
192 192 > publish=0
193 193 > EOF
194 194 $ hg pull ../mu -r 54acac6f23ab
195 195 pulling from ../mu
196 196 adding changesets
197 197 adding manifests
198 198 adding file changes
199 199 added 3 changesets with 3 changes to 3 files
200 200 (run 'hg update' to get a working copy)
201 201 $ hgph
202 202 o 2 draft a-C - 54acac6f23ab
203 203 |
204 204 o 1 draft a-B - 548a3d25dbf0
205 205 |
206 206 o 0 draft a-A - 054250a37db4
207 207
208 208
209 209 Even for common
210 210
211 211 $ hg pull ../mu -r f54f1bb90ff3
212 212 pulling from ../mu
213 213 searching for changes
214 214 adding changesets
215 215 adding manifests
216 216 adding file changes
217 217 added 1 changesets with 1 changes to 1 files (+1 heads)
218 218 (run 'hg heads' to see heads, 'hg merge' to merge)
219 219 $ hgph
220 220 o 3 draft b-A - f54f1bb90ff3
221 221 |
222 222 | o 2 draft a-C - 54acac6f23ab
223 223 |/
224 224 o 1 draft a-B - 548a3d25dbf0
225 225 |
226 226 o 0 draft a-A - 054250a37db4
227 227
228 228
229 229
230 230 Pulling from Publish=True to Publish=False move boundary in common set.
231 231 we are in nu
232 232
233 233 $ hg pull ../alpha -r b555f63b6063
234 234 pulling from ../alpha
235 235 searching for changes
236 236 adding changesets
237 237 adding manifests
238 238 adding file changes
239 239 added 1 changesets with 1 changes to 1 files
240 240 (run 'hg update' to get a working copy)
241 $ hgph
241 $ hgph # f54f1bb90ff3 stay draft, not ancestor of -r
242 242 o 4 public a-D - b555f63b6063
243 243 |
244 | o 3 public b-A - f54f1bb90ff3
244 | o 3 draft b-A - f54f1bb90ff3
245 245 | |
246 246 o | 2 public a-C - 54acac6f23ab
247 247 |/
248 248 o 1 public a-B - 548a3d25dbf0
249 249 |
250 250 o 0 public a-A - 054250a37db4
251 251
252 252
253 253 pulling from Publish=False to publish=False with some public
254 254
255 255 $ hg up -q f54f1bb90ff3
256 256 $ mkcommit n-A
257 257 $ mkcommit n-B
258 258 $ hgph
259 259 @ 6 draft n-B - 145e75495359
260 260 |
261 261 o 5 draft n-A - d6bcb4f74035
262 262 |
263 263 | o 4 public a-D - b555f63b6063
264 264 | |
265 o | 3 public b-A - f54f1bb90ff3
265 o | 3 draft b-A - f54f1bb90ff3
266 266 | |
267 267 | o 2 public a-C - 54acac6f23ab
268 268 |/
269 269 o 1 public a-B - 548a3d25dbf0
270 270 |
271 271 o 0 public a-A - 054250a37db4
272 272
273 273 $ cd ../mu
274 274 $ hg pull ../nu
275 275 pulling from ../nu
276 276 searching for changes
277 277 adding changesets
278 278 adding manifests
279 279 adding file changes
280 280 added 2 changesets with 2 changes to 2 files
281 281 (run 'hg update' to get a working copy)
282 282 $ hgph
283 283 o 6 draft n-B - 145e75495359
284 284 |
285 285 o 5 draft n-A - d6bcb4f74035
286 286 |
287 287 | o 4 public a-D - b555f63b6063
288 288 | |
289 289 | o 3 public a-C - 54acac6f23ab
290 290 | |
291 o | 2 public b-A - f54f1bb90ff3
291 o | 2 draft b-A - f54f1bb90ff3
292 292 |/
293 293 o 1 public a-B - 548a3d25dbf0
294 294 |
295 295 o 0 public a-A - 054250a37db4
296 296
297 297 $ cd ..
298 298
299 299 pulling into publish=True
300 300
301 301 $ cd alpha
302 302 $ hgph
303 303 o 4 public b-A - f54f1bb90ff3
304 304 |
305 305 | @ 3 public a-D - b555f63b6063
306 306 | |
307 307 | o 2 public a-C - 54acac6f23ab
308 308 |/
309 309 o 1 public a-B - 548a3d25dbf0
310 310 |
311 311 o 0 public a-A - 054250a37db4
312 312
313 313 $ hg pull ../mu
314 314 pulling from ../mu
315 315 searching for changes
316 316 adding changesets
317 317 adding manifests
318 318 adding file changes
319 319 added 2 changesets with 2 changes to 2 files
320 320 (run 'hg update' to get a working copy)
321 321 $ hgph
322 322 o 6 draft n-B - 145e75495359
323 323 |
324 324 o 5 draft n-A - d6bcb4f74035
325 325 |
326 326 o 4 public b-A - f54f1bb90ff3
327 327 |
328 328 | @ 3 public a-D - b555f63b6063
329 329 | |
330 330 | o 2 public a-C - 54acac6f23ab
331 331 |/
332 332 o 1 public a-B - 548a3d25dbf0
333 333 |
334 334 o 0 public a-A - 054250a37db4
335 335
336 336 $ cd ..
337 337
338 338 pulling back into original repo
339 339
340 340 $ cd nu
341 341 $ hg pull ../alpha
342 342 pulling from ../alpha
343 343 searching for changes
344 344 no changes found
345 345 $ hgph
346 346 @ 6 public n-B - 145e75495359
347 347 |
348 348 o 5 public n-A - d6bcb4f74035
349 349 |
350 350 | o 4 public a-D - b555f63b6063
351 351 | |
352 352 o | 3 public b-A - f54f1bb90ff3
353 353 | |
354 354 | o 2 public a-C - 54acac6f23ab
355 355 |/
356 356 o 1 public a-B - 548a3d25dbf0
357 357 |
358 358 o 0 public a-A - 054250a37db4
359 359
360 360
361 361 Push
362 362 ````
363 363
364 364 (inserted)
365 365
366 366 Test that phase are pushed even when they are nothing to pus
367 367 (this might be tested later bu are very convenient to not alter too much test)
368 368
369 369 Push back to alpha
370 370
371 371 $ hg push ../alpha # from nu
372 372 pushing to ../alpha
373 373 searching for changes
374 374 no changes found
375 375 $ cd ..
376 376 $ cd alpha
377 377 $ hgph
378 378 o 6 public n-B - 145e75495359
379 379 |
380 380 o 5 public n-A - d6bcb4f74035
381 381 |
382 382 o 4 public b-A - f54f1bb90ff3
383 383 |
384 384 | @ 3 public a-D - b555f63b6063
385 385 | |
386 386 | o 2 public a-C - 54acac6f23ab
387 387 |/
388 388 o 1 public a-B - 548a3d25dbf0
389 389 |
390 390 o 0 public a-A - 054250a37db4
391 391
392 392
393 393 (end insertion)
394 394
395 395
396 396 initial setup
397 397
398 398 $ hg glog # of alpha
399 399 o changeset: 6:145e75495359
400 400 | tag: tip
401 401 | user: test
402 402 | date: Thu Jan 01 00:00:00 1970 +0000
403 403 | summary: n-B
404 404 |
405 405 o changeset: 5:d6bcb4f74035
406 406 | user: test
407 407 | date: Thu Jan 01 00:00:00 1970 +0000
408 408 | summary: n-A
409 409 |
410 410 o changeset: 4:f54f1bb90ff3
411 411 | parent: 1:548a3d25dbf0
412 412 | user: test
413 413 | date: Thu Jan 01 00:00:00 1970 +0000
414 414 | summary: b-A
415 415 |
416 416 | @ changeset: 3:b555f63b6063
417 417 | | user: test
418 418 | | date: Thu Jan 01 00:00:00 1970 +0000
419 419 | | summary: a-D
420 420 | |
421 421 | o changeset: 2:54acac6f23ab
422 422 |/ user: test
423 423 | date: Thu Jan 01 00:00:00 1970 +0000
424 424 | summary: a-C
425 425 |
426 426 o changeset: 1:548a3d25dbf0
427 427 | user: test
428 428 | date: Thu Jan 01 00:00:00 1970 +0000
429 429 | summary: a-B
430 430 |
431 431 o changeset: 0:054250a37db4
432 432 user: test
433 433 date: Thu Jan 01 00:00:00 1970 +0000
434 434 summary: a-A
435 435
436 436 $ mkcommit a-E
437 437 $ mkcommit a-F
438 438 $ mkcommit a-G
439 439 $ hg up d6bcb4f74035 -q
440 440 $ mkcommit a-H
441 441 created new head
442 442 $ hgph
443 443 @ 10 draft a-H - 967b449fbc94
444 444 |
445 445 | o 9 draft a-G - 3e27b6f1eee1
446 446 | |
447 447 | o 8 draft a-F - b740e3e5c05d
448 448 | |
449 449 | o 7 draft a-E - e9f537e46dea
450 450 | |
451 451 +---o 6 public n-B - 145e75495359
452 452 | |
453 453 o | 5 public n-A - d6bcb4f74035
454 454 | |
455 455 o | 4 public b-A - f54f1bb90ff3
456 456 | |
457 457 | o 3 public a-D - b555f63b6063
458 458 | |
459 459 | o 2 public a-C - 54acac6f23ab
460 460 |/
461 461 o 1 public a-B - 548a3d25dbf0
462 462 |
463 463 o 0 public a-A - 054250a37db4
464 464
465 465
466 466 Pushing to Publish=False (unknown changeset)
467 467
468 468 $ hg push ../mu -r b740e3e5c05d # a-F
469 469 pushing to ../mu
470 470 searching for changes
471 471 adding changesets
472 472 adding manifests
473 473 adding file changes
474 474 added 2 changesets with 2 changes to 2 files
475 475 $ hgph
476 476 @ 10 draft a-H - 967b449fbc94
477 477 |
478 478 | o 9 draft a-G - 3e27b6f1eee1
479 479 | |
480 480 | o 8 draft a-F - b740e3e5c05d
481 481 | |
482 482 | o 7 draft a-E - e9f537e46dea
483 483 | |
484 484 +---o 6 public n-B - 145e75495359
485 485 | |
486 486 o | 5 public n-A - d6bcb4f74035
487 487 | |
488 488 o | 4 public b-A - f54f1bb90ff3
489 489 | |
490 490 | o 3 public a-D - b555f63b6063
491 491 | |
492 492 | o 2 public a-C - 54acac6f23ab
493 493 |/
494 494 o 1 public a-B - 548a3d25dbf0
495 495 |
496 496 o 0 public a-A - 054250a37db4
497 497
498 498
499 499 $ cd ../mu
500 $ hgph # d6bcb4f74035 and 145e75495359 changed because common is too smart
500 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
501 > # not ancestor of -r
501 502 o 8 draft a-F - b740e3e5c05d
502 503 |
503 504 o 7 draft a-E - e9f537e46dea
504 505 |
505 | o 6 public n-B - 145e75495359
506 | o 6 draft n-B - 145e75495359
506 507 | |
507 | o 5 public n-A - d6bcb4f74035
508 | o 5 draft n-A - d6bcb4f74035
508 509 | |
509 510 o | 4 public a-D - b555f63b6063
510 511 | |
511 512 o | 3 public a-C - 54acac6f23ab
512 513 | |
513 | o 2 public b-A - f54f1bb90ff3
514 | o 2 draft b-A - f54f1bb90ff3
514 515 |/
515 516 o 1 public a-B - 548a3d25dbf0
516 517 |
517 518 o 0 public a-A - 054250a37db4
518 519
519 520
520 521 Pushing to Publish=True (unknown changeset)
521 522
522 523 $ hg push ../beta -r b740e3e5c05d
523 524 pushing to ../beta
524 525 searching for changes
525 526 adding changesets
526 527 adding manifests
527 528 adding file changes
528 529 added 2 changesets with 2 changes to 2 files
529 $ hgph # again d6bcb4f74035 and 145e75495359 changed because common is too smart
530 $ hgph # again f54f1bb90ff3, d6bcb4f74035 and 145e75495359 stay draft,
531 > # not ancestor of -r
530 532 o 8 public a-F - b740e3e5c05d
531 533 |
532 534 o 7 public a-E - e9f537e46dea
533 535 |
534 | o 6 public n-B - 145e75495359
536 | o 6 draft n-B - 145e75495359
535 537 | |
536 | o 5 public n-A - d6bcb4f74035
538 | o 5 draft n-A - d6bcb4f74035
537 539 | |
538 540 o | 4 public a-D - b555f63b6063
539 541 | |
540 542 o | 3 public a-C - 54acac6f23ab
541 543 | |
542 | o 2 public b-A - f54f1bb90ff3
544 | o 2 draft b-A - f54f1bb90ff3
543 545 |/
544 546 o 1 public a-B - 548a3d25dbf0
545 547 |
546 548 o 0 public a-A - 054250a37db4
547 549
548 550
549 551 Pushing to Publish=True (common changeset)
550 552
551 553 $ cd ../beta
552 554 $ hg push ../alpha
553 555 pushing to ../alpha
554 556 searching for changes
555 557 no changes found
556 558 $ hgph
557 559 o 6 public a-F - b740e3e5c05d
558 560 |
559 561 o 5 public a-E - e9f537e46dea
560 562 |
561 563 o 4 public a-D - b555f63b6063
562 564 |
563 565 o 3 public a-C - 54acac6f23ab
564 566 |
565 567 | @ 2 public b-A - f54f1bb90ff3
566 568 |/
567 569 o 1 public a-B - 548a3d25dbf0
568 570 |
569 571 o 0 public a-A - 054250a37db4
570 572
571 573 $ cd ../alpha
572 $ hgph # e9f537e46dea and b740e3e5c05d should have been sync to 0
574 $ hgph
573 575 @ 10 draft a-H - 967b449fbc94
574 576 |
575 577 | o 9 draft a-G - 3e27b6f1eee1
576 578 | |
577 579 | o 8 public a-F - b740e3e5c05d
578 580 | |
579 581 | o 7 public a-E - e9f537e46dea
580 582 | |
581 583 +---o 6 public n-B - 145e75495359
582 584 | |
583 585 o | 5 public n-A - d6bcb4f74035
584 586 | |
585 587 o | 4 public b-A - f54f1bb90ff3
586 588 | |
587 589 | o 3 public a-D - b555f63b6063
588 590 | |
589 591 | o 2 public a-C - 54acac6f23ab
590 592 |/
591 593 o 1 public a-B - 548a3d25dbf0
592 594 |
593 595 o 0 public a-A - 054250a37db4
594 596
595 597
596 598 Pushing to Publish=False (common changeset that change phase + unknown one)
597 599
598 600 $ hg push ../mu -r 967b449fbc94 -f
599 601 pushing to ../mu
600 602 searching for changes
601 603 adding changesets
602 604 adding manifests
603 605 adding file changes
604 606 added 1 changesets with 1 changes to 1 files (+1 heads)
605 607 $ hgph
606 608 @ 10 draft a-H - 967b449fbc94
607 609 |
608 610 | o 9 draft a-G - 3e27b6f1eee1
609 611 | |
610 612 | o 8 public a-F - b740e3e5c05d
611 613 | |
612 614 | o 7 public a-E - e9f537e46dea
613 615 | |
614 616 +---o 6 public n-B - 145e75495359
615 617 | |
616 618 o | 5 public n-A - d6bcb4f74035
617 619 | |
618 620 o | 4 public b-A - f54f1bb90ff3
619 621 | |
620 622 | o 3 public a-D - b555f63b6063
621 623 | |
622 624 | o 2 public a-C - 54acac6f23ab
623 625 |/
624 626 o 1 public a-B - 548a3d25dbf0
625 627 |
626 628 o 0 public a-A - 054250a37db4
627 629
628 630 $ cd ../mu
629 631 $ hgph # d6bcb4f74035 should have changed phase
630 > # again d6bcb4f74035 and 145e75495359 changed because common was too smart
632 > # 145e75495359 is still draft. not ancestor of -r
631 633 o 9 draft a-H - 967b449fbc94
632 634 |
633 635 | o 8 public a-F - b740e3e5c05d
634 636 | |
635 637 | o 7 public a-E - e9f537e46dea
636 638 | |
637 +---o 6 public n-B - 145e75495359
639 +---o 6 draft n-B - 145e75495359
638 640 | |
639 641 o | 5 public n-A - d6bcb4f74035
640 642 | |
641 643 | o 4 public a-D - b555f63b6063
642 644 | |
643 645 | o 3 public a-C - 54acac6f23ab
644 646 | |
645 647 o | 2 public b-A - f54f1bb90ff3
646 648 |/
647 649 o 1 public a-B - 548a3d25dbf0
648 650 |
649 651 o 0 public a-A - 054250a37db4
650 652
651 653
652 654
653 655 Pushing to Publish=True (common changeset from publish=False)
654 656
655 657 (in mu)
656 658 $ hg push ../alpha
657 659 pushing to ../alpha
658 660 searching for changes
659 661 no changes found
660 662 $ hgph
661 663 o 9 public a-H - 967b449fbc94
662 664 |
663 665 | o 8 public a-F - b740e3e5c05d
664 666 | |
665 667 | o 7 public a-E - e9f537e46dea
666 668 | |
667 669 +---o 6 public n-B - 145e75495359
668 670 | |
669 671 o | 5 public n-A - d6bcb4f74035
670 672 | |
671 673 | o 4 public a-D - b555f63b6063
672 674 | |
673 675 | o 3 public a-C - 54acac6f23ab
674 676 | |
675 677 o | 2 public b-A - f54f1bb90ff3
676 678 |/
677 679 o 1 public a-B - 548a3d25dbf0
678 680 |
679 681 o 0 public a-A - 054250a37db4
680 682
681 683 $ hgph -R ../alpha # a-H should have been synced to 0
682 684 @ 10 public a-H - 967b449fbc94
683 685 |
684 686 | o 9 draft a-G - 3e27b6f1eee1
685 687 | |
686 688 | o 8 public a-F - b740e3e5c05d
687 689 | |
688 690 | o 7 public a-E - e9f537e46dea
689 691 | |
690 692 +---o 6 public n-B - 145e75495359
691 693 | |
692 694 o | 5 public n-A - d6bcb4f74035
693 695 | |
694 696 o | 4 public b-A - f54f1bb90ff3
695 697 | |
696 698 | o 3 public a-D - b555f63b6063
697 699 | |
698 700 | o 2 public a-C - 54acac6f23ab
699 701 |/
700 702 o 1 public a-B - 548a3d25dbf0
701 703 |
702 704 o 0 public a-A - 054250a37db4
703 705
704 706
705 707
706 708 Discovery locally secret changeset on a remote repository:
707 709
708 710 - should make it non-secret
709 711
710 712 $ cd ../alpha
711 713 $ mkcommit A-secret --config phases.new-commit=2
712 714 $ hgph
713 715 @ 11 secret A-secret - 435b5d83910c
714 716 |
715 717 o 10 public a-H - 967b449fbc94
716 718 |
717 719 | o 9 draft a-G - 3e27b6f1eee1
718 720 | |
719 721 | o 8 public a-F - b740e3e5c05d
720 722 | |
721 723 | o 7 public a-E - e9f537e46dea
722 724 | |
723 725 +---o 6 public n-B - 145e75495359
724 726 | |
725 727 o | 5 public n-A - d6bcb4f74035
726 728 | |
727 729 o | 4 public b-A - f54f1bb90ff3
728 730 | |
729 731 | o 3 public a-D - b555f63b6063
730 732 | |
731 733 | o 2 public a-C - 54acac6f23ab
732 734 |/
733 735 o 1 public a-B - 548a3d25dbf0
734 736 |
735 737 o 0 public a-A - 054250a37db4
736 738
737 739 $ hg bundle --base 'parents(.)' -r . ../secret-bundle.hg
738 740 1 changesets found
739 741 $ hg -R ../mu unbundle ../secret-bundle.hg
740 742 adding changesets
741 743 adding manifests
742 744 adding file changes
743 745 added 1 changesets with 1 changes to 1 files
744 746 (run 'hg update' to get a working copy)
745 747 $ hgph -R ../mu
746 748 o 10 draft A-secret - 435b5d83910c
747 749 |
748 750 o 9 public a-H - 967b449fbc94
749 751 |
750 752 | o 8 public a-F - b740e3e5c05d
751 753 | |
752 754 | o 7 public a-E - e9f537e46dea
753 755 | |
754 756 +---o 6 public n-B - 145e75495359
755 757 | |
756 758 o | 5 public n-A - d6bcb4f74035
757 759 | |
758 760 | o 4 public a-D - b555f63b6063
759 761 | |
760 762 | o 3 public a-C - 54acac6f23ab
761 763 | |
762 764 o | 2 public b-A - f54f1bb90ff3
763 765 |/
764 766 o 1 public a-B - 548a3d25dbf0
765 767 |
766 768 o 0 public a-A - 054250a37db4
767 769
768 770 $ hg pull ../mu
769 771 pulling from ../mu
770 772 searching for changes
771 773 no changes found
772 774 $ hgph
773 775 @ 11 draft A-secret - 435b5d83910c
774 776 |
775 777 o 10 public a-H - 967b449fbc94
776 778 |
777 779 | o 9 draft a-G - 3e27b6f1eee1
778 780 | |
779 781 | o 8 public a-F - b740e3e5c05d
780 782 | |
781 783 | o 7 public a-E - e9f537e46dea
782 784 | |
783 785 +---o 6 public n-B - 145e75495359
784 786 | |
785 787 o | 5 public n-A - d6bcb4f74035
786 788 | |
787 789 o | 4 public b-A - f54f1bb90ff3
788 790 | |
789 791 | o 3 public a-D - b555f63b6063
790 792 | |
791 793 | o 2 public a-C - 54acac6f23ab
792 794 |/
793 795 o 1 public a-B - 548a3d25dbf0
794 796 |
795 797 o 0 public a-A - 054250a37db4
796 798
797 799
798 800 pushing locally public and draft changesets that are remotely secret should make them appear on the remote side
799 801
800 802 $ hg -R ../mu phase --secret --force 967b449fbc94
801 803 $ hg push -r 435b5d83910c ../mu
802 804 pushing to ../mu
803 805 searching for changes
804 806 adding changesets
805 807 adding manifests
806 808 adding file changes
807 809 added 0 changesets with 0 changes to 2 files
808 810 $ hgph -R ../mu
809 811 o 10 draft A-secret - 435b5d83910c
810 812 |
811 813 o 9 public a-H - 967b449fbc94
812 814 |
813 815 | o 8 public a-F - b740e3e5c05d
814 816 | |
815 817 | o 7 public a-E - e9f537e46dea
816 818 | |
817 819 +---o 6 public n-B - 145e75495359
818 820 | |
819 821 o | 5 public n-A - d6bcb4f74035
820 822 | |
821 823 | o 4 public a-D - b555f63b6063
822 824 | |
823 825 | o 3 public a-C - 54acac6f23ab
824 826 | |
825 827 o | 2 public b-A - f54f1bb90ff3
826 828 |/
827 829 o 1 public a-B - 548a3d25dbf0
828 830 |
829 831 o 0 public a-A - 054250a37db4
830 832
831 833
832 834 pull new changeset with common draft locally
833 835
834 836 $ hg up -q 967b449fbc94 # create a new root for draft
835 837 $ mkcommit 'alpha-more'
836 838 created new head
837 839 $ hg push -fr . ../mu
838 840 pushing to ../mu
839 841 searching for changes
840 842 adding changesets
841 843 adding manifests
842 844 adding file changes
843 845 added 1 changesets with 1 changes to 1 files (+1 heads)
844 846 $ cd ../mu
845 847 $ hg phase --secret --force 1c5cfd894796
846 848 $ hg up -q 435b5d83910c
847 849 $ mkcommit 'mu-more'
848 850 $ cd ../alpha
849 851 $ hg pull ../mu
850 852 pulling from ../mu
851 853 searching for changes
852 854 adding changesets
853 855 adding manifests
854 856 adding file changes
855 857 added 1 changesets with 1 changes to 1 files
856 858 (run 'hg update' to get a working copy)
857 859 $ hgph
858 860 o 13 draft mu-more - 5237fb433fc8
859 861 |
860 862 | @ 12 draft alpha-more - 1c5cfd894796
861 863 | |
862 864 o | 11 draft A-secret - 435b5d83910c
863 865 |/
864 866 o 10 public a-H - 967b449fbc94
865 867 |
866 868 | o 9 draft a-G - 3e27b6f1eee1
867 869 | |
868 870 | o 8 public a-F - b740e3e5c05d
869 871 | |
870 872 | o 7 public a-E - e9f537e46dea
871 873 | |
872 874 +---o 6 public n-B - 145e75495359
873 875 | |
874 876 o | 5 public n-A - d6bcb4f74035
875 877 | |
876 878 o | 4 public b-A - f54f1bb90ff3
877 879 | |
878 880 | o 3 public a-D - b555f63b6063
879 881 | |
880 882 | o 2 public a-C - 54acac6f23ab
881 883 |/
882 884 o 1 public a-B - 548a3d25dbf0
883 885 |
884 886 o 0 public a-A - 054250a37db4
885 887
886 888
887 889 Test that secret changesets are properly ignored on the remote even when existing locally
888 890
889 891 $ cd ..
890 892 $ hg clone -qU -r b555f63b6063 -r f54f1bb90ff3 beta gamma
891 893
892 894 # pathological case are
893 895 #
894 896 # * secret remotely
895 897 # * known locally
896 898 # * repo have uncommon changeset
897 899
898 900 $ hg -R beta phase --secret --force f54f1bb90ff3
899 901 $ hg -R gamma phase --draft --force f54f1bb90ff3
900 902
901 903 $ cd gamma
902 904 $ hg pull ../beta
903 905 pulling from ../beta
904 906 searching for changes
905 907 adding changesets
906 908 adding manifests
907 909 adding file changes
908 910 added 2 changesets with 2 changes to 2 files
909 911 (run 'hg update' to get a working copy)
910 912 $ hg phase f54f1bb90ff3
911 913 2: draft
912 914
913 915 same over the wire
914 916
915 917 $ cd ../beta
916 918 $ hg serve -p $HGPORT -d --pid-file=../beta.pid -E ../beta-error.log
917 919 $ cat ../beta.pid >> $DAEMON_PIDS
918 920 $ cd ../gamma
919 921
920 922 $ hg pull http://localhost:$HGPORT/
921 923 pulling from http://localhost:$HGPORT/
922 924 searching for changes
923 925 no changes found
924 926 $ hg phase f54f1bb90ff3
925 927 2: draft
926 928
927 929 check that secret local on both side are not synced to public
928 930
929 931 $ hg push -r b555f63b6063 http://localhost:$HGPORT/
930 932 pushing to http://localhost:$HGPORT/
931 933 searching for changes
932 934 no changes found
933 935 $ hg phase f54f1bb90ff3
934 936 2: draft
935 937
936 938 put the changeset in the draft state again
937 939 (the first test after this one expects to be able to copy)
938 940
939 941 $ cd ..
940 942
941 943
942 944 Test Clone behavior
943 945
944 946 A. Clone without secret changeset
945 947
946 948 1. cloning non-publishing repository
947 949 (Phase should be preserved)
948 950
949 951 # make sure there is no secret so we can use a copy clone
950 952
951 953 $ hg -R mu phase --draft 'secret()'
952 954
953 955 $ hg clone -U mu Tau
954 956 $ hgph -R Tau
955 957 o 12 draft mu-more - 5237fb433fc8
956 958 |
957 959 | o 11 draft alpha-more - 1c5cfd894796
958 960 | |
959 961 o | 10 draft A-secret - 435b5d83910c
960 962 |/
961 963 o 9 public a-H - 967b449fbc94
962 964 |
963 965 | o 8 public a-F - b740e3e5c05d
964 966 | |
965 967 | o 7 public a-E - e9f537e46dea
966 968 | |
967 969 +---o 6 public n-B - 145e75495359
968 970 | |
969 971 o | 5 public n-A - d6bcb4f74035
970 972 | |
971 973 | o 4 public a-D - b555f63b6063
972 974 | |
973 975 | o 3 public a-C - 54acac6f23ab
974 976 | |
975 977 o | 2 public b-A - f54f1bb90ff3
976 978 |/
977 979 o 1 public a-B - 548a3d25dbf0
978 980 |
979 981 o 0 public a-A - 054250a37db4
980 982
981 983
982 984 2. cloning publishing repository
983 985
984 986 (everything should be public)
985 987
986 988 $ hg clone -U alpha Upsilon
987 989 $ hgph -R Upsilon
988 990 o 13 public mu-more - 5237fb433fc8
989 991 |
990 992 | o 12 public alpha-more - 1c5cfd894796
991 993 | |
992 994 o | 11 public A-secret - 435b5d83910c
993 995 |/
994 996 o 10 public a-H - 967b449fbc94
995 997 |
996 998 | o 9 public a-G - 3e27b6f1eee1
997 999 | |
998 1000 | o 8 public a-F - b740e3e5c05d
999 1001 | |
1000 1002 | o 7 public a-E - e9f537e46dea
1001 1003 | |
1002 1004 +---o 6 public n-B - 145e75495359
1003 1005 | |
1004 1006 o | 5 public n-A - d6bcb4f74035
1005 1007 | |
1006 1008 o | 4 public b-A - f54f1bb90ff3
1007 1009 | |
1008 1010 | o 3 public a-D - b555f63b6063
1009 1011 | |
1010 1012 | o 2 public a-C - 54acac6f23ab
1011 1013 |/
1012 1014 o 1 public a-B - 548a3d25dbf0
1013 1015 |
1014 1016 o 0 public a-A - 054250a37db4
1015 1017
1016 1018
General Comments 0
You need to be logged in to leave comments. Login now