obsolete: exchange obsolete marker over pushkey...
Pierre-Yves.David@ens-lyon.org
r17075:28ed1c45 default
@@ -1,2443 +1,2453 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7
8 7 from node import bin, hex, nullid, nullrev, short
9 8 from i18n import _
10 9 import repo, changegroup, subrepo, discovery, pushkey, obsolete
11 10 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 import lock, transaction, store, encoding
11 import lock, transaction, store, encoding, base85
13 12 import scmutil, util, extensions, hook, error, revset
14 13 import match as matchmod
15 14 import merge as mergemod
16 15 import tags as tagsmod
17 16 from lock import release
18 17 import weakref, errno, os, time, inspect
19 18 propertycache = util.propertycache
20 19 filecache = scmutil.filecache
21 20
22 21 class storecache(filecache):
23 22 """filecache for files in the store"""
24 23 def join(self, obj, fname):
25 24 return obj.sjoin(fname)
26 25
27 26 class localrepository(repo.repository):
28 27 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
29 28 'known', 'getbundle'))
30 29 supportedformats = set(('revlogv1', 'generaldelta'))
31 30 supported = supportedformats | set(('store', 'fncache', 'shared',
32 31 'dotencode'))
33 32
34 33 def __init__(self, baseui, path=None, create=False):
35 34 repo.repository.__init__(self)
36 35 self.root = os.path.realpath(util.expandpath(path))
37 36 self.path = os.path.join(self.root, ".hg")
38 37 self.origroot = path
39 38 self.auditor = scmutil.pathauditor(self.root, self._checknested)
40 39 self.opener = scmutil.opener(self.path)
41 40 self.wopener = scmutil.opener(self.root)
42 41 self.baseui = baseui
43 42 self.ui = baseui.copy()
44 43 # A list of callbacks to shape the phase if no data were found.
45 44 # Callbacks are in the form: func(repo, roots) --> processed root.
46 45 # This list is to be filled by extensions during repo setup.
47 46 self._phasedefaults = []
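# A minimal sketch of the callback shape described above (hypothetical
# extension code, not part of this changeset):
#     def filterroots(repo, roots):
#         return roots  # inspect or rewrite the phase roots here
#     repo._phasedefaults.append(filterroots)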
48 47
49 48 try:
50 49 self.ui.readconfig(self.join("hgrc"), self.root)
51 50 extensions.loadall(self.ui)
52 51 except IOError:
53 52 pass
54 53
55 54 if not os.path.isdir(self.path):
56 55 if create:
57 56 if not os.path.exists(path):
58 57 util.makedirs(path)
59 58 util.makedir(self.path, notindexed=True)
60 59 requirements = ["revlogv1"]
61 60 if self.ui.configbool('format', 'usestore', True):
62 61 os.mkdir(os.path.join(self.path, "store"))
63 62 requirements.append("store")
64 63 if self.ui.configbool('format', 'usefncache', True):
65 64 requirements.append("fncache")
66 65 if self.ui.configbool('format', 'dotencode', True):
67 66 requirements.append('dotencode')
68 67 # create an invalid changelog
69 68 self.opener.append(
70 69 "00changelog.i",
71 70 '\0\0\0\2' # represents revlogv2
72 71 ' dummy changelog to prevent using the old repo layout'
73 72 )
74 73 if self.ui.configbool('format', 'generaldelta', False):
75 74 requirements.append("generaldelta")
76 75 requirements = set(requirements)
77 76 else:
78 77 raise error.RepoError(_("repository %s not found") % path)
79 78 elif create:
80 79 raise error.RepoError(_("repository %s already exists") % path)
81 80 else:
82 81 try:
83 82 requirements = scmutil.readrequires(self.opener, self.supported)
84 83 except IOError, inst:
85 84 if inst.errno != errno.ENOENT:
86 85 raise
87 86 requirements = set()
88 87
89 88 self.sharedpath = self.path
90 89 try:
91 90 s = os.path.realpath(self.opener.read("sharedpath").rstrip('\n'))
92 91 if not os.path.exists(s):
93 92 raise error.RepoError(
94 93 _('.hg/sharedpath points to nonexistent directory %s') % s)
95 94 self.sharedpath = s
96 95 except IOError, inst:
97 96 if inst.errno != errno.ENOENT:
98 97 raise
99 98
100 99 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
101 100 self.spath = self.store.path
102 101 self.sopener = self.store.opener
103 102 self.sjoin = self.store.join
104 103 self.opener.createmode = self.store.createmode
105 104 self._applyrequirements(requirements)
106 105 if create:
107 106 self._writerequirements()
108 107
109 108
110 109 self._branchcache = None
111 110 self._branchcachetip = None
112 111 self.filterpats = {}
113 112 self._datafilters = {}
114 113 self._transref = self._lockref = self._wlockref = None
115 114
116 115 # A cache for various files under .hg/ that tracks file changes,
117 116 # (used by the filecache decorator)
118 117 #
119 118 # Maps a property name to its util.filecacheentry
120 119 self._filecache = {}
121 120
122 121 def _applyrequirements(self, requirements):
123 122 self.requirements = requirements
124 123 openerreqs = set(('revlogv1', 'generaldelta'))
125 124 self.sopener.options = dict((r, 1) for r in requirements
126 125 if r in openerreqs)
127 126
128 127 def _writerequirements(self):
129 128 reqfile = self.opener("requires", "w")
130 129 for r in self.requirements:
131 130 reqfile.write("%s\n" % r)
132 131 reqfile.close()
133 132
134 133 def _checknested(self, path):
135 134 """Determine if path is a legal nested repository."""
136 135 if not path.startswith(self.root):
137 136 return False
138 137 subpath = path[len(self.root) + 1:]
139 138 normsubpath = util.pconvert(subpath)
140 139
141 140 # XXX: Checking against the current working copy is wrong in
142 141 # the sense that it can reject things like
143 142 #
144 143 # $ hg cat -r 10 sub/x.txt
145 144 #
146 145 # if sub/ is no longer a subrepository in the working copy
147 146 # parent revision.
148 147 #
149 148 # However, it can of course also allow things that would have
150 149 # been rejected before, such as the above cat command if sub/
151 150 # is a subrepository now, but was a normal directory before.
152 151 # The old path auditor would have rejected by mistake since it
153 152 # panics when it sees sub/.hg/.
154 153 #
155 154 # All in all, checking against the working copy seems sensible
156 155 # since we want to prevent access to nested repositories on
157 156 # the filesystem *now*.
158 157 ctx = self[None]
159 158 parts = util.splitpath(subpath)
160 159 while parts:
161 160 prefix = '/'.join(parts)
162 161 if prefix in ctx.substate:
163 162 if prefix == normsubpath:
164 163 return True
165 164 else:
166 165 sub = ctx.sub(prefix)
167 166 return sub.checknested(subpath[len(prefix) + 1:])
168 167 else:
169 168 parts.pop()
170 169 return False
171 170
172 171 @filecache('bookmarks')
173 172 def _bookmarks(self):
174 173 return bookmarks.read(self)
175 174
176 175 @filecache('bookmarks.current')
177 176 def _bookmarkcurrent(self):
178 177 return bookmarks.readcurrent(self)
179 178
180 179 def _writebookmarks(self, marks):
181 180 bookmarks.write(self)
182 181
183 182 def bookmarkheads(self, bookmark):
184 183 name = bookmark.split('@', 1)[0]
185 184 heads = []
186 185 for mark, n in self._bookmarks.iteritems():
187 186 if mark.split('@', 1)[0] == name:
188 187 heads.append(n)
189 188 return heads
190 189
191 190 @storecache('phaseroots')
192 191 def _phasecache(self):
193 192 return phases.phasecache(self, self._phasedefaults)
194 193
195 194 @storecache('obsstore')
196 195 def obsstore(self):
197 196 store = obsolete.obsstore()
198 197 data = self.sopener.tryread('obsstore')
199 198 if data:
200 199 store.loadmarkers(data)
201 200 return store
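# The base85 import added at the top of this changeset supports its
# pushkey-based marker exchange: raw marker bytes must be ASCII-safe on
# the wire. A minimal round-trip sketch, assuming `data` holds encoded
# markers:
#     encoded = base85.b85encode(data)
#     assert base85.b85decode(encoded) == data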
202 201
203 202 @storecache('00changelog.i')
204 203 def changelog(self):
205 204 c = changelog.changelog(self.sopener)
206 205 if 'HG_PENDING' in os.environ:
207 206 p = os.environ['HG_PENDING']
208 207 if p.startswith(self.root):
209 208 c.readpending('00changelog.i.a')
210 209 return c
211 210
212 211 @storecache('00manifest.i')
213 212 def manifest(self):
214 213 return manifest.manifest(self.sopener)
215 214
216 215 @filecache('dirstate')
217 216 def dirstate(self):
218 217 warned = [0]
219 218 def validate(node):
220 219 try:
221 220 self.changelog.rev(node)
222 221 return node
223 222 except error.LookupError:
224 223 if not warned[0]:
225 224 warned[0] = True
226 225 self.ui.warn(_("warning: ignoring unknown"
227 226 " working parent %s!\n") % short(node))
228 227 return nullid
229 228
230 229 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
231 230
232 231 def __getitem__(self, changeid):
233 232 if changeid is None:
234 233 return context.workingctx(self)
235 234 return context.changectx(self, changeid)
236 235
237 236 def __contains__(self, changeid):
238 237 try:
239 238 return bool(self.lookup(changeid))
240 239 except error.RepoLookupError:
241 240 return False
242 241
243 242 def __nonzero__(self):
244 243 return True
245 244
246 245 def __len__(self):
247 246 return len(self.changelog)
248 247
249 248 def __iter__(self):
250 249 for i in xrange(len(self)):
251 250 yield i
252 251
253 252 def revs(self, expr, *args):
254 253 '''Return a list of revisions matching the given revset'''
255 254 expr = revset.formatspec(expr, *args)
256 255 m = revset.match(None, expr)
257 256 return [r for r in m(self, range(len(self)))]
258 257
259 258 def set(self, expr, *args):
260 259 '''
261 260 Yield a context for each matching revision, after doing arg
262 261 replacement via revset.formatspec
263 262 '''
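# Usage sketch (hypothetical revset): iterate contexts for the heads
# of a named branch, with %s substituted safely by revset.formatspec:
#     for ctx in repo.set('heads(branch(%s))', 'default'):
#         ctx.rev()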
264 263 for r in self.revs(expr, *args):
265 264 yield self[r]
266 265
267 266 def url(self):
268 267 return 'file:' + self.root
269 268
270 269 def hook(self, name, throw=False, **args):
271 270 return hook.hook(self.ui, self, name, throw, **args)
272 271
273 272 tag_disallowed = ':\r\n'
274 273
275 274 def _tag(self, names, node, message, local, user, date, extra={}):
276 275 if isinstance(names, str):
277 276 allchars = names
278 277 names = (names,)
279 278 else:
280 279 allchars = ''.join(names)
281 280 for c in self.tag_disallowed:
282 281 if c in allchars:
283 282 raise util.Abort(_('%r cannot be used in a tag name') % c)
284 283
285 284 branches = self.branchmap()
286 285 for name in names:
287 286 self.hook('pretag', throw=True, node=hex(node), tag=name,
288 287 local=local)
289 288 if name in branches:
290 289 self.ui.warn(_("warning: tag %s conflicts with existing"
291 290 " branch name\n") % name)
292 291
293 292 def writetags(fp, names, munge, prevtags):
294 293 fp.seek(0, 2)
295 294 if prevtags and prevtags[-1] != '\n':
296 295 fp.write('\n')
297 296 for name in names:
298 297 m = munge and munge(name) or name
299 298 if (self._tagscache.tagtypes and
300 299 name in self._tagscache.tagtypes):
301 300 old = self.tags().get(name, nullid)
302 301 fp.write('%s %s\n' % (hex(old), m))
303 302 fp.write('%s %s\n' % (hex(node), m))
304 303 fp.close()
305 304
306 305 prevtags = ''
307 306 if local:
308 307 try:
309 308 fp = self.opener('localtags', 'r+')
310 309 except IOError:
311 310 fp = self.opener('localtags', 'a')
312 311 else:
313 312 prevtags = fp.read()
314 313
315 314 # local tags are stored in the current charset
316 315 writetags(fp, names, None, prevtags)
317 316 for name in names:
318 317 self.hook('tag', node=hex(node), tag=name, local=local)
319 318 return
320 319
321 320 try:
322 321 fp = self.wfile('.hgtags', 'rb+')
323 322 except IOError, e:
324 323 if e.errno != errno.ENOENT:
325 324 raise
326 325 fp = self.wfile('.hgtags', 'ab')
327 326 else:
328 327 prevtags = fp.read()
329 328
330 329 # committed tags are stored in UTF-8
331 330 writetags(fp, names, encoding.fromlocal, prevtags)
332 331
333 332 fp.close()
334 333
335 334 self.invalidatecaches()
336 335
337 336 if '.hgtags' not in self.dirstate:
338 337 self[None].add(['.hgtags'])
339 338
340 339 m = matchmod.exact(self.root, '', ['.hgtags'])
341 340 tagnode = self.commit(message, user, date, extra=extra, match=m)
342 341
343 342 for name in names:
344 343 self.hook('tag', node=hex(node), tag=name, local=local)
345 344
346 345 return tagnode
347 346
348 347 def tag(self, names, node, message, local, user, date):
349 348 '''tag a revision with one or more symbolic names.
350 349
351 350 names is a list of strings or, when adding a single tag, names may be a
352 351 string.
353 352
354 353 if local is True, the tags are stored in a per-repository file.
355 354 otherwise, they are stored in the .hgtags file, and a new
356 355 changeset is committed with the change.
357 356
358 357 keyword arguments:
359 358
360 359 local: whether to store tags in non-version-controlled file
361 360 (default False)
362 361
363 362 message: commit message to use if committing
364 363
365 364 user: name of user to use if committing
366 365
367 366 date: date tuple to use if committing'''
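# A hedged usage sketch: adding one global tag to the working parent
# (argument order as documented above, hypothetical tag name):
#     repo.tag('v1.0', repo['.'].node(), 'Added tag v1.0',
#              False, None, None)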
368 367
369 368 if not local:
370 369 for x in self.status()[:5]:
371 370 if '.hgtags' in x:
372 371 raise util.Abort(_('working copy of .hgtags is changed '
373 372 '(please commit .hgtags manually)'))
374 373
375 374 self.tags() # instantiate the cache
376 375 self._tag(names, node, message, local, user, date)
377 376
378 377 @propertycache
379 378 def _tagscache(self):
380 379 '''Returns a tagscache object that contains various tags related
381 380 caches.'''
382 381
383 382 # This simplifies its cache management by having one decorated
384 383 # function (this one) and the rest simply fetch things from it.
385 384 class tagscache(object):
386 385 def __init__(self):
387 386 # These two define the set of tags for this repository. tags
388 387 # maps tag name to node; tagtypes maps tag name to 'global' or
389 388 # 'local'. (Global tags are defined by .hgtags across all
390 389 # heads, and local tags are defined in .hg/localtags.)
391 390 # They constitute the in-memory cache of tags.
392 391 self.tags = self.tagtypes = None
393 392
394 393 self.nodetagscache = self.tagslist = None
395 394
396 395 cache = tagscache()
397 396 cache.tags, cache.tagtypes = self._findtags()
398 397
399 398 return cache
400 399
401 400 def tags(self):
402 401 '''return a mapping of tag to node'''
403 402 t = {}
404 403 for k, v in self._tagscache.tags.iteritems():
405 404 try:
406 405 # ignore tags to unknown nodes
407 406 self.changelog.rev(v)
408 407 t[k] = v
409 408 except (error.LookupError, ValueError):
410 409 pass
411 410 return t
412 411
413 412 def _findtags(self):
414 413 '''Do the hard work of finding tags. Return a pair of dicts
415 414 (tags, tagtypes) where tags maps tag name to node, and tagtypes
416 415 maps tag name to a string like \'global\' or \'local\'.
417 416 Subclasses or extensions are free to add their own tags, but
418 417 should be aware that the returned dicts will be retained for the
419 418 duration of the localrepo object.'''
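# Shape of the return value, for illustration (hypothetical nodes n1
# and n2): ({'tip': n1, 'v1.0': n2}, {'v1.0': 'global'}); note 'tip'
# carries no tagtype entry.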
420 419
421 420 # XXX what tagtype should subclasses/extensions use? Currently
422 421 # mq and bookmarks add tags, but do not set the tagtype at all.
423 422 # Should each extension invent its own tag type? Should there
424 423 # be one tagtype for all such "virtual" tags? Or is the status
425 424 # quo fine?
426 425
427 426 alltags = {} # map tag name to (node, hist)
428 427 tagtypes = {}
429 428
430 429 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
431 430 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
432 431
433 432 # Build the return dicts. Have to re-encode tag names because
434 433 # the tags module always uses UTF-8 (in order not to lose info
435 434 # writing to the cache), but the rest of Mercurial wants them in
436 435 # local encoding.
437 436 tags = {}
438 437 for (name, (node, hist)) in alltags.iteritems():
439 438 if node != nullid:
440 439 tags[encoding.tolocal(name)] = node
441 440 tags['tip'] = self.changelog.tip()
442 441 tagtypes = dict([(encoding.tolocal(name), value)
443 442 for (name, value) in tagtypes.iteritems()])
444 443 return (tags, tagtypes)
445 444
446 445 def tagtype(self, tagname):
447 446 '''
448 447 return the type of the given tag. result can be:
449 448
450 449 'local' : a local tag
451 450 'global' : a global tag
452 451 None : tag does not exist
453 452 '''
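# e.g. (hypothetical names) tagtype('v1.0') -> 'global',
# tagtype('nosuchtag') -> None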
454 453
455 454 return self._tagscache.tagtypes.get(tagname)
456 455
457 456 def tagslist(self):
458 457 '''return a list of tags ordered by revision'''
459 458 if not self._tagscache.tagslist:
460 459 l = []
461 460 for t, n in self.tags().iteritems():
462 461 r = self.changelog.rev(n)
463 462 l.append((r, t, n))
464 463 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
465 464
466 465 return self._tagscache.tagslist
467 466
468 467 def nodetags(self, node):
469 468 '''return the tags associated with a node'''
470 469 if not self._tagscache.nodetagscache:
471 470 nodetagscache = {}
472 471 for t, n in self._tagscache.tags.iteritems():
473 472 nodetagscache.setdefault(n, []).append(t)
474 473 for tags in nodetagscache.itervalues():
475 474 tags.sort()
476 475 self._tagscache.nodetagscache = nodetagscache
477 476 return self._tagscache.nodetagscache.get(node, [])
478 477
479 478 def nodebookmarks(self, node):
480 479 marks = []
481 480 for bookmark, n in self._bookmarks.iteritems():
482 481 if n == node:
483 482 marks.append(bookmark)
484 483 return sorted(marks)
485 484
486 485 def _branchtags(self, partial, lrev):
487 486 # TODO: rename this function?
488 487 tiprev = len(self) - 1
489 488 if lrev != tiprev:
490 489 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
491 490 self._updatebranchcache(partial, ctxgen)
492 491 self._writebranchcache(partial, self.changelog.tip(), tiprev)
493 492
494 493 return partial
495 494
496 495 def updatebranchcache(self):
497 496 tip = self.changelog.tip()
498 497 if self._branchcache is not None and self._branchcachetip == tip:
499 498 return
500 499
501 500 oldtip = self._branchcachetip
502 501 self._branchcachetip = tip
503 502 if oldtip is None or oldtip not in self.changelog.nodemap:
504 503 partial, last, lrev = self._readbranchcache()
505 504 else:
506 505 lrev = self.changelog.rev(oldtip)
507 506 partial = self._branchcache
508 507
509 508 self._branchtags(partial, lrev)
510 509 # this private cache holds all heads (not just the branch tips)
511 510 self._branchcache = partial
512 511
513 512 def branchmap(self):
514 513 '''returns a dictionary {branch: [branchheads]}'''
515 514 self.updatebranchcache()
516 515 return self._branchcache
517 516
518 517 def _branchtip(self, heads):
519 518 '''return the tipmost branch head in heads'''
520 519 tip = heads[-1]
521 520 for h in reversed(heads):
522 521 if not self[h].closesbranch():
523 522 tip = h
524 523 break
525 524 return tip
526 525
527 526 def branchtip(self, branch):
528 527 '''return the tip node for a given branch'''
529 528 if branch not in self.branchmap():
530 529 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
531 530 return self._branchtip(self.branchmap()[branch])
532 531
533 532 def branchtags(self):
534 533 '''return a dict where branch names map to the tipmost head of
535 534 the branch; open heads take precedence over closed ones'''
536 535 bt = {}
537 536 for bn, heads in self.branchmap().iteritems():
538 537 bt[bn] = self._branchtip(heads)
539 538 return bt
540 539
541 540 def _readbranchcache(self):
542 541 partial = {}
543 542 try:
544 543 f = self.opener("cache/branchheads")
545 544 lines = f.read().split('\n')
546 545 f.close()
547 546 except (IOError, OSError):
548 547 return {}, nullid, nullrev
549 548
550 549 try:
551 550 last, lrev = lines.pop(0).split(" ", 1)
552 551 last, lrev = bin(last), int(lrev)
553 552 if lrev >= len(self) or self[lrev].node() != last:
554 553 # invalidate the cache
555 554 raise ValueError('invalidating branch cache (tip differs)')
556 555 for l in lines:
557 556 if not l:
558 557 continue
559 558 node, label = l.split(" ", 1)
560 559 label = encoding.tolocal(label.strip())
561 560 if node not in self:
562 561 raise ValueError('invalidating branch cache because node '+
563 562 '%s does not exist' % node)
564 563 partial.setdefault(label, []).append(bin(node))
565 564 except KeyboardInterrupt:
566 565 raise
567 566 except Exception, inst:
568 567 if self.ui.debugflag:
569 568 self.ui.warn(str(inst), '\n')
570 569 partial, last, lrev = {}, nullid, nullrev
571 570 return partial, last, lrev
572 571
573 572 def _writebranchcache(self, branches, tip, tiprev):
574 573 try:
575 574 f = self.opener("cache/branchheads", "w", atomictemp=True)
576 575 f.write("%s %s\n" % (hex(tip), tiprev))
577 576 for label, nodes in branches.iteritems():
578 577 for node in nodes:
579 578 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
580 579 f.close()
581 580 except (IOError, OSError):
582 581 pass
583 582
584 583 def _updatebranchcache(self, partial, ctxgen):
585 584 """Given a branchhead cache, partial, that may have extra nodes or be
586 585 missing heads, and a generator of nodes that are at least a superset of
587 586 the missing heads, this function updates partial to be correct.
588 587 """
589 588 # collect new branch entries
590 589 newbranches = {}
591 590 for c in ctxgen:
592 591 newbranches.setdefault(c.branch(), []).append(c.node())
593 592 # if older branchheads are reachable from new ones, they aren't
594 593 # really branchheads. Note checking parents is insufficient:
595 594 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
596 595 for branch, newnodes in newbranches.iteritems():
597 596 bheads = partial.setdefault(branch, [])
598 597 # Remove candidate heads that no longer are in the repo (e.g., as
599 598 # the result of a strip that just happened). Avoid using 'node in
600 599 # self' here because that dives down into branchcache code somewhat
601 600 # recursively.
602 601 bheadrevs = [self.changelog.rev(node) for node in bheads
603 602 if self.changelog.hasnode(node)]
604 603 newheadrevs = [self.changelog.rev(node) for node in newnodes
605 604 if self.changelog.hasnode(node)]
606 605 ctxisnew = bheadrevs and min(newheadrevs) > max(bheadrevs)
607 606 # Remove duplicates - nodes that are in newheadrevs and are already
608 607 # in bheadrevs. This can happen if you strip a node whose parent
609 608 # was already a head (because they're on different branches).
610 609 bheadrevs = sorted(set(bheadrevs).union(newheadrevs))
611 610
612 611 # Starting from tip means fewer passes over reachable. If we know
613 612 # the new candidates are not ancestors of existing heads, we don't
614 613 # have to examine ancestors of existing heads
615 614 if ctxisnew:
616 615 iterrevs = sorted(newheadrevs)
617 616 else:
618 617 iterrevs = list(bheadrevs)
619 618
620 619 # This loop prunes out two kinds of heads - heads that are
621 620 # superseded by a head in newheadrevs, and newheadrevs that are not
622 621 # heads because an existing head is their descendant.
623 622 while iterrevs:
624 623 latest = iterrevs.pop()
625 624 if latest not in bheadrevs:
626 625 continue
627 626 ancestors = set(self.changelog.ancestors([latest],
628 627 bheadrevs[0]))
629 628 if ancestors:
630 629 bheadrevs = [b for b in bheadrevs if b not in ancestors]
631 630 partial[branch] = [self.changelog.node(rev) for rev in bheadrevs]
632 631
633 632 # There may be branches that cease to exist when the last commit in the
634 633 # branch was stripped. This code filters them out. Note that the
635 634 # branch that ceased to exist may not be in newbranches because
636 635 # newbranches is the set of candidate heads, which when you strip the
637 636 # last commit in a branch will be the parent branch.
638 637 for branch in partial:
639 638 nodes = [head for head in partial[branch]
640 639 if self.changelog.hasnode(head)]
641 640 if not nodes:
642 641 del partial[branch]
643 642
644 643 def lookup(self, key):
645 644 return self[key].node()
646 645
647 646 def lookupbranch(self, key, remote=None):
648 647 repo = remote or self
649 648 if key in repo.branchmap():
650 649 return key
651 650
652 651 repo = (remote and remote.local()) and remote or self
653 652 return repo[key].branch()
654 653
655 654 def known(self, nodes):
656 655 nm = self.changelog.nodemap
657 656 pc = self._phasecache
658 657 result = []
659 658 for n in nodes:
660 659 r = nm.get(n)
661 660 resp = not (r is None or pc.phase(self, r) >= phases.secret)
662 661 result.append(resp)
663 662 return result
664 663
665 664 def local(self):
666 665 return self
667 666
668 667 def join(self, f):
669 668 return os.path.join(self.path, f)
670 669
671 670 def wjoin(self, f):
672 671 return os.path.join(self.root, f)
673 672
674 673 def file(self, f):
675 674 if f[0] == '/':
676 675 f = f[1:]
677 676 return filelog.filelog(self.sopener, f)
678 677
679 678 def changectx(self, changeid):
680 679 return self[changeid]
681 680
682 681 def parents(self, changeid=None):
683 682 '''get list of changectxs for parents of changeid'''
684 683 return self[changeid].parents()
685 684
686 685 def setparents(self, p1, p2=nullid):
687 686 copies = self.dirstate.setparents(p1, p2)
688 687 if copies:
689 688 # Adjust copy records, the dirstate cannot do it, it
690 689 # requires access to parents manifests. Preserve them
691 690 # only for entries added to first parent.
692 691 pctx = self[p1]
693 692 for f in copies:
694 693 if f not in pctx and copies[f] in pctx:
695 694 self.dirstate.copy(copies[f], f)
696 695
697 696 def filectx(self, path, changeid=None, fileid=None):
698 697 """changeid can be a changeset revision, node, or tag.
699 698 fileid can be a file revision or node."""
700 699 return context.filectx(self, path, changeid, fileid)
701 700
702 701 def getcwd(self):
703 702 return self.dirstate.getcwd()
704 703
705 704 def pathto(self, f, cwd=None):
706 705 return self.dirstate.pathto(f, cwd)
707 706
708 707 def wfile(self, f, mode='r'):
709 708 return self.wopener(f, mode)
710 709
711 710 def _link(self, f):
712 711 return os.path.islink(self.wjoin(f))
713 712
714 713 def _loadfilter(self, filter):
715 714 if filter not in self.filterpats:
716 715 l = []
717 716 for pat, cmd in self.ui.configitems(filter):
718 717 if cmd == '!':
719 718 continue
720 719 mf = matchmod.match(self.root, '', [pat])
721 720 fn = None
722 721 params = cmd
723 722 for name, filterfn in self._datafilters.iteritems():
724 723 if cmd.startswith(name):
725 724 fn = filterfn
726 725 params = cmd[len(name):].lstrip()
727 726 break
728 727 if not fn:
729 728 fn = lambda s, c, **kwargs: util.filter(s, c)
730 729 # Wrap old filters not supporting keyword arguments
731 730 if not inspect.getargspec(fn)[2]:
732 731 oldfn = fn
733 732 fn = lambda s, c, **kwargs: oldfn(s, c)
734 733 l.append((mf, fn, params))
735 734 self.filterpats[filter] = l
736 735 return self.filterpats[filter]
737 736
738 737 def _filter(self, filterpats, filename, data):
739 738 for mf, fn, cmd in filterpats:
740 739 if mf(filename):
741 740 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
742 741 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
743 742 break
744 743
745 744 return data
746 745
747 746 @propertycache
748 747 def _encodefilterpats(self):
749 748 return self._loadfilter('encode')
750 749
751 750 @propertycache
752 751 def _decodefilterpats(self):
753 752 return self._loadfilter('decode')
754 753
755 754 def adddatafilter(self, name, filter):
756 755 self._datafilters[name] = filter
757 756
758 757 def wread(self, filename):
759 758 if self._link(filename):
760 759 data = os.readlink(self.wjoin(filename))
761 760 else:
762 761 data = self.wopener.read(filename)
763 762 return self._filter(self._encodefilterpats, filename, data)
764 763
765 764 def wwrite(self, filename, data, flags):
766 765 data = self._filter(self._decodefilterpats, filename, data)
767 766 if 'l' in flags:
768 767 self.wopener.symlink(data, filename)
769 768 else:
770 769 self.wopener.write(filename, data)
771 770 if 'x' in flags:
772 771 util.setflags(self.wjoin(filename), False, True)
773 772
774 773 def wwritedata(self, filename, data):
775 774 return self._filter(self._decodefilterpats, filename, data)
776 775
777 776 def transaction(self, desc):
778 777 tr = self._transref and self._transref() or None
779 778 if tr and tr.running():
780 779 return tr.nest()
781 780
782 781 # abort here if the journal already exists
783 782 if os.path.exists(self.sjoin("journal")):
784 783 raise error.RepoError(
785 784 _("abandoned transaction found - run hg recover"))
786 785
787 786 self._writejournal(desc)
788 787 renames = [(x, undoname(x)) for x in self._journalfiles()]
789 788
790 789 tr = transaction.transaction(self.ui.warn, self.sopener,
791 790 self.sjoin("journal"),
792 791 aftertrans(renames),
793 792 self.store.createmode)
794 793 self._transref = weakref.ref(tr)
795 794 return tr
796 795
797 796 def _journalfiles(self):
798 797 return (self.sjoin('journal'), self.join('journal.dirstate'),
799 798 self.join('journal.branch'), self.join('journal.desc'),
800 799 self.join('journal.bookmarks'),
801 800 self.sjoin('journal.phaseroots'))
802 801
803 802 def undofiles(self):
804 803 return [undoname(x) for x in self._journalfiles()]
805 804
806 805 def _writejournal(self, desc):
807 806 self.opener.write("journal.dirstate",
808 807 self.opener.tryread("dirstate"))
809 808 self.opener.write("journal.branch",
810 809 encoding.fromlocal(self.dirstate.branch()))
811 810 self.opener.write("journal.desc",
812 811 "%d\n%s\n" % (len(self), desc))
813 812 self.opener.write("journal.bookmarks",
814 813 self.opener.tryread("bookmarks"))
815 814 self.sopener.write("journal.phaseroots",
816 815 self.sopener.tryread("phaseroots"))
817 816
818 817 def recover(self):
819 818 lock = self.lock()
820 819 try:
821 820 if os.path.exists(self.sjoin("journal")):
822 821 self.ui.status(_("rolling back interrupted transaction\n"))
823 822 transaction.rollback(self.sopener, self.sjoin("journal"),
824 823 self.ui.warn)
825 824 self.invalidate()
826 825 return True
827 826 else:
828 827 self.ui.warn(_("no interrupted transaction available\n"))
829 828 return False
830 829 finally:
831 830 lock.release()
832 831
833 832 def rollback(self, dryrun=False, force=False):
834 833 wlock = lock = None
835 834 try:
836 835 wlock = self.wlock()
837 836 lock = self.lock()
838 837 if os.path.exists(self.sjoin("undo")):
839 838 return self._rollback(dryrun, force)
840 839 else:
841 840 self.ui.warn(_("no rollback information available\n"))
842 841 return 1
843 842 finally:
844 843 release(lock, wlock)
845 844
846 845 def _rollback(self, dryrun, force):
847 846 ui = self.ui
848 847 try:
849 848 args = self.opener.read('undo.desc').splitlines()
850 849 (oldlen, desc, detail) = (int(args[0]), args[1], None)
851 850 if len(args) >= 3:
852 851 detail = args[2]
853 852 oldtip = oldlen - 1
854 853
855 854 if detail and ui.verbose:
856 855 msg = (_('repository tip rolled back to revision %s'
857 856 ' (undo %s: %s)\n')
858 857 % (oldtip, desc, detail))
859 858 else:
860 859 msg = (_('repository tip rolled back to revision %s'
861 860 ' (undo %s)\n')
862 861 % (oldtip, desc))
863 862 except IOError:
864 863 msg = _('rolling back unknown transaction\n')
865 864 desc = None
866 865
867 866 if not force and self['.'] != self['tip'] and desc == 'commit':
868 867 raise util.Abort(
869 868 _('rollback of last commit while not checked out '
870 869 'may lose data'), hint=_('use -f to force'))
871 870
872 871 ui.status(msg)
873 872 if dryrun:
874 873 return 0
875 874
876 875 parents = self.dirstate.parents()
877 876 transaction.rollback(self.sopener, self.sjoin('undo'), ui.warn)
878 877 if os.path.exists(self.join('undo.bookmarks')):
879 878 util.rename(self.join('undo.bookmarks'),
880 879 self.join('bookmarks'))
881 880 if os.path.exists(self.sjoin('undo.phaseroots')):
882 881 util.rename(self.sjoin('undo.phaseroots'),
883 882 self.sjoin('phaseroots'))
884 883 self.invalidate()
885 884
886 885 parentgone = (parents[0] not in self.changelog.nodemap or
887 886 parents[1] not in self.changelog.nodemap)
888 887 if parentgone:
889 888 util.rename(self.join('undo.dirstate'), self.join('dirstate'))
890 889 try:
891 890 branch = self.opener.read('undo.branch')
892 891 self.dirstate.setbranch(branch)
893 892 except IOError:
894 893 ui.warn(_('named branch could not be reset: '
895 894 'current branch is still \'%s\'\n')
896 895 % self.dirstate.branch())
897 896
898 897 self.dirstate.invalidate()
899 898 parents = tuple([p.rev() for p in self.parents()])
900 899 if len(parents) > 1:
901 900 ui.status(_('working directory now based on '
902 901 'revisions %d and %d\n') % parents)
903 902 else:
904 903 ui.status(_('working directory now based on '
905 904 'revision %d\n') % parents)
906 905 # TODO: if we know which new heads may result from this rollback, pass
907 906 # them to destroy(), which will prevent the branchhead cache from being
908 907 # invalidated.
909 908 self.destroyed()
910 909 return 0
911 910
912 911 def invalidatecaches(self):
913 912 def delcache(name):
914 913 try:
915 914 delattr(self, name)
916 915 except AttributeError:
917 916 pass
918 917
919 918 delcache('_tagscache')
920 919
921 920 self._branchcache = None # in UTF-8
922 921 self._branchcachetip = None
923 922
924 923 def invalidatedirstate(self):
925 924 '''Invalidates the dirstate, causing the next call to dirstate
926 925 to check if it was modified since the last time it was read,
927 926 rereading it if it has been.
928 927
929 928 This is different from dirstate.invalidate() in that it doesn't
930 929 always reread the dirstate. Use dirstate.invalidate() if you want to
931 930 explicitly read the dirstate again (i.e. restoring it to a previous
932 931 known good state).'''
933 932 if 'dirstate' in self.__dict__:
934 933 for k in self.dirstate._filecache:
935 934 try:
936 935 delattr(self.dirstate, k)
937 936 except AttributeError:
938 937 pass
939 938 delattr(self, 'dirstate')
940 939
941 940 def invalidate(self):
942 941 for k in self._filecache:
943 942 # dirstate is invalidated separately in invalidatedirstate()
944 943 if k == 'dirstate':
945 944 continue
946 945
947 946 try:
948 947 delattr(self, k)
949 948 except AttributeError:
950 949 pass
951 950 self.invalidatecaches()
952 951
953 952 # Discard all cache entries to force reloading everything.
954 953 self._filecache.clear()
955 954
956 955 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
957 956 try:
958 957 l = lock.lock(lockname, 0, releasefn, desc=desc)
959 958 except error.LockHeld, inst:
960 959 if not wait:
961 960 raise
962 961 self.ui.warn(_("waiting for lock on %s held by %r\n") %
963 962 (desc, inst.locker))
964 963 # default to 600 seconds timeout
965 964 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
966 965 releasefn, desc=desc)
967 966 if acquirefn:
968 967 acquirefn()
969 968 return l
970 969
971 970 def _afterlock(self, callback):
972 971 """add a callback to the current repository lock.
973 972
974 973 The callback will be executed on lock release."""
975 974 l = self._lockref and self._lockref()
976 975 if l:
977 976 l.postrelease.append(callback)
978 977 else:
979 978 callback()
980 979
981 980 def lock(self, wait=True):
982 981 '''Lock the repository store (.hg/store) and return the lock.
983 982 Use this before modifying the store (e.g. committing or
984 983 stripping). If you are opening a transaction, get a lock as well.'''
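# Typical call pattern, sketched: hold the lock for the whole store
# mutation and always release it:
#     l = repo.lock()
#     try:
#         pass  # modify .hg/store
#     finally:
#         l.release()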
985 984 l = self._lockref and self._lockref()
986 985 if l is not None and l.held:
987 986 l.lock()
988 987 return l
989 988
990 989 def unlock():
991 990 self.store.write()
992 991 if '_phasecache' in vars(self):
993 992 self._phasecache.write()
994 993 if 'obsstore' in vars(self) and self.obsstore._new:
995 994 # XXX: transaction logic should be used here. But for
996 995 # now rewriting the whole file is good enough.
997 996 f = self.sopener('obsstore', 'wb', atomictemp=True)
998 997 try:
999 998 self.obsstore.flushmarkers(f)
1000 999 f.close()
1001 1000 except: # re-raises
1002 1001 f.discard()
1003 1002 raise
1004 1003 for k, ce in self._filecache.items():
1005 1004 if k == 'dirstate':
1006 1005 continue
1007 1006 ce.refresh()
1008 1007
1009 1008 l = self._lock(self.sjoin("lock"), wait, unlock,
1010 1009 self.invalidate, _('repository %s') % self.origroot)
1011 1010 self._lockref = weakref.ref(l)
1012 1011 return l
1013 1012
1014 1013 def wlock(self, wait=True):
1015 1014 '''Lock the non-store parts of the repository (everything under
1016 1015 .hg except .hg/store) and return the lock. Use this before
1017 1016 modifying files in .hg.'''
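# When both locks are needed, this file acquires wlock before lock
# (see rollback() above and commit() below):
#     wlock = repo.wlock()
#     lock = repo.lock()
#     try:
#         pass  # modify .hg and .hg/store
#     finally:
#         release(lock, wlock)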
1018 1017 l = self._wlockref and self._wlockref()
1019 1018 if l is not None and l.held:
1020 1019 l.lock()
1021 1020 return l
1022 1021
1023 1022 def unlock():
1024 1023 self.dirstate.write()
1025 1024 ce = self._filecache.get('dirstate')
1026 1025 if ce:
1027 1026 ce.refresh()
1028 1027
1029 1028 l = self._lock(self.join("wlock"), wait, unlock,
1030 1029 self.invalidatedirstate, _('working directory of %s') %
1031 1030 self.origroot)
1032 1031 self._wlockref = weakref.ref(l)
1033 1032 return l
1034 1033
1035 1034 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1036 1035 """
1037 1036 commit an individual file as part of a larger transaction
1038 1037 """
1039 1038
1040 1039 fname = fctx.path()
1041 1040 text = fctx.data()
1042 1041 flog = self.file(fname)
1043 1042 fparent1 = manifest1.get(fname, nullid)
1044 1043 fparent2 = fparent2o = manifest2.get(fname, nullid)
1045 1044
1046 1045 meta = {}
1047 1046 copy = fctx.renamed()
1048 1047 if copy and copy[0] != fname:
1049 1048 # Mark the new revision of this file as a copy of another
1050 1049 # file. This copy data will effectively act as a parent
1051 1050 # of this new revision. If this is a merge, the first
1052 1051 # parent will be the nullid (meaning "look up the copy data")
1053 1052 # and the second one will be the other parent. For example:
1054 1053 #
1055 1054 # 0 --- 1 --- 3 rev1 changes file foo
1056 1055 # \ / rev2 renames foo to bar and changes it
1057 1056 # \- 2 -/ rev3 should have bar with all changes and
1058 1057 # should record that bar descends from
1059 1058 # bar in rev2 and foo in rev1
1060 1059 #
1061 1060 # this allows this merge to succeed:
1062 1061 #
1063 1062 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1064 1063 # \ / merging rev3 and rev4 should use bar@rev2
1065 1064 # \- 2 --- 4 as the merge base
1066 1065 #
1067 1066
1068 1067 cfname = copy[0]
1069 1068 crev = manifest1.get(cfname)
1070 1069 newfparent = fparent2
1071 1070
1072 1071 if manifest2: # branch merge
1073 1072 if fparent2 == nullid or crev is None: # copied on remote side
1074 1073 if cfname in manifest2:
1075 1074 crev = manifest2[cfname]
1076 1075 newfparent = fparent1
1077 1076
1078 1077 # find source in nearest ancestor if we've lost track
1079 1078 if not crev:
1080 1079 self.ui.debug(" %s: searching for copy revision for %s\n" %
1081 1080 (fname, cfname))
1082 1081 for ancestor in self[None].ancestors():
1083 1082 if cfname in ancestor:
1084 1083 crev = ancestor[cfname].filenode()
1085 1084 break
1086 1085
1087 1086 if crev:
1088 1087 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1089 1088 meta["copy"] = cfname
1090 1089 meta["copyrev"] = hex(crev)
1091 1090 fparent1, fparent2 = nullid, newfparent
1092 1091 else:
1093 1092 self.ui.warn(_("warning: can't find ancestor for '%s' "
1094 1093 "copied from '%s'!\n") % (fname, cfname))
1095 1094
1096 1095 elif fparent2 != nullid:
1097 1096 # is one parent an ancestor of the other?
1098 1097 fparentancestor = flog.ancestor(fparent1, fparent2)
1099 1098 if fparentancestor == fparent1:
1100 1099 fparent1, fparent2 = fparent2, nullid
1101 1100 elif fparentancestor == fparent2:
1102 1101 fparent2 = nullid
1103 1102
1104 1103 # is the file changed?
1105 1104 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1106 1105 changelist.append(fname)
1107 1106 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1108 1107
1109 1108 # are just the flags changed during merge?
1110 1109 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
1111 1110 changelist.append(fname)
1112 1111
1113 1112 return fparent1
1114 1113
1115 1114 def commit(self, text="", user=None, date=None, match=None, force=False,
1116 1115 editor=False, extra={}):
1117 1116 """Add a new revision to current repository.
1118 1117
1119 1118 Revision information is gathered from the working directory,
1120 1119 match can be used to filter the committed files. If editor is
1121 1120 supplied, it is called to get a commit message.
1122 1121 """
1123 1122
1124 1123 def fail(f, msg):
1125 1124 raise util.Abort('%s: %s' % (f, msg))
1126 1125
1127 1126 if not match:
1128 1127 match = matchmod.always(self.root, '')
1129 1128
1130 1129 if not force:
1131 1130 vdirs = []
1132 1131 match.dir = vdirs.append
1133 1132 match.bad = fail
1134 1133
1135 1134 wlock = self.wlock()
1136 1135 try:
1137 1136 wctx = self[None]
1138 1137 merge = len(wctx.parents()) > 1
1139 1138
1140 1139 if (not force and merge and match and
1141 1140 (match.files() or match.anypats())):
1142 1141 raise util.Abort(_('cannot partially commit a merge '
1143 1142 '(do not specify files or patterns)'))
1144 1143
1145 1144 changes = self.status(match=match, clean=force)
1146 1145 if force:
1147 1146 changes[0].extend(changes[6]) # mq may commit unchanged files
1148 1147
1149 1148 # check subrepos
1150 1149 subs = []
1151 1150 commitsubs = set()
1152 1151 newstate = wctx.substate.copy()
1153 1152 # only manage subrepos and .hgsubstate if .hgsub is present
1154 1153 if '.hgsub' in wctx:
1155 1154 # we'll decide whether to track this ourselves, thanks
1156 1155 if '.hgsubstate' in changes[0]:
1157 1156 changes[0].remove('.hgsubstate')
1158 1157 if '.hgsubstate' in changes[2]:
1159 1158 changes[2].remove('.hgsubstate')
1160 1159
1161 1160 # compare current state to last committed state
1162 1161 # build new substate based on last committed state
1163 1162 oldstate = wctx.p1().substate
1164 1163 for s in sorted(newstate.keys()):
1165 1164 if not match(s):
1166 1165 # ignore working copy, use old state if present
1167 1166 if s in oldstate:
1168 1167 newstate[s] = oldstate[s]
1169 1168 continue
1170 1169 if not force:
1171 1170 raise util.Abort(
1172 1171 _("commit with new subrepo %s excluded") % s)
1173 1172 if wctx.sub(s).dirty(True):
1174 1173 if not self.ui.configbool('ui', 'commitsubrepos'):
1175 1174 raise util.Abort(
1176 1175 _("uncommitted changes in subrepo %s") % s,
1177 1176 hint=_("use --subrepos for recursive commit"))
1178 1177 subs.append(s)
1179 1178 commitsubs.add(s)
1180 1179 else:
1181 1180 bs = wctx.sub(s).basestate()
1182 1181 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1183 1182 if oldstate.get(s, (None, None, None))[1] != bs:
1184 1183 subs.append(s)
1185 1184
1186 1185 # check for removed subrepos
1187 1186 for p in wctx.parents():
1188 1187 r = [s for s in p.substate if s not in newstate]
1189 1188 subs += [s for s in r if match(s)]
1190 1189 if subs:
1191 1190 if (not match('.hgsub') and
1192 1191 '.hgsub' in (wctx.modified() + wctx.added())):
1193 1192 raise util.Abort(
1194 1193 _("can't commit subrepos without .hgsub"))
1195 1194 changes[0].insert(0, '.hgsubstate')
1196 1195
1197 1196 elif '.hgsub' in changes[2]:
1198 1197 # clean up .hgsubstate when .hgsub is removed
1199 1198 if ('.hgsubstate' in wctx and
1200 1199 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1201 1200 changes[2].insert(0, '.hgsubstate')
1202 1201
1203 1202 # make sure all explicit patterns are matched
1204 1203 if not force and match.files():
1205 1204 matched = set(changes[0] + changes[1] + changes[2])
1206 1205
1207 1206 for f in match.files():
1208 1207 if f == '.' or f in matched or f in wctx.substate:
1209 1208 continue
1210 1209 if f in changes[3]: # missing
1211 1210 fail(f, _('file not found!'))
1212 1211 if f in vdirs: # visited directory
1213 1212 d = f + '/'
1214 1213 for mf in matched:
1215 1214 if mf.startswith(d):
1216 1215 break
1217 1216 else:
1218 1217 fail(f, _("no match under directory!"))
1219 1218 elif f not in self.dirstate:
1220 1219 fail(f, _("file not tracked!"))
1221 1220
1222 1221 if (not force and not extra.get("close") and not merge
1223 1222 and not (changes[0] or changes[1] or changes[2])
1224 1223 and wctx.branch() == wctx.p1().branch()):
1225 1224 return None
1226 1225
1227 1226 if merge and changes[3]:
1228 1227 raise util.Abort(_("cannot commit merge with missing files"))
1229 1228
1230 1229 ms = mergemod.mergestate(self)
1231 1230 for f in changes[0]:
1232 1231 if f in ms and ms[f] == 'u':
1233 1232 raise util.Abort(_("unresolved merge conflicts "
1234 1233 "(see hg help resolve)"))
1235 1234
1236 1235 cctx = context.workingctx(self, text, user, date, extra, changes)
1237 1236 if editor:
1238 1237 cctx._text = editor(self, cctx, subs)
1239 1238 edited = (text != cctx._text)
1240 1239
1241 1240 # commit subs and write new state
1242 1241 if subs:
1243 1242 for s in sorted(commitsubs):
1244 1243 sub = wctx.sub(s)
1245 1244 self.ui.status(_('committing subrepository %s\n') %
1246 1245 subrepo.subrelpath(sub))
1247 1246 sr = sub.commit(cctx._text, user, date)
1248 1247 newstate[s] = (newstate[s][0], sr)
1249 1248 subrepo.writestate(self, newstate)
1250 1249
1251 1250 # Save commit message in case this transaction gets rolled back
1252 1251 # (e.g. by a pretxncommit hook). Leave the content alone on
1253 1252 # the assumption that the user will use the same editor again.
1254 1253 msgfn = self.savecommitmessage(cctx._text)
1255 1254
1256 1255 p1, p2 = self.dirstate.parents()
1257 1256 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1258 1257 try:
1259 1258 self.hook("precommit", throw=True, parent1=hookp1,
1260 1259 parent2=hookp2)
1261 1260 ret = self.commitctx(cctx, True)
1262 1261 except: # re-raises
1263 1262 if edited:
1264 1263 self.ui.write(
1265 1264 _('note: commit message saved in %s\n') % msgfn)
1266 1265 raise
1267 1266
1268 1267 # update bookmarks, dirstate and mergestate
1269 1268 bookmarks.update(self, [p1, p2], ret)
1270 1269 for f in changes[0] + changes[1]:
1271 1270 self.dirstate.normal(f)
1272 1271 for f in changes[2]:
1273 1272 self.dirstate.drop(f)
1274 1273 self.dirstate.setparents(ret)
1275 1274 ms.reset()
1276 1275 finally:
1277 1276 wlock.release()
1278 1277
1279 1278 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1280 1279 self.hook("commit", node=node, parent1=parent1, parent2=parent2)
1281 1280 self._afterlock(commithook)
1282 1281 return ret
1283 1282
1284 1283 def commitctx(self, ctx, error=False):
1285 1284 """Add a new revision to current repository.
1286 1285 Revision information is passed via the context argument.
1287 1286 """
1288 1287
1289 1288 tr = lock = None
1290 1289 removed = list(ctx.removed())
1291 1290 p1, p2 = ctx.p1(), ctx.p2()
1292 1291 user = ctx.user()
1293 1292
1294 1293 lock = self.lock()
1295 1294 try:
1296 1295 tr = self.transaction("commit")
1297 1296 trp = weakref.proxy(tr)
1298 1297
1299 1298 if ctx.files():
1300 1299 m1 = p1.manifest().copy()
1301 1300 m2 = p2.manifest()
1302 1301
1303 1302 # check in files
1304 1303 new = {}
1305 1304 changed = []
1306 1305 linkrev = len(self)
1307 1306 for f in sorted(ctx.modified() + ctx.added()):
1308 1307 self.ui.note(f + "\n")
1309 1308 try:
1310 1309 fctx = ctx[f]
1311 1310 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1312 1311 changed)
1313 1312 m1.set(f, fctx.flags())
1314 1313 except OSError, inst:
1315 1314 self.ui.warn(_("trouble committing %s!\n") % f)
1316 1315 raise
1317 1316 except IOError, inst:
1318 1317 errcode = getattr(inst, 'errno', errno.ENOENT)
1319 1318 if error or errcode and errcode != errno.ENOENT:
1320 1319 self.ui.warn(_("trouble committing %s!\n") % f)
1321 1320 raise
1322 1321 else:
1323 1322 removed.append(f)
1324 1323
1325 1324 # update manifest
1326 1325 m1.update(new)
1327 1326 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1328 1327 drop = [f for f in removed if f in m1]
1329 1328 for f in drop:
1330 1329 del m1[f]
1331 1330 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1332 1331 p2.manifestnode(), (new, drop))
1333 1332 files = changed + removed
1334 1333 else:
1335 1334 mn = p1.manifestnode()
1336 1335 files = []
1337 1336
1338 1337 # update changelog
1339 1338 self.changelog.delayupdate()
1340 1339 n = self.changelog.add(mn, files, ctx.description(),
1341 1340 trp, p1.node(), p2.node(),
1342 1341 user, ctx.date(), ctx.extra().copy())
1343 1342 p = lambda: self.changelog.writepending() and self.root or ""
1344 1343 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1345 1344 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1346 1345 parent2=xp2, pending=p)
1347 1346 self.changelog.finalize(trp)
1348 1347 # set the new commit in its proper phase
1349 1348 targetphase = phases.newcommitphase(self.ui)
1350 1349 if targetphase:
1351 1350 # retract boundary does not alter the parent changeset.
1352 1351 # if a parent has a higher phase, the resulting phase will
1353 1352 # be compliant anyway
1354 1353 #
1355 1354 # if minimal phase was 0 we don't need to retract anything
1356 1355 phases.retractboundary(self, targetphase, [n])
1357 1356 tr.close()
1358 1357 self.updatebranchcache()
1359 1358 return n
1360 1359 finally:
1361 1360 if tr:
1362 1361 tr.release()
1363 1362 lock.release()
1364 1363
1365 1364 def destroyed(self, newheadnodes=None):
1366 1365 '''Inform the repository that nodes have been destroyed.
1367 1366 Intended for use by strip and rollback, so there's a common
1368 1367 place for anything that has to be done after destroying history.
1369 1368
1370 1369 If you know the branchhead cache was up to date before nodes were removed
1371 1370 and you also know the set of candidate new heads that may have resulted
1372 1371 from the destruction, you can set newheadnodes. This will enable the
1373 1372 code to update the branchheads cache, rather than having future code
1374 1373 decide it's invalid and regenerate it from scratch.
1375 1374 '''
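# e.g. after a strip, a caller that knows the surviving candidate
# heads could pass them along (hypothetical node list):
#     repo.destroyed(newheadnodes=candidates)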
1376 1375 # If we have info (newheadnodes) on how to update the branch cache,
1377 1376 # do it. Otherwise, since nodes were destroyed, the cache is stale and
1378 1377 # this will be caught the next time it is read.
1379 1378 if newheadnodes:
1380 1379 tiprev = len(self) - 1
1381 1380 ctxgen = (self[node] for node in newheadnodes
1382 1381 if self.changelog.hasnode(node))
1383 1382 self._updatebranchcache(self._branchcache, ctxgen)
1384 1383 self._writebranchcache(self._branchcache, self.changelog.tip(),
1385 1384 tiprev)
1386 1385
1387 1386 # Ensure the persistent tag cache is updated. Doing it now
1388 1387 # means that the tag cache only has to worry about destroyed
1389 1388 # heads immediately after a strip/rollback. That in turn
1390 1389 # guarantees that "cachetip == currenttip" (comparing both rev
1391 1390 # and node) always means no nodes have been added or destroyed.
1392 1391
1393 1392 # XXX this is suboptimal when qrefresh'ing: we strip the current
1394 1393 # head, refresh the tag cache, then immediately add a new head.
1395 1394 # But I think doing it this way is necessary for the "instant
1396 1395 # tag cache retrieval" case to work.
1397 1396 self.invalidatecaches()
1398 1397
1399 1398 def walk(self, match, node=None):
1400 1399 '''
1401 1400 walk recursively through the directory tree or a given
1402 1401 changeset, finding all files matched by the match
1403 1402 function
1404 1403 '''
1405 1404 return self[node].walk(match)
1406 1405
1407 1406 def status(self, node1='.', node2=None, match=None,
1408 1407 ignored=False, clean=False, unknown=False,
1409 1408 listsubrepos=False):
1410 1409 """return status of files between two nodes or node and working
1411 1410 directory.
1412 1411
1413 1412 If node1 is None, use the first dirstate parent instead.
1414 1413 If node2 is None, compare node1 with working directory.
1415 1414 """
1416 1415
1417 1416 def mfmatches(ctx):
1418 1417 mf = ctx.manifest().copy()
1419 1418 if match.always():
1420 1419 return mf
1421 1420 for fn in mf.keys():
1422 1421 if not match(fn):
1423 1422 del mf[fn]
1424 1423 return mf
1425 1424
1426 1425 if isinstance(node1, context.changectx):
1427 1426 ctx1 = node1
1428 1427 else:
1429 1428 ctx1 = self[node1]
1430 1429 if isinstance(node2, context.changectx):
1431 1430 ctx2 = node2
1432 1431 else:
1433 1432 ctx2 = self[node2]
1434 1433
1435 1434 working = ctx2.rev() is None
1436 1435 parentworking = working and ctx1 == self['.']
1437 1436 match = match or matchmod.always(self.root, self.getcwd())
1438 1437 listignored, listclean, listunknown = ignored, clean, unknown
1439 1438
1440 1439 # load earliest manifest first for caching reasons
1441 1440 if not working and ctx2.rev() < ctx1.rev():
1442 1441 ctx2.manifest()
1443 1442
1444 1443 if not parentworking:
1445 1444 def bad(f, msg):
1446 1445 # 'f' may be a directory pattern from 'match.files()',
1447 1446 # so 'f not in ctx1' is not enough
1448 1447 if f not in ctx1 and f not in ctx1.dirs():
1449 1448 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1450 1449 match.bad = bad
1451 1450
1452 1451 if working: # we need to scan the working dir
1453 1452 subrepos = []
1454 1453 if '.hgsub' in self.dirstate:
1455 1454 subrepos = ctx2.substate.keys()
1456 1455 s = self.dirstate.status(match, subrepos, listignored,
1457 1456 listclean, listunknown)
1458 1457 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1459 1458
1460 1459 # check for any possibly clean files
1461 1460 if parentworking and cmp:
1462 1461 fixup = []
1463 1462 # do a full compare of any files that might have changed
1464 1463 for f in sorted(cmp):
1465 1464 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1466 1465 or ctx1[f].cmp(ctx2[f])):
1467 1466 modified.append(f)
1468 1467 else:
1469 1468 fixup.append(f)
1470 1469
1471 1470 # update dirstate for files that are actually clean
1472 1471 if fixup:
1473 1472 if listclean:
1474 1473 clean += fixup
1475 1474
1476 1475 try:
1477 1476 # updating the dirstate is optional
1478 1477 # so we don't wait on the lock
1479 1478 wlock = self.wlock(False)
1480 1479 try:
1481 1480 for f in fixup:
1482 1481 self.dirstate.normal(f)
1483 1482 finally:
1484 1483 wlock.release()
1485 1484 except error.LockError:
1486 1485 pass
1487 1486
1488 1487 if not parentworking:
1489 1488 mf1 = mfmatches(ctx1)
1490 1489 if working:
1491 1490 # we are comparing working dir against non-parent
1492 1491 # generate a pseudo-manifest for the working dir
1493 1492 mf2 = mfmatches(self['.'])
1494 1493 for f in cmp + modified + added:
1495 1494 mf2[f] = None
1496 1495 mf2.set(f, ctx2.flags(f))
1497 1496 for f in removed:
1498 1497 if f in mf2:
1499 1498 del mf2[f]
1500 1499 else:
1501 1500 # we are comparing two revisions
1502 1501 deleted, unknown, ignored = [], [], []
1503 1502 mf2 = mfmatches(ctx2)
1504 1503
1505 1504 modified, added, clean = [], [], []
1506 1505 withflags = mf1.withflags() | mf2.withflags()
1507 1506 for fn in mf2:
1508 1507 if fn in mf1:
1509 1508 if (fn not in deleted and
1510 1509 ((fn in withflags and mf1.flags(fn) != mf2.flags(fn)) or
1511 1510 (mf1[fn] != mf2[fn] and
1512 1511 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1513 1512 modified.append(fn)
1514 1513 elif listclean:
1515 1514 clean.append(fn)
1516 1515 del mf1[fn]
1517 1516 elif fn not in deleted:
1518 1517 added.append(fn)
1519 1518 removed = mf1.keys()
1520 1519
1521 1520 if working and modified and not self.dirstate._checklink:
1522 1521 # Symlink placeholders may get non-symlink-like contents
1523 1522 # via user error or dereferencing by NFS or Samba servers,
1524 1523 # so we filter out any placeholders that don't look like a
1525 1524 # symlink
1526 1525 sane = []
1527 1526 for f in modified:
1528 1527 if ctx2.flags(f) == 'l':
1529 1528 d = ctx2[f].data()
1530 1529 if len(d) >= 1024 or '\n' in d or util.binary(d):
1531 1530 self.ui.debug('ignoring suspect symlink placeholder'
1532 1531 ' "%s"\n' % f)
1533 1532 continue
1534 1533 sane.append(f)
1535 1534 modified = sane
1536 1535
1537 1536 r = modified, added, removed, deleted, unknown, ignored, clean
1538 1537
1539 1538 if listsubrepos:
1540 1539 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1541 1540 if working:
1542 1541 rev2 = None
1543 1542 else:
1544 1543 rev2 = ctx2.substate[subpath][1]
1545 1544 try:
1546 1545 submatch = matchmod.narrowmatcher(subpath, match)
1547 1546 s = sub.status(rev2, match=submatch, ignored=listignored,
1548 1547 clean=listclean, unknown=listunknown,
1549 1548 listsubrepos=True)
1550 1549 for rfiles, sfiles in zip(r, s):
1551 1550 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1552 1551 except error.LookupError:
1553 1552 self.ui.status(_("skipping missing subrepository: %s\n")
1554 1553 % subpath)
1555 1554
1556 1555 for l in r:
1557 1556 l.sort()
1558 1557 return r
1559 1558
1560 1559 def heads(self, start=None):
1561 1560 heads = self.changelog.heads(start)
1562 1561 # sort the output in rev descending order
1563 1562 return sorted(heads, key=self.changelog.rev, reverse=True)
1564 1563
1565 1564 def branchheads(self, branch=None, start=None, closed=False):
1566 1565 '''return a (possibly filtered) list of heads for the given branch
1567 1566
1568 1567 Heads are returned in topological order, from newest to oldest.
1569 1568 If branch is None, use the dirstate branch.
1570 1569 If start is not None, return only heads reachable from start.
1571 1570 If closed is True, return heads that are marked as closed as well.
1572 1571 '''
1573 1572 if branch is None:
1574 1573 branch = self[None].branch()
1575 1574 branches = self.branchmap()
1576 1575 if branch not in branches:
1577 1576 return []
1578 1577 # the cache returns heads ordered lowest to highest
1579 1578 bheads = list(reversed(branches[branch]))
1580 1579 if start is not None:
1581 1580 # filter out the heads that cannot be reached from startrev
1582 1581 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1583 1582 bheads = [h for h in bheads if h in fbheads]
1584 1583 if not closed:
1585 1584 bheads = [h for h in bheads if not self[h].closesbranch()]
1586 1585 return bheads
1587 1586
1588 1587 def branches(self, nodes):
1589 1588 if not nodes:
1590 1589 nodes = [self.changelog.tip()]
1591 1590 b = []
1592 1591 for n in nodes:
1593 1592 t = n
1594 1593 while True:
1595 1594 p = self.changelog.parents(n)
1596 1595 if p[1] != nullid or p[0] == nullid:
1597 1596 b.append((t, n, p[0], p[1]))
1598 1597 break
1599 1598 n = p[0]
1600 1599 return b
1601 1600
1602 1601 def between(self, pairs):
1603 1602 r = []
1604 1603
1605 1604 for top, bottom in pairs:
1606 1605 n, l, i = top, [], 0
1607 1606 f = 1
1608 1607
1609 1608 while n != bottom and n != nullid:
1610 1609 p = self.changelog.parents(n)[0]
1611 1610 if i == f:
1612 1611 l.append(n)
1613 1612 f = f * 2
1614 1613 n = p
1615 1614 i += 1
1616 1615
1617 1616 r.append(l)
1618 1617
1619 1618 return r
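# Illustration (not part of the original code): for a linear chain with
# top = r10 and bottom = r0, the loop above samples ancestors at
# exponentially growing distances, so this pair contributes roughly
# [r9, r8, r6, r2] -- O(log n) nodes per (top, bottom) pair.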
1620 1619
1621 1620 def pull(self, remote, heads=None, force=False):
1622 1621 lock = self.lock()
1623 1622 try:
1624 1623 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1625 1624 force=force)
1626 1625 common, fetch, rheads = tmp
1627 1626 if not fetch:
1628 1627 self.ui.status(_("no changes found\n"))
1629 1628 added = []
1630 1629 result = 0
1631 1630 else:
1632 1631 if heads is None and list(common) == [nullid]:
1633 1632 self.ui.status(_("requesting all changes\n"))
1634 1633 elif heads is None and remote.capable('changegroupsubset'):
1635 1634 # issue1320, avoid a race if remote changed after discovery
1636 1635 heads = rheads
1637 1636
1638 1637 if remote.capable('getbundle'):
1639 1638 cg = remote.getbundle('pull', common=common,
1640 1639 heads=heads or rheads)
1641 1640 elif heads is None:
1642 1641 cg = remote.changegroup(fetch, 'pull')
1643 1642 elif not remote.capable('changegroupsubset'):
1644 1643 raise util.Abort(_("partial pull cannot be done because "
1645 1644 "other repository doesn't support "
1646 1645 "changegroupsubset."))
1647 1646 else:
1648 1647 cg = remote.changegroupsubset(fetch, heads, 'pull')
1649 1648 clstart = len(self.changelog)
1650 1649 result = self.addchangegroup(cg, 'pull', remote.url())
1651 1650 clend = len(self.changelog)
1652 1651 added = [self.changelog.node(r) for r in xrange(clstart, clend)]
1653 1652
1654 1653 # compute target subset
1655 1654 if heads is None:
1656 1655 # We pulled everything possible
1657 1656 # sync on everything common
1658 1657 subset = common + added
1659 1658 else:
1660 1659 # We pulled a specific subset
1661 1660 # sync on this subset
1662 1661 subset = heads
1663 1662
1664 1663 # Get remote phases data from remote
1665 1664 remotephases = remote.listkeys('phases')
1666 1665 publishing = bool(remotephases.get('publishing', False))
1667 1666 if remotephases and not publishing:
1668 1667 # remote is new and non-publishing
1669 1668 pheads, _dr = phases.analyzeremotephases(self, subset,
1670 1669 remotephases)
1671 1670 phases.advanceboundary(self, phases.public, pheads)
1672 1671 phases.advanceboundary(self, phases.draft, subset)
1673 1672 else:
1674 1673 # Remote is old or publishing; all common changesets
1675 1674 # should be seen as public
1676 1675 phases.advanceboundary(self, phases.public, subset)
1676
1677 remoteobs = remote.listkeys('obsolete')
1678 if 'dump' in remoteobs:
1679 data = base85.b85decode(remoteobs['dump'])
1680 self.obsstore.mergemarkers(data)
1677 1681 finally:
1678 1682 lock.release()
1679 1683
1680 1684 return result
1681 1685
1682 1686 def checkpush(self, force, revs):
1683 1687 """Extensions can override this function if additional checks have
1684 1688 to be performed before pushing, or call it if they override push
1685 1689 command.
1686 1690 """
1687 1691 pass
1688 1692
1689 1693 def push(self, remote, force=False, revs=None, newbranch=False):
1690 1694 '''Push outgoing changesets (limited by revs) from the current
1691 1695 repository to remote. Return an integer:
1692 1696 - None means nothing to push
1693 1697 - 0 means HTTP error
1694 1698 - 1 means we pushed and remote head count is unchanged *or*
1695 1699 we have outgoing changesets but refused to push
1696 1700 - other values as described by addchangegroup()
1697 1701 '''
1698 1702 # there are two ways to push to remote repo:
1699 1703 #
1700 1704 # addchangegroup assumes local user can lock remote
1701 1705 # repo (local filesystem, old ssh servers).
1702 1706 #
1703 1707 # unbundle assumes local user cannot lock remote repo (new ssh
1704 1708 # servers, http servers).
1705 1709
1706 1710 # get local lock as we might write phase data
1707 1711 locallock = self.lock()
1708 1712 try:
1709 1713 self.checkpush(force, revs)
1710 1714 lock = None
1711 1715 unbundle = remote.capable('unbundle')
1712 1716 if not unbundle:
1713 1717 lock = remote.lock()
1714 1718 try:
1715 1719 # discovery
1716 1720 fci = discovery.findcommonincoming
1717 1721 commoninc = fci(self, remote, force=force)
1718 1722 common, inc, remoteheads = commoninc
1719 1723 fco = discovery.findcommonoutgoing
1720 1724 outgoing = fco(self, remote, onlyheads=revs,
1721 1725 commoninc=commoninc, force=force)
1722 1726
1723 1727
1724 1728 if not outgoing.missing:
1725 1729 # nothing to push
1726 1730 scmutil.nochangesfound(self.ui, outgoing.excluded)
1727 1731 ret = None
1728 1732 else:
1729 1733 # something to push
1730 1734 if not force:
1731 1735 discovery.checkheads(self, remote, outgoing,
1732 1736 remoteheads, newbranch,
1733 1737 bool(inc))
1734 1738
1735 1739 # create a changegroup from local
1736 1740 if revs is None and not outgoing.excluded:
1737 1741 # push everything,
1738 1742 # use the fast path, no race possible on push
1739 1743 cg = self._changegroup(outgoing.missing, 'push')
1740 1744 else:
1741 1745 cg = self.getlocalbundle('push', outgoing)
1742 1746
1743 1747 # apply changegroup to remote
1744 1748 if unbundle:
1745 1749 # the local repo finds heads on the server and works out
1746 1750 # which revs it must push. once the revs are transferred, if
1747 1751 # the server finds it has different heads (someone else won
1748 1752 # the commit/push race), the server aborts.
1749 1753 if force:
1750 1754 remoteheads = ['force']
1751 1755 # ssh: return remote's addchangegroup()
1752 1756 # http: return remote's addchangegroup() or 0 for error
1753 1757 ret = remote.unbundle(cg, remoteheads, 'push')
1754 1758 else:
1755 1759 # we return an integer indicating remote head count
1756 1760 # change
1757 1761 ret = remote.addchangegroup(cg, 'push', self.url())
1758 1762
1759 1763 if ret:
1760 1764 # push succeeded, synchronize the target of the push
1761 1765 cheads = outgoing.missingheads
1762 1766 elif revs is None:
1763 1767 # pushing all outgoing revs failed. synchronize on all common
1764 1768 cheads = outgoing.commonheads
1765 1769 else:
1766 1770 # I want cheads = heads(::missingheads and ::commonheads)
1767 1771 # (missingheads is revs with secret changeset filtered out)
1768 1772 #
1769 1773 # This can be expressed as:
1770 1774 # cheads = ( (missingheads and ::commonheads)
1771 1775 # + (commonheads and ::missingheads)
1772 1776 # )
1773 1777 #
1774 1778 # while trying to push we already computed the following:
1775 1779 # common = (::commonheads)
1776 1780 # missing = ((commonheads::missingheads) - commonheads)
1777 1781 #
1778 1782 # We can pick:
1779 1783 # * missingheads part of common (::commonheads)
1780 1784 common = set(outgoing.common)
1781 1785 cheads = [node for node in revs if node in common]
1782 1786 # and
1783 1787 # * commonheads that are parents of roots of missing
1784 1788 revset = self.set('%ln and parents(roots(%ln))',
1785 1789 outgoing.commonheads,
1786 1790 outgoing.missing)
1787 1791 cheads.extend(c.node() for c in revset)
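# Worked illustration (hypothetical graph): if the remote already has
# head C (a common head) and we push changesets rooted at a child of C,
# then 'parents(roots(missing))' yields C, so C joins cheads and the
# phase boundary is advanced consistently on both sides.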
1788 1792 # even when we don't push, exchanging phase data is useful
1789 1793 remotephases = remote.listkeys('phases')
1790 1794 if not remotephases: # old server or public only repo
1791 1795 phases.advanceboundary(self, phases.public, cheads)
1792 1796 # don't push any phase data as there is nothing to push
1793 1797 else:
1794 1798 ana = phases.analyzeremotephases(self, cheads, remotephases)
1795 1799 pheads, droots = ana
1796 1800 ### Apply remote phase on local
1797 1801 if remotephases.get('publishing', False):
1798 1802 phases.advanceboundary(self, phases.public, cheads)
1799 1803 else: # publish = False
1800 1804 phases.advanceboundary(self, phases.public, pheads)
1801 1805 phases.advanceboundary(self, phases.draft, cheads)
1802 1806 ### Apply local phase on remote
1803 1807
1804 1808 # Get the list of all revs that are draft on remote but public here.
1805 1809 # XXX Beware that the revset breaks if droots is not strictly a
1806 1810 # XXX set of roots; we may want to ensure it is, but that is costly
1807 1811 outdated = self.set('heads((%ln::%ln) and public())',
1808 1812 droots, cheads)
1809 1813 for newremotehead in outdated:
1810 1814 r = remote.pushkey('phases',
1811 1815 newremotehead.hex(),
1812 1816 str(phases.draft),
1813 1817 str(phases.public))
1814 1818 if not r:
1815 1819 self.ui.warn(_('updating %s to public failed!\n')
1816 1820 % newremotehead)
1821 if self.obsstore and 'obsolete' in remote.listkeys('namespaces'):
1822 data = self.obsstore._writemarkers()
1823 r = remote.pushkey('obsolete', 'dump', '',
1824 base85.b85encode(data))
1825 if not r:
1826 self.ui.warn(_('failed to push obsolete markers!\n'))
1817 1827 finally:
1818 1828 if lock is not None:
1819 1829 lock.release()
1820 1830 finally:
1821 1831 locallock.release()
1822 1832
1823 1833 self.ui.debug("checking for updated bookmarks\n")
1824 1834 rb = remote.listkeys('bookmarks')
1825 1835 for k in rb.keys():
1826 1836 if k in self._bookmarks:
1827 1837 nr, nl = rb[k], hex(self._bookmarks[k])
1828 1838 if nr in self:
1829 1839 cr = self[nr]
1830 1840 cl = self[nl]
1831 1841 if cl in cr.descendants():
1832 1842 r = remote.pushkey('bookmarks', k, nr, nl)
1833 1843 if r:
1834 1844 self.ui.status(_("updating bookmark %s\n") % k)
1835 1845 else:
1836 1846 self.ui.warn(_('updating bookmark %s'
1837 1847 ' failed!\n') % k)
1838 1848
1839 1849 return ret
1840 1850
1841 1851 def changegroupinfo(self, nodes, source):
1842 1852 if self.ui.verbose or source == 'bundle':
1843 1853 self.ui.status(_("%d changesets found\n") % len(nodes))
1844 1854 if self.ui.debugflag:
1845 1855 self.ui.debug("list of changesets:\n")
1846 1856 for node in nodes:
1847 1857 self.ui.debug("%s\n" % hex(node))
1848 1858
1849 1859 def changegroupsubset(self, bases, heads, source):
1850 1860 """Compute a changegroup consisting of all the nodes that are
1851 1861 descendants of any of the bases and ancestors of any of the heads.
1852 1862 Return a chunkbuffer object whose read() method will return
1853 1863 successive changegroup chunks.
1854 1864
1855 1865 It is fairly complex as determining which filenodes and which
1856 1866 manifest nodes need to be included for the changeset to be complete
1857 1867 is non-trivial.
1858 1868
1859 1869 Another wrinkle is doing the reverse, figuring out which changeset in
1860 1870 the changegroup a particular filenode or manifestnode belongs to.
1861 1871 """
1862 1872 cl = self.changelog
1863 1873 if not bases:
1864 1874 bases = [nullid]
1865 1875 csets, bases, heads = cl.nodesbetween(bases, heads)
1866 1876 # We assume that all ancestors of bases are known
1867 1877 common = set(cl.ancestors([cl.rev(n) for n in bases]))
1868 1878 return self._changegroupsubset(common, csets, heads, source)
1869 1879
1870 1880 def getlocalbundle(self, source, outgoing):
1871 1881 """Like getbundle, but taking a discovery.outgoing as an argument.
1872 1882
1873 1883 This is only implemented for local repos and reuses potentially
1874 1884 precomputed sets in outgoing."""
1875 1885 if not outgoing.missing:
1876 1886 return None
1877 1887 return self._changegroupsubset(outgoing.common,
1878 1888 outgoing.missing,
1879 1889 outgoing.missingheads,
1880 1890 source)
1881 1891
1882 1892 def getbundle(self, source, heads=None, common=None):
1883 1893 """Like changegroupsubset, but returns the set difference between the
1884 1894 ancestors of heads and the ancestors of common.
1885 1895
1886 1896 If heads is None, use the local heads. If common is None, use [nullid].
1887 1897
1888 1898 The nodes in common might not all be known locally due to the way the
1889 1899 current discovery protocol works.
1890 1900 """
1891 1901 cl = self.changelog
1892 1902 if common:
1893 1903 nm = cl.nodemap
1894 1904 common = [n for n in common if n in nm]
1895 1905 else:
1896 1906 common = [nullid]
1897 1907 if not heads:
1898 1908 heads = cl.heads()
1899 1909 return self.getlocalbundle(source,
1900 1910 discovery.outgoing(cl, common, heads))
1901 1911
1902 1912 def _changegroupsubset(self, commonrevs, csets, heads, source):
1903 1913
1904 1914 cl = self.changelog
1905 1915 mf = self.manifest
1906 1916 mfs = {} # needed manifests
1907 1917 fnodes = {} # needed file nodes
1908 1918 changedfiles = set()
1909 1919 fstate = ['', {}]
1910 1920 count = [0, 0]
1911 1921
1912 1922 # can we go through the fast path?
1913 1923 heads.sort()
1914 1924 if heads == sorted(self.heads()):
1915 1925 return self._changegroup(csets, source)
1916 1926
1917 1927 # slow path
1918 1928 self.hook('preoutgoing', throw=True, source=source)
1919 1929 self.changegroupinfo(csets, source)
1920 1930
1921 1931 # filter any nodes that claim to be part of the known set
1922 1932 def prune(revlog, missing):
1923 1933 rr, rl = revlog.rev, revlog.linkrev
1924 1934 return [n for n in missing
1925 1935 if rl(rr(n)) not in commonrevs]
1926 1936
1927 1937 progress = self.ui.progress
1928 1938 _bundling = _('bundling')
1929 1939 _changesets = _('changesets')
1930 1940 _manifests = _('manifests')
1931 1941 _files = _('files')
1932 1942
1933 1943 def lookup(revlog, x):
1934 1944 if revlog == cl:
1935 1945 c = cl.read(x)
1936 1946 changedfiles.update(c[3])
1937 1947 mfs.setdefault(c[0], x)
1938 1948 count[0] += 1
1939 1949 progress(_bundling, count[0],
1940 1950 unit=_changesets, total=count[1])
1941 1951 return x
1942 1952 elif revlog == mf:
1943 1953 clnode = mfs[x]
1944 1954 mdata = mf.readfast(x)
1945 1955 for f, n in mdata.iteritems():
1946 1956 if f in changedfiles:
1947 1957 fnodes[f].setdefault(n, clnode)
1948 1958 count[0] += 1
1949 1959 progress(_bundling, count[0],
1950 1960 unit=_manifests, total=count[1])
1951 1961 return clnode
1952 1962 else:
1953 1963 progress(_bundling, count[0], item=fstate[0],
1954 1964 unit=_files, total=count[1])
1955 1965 return fstate[1][x]
1956 1966
1957 1967 bundler = changegroup.bundle10(lookup)
1958 1968 reorder = self.ui.config('bundle', 'reorder', 'auto')
1959 1969 if reorder == 'auto':
1960 1970 reorder = None
1961 1971 else:
1962 1972 reorder = util.parsebool(reorder)
1963 1973
1964 1974 def gengroup():
1965 1975 # Create a changenode group generator that will call our functions
1966 1976 # back to lookup the owning changenode and collect information.
1967 1977 count[:] = [0, len(csets)]
1968 1978 for chunk in cl.group(csets, bundler, reorder=reorder):
1969 1979 yield chunk
1970 1980 progress(_bundling, None)
1971 1981
1972 1982 # Create a generator for the manifestnodes that calls our lookup
1973 1983 # and data collection functions back.
1974 1984 for f in changedfiles:
1975 1985 fnodes[f] = {}
1976 1986 count[:] = [0, len(mfs)]
1977 1987 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1978 1988 yield chunk
1979 1989 progress(_bundling, None)
1980 1990
1981 1991 mfs.clear()
1982 1992
1983 1993 # Go through all our files in order sorted by name.
1984 1994 count[:] = [0, len(changedfiles)]
1985 1995 for fname in sorted(changedfiles):
1986 1996 filerevlog = self.file(fname)
1987 1997 if not len(filerevlog):
1988 1998 raise util.Abort(_("empty or missing revlog for %s")
1989 1999 % fname)
1990 2000 fstate[0] = fname
1991 2001 fstate[1] = fnodes.pop(fname, {})
1992 2002
1993 2003 nodelist = prune(filerevlog, fstate[1])
1994 2004 if nodelist:
1995 2005 count[0] += 1
1996 2006 yield bundler.fileheader(fname)
1997 2007 for chunk in filerevlog.group(nodelist, bundler, reorder):
1998 2008 yield chunk
1999 2009
2000 2010 # Signal that no more groups are left.
2001 2011 yield bundler.close()
2002 2012 progress(_bundling, None)
2003 2013
2004 2014 if csets:
2005 2015 self.hook('outgoing', node=hex(csets[0]), source=source)
2006 2016
2007 2017 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2008 2018
2009 2019 def changegroup(self, basenodes, source):
2010 2020 # to avoid a race we use changegroupsubset() (issue1320)
2011 2021 return self.changegroupsubset(basenodes, self.heads(), source)
2012 2022
2013 2023 def _changegroup(self, nodes, source):
2014 2024 """Compute the changegroup of all nodes that we have that a recipient
2015 2025 doesn't. Return a chunkbuffer object whose read() method will return
2016 2026 successive changegroup chunks.
2017 2027
2018 2028 This is much easier than the previous function as we can assume that
2019 2029 the recipient has any changenode we aren't sending them.
2020 2030
2021 2031 nodes is the set of nodes to send"""
2022 2032
2023 2033 cl = self.changelog
2024 2034 mf = self.manifest
2025 2035 mfs = {}
2026 2036 changedfiles = set()
2027 2037 fstate = ['']
2028 2038 count = [0, 0]
2029 2039
2030 2040 self.hook('preoutgoing', throw=True, source=source)
2031 2041 self.changegroupinfo(nodes, source)
2032 2042
2033 2043 revset = set([cl.rev(n) for n in nodes])
2034 2044
2035 2045 def gennodelst(log):
2036 2046 ln, llr = log.node, log.linkrev
2037 2047 return [ln(r) for r in log if llr(r) in revset]
2038 2048
2039 2049 progress = self.ui.progress
2040 2050 _bundling = _('bundling')
2041 2051 _changesets = _('changesets')
2042 2052 _manifests = _('manifests')
2043 2053 _files = _('files')
2044 2054
2045 2055 def lookup(revlog, x):
2046 2056 if revlog == cl:
2047 2057 c = cl.read(x)
2048 2058 changedfiles.update(c[3])
2049 2059 mfs.setdefault(c[0], x)
2050 2060 count[0] += 1
2051 2061 progress(_bundling, count[0],
2052 2062 unit=_changesets, total=count[1])
2053 2063 return x
2054 2064 elif revlog == mf:
2055 2065 count[0] += 1
2056 2066 progress(_bundling, count[0],
2057 2067 unit=_manifests, total=count[1])
2058 2068 return cl.node(revlog.linkrev(revlog.rev(x)))
2059 2069 else:
2060 2070 progress(_bundling, count[0], item=fstate[0],
2061 2071 total=count[1], unit=_files)
2062 2072 return cl.node(revlog.linkrev(revlog.rev(x)))
2063 2073
2064 2074 bundler = changegroup.bundle10(lookup)
2065 2075 reorder = self.ui.config('bundle', 'reorder', 'auto')
2066 2076 if reorder == 'auto':
2067 2077 reorder = None
2068 2078 else:
2069 2079 reorder = util.parsebool(reorder)
2070 2080
2071 2081 def gengroup():
2072 2082 '''yield a sequence of changegroup chunks (strings)'''
2073 2083 # construct a list of all changed files
2074 2084
2075 2085 count[:] = [0, len(nodes)]
2076 2086 for chunk in cl.group(nodes, bundler, reorder=reorder):
2077 2087 yield chunk
2078 2088 progress(_bundling, None)
2079 2089
2080 2090 count[:] = [0, len(mfs)]
2081 2091 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
2082 2092 yield chunk
2083 2093 progress(_bundling, None)
2084 2094
2085 2095 count[:] = [0, len(changedfiles)]
2086 2096 for fname in sorted(changedfiles):
2087 2097 filerevlog = self.file(fname)
2088 2098 if not len(filerevlog):
2089 2099 raise util.Abort(_("empty or missing revlog for %s")
2090 2100 % fname)
2091 2101 fstate[0] = fname
2092 2102 nodelist = gennodelst(filerevlog)
2093 2103 if nodelist:
2094 2104 count[0] += 1
2095 2105 yield bundler.fileheader(fname)
2096 2106 for chunk in filerevlog.group(nodelist, bundler, reorder):
2097 2107 yield chunk
2098 2108 yield bundler.close()
2099 2109 progress(_bundling, None)
2100 2110
2101 2111 if nodes:
2102 2112 self.hook('outgoing', node=hex(nodes[0]), source=source)
2103 2113
2104 2114 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
2105 2115
2106 2116 def addchangegroup(self, source, srctype, url, emptyok=False):
2107 2117 """Add the changegroup returned by source.read() to this repo.
2108 2118 srctype is a string like 'push', 'pull', or 'unbundle'. url is
2109 2119 the URL of the repo where this changegroup is coming from.
2110 2120
2111 2121 Return an integer summarizing the change to this repo:
2112 2122 - nothing changed or no source: 0
2113 2123 - more heads than before: 1+added heads (2..n)
2114 2124 - fewer heads than before: -1-removed heads (-2..-n)
2115 2125 - number of heads stays the same: 1
2116 2126 """
2117 2127 def csmap(x):
2118 2128 self.ui.debug("add changeset %s\n" % short(x))
2119 2129 return len(cl)
2120 2130
2121 2131 def revmap(x):
2122 2132 return cl.rev(x)
2123 2133
2124 2134 if not source:
2125 2135 return 0
2126 2136
2127 2137 self.hook('prechangegroup', throw=True, source=srctype, url=url)
2128 2138
2129 2139 changesets = files = revisions = 0
2130 2140 efiles = set()
2131 2141
2132 2142 # write changelog data to temp files so concurrent readers will not see
2133 2143 # an inconsistent view
2134 2144 cl = self.changelog
2135 2145 cl.delayupdate()
2136 2146 oldheads = cl.heads()
2137 2147
2138 2148 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
2139 2149 try:
2140 2150 trp = weakref.proxy(tr)
2141 2151 # pull off the changeset group
2142 2152 self.ui.status(_("adding changesets\n"))
2143 2153 clstart = len(cl)
2144 2154 class prog(object):
2145 2155 step = _('changesets')
2146 2156 count = 1
2147 2157 ui = self.ui
2148 2158 total = None
2149 2159 def __call__(self):
2150 2160 self.ui.progress(self.step, self.count, unit=_('chunks'),
2151 2161 total=self.total)
2152 2162 self.count += 1
2153 2163 pr = prog()
2154 2164 source.callback = pr
2155 2165
2156 2166 source.changelogheader()
2157 2167 srccontent = cl.addgroup(source, csmap, trp)
2158 2168 if not (srccontent or emptyok):
2159 2169 raise util.Abort(_("received changelog group is empty"))
2160 2170 clend = len(cl)
2161 2171 changesets = clend - clstart
2162 2172 for c in xrange(clstart, clend):
2163 2173 efiles.update(self[c].files())
2164 2174 efiles = len(efiles)
2165 2175 self.ui.progress(_('changesets'), None)
2166 2176
2167 2177 # pull off the manifest group
2168 2178 self.ui.status(_("adding manifests\n"))
2169 2179 pr.step = _('manifests')
2170 2180 pr.count = 1
2171 2181 pr.total = changesets # manifests <= changesets
2172 2182 # no need to check for empty manifest group here:
2173 2183 # if the result of the merge of 1 and 2 is the same in 3 and 4,
2174 2184 # no new manifest will be created and the manifest group will
2175 2185 # be empty during the pull
2176 2186 source.manifestheader()
2177 2187 self.manifest.addgroup(source, revmap, trp)
2178 2188 self.ui.progress(_('manifests'), None)
2179 2189
2180 2190 needfiles = {}
2181 2191 if self.ui.configbool('server', 'validate', default=False):
2182 2192 # validate incoming csets have their manifests
2183 2193 for cset in xrange(clstart, clend):
2184 2194 mfest = self.changelog.read(self.changelog.node(cset))[0]
2185 2195 mfest = self.manifest.readdelta(mfest)
2186 2196 # store file nodes we must see
2187 2197 for f, n in mfest.iteritems():
2188 2198 needfiles.setdefault(f, set()).add(n)
2189 2199
2190 2200 # process the files
2191 2201 self.ui.status(_("adding file changes\n"))
2192 2202 pr.step = _('files')
2193 2203 pr.count = 1
2194 2204 pr.total = efiles
2195 2205 source.callback = None
2196 2206
2197 2207 while True:
2198 2208 chunkdata = source.filelogheader()
2199 2209 if not chunkdata:
2200 2210 break
2201 2211 f = chunkdata["filename"]
2202 2212 self.ui.debug("adding %s revisions\n" % f)
2203 2213 pr()
2204 2214 fl = self.file(f)
2205 2215 o = len(fl)
2206 2216 if not fl.addgroup(source, revmap, trp):
2207 2217 raise util.Abort(_("received file revlog group is empty"))
2208 2218 revisions += len(fl) - o
2209 2219 files += 1
2210 2220 if f in needfiles:
2211 2221 needs = needfiles[f]
2212 2222 for new in xrange(o, len(fl)):
2213 2223 n = fl.node(new)
2214 2224 if n in needs:
2215 2225 needs.remove(n)
2216 2226 if not needs:
2217 2227 del needfiles[f]
2218 2228 self.ui.progress(_('files'), None)
2219 2229
2220 2230 for f, needs in needfiles.iteritems():
2221 2231 fl = self.file(f)
2222 2232 for n in needs:
2223 2233 try:
2224 2234 fl.rev(n)
2225 2235 except error.LookupError:
2226 2236 raise util.Abort(
2227 2237 _('missing file data for %s:%s - run hg verify') %
2228 2238 (f, hex(n)))
2229 2239
2230 2240 dh = 0
2231 2241 if oldheads:
2232 2242 heads = cl.heads()
2233 2243 dh = len(heads) - len(oldheads)
2234 2244 for h in heads:
2235 2245 if h not in oldheads and self[h].closesbranch():
2236 2246 dh -= 1
2237 2247 htext = ""
2238 2248 if dh:
2239 2249 htext = _(" (%+d heads)") % dh
2240 2250
2241 2251 self.ui.status(_("added %d changesets"
2242 2252 " with %d changes to %d files%s\n")
2243 2253 % (changesets, revisions, files, htext))
2244 2254
2245 2255 if changesets > 0:
2246 2256 p = lambda: cl.writepending() and self.root or ""
2247 2257 self.hook('pretxnchangegroup', throw=True,
2248 2258 node=hex(cl.node(clstart)), source=srctype,
2249 2259 url=url, pending=p)
2250 2260
2251 2261 added = [cl.node(r) for r in xrange(clstart, clend)]
2252 2262 publishing = self.ui.configbool('phases', 'publish', True)
2253 2263 if srctype == 'push':
2254 2264 # Old servers cannot push the boundary themselves.
2255 2265 # New servers won't push the boundary if the changeset already
2256 2266 # existed locally as secret
2257 2267 #
2258 2268 # We should not use 'added' here but the list of all changes in
2259 2269 # the bundle
2260 2270 if publishing:
2261 2271 phases.advanceboundary(self, phases.public, srccontent)
2262 2272 else:
2263 2273 phases.advanceboundary(self, phases.draft, srccontent)
2264 2274 phases.retractboundary(self, phases.draft, added)
2265 2275 elif srctype != 'strip':
2266 2276 # publishing only alters behavior during push
2267 2277 #
2268 2278 # strip should not touch boundary at all
2269 2279 phases.retractboundary(self, phases.draft, added)
2270 2280
2271 2281 # make changelog see real files again
2272 2282 cl.finalize(trp)
2273 2283
2274 2284 tr.close()
2275 2285
2276 2286 if changesets > 0:
2277 2287 def runhooks():
2278 2288 # forcefully update the on-disk branch cache
2279 2289 self.ui.debug("updating the branch cache\n")
2280 2290 self.updatebranchcache()
2281 2291 self.hook("changegroup", node=hex(cl.node(clstart)),
2282 2292 source=srctype, url=url)
2283 2293
2284 2294 for n in added:
2285 2295 self.hook("incoming", node=hex(n), source=srctype,
2286 2296 url=url)
2287 2297 self._afterlock(runhooks)
2288 2298
2289 2299 finally:
2290 2300 tr.release()
2291 2301 # never return 0 here:
2292 2302 if dh < 0:
2293 2303 return dh - 1
2294 2304 else:
2295 2305 return dh + 1
2296 2306
2297 2307 def stream_in(self, remote, requirements):
2298 2308 lock = self.lock()
2299 2309 try:
2300 2310 fp = remote.stream_out()
2301 2311 l = fp.readline()
2302 2312 try:
2303 2313 resp = int(l)
2304 2314 except ValueError:
2305 2315 raise error.ResponseError(
2306 2316 _('unexpected response from remote server:'), l)
2307 2317 if resp == 1:
2308 2318 raise util.Abort(_('operation forbidden by server'))
2309 2319 elif resp == 2:
2310 2320 raise util.Abort(_('locking the remote repository failed'))
2311 2321 elif resp != 0:
2312 2322 raise util.Abort(_('the server sent an unknown error code'))
2313 2323 self.ui.status(_('streaming all changes\n'))
2314 2324 l = fp.readline()
2315 2325 try:
2316 2326 total_files, total_bytes = map(int, l.split(' ', 1))
2317 2327 except (ValueError, TypeError):
2318 2328 raise error.ResponseError(
2319 2329 _('unexpected response from remote server:'), l)
2320 2330 self.ui.status(_('%d files to transfer, %s of data\n') %
2321 2331 (total_files, util.bytecount(total_bytes)))
2322 2332 handled_bytes = 0
2323 2333 self.ui.progress(_('clone'), 0, total=total_bytes)
2324 2334 start = time.time()
2325 2335 for i in xrange(total_files):
2326 2336 # XXX doesn't support '\n' or '\r' in filenames
2327 2337 l = fp.readline()
2328 2338 try:
2329 2339 name, size = l.split('\0', 1)
2330 2340 size = int(size)
2331 2341 except (ValueError, TypeError):
2332 2342 raise error.ResponseError(
2333 2343 _('unexpected response from remote server:'), l)
2334 2344 if self.ui.debugflag:
2335 2345 self.ui.debug('adding %s (%s)\n' %
2336 2346 (name, util.bytecount(size)))
2337 2347 # for backwards compat, name was partially encoded
2338 2348 ofp = self.sopener(store.decodedir(name), 'w')
2339 2349 for chunk in util.filechunkiter(fp, limit=size):
2340 2350 handled_bytes += len(chunk)
2341 2351 self.ui.progress(_('clone'), handled_bytes,
2342 2352 total=total_bytes)
2343 2353 ofp.write(chunk)
2344 2354 ofp.close()
2345 2355 elapsed = time.time() - start
2346 2356 if elapsed <= 0:
2347 2357 elapsed = 0.001
2348 2358 self.ui.progress(_('clone'), None)
2349 2359 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2350 2360 (util.bytecount(total_bytes), elapsed,
2351 2361 util.bytecount(total_bytes / elapsed)))
2352 2362
2353 2363 # new requirements = old non-format requirements +
2354 2364 # new format-related requirements
2355 2365 # from the streamed-in repository
2356 2366 requirements.update(set(self.requirements) - self.supportedformats)
2357 2367 self._applyrequirements(requirements)
2358 2368 self._writerequirements()
2359 2369
2360 2370 self.invalidate()
2361 2371 return len(self.heads()) + 1
2362 2372 finally:
2363 2373 lock.release()
2364 2374
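# Wire format sketch of the stream_out response parsed above (informal,
# reconstructed from the parsing code, not a normative spec):
#
#   "0\n"                        status line; 1/2/other signal errors
#   "<filecount> <bytecount>\n"  totals, used for progress reporting
#   then for each file:
#   "<name>\0<size>\n"           header, followed by <size> raw bytes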
2365 2375 def clone(self, remote, heads=[], stream=False):
2366 2376 '''clone remote repository.
2367 2377
2368 2378 keyword arguments:
2369 2379 heads: list of revs to clone (forces use of pull)
2370 2380 stream: use streaming clone if possible'''
2371 2381
2372 2382 # now, all clients that can request uncompressed clones can
2373 2383 # read repo formats supported by all servers that can serve
2374 2384 # them.
2375 2385
2376 2386 # if revlog format changes, client will have to check version
2377 2387 # and format flags on "stream" capability, and use
2378 2388 # uncompressed only if compatible.
2379 2389
2380 2390 if not stream:
2381 2391 # if the server explicitly prefers to stream (for fast LANs)
2382 2392 stream = remote.capable('stream-preferred')
2383 2393
2384 2394 if stream and not heads:
2385 2395 # 'stream' means remote revlog format is revlogv1 only
2386 2396 if remote.capable('stream'):
2387 2397 return self.stream_in(remote, set(('revlogv1',)))
2388 2398 # otherwise, 'streamreqs' contains the remote revlog format
2389 2399 streamreqs = remote.capable('streamreqs')
2390 2400 if streamreqs:
2391 2401 streamreqs = set(streamreqs.split(','))
2392 2402 # if we support it, stream in and adjust our requirements
2393 2403 if not streamreqs - self.supportedformats:
2394 2404 return self.stream_in(remote, streamreqs)
2395 2405 return self.pull(remote, heads)
2396 2406
2397 2407 def pushkey(self, namespace, key, old, new):
2398 2408 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2399 2409 old=old, new=new)
2400 2410 ret = pushkey.push(self, namespace, key, old, new)
2401 2411 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2402 2412 ret=ret)
2403 2413 return ret
2404 2414
2405 2415 def listkeys(self, namespace):
2406 2416 self.hook('prelistkeys', throw=True, namespace=namespace)
2407 2417 values = pushkey.list(self, namespace)
2408 2418 self.hook('listkeys', namespace=namespace, values=values)
2409 2419 return values
2410 2420
2411 2421 def debugwireargs(self, one, two, three=None, four=None, five=None):
2412 2422 '''used to test argument passing over the wire'''
2413 2423 return "%s %s %s %s %s" % (one, two, three, four, five)
2414 2424
2415 2425 def savecommitmessage(self, text):
2416 2426 fp = self.opener('last-message.txt', 'wb')
2417 2427 try:
2418 2428 fp.write(text)
2419 2429 finally:
2420 2430 fp.close()
2421 2431 return self.pathto(fp.name[len(self.root)+1:])
2422 2432
2423 2433 # used to avoid circular references so destructors work
2424 2434 def aftertrans(files):
2425 2435 renamefiles = [tuple(t) for t in files]
2426 2436 def a():
2427 2437 for src, dest in renamefiles:
2428 2438 try:
2429 2439 util.rename(src, dest)
2430 2440 except OSError: # journal file does not yet exist
2431 2441 pass
2432 2442 return a
2433 2443
2434 2444 def undoname(fn):
2435 2445 base, name = os.path.split(fn)
2436 2446 assert name.startswith('journal')
2437 2447 return os.path.join(base, name.replace('journal', 'undo', 1))
2438 2448
2439 2449 def instance(ui, path, create):
2440 2450 return localrepository(ui, util.urllocalpath(path), create)
2441 2451
2442 2452 def islocal(path):
2443 2453 return True
@@ -1,231 +1,269 b''
1 1 # obsolete.py - obsolete markers handling
2 2 #
3 3 # Copyright 2012 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 4 # Logilab SA <contact@logilab.fr>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """Obsolete markers handling
10 10
11 11 An obsolete marker maps an old changeset to a list of new
12 12 changesets. If the list of new changesets is empty, the old changeset
13 13 is said to be "killed". Otherwise, the old changeset is being
14 14 "replaced" by the new changesets.
15 15
16 16 Obsolete markers can be used to record and distribute changeset graph
17 17 transformations performed by history rewriting operations, and help
18 18 building new tools to reconciliate conflicting rewriting actions. To
19 19 facilitate conflicts resolution, markers include various annotations
20 20 besides old and news changeset identifiers, such as creation date or
21 21 author name.
22 22
23 23
24 24 Format
25 25 ------
26 26
27 27 Markers are stored in an append-only file stored in
28 28 '.hg/store/obsstore'.
29 29
30 30 The file starts with a version header:
31 31
32 32 - 1 unsigned byte: version number, starting at zero.
33 33
34 34
35 35 The header is followed by the markers. Each marker is made of:
36 36
37 37 - 1 unsigned byte: number of new changesets "N", can be zero.
38 38
39 39 - 1 unsigned 32-bit integer: metadata size "M" in bytes.
40 40
41 41 - 1 byte: a bit field. It is reserved for flags used in obsolete
42 42 markers common operations, to avoid repeated decoding of metadata
43 43 entries.
44 44
45 45 - 20 bytes: obsoleted changeset identifier.
46 46
47 47 - N*20 bytes: new changeset identifiers.
48 48
49 49 - M bytes: metadata as a sequence of nul-terminated strings. Each
50 50 string contains a key and a value, separated by a colon ':', without
51 51 additional encoding. Keys cannot contain '\0' or ':' and values
52 52 cannot contain '\0'.
53 53 """
54 54 import struct
55 from mercurial import util
55 from mercurial import util, base85
56 56 from i18n import _
57 57
58 58 _pack = struct.pack
59 59 _unpack = struct.unpack
60 60
61 61
62 62
63 63 # data used for parsing and writing
64 64 _fmversion = 0
65 65 _fmfixed = '>BIB20s'
66 66 _fmnode = '20s'
67 67 _fmfsize = struct.calcsize(_fmfixed)
68 68 _fnodesize = struct.calcsize(_fmnode)
69 69
70 70 def _readmarkers(data):
71 71 """Read and enumerate markers from raw data"""
72 72 off = 0
73 73 diskversion = _unpack('>B', data[off:off + 1])[0]
74 74 off += 1
75 75 if diskversion != _fmversion:
76 76 raise util.Abort(_('parsing obsolete marker: unknown version %r')
77 77 % diskversion)
78 78
79 79 # Loop on markers
80 80 l = len(data)
81 81 while off + _fmfsize <= l:
82 82 # read fixed part
83 83 cur = data[off:off + _fmfsize]
84 84 off += _fmfsize
85 85 nbsuc, mdsize, flags, pre = _unpack(_fmfixed, cur)
86 86 # read replacement
87 87 sucs = ()
88 88 if nbsuc:
89 89 s = (_fnodesize * nbsuc)
90 90 cur = data[off:off + s]
91 91 sucs = _unpack(_fmnode * nbsuc, cur)
92 92 off += s
93 93 # read metadata
94 94 # (metadata will be decoded on demand)
95 95 metadata = data[off:off + mdsize]
96 96 if len(metadata) != mdsize:
97 97 raise util.Abort(_('parsing obsolete marker: metadata is too '
98 98 'short, %d bytes expected, got %d')
99 99 % (len(metadata), mdsize))
100 100 off += mdsize
101 101 yield (pre, sucs, flags, metadata)
102 102
103 103 def encodemeta(meta):
104 104 """Return encoded metadata string to string mapping.
105 105
106 106 Assume no ':' in key and no '\0' in both key and value."""
107 107 for key, value in meta.iteritems():
108 108 if ':' in key or '\0' in key:
109 109 raise ValueError("':' and '\0' are forbidden in metadata keys")
110 110 if '\0' in value:
111 111 raise ValueError("'\0' is forbidden in metadata values")
112 112 return '\0'.join(['%s:%s' % (k, meta[k]) for k in sorted(meta)])
113 113
114 114 def decodemeta(data):
115 115 """Return string to string dictionary from encoded version."""
116 116 d = {}
117 117 for l in data.split('\0'):
118 118 if l:
119 119 key, value = l.split(':')
120 120 d[key] = value
121 121 return d
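# Round-trip sketch: encodemeta() sorts keys, so decoding its output
# recovers the original mapping:
#
#   decodemeta(encodemeta({'user': 'alice', 'date': '0 0'}))
#   # -> {'date': '0 0', 'user': 'alice'}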
122 122
123 123 class marker(object):
124 124 """Wrap obsolete marker raw data"""
125 125
126 126 def __init__(self, repo, data):
127 127 # the repo argument will be used to create changectx in a later version
128 128 self._repo = repo
129 129 self._data = data
130 130 self._decodedmeta = None
131 131
132 132 def precnode(self):
133 133 """Precursor changeset node identifier"""
134 134 return self._data[0]
135 135
136 136 def succnodes(self):
137 137 """List of successor changesets node identifiers"""
138 138 return self._data[1]
139 139
140 140 def metadata(self):
141 141 """Decoded metadata dictionary"""
142 142 if self._decodedmeta is None:
143 143 self._decodedmeta = decodemeta(self._data[3])
144 144 return self._decodedmeta
145 145
146 146 def date(self):
147 147 """Creation date as (unixtime, offset)"""
148 148 parts = self.metadata()['date'].split(' ')
149 149 return (float(parts[0]), int(parts[1]))
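# Example (illustrative): a metadata entry of 'date:1342100000 -7200'
# decodes to (1342100000.0, -7200).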
150 150
151 151 class obsstore(object):
152 152 """Store obsolete markers
153 153
154 154 Markers can be accessed with two mappings:
155 155 - precursors: old -> set(new)
156 156 - successors: new -> set(old)
157 157 """
158 158
159 159 def __init__(self):
160 160 self._all = []
161 161 # new markers to serialize
162 162 self._new = []
163 163 self.precursors = {}
164 164 self.successors = {}
165 165
166 166 def __iter__(self):
167 167 return iter(self._all)
168 168
169 def __nonzero__(self):
170 return bool(self._all)
171
169 172 def create(self, prec, succs=(), flag=0, metadata=None):
170 173 """obsolete: add a new obsolete marker
171 174
172 175 * ensuring it is hashable
173 176 * check mandatory metadata
174 177 * encode metadata
175 178 """
176 179 if metadata is None:
177 180 metadata = {}
178 181 if len(prec) != 20:
179 182 raise ValueError(prec)
180 183 for succ in succs:
181 184 if len(succ) != 20:
182 185 raise ValueError(succ)
183 186 marker = (str(prec), tuple(succs), int(flag), encodemeta(metadata))
184 187 self.add(marker)
185 188
186 189 def add(self, marker):
187 190 """Add a new marker to the store
188 191
189 192 This marker still needs to be written to disk"""
190 193 self._new.append(marker)
191 194 self._load(marker)
192 195
193 196 def loadmarkers(self, data):
194 197 """Load all markers in data, mark them as known."""
195 198 for marker in _readmarkers(data):
196 199 self._load(marker)
197 200
201 def mergemarkers(self, data):
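"""Load markers from raw data, adding only those not already known."""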
202 other = set(_readmarkers(data))
203 local = set(self._all)
204 new = other - local
205 for marker in new:
206 self.add(marker)
207
198 208 def flushmarkers(self, stream):
199 209 """Write all markers to a stream
200 210
201 211 After this operation, "new" markers are considered "known"."""
202 212 self._writemarkers(stream)
203 213 self._new[:] = []
204 214
205 215 def _load(self, marker):
206 216 self._all.append(marker)
207 217 pre, sucs = marker[:2]
208 218 self.precursors.setdefault(pre, set()).add(marker)
209 219 for suc in sucs:
210 220 self.successors.setdefault(suc, set()).add(marker)
211 221
212 def _writemarkers(self, stream):
222 def _writemarkers(self, stream=None):
213 223 # Kept separate from flushmarkers(), it will be reused for
214 224 # markers exchange.
215 stream.write(_pack('>B', _fmversion))
225 if stream is None:
226 final = []
227 w = final.append
228 else:
229 w = stream.write
230 w(_pack('>B', _fmversion))
216 231 for marker in self._all:
217 232 pre, sucs, flags, metadata = marker
218 233 nbsuc = len(sucs)
219 234 format = _fmfixed + (_fmnode * nbsuc)
220 235 data = [nbsuc, len(metadata), flags, pre]
221 236 data.extend(sucs)
222 stream.write(_pack(format, *data))
223 stream.write(metadata)
237 w(_pack(format, *data))
238 w(metadata)
239 if stream is None:
240 return ''.join(final)
241
242 def listmarkers(repo):
243 """List markers over pushkey"""
244 if not repo.obsstore:
245 return {}
246 data = repo.obsstore._writemarkers()
247 return {'dump': base85.b85encode(data)}
224 248
225
249 def pushmarker(repo, key, old, new):
250 """Push markers over pushkey"""
251 if key != 'dump':
252 repo.ui.warn(_('unknown key: %r\n') % key)
253 return 0
254 if old:
255 repo.ui.warn(_('unexpected old value for key %r\n') % key)
256 return 0
257 data = base85.b85decode(new)
258 lock = repo.lock()
259 try:
260 repo.obsstore.mergemarkers(data)
261 return 1
262 finally:
263 lock.release()
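# End-to-end sketch of the exchange these two functions enable (mirrors
# the localrepo.push()/pull() code; 'remote' is an assumed peer object):
#
#   # push side
#   data = repo.obsstore._writemarkers()
#   remote.pushkey('obsolete', 'dump', '', base85.b85encode(data))
#   # pull side
#   remoteobs = remote.listkeys('obsolete')
#   if 'dump' in remoteobs:
#       repo.obsstore.mergemarkers(base85.b85decode(remoteobs['dump']))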
226 264
227 265 def allmarkers(repo):
228 266 """all obsolete markers known in a repository"""
229 267 for markerdata in repo.obsstore:
230 268 yield marker(repo, markerdata)
231 269
@@ -1,36 +1,37 b''
1 1 # pushkey.py - dispatching for pushing and pulling keys
2 2 #
3 3 # Copyright 2010 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 import bookmarks, phases
8 import bookmarks, phases, obsolete
9 9
10 10 def _nslist(repo):
11 11 n = {}
12 12 for k in _namespaces:
13 13 n[k] = ""
14 14 return n
15 15
16 16 _namespaces = {"namespaces": (lambda *x: False, _nslist),
17 17 "bookmarks": (bookmarks.pushbookmark, bookmarks.listbookmarks),
18 18 "phases": (phases.pushphase, phases.listphases),
19 "obsolete": (obsolete.pushmarker, obsolete.listmarkers),
19 20 }
20 21
21 22 def register(namespace, pushkey, listkeys):
22 23 _namespaces[namespace] = (pushkey, listkeys)
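# Example (hypothetical extension code): registering an extra namespace so
# its keys travel over the same pushkey transport:
#
#   def pushwidget(repo, key, old, new):
#       return 1  # should succeed iff the stored value was `old`
#   def listwidgets(repo):
#       return {}  # key -> value mapping
#   register('widgets', pushwidget, listwidgets)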
23 24
24 25 def _get(namespace):
25 26 return _namespaces.get(namespace, (lambda *x: False, lambda *x: {}))
26 27
27 28 def push(repo, namespace, key, old, new):
28 29 '''should succeed iff value was old'''
29 30 pk = _get(namespace)[0]
30 31 return pk(repo, key, old, new)
31 32
32 33 def list(repo, namespace):
33 34 '''return a dict'''
34 35 lk = _get(namespace)[1]
35 36 return lk(repo)
36 37
@@ -1,264 +1,266 b''
1 1 $ "$TESTDIR/hghave" serve || exit 80
2 2
3 3 initialize
4 4
5 5 $ hg init a
6 6 $ cd a
7 7 $ echo 'test' > test
8 8 $ hg commit -Am'test'
9 9 adding test
10 10
11 11 set bookmarks
12 12
13 13 $ hg bookmark X
14 14 $ hg bookmark Y
15 15 $ hg bookmark Z
16 16
17 17 import bookmark by name
18 18
19 19 $ hg init ../b
20 20 $ cd ../b
21 21 $ hg book Y
22 22 $ hg book
23 23 * Y -1:000000000000
24 24 $ hg pull ../a
25 25 pulling from ../a
26 26 requesting all changes
27 27 adding changesets
28 28 adding manifests
29 29 adding file changes
30 30 added 1 changesets with 1 changes to 1 files
31 31 updating bookmark Y
32 32 adding remote bookmark X
33 33 adding remote bookmark Z
34 34 (run 'hg update' to get a working copy)
35 35 $ hg bookmarks
36 36 X 0:4e3505fd9583
37 37 Y 0:4e3505fd9583
38 38 Z 0:4e3505fd9583
39 39 $ hg debugpushkey ../a namespaces
40 40 bookmarks
41 41 phases
42 42 namespaces
43 obsolete
43 44 $ hg debugpushkey ../a bookmarks
44 45 Y 4e3505fd95835d721066b76e75dbb8cc554d7f77
45 46 X 4e3505fd95835d721066b76e75dbb8cc554d7f77
46 47 Z 4e3505fd95835d721066b76e75dbb8cc554d7f77
47 48 $ hg pull -B X ../a
48 49 pulling from ../a
49 50 no changes found
50 51 importing bookmark X
51 52 $ hg bookmark
52 53 X 0:4e3505fd9583
53 54 Y 0:4e3505fd9583
54 55 Z 0:4e3505fd9583
55 56
56 57 export bookmark by name
57 58
58 59 $ hg bookmark W
59 60 $ hg bookmark foo
60 61 $ hg bookmark foobar
61 62 $ hg push -B W ../a
62 63 pushing to ../a
63 64 searching for changes
64 65 no changes found
65 66 exporting bookmark W
66 67 [1]
67 68 $ hg -R ../a bookmarks
68 69 W -1:000000000000
69 70 X 0:4e3505fd9583
70 71 Y 0:4e3505fd9583
71 72 * Z 0:4e3505fd9583
72 73
73 74 delete a remote bookmark
74 75
75 76 $ hg book -d W
76 77 $ hg push -B W ../a
77 78 pushing to ../a
78 79 searching for changes
79 80 no changes found
80 81 deleting remote bookmark W
81 82 [1]
82 83
83 84 push/pull name that doesn't exist
84 85
85 86 $ hg push -B badname ../a
86 87 pushing to ../a
87 88 searching for changes
88 89 no changes found
89 90 bookmark badname does not exist on the local or remote repository!
90 91 [2]
91 92 $ hg pull -B anotherbadname ../a
92 93 pulling from ../a
93 94 abort: remote bookmark anotherbadname not found!
94 95 [255]
95 96
96 97 divergent bookmarks
97 98
98 99 $ cd ../a
99 100 $ echo c1 > f1
100 101 $ hg ci -Am1
101 102 adding f1
102 103 $ hg book -f X
103 104 $ hg book
104 105 * X 1:0d2164f0ce0d
105 106 Y 0:4e3505fd9583
106 107 Z 1:0d2164f0ce0d
107 108
108 109 $ cd ../b
109 110 $ hg up
110 111 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
111 112 updating bookmark foobar
112 113 $ echo c2 > f2
113 114 $ hg ci -Am2
114 115 adding f2
115 116 $ hg book -f X
116 117 $ hg book
117 118 * X 1:9b140be10808
118 119 Y 0:4e3505fd9583
119 120 Z 0:4e3505fd9583
120 121 foo -1:000000000000
121 122 foobar 1:9b140be10808
122 123
123 124 $ hg pull --config paths.foo=../a foo
124 125 pulling from $TESTTMP/a (glob)
125 126 searching for changes
126 127 adding changesets
127 128 adding manifests
128 129 adding file changes
129 130 added 1 changesets with 1 changes to 1 files (+1 heads)
130 131 divergent bookmark X stored as X@foo
131 132 updating bookmark Z
132 133 (run 'hg heads' to see heads, 'hg merge' to merge)
133 134 $ hg book
134 135 * X 1:9b140be10808
135 136 X@foo 2:0d2164f0ce0d
136 137 Y 0:4e3505fd9583
137 138 Z 2:0d2164f0ce0d
138 139 foo -1:000000000000
139 140 foobar 1:9b140be10808
140 141 $ hg push -f ../a
141 142 pushing to ../a
142 143 searching for changes
143 144 adding changesets
144 145 adding manifests
145 146 adding file changes
146 147 added 1 changesets with 1 changes to 1 files (+1 heads)
147 148 $ hg -R ../a book
148 149 * X 1:0d2164f0ce0d
149 150 Y 0:4e3505fd9583
150 151 Z 1:0d2164f0ce0d
151 152
152 153 update a remote bookmark from a non-head to a head
153 154
154 155 $ hg up -q Y
155 156 $ echo c3 > f2
156 157 $ hg ci -Am3
157 158 adding f2
158 159 created new head
159 160 $ hg push ../a
160 161 pushing to ../a
161 162 searching for changes
162 163 adding changesets
163 164 adding manifests
164 165 adding file changes
165 166 added 1 changesets with 1 changes to 1 files (+1 heads)
166 167 updating bookmark Y
167 168 $ hg -R ../a book
168 169 * X 1:0d2164f0ce0d
169 170 Y 3:f6fc62dde3c0
170 171 Z 1:0d2164f0ce0d
171 172
172 173 diverging a remote bookmark fails
173 174
174 175 $ hg up -q 4e3505fd9583
175 176 $ echo c4 > f2
176 177 $ hg ci -Am4
177 178 adding f2
178 179 created new head
179 180 $ hg book -f Y
180 181
181 182 $ cat <<EOF > ../a/.hg/hgrc
182 183 > [web]
183 184 > push_ssl = false
184 185 > allow_push = *
185 186 > EOF
186 187
187 188 $ hg -R ../a serve -p $HGPORT2 -d --pid-file=../hg2.pid
188 189 $ cat ../hg2.pid >> $DAEMON_PIDS
189 190
190 191 $ hg push http://localhost:$HGPORT2/
191 192 pushing to http://localhost:$HGPORT2/
192 193 searching for changes
193 194 abort: push creates new remote head 4efff6d98829!
194 195 (did you forget to merge? use push -f to force)
195 196 [255]
196 197 $ hg -R ../a book
197 198 * X 1:0d2164f0ce0d
198 199 Y 3:f6fc62dde3c0
199 200 Z 1:0d2164f0ce0d
200 201
201 202 hgweb
202 203
203 204 $ cat <<EOF > .hg/hgrc
204 205 > [web]
205 206 > push_ssl = false
206 207 > allow_push = *
207 208 > EOF
208 209
209 210 $ hg serve -p $HGPORT -d --pid-file=../hg.pid -E errors.log
210 211 $ cat ../hg.pid >> $DAEMON_PIDS
211 212 $ cd ../a
212 213
213 214 $ hg debugpushkey http://localhost:$HGPORT/ namespaces
214 215 bookmarks
215 216 phases
216 217 namespaces
218 obsolete
217 219 $ hg debugpushkey http://localhost:$HGPORT/ bookmarks
218 220 Y 4efff6d98829d9c824c621afd6e3f01865f5439f
219 221 foobar 9b140be1080824d768c5a4691a564088eede71f9
220 222 Z 0d2164f0ce0d8f1d6f94351eba04b794909be66c
221 223 foo 0000000000000000000000000000000000000000
222 224 X 9b140be1080824d768c5a4691a564088eede71f9
223 225 $ hg out -B http://localhost:$HGPORT/
224 226 comparing with http://localhost:$HGPORT/
225 227 searching for changed bookmarks
226 228 no changed bookmarks found
227 229 [1]
228 230 $ hg push -B Z http://localhost:$HGPORT/
229 231 pushing to http://localhost:$HGPORT/
230 232 searching for changes
231 233 no changes found
232 234 exporting bookmark Z
233 235 [1]
234 236 $ hg book -d Z
235 237 $ hg in -B http://localhost:$HGPORT/
236 238 comparing with http://localhost:$HGPORT/
237 239 searching for changed bookmarks
238 240 Z 0d2164f0ce0d
239 241 foo 000000000000
240 242 foobar 9b140be10808
241 243 $ hg pull -B Z http://localhost:$HGPORT/
242 244 pulling from http://localhost:$HGPORT/
243 245 no changes found
244 246 adding remote bookmark foobar
245 247 adding remote bookmark Z
246 248 adding remote bookmark foo
247 249 divergent bookmark X stored as X@1
248 250 importing bookmark Z
249 251 $ hg clone http://localhost:$HGPORT/ cloned-bookmarks
250 252 requesting all changes
251 253 adding changesets
252 254 adding manifests
253 255 adding file changes
254 256 added 5 changesets with 5 changes to 3 files (+3 heads)
255 257 updating to branch default
256 258 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
257 259 $ hg -R cloned-bookmarks bookmarks
258 260 X 1:9b140be10808
259 261 Y 4:4efff6d98829
260 262 Z 2:0d2164f0ce0d
261 263 foo -1:000000000000
262 264 foobar 1:9b140be10808
263 265
264 266 $ cd ..
@@ -1,628 +1,629 b''
1 1 commit hooks can see env vars
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ cat > .hg/hgrc <<EOF
6 6 > [hooks]
7 7 > commit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit"
8 8 > commit.b = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" commit.b"
9 9 > precommit = sh -c "HG_LOCAL= HG_NODE= HG_TAG= python \"$TESTDIR/printenv.py\" precommit"
10 10 > pretxncommit = sh -c "HG_LOCAL= HG_TAG= python \"$TESTDIR/printenv.py\" pretxncommit"
11 11 > pretxncommit.tip = hg -q tip
12 12 > pre-identify = python "$TESTDIR/printenv.py" pre-identify 1
13 13 > pre-cat = python "$TESTDIR/printenv.py" pre-cat
14 14 > post-cat = python "$TESTDIR/printenv.py" post-cat
15 15 > EOF
16 16 $ echo a > a
17 17 $ hg add a
18 18 $ hg commit -m a
19 19 precommit hook: HG_PARENT1=0000000000000000000000000000000000000000
20 20 pretxncommit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000 HG_PENDING=$TESTTMP/a
21 21 0:cb9a9f314b8b
22 22 commit hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
23 23 commit.b hook: HG_NODE=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PARENT1=0000000000000000000000000000000000000000
24 24
25 25 $ hg clone . ../b
26 26 updating to branch default
27 27 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
28 28 $ cd ../b
29 29
30 30 changegroup hooks can see env vars
31 31
32 32 $ cat > .hg/hgrc <<EOF
33 33 > [hooks]
34 34 > prechangegroup = python "$TESTDIR/printenv.py" prechangegroup
35 35 > changegroup = python "$TESTDIR/printenv.py" changegroup
36 36 > incoming = python "$TESTDIR/printenv.py" incoming
37 37 > EOF
38 38
39 39 pretxncommit and commit hooks can see both parents of merge
40 40
41 41 $ cd ../a
42 42 $ echo b >> a
43 43 $ hg commit -m a1 -d "1 0"
44 44 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
45 45 pretxncommit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
46 46 1:ab228980c14d
47 47 commit hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
48 48 commit.b hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
49 49 $ hg update -C 0
50 50 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
51 51 $ echo b > b
52 52 $ hg add b
53 53 $ hg commit -m b -d '1 0'
54 54 precommit hook: HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
55 55 pretxncommit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b HG_PENDING=$TESTTMP/a
56 56 2:ee9deb46ab31
57 57 commit hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
58 58 commit.b hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT1=cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b
59 59 created new head
60 60 $ hg merge 1
61 61 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 62 (branch merge, don't forget to commit)
63 63 $ hg commit -m merge -d '2 0'
64 64 precommit hook: HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
65 65 pretxncommit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd HG_PENDING=$TESTTMP/a
66 66 3:07f3376c1e65
67 67 commit hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
68 68 commit.b hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PARENT1=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_PARENT2=ab228980c14deea8b9555d91c9581127383e40fd
69 69
70 70 test generic hooks
71 71
72 72 $ hg id
73 73 pre-identify hook: HG_ARGS=id HG_OPTS={'bookmarks': None, 'branch': None, 'id': None, 'insecure': None, 'num': None, 'remotecmd': '', 'rev': '', 'ssh': '', 'tags': None} HG_PATS=[]
74 74 warning: pre-identify hook exited with status 1
75 75 [1]
76 76 $ hg cat b
77 77 pre-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b']
78 78 b
79 79 post-cat hook: HG_ARGS=cat b HG_OPTS={'decode': None, 'exclude': [], 'include': [], 'output': '', 'rev': ''} HG_PATS=['b'] HG_RESULT=0
80 80
81 81 $ cd ../b
82 82 $ hg pull ../a
83 83 pulling from ../a
84 84 searching for changes
85 85 prechangegroup hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
86 86 adding changesets
87 87 adding manifests
88 88 adding file changes
89 89 added 3 changesets with 2 changes to 2 files
90 90 changegroup hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
91 91 incoming hook: HG_NODE=ab228980c14deea8b9555d91c9581127383e40fd HG_SOURCE=pull HG_URL=file:$TESTTMP/a
92 92 incoming hook: HG_NODE=ee9deb46ab31e4cc3310f3cf0c3d668e4d8fffc2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
93 93 incoming hook: HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_SOURCE=pull HG_URL=file:$TESTTMP/a
94 94 (run 'hg update' to get a working copy)
95 95
96 96 tag hooks can see env vars
97 97
98 98 $ cd ../a
99 99 $ cat >> .hg/hgrc <<EOF
100 100 > pretag = python "$TESTDIR/printenv.py" pretag
101 101 > tag = sh -c "HG_PARENT1= HG_PARENT2= python \"$TESTDIR/printenv.py\" tag"
102 102 > EOF
103 103 $ hg tag -d '3 0' a
104 104 pretag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
105 105 precommit hook: HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
106 106 pretxncommit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2 HG_PENDING=$TESTTMP/a
107 107 4:539e4b31b6dc
108 108 tag hook: HG_LOCAL=0 HG_NODE=07f3376c1e655977439df2a814e3cc14b27abac2 HG_TAG=a
109 109 commit hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
110 110 commit.b hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PARENT1=07f3376c1e655977439df2a814e3cc14b27abac2
111 111 $ hg tag -l la
112 112 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
113 113 tag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=la
114 114
115 115 pretag hook can forbid tagging
116 116
117 117 $ echo "pretag.forbid = python \"$TESTDIR/printenv.py\" pretag.forbid 1" >> .hg/hgrc
118 118 $ hg tag -d '4 0' fa
119 119 pretag hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
120 120 pretag.forbid hook: HG_LOCAL=0 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fa
121 121 abort: pretag.forbid hook exited with status 1
122 122 [255]
123 123 $ hg tag -l fla
124 124 pretag hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
125 125 pretag.forbid hook: HG_LOCAL=1 HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_TAG=fla
126 126 abort: pretag.forbid hook exited with status 1
127 127 [255]
128 128
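An in-process pretag hook can enforce the same kind of policy; it receives the fields shown above as HG_NODE, HG_TAG and HG_LOCAL as keyword arguments. A sketch (the naming rule is only an example):

    def forbidtag(ui, repo, hooktype, node=None, tag=None, local=None, **kwargs):
        if tag.startswith('f'):
            ui.warn('tag name %s is forbidden\n' % tag)
            return True  # a true result makes the pretag hook fail
        return False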
129 129 pretxncommit hook can see the changeset and can roll back the
130 130 transaction; afterwards the changeset is no longer there
131 131
132 132 $ echo "pretxncommit.forbid0 = hg tip -q" >> .hg/hgrc
133 133 $ echo "pretxncommit.forbid1 = python \"$TESTDIR/printenv.py\" pretxncommit.forbid 1" >> .hg/hgrc
134 134 $ echo z > z
135 135 $ hg add z
136 136 $ hg -q tip
137 137 4:539e4b31b6dc
138 138 $ hg commit -m 'fail' -d '4 0'
139 139 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
140 140 pretxncommit hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
141 141 5:6f611f8018c1
142 142 5:6f611f8018c1
143 143 pretxncommit.forbid hook: HG_NODE=6f611f8018c10e827fee6bd2bc807f937e761567 HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/a
144 144 transaction abort!
145 145 rollback completed
146 146 abort: pretxncommit.forbid1 hook exited with status 1
147 147 [255]
148 148 $ hg -q tip
149 149 4:539e4b31b6dc
150 150
151 151 precommit hook can prevent commit
152 152
153 153 $ echo "precommit.forbid = python \"$TESTDIR/printenv.py\" precommit.forbid 1" >> .hg/hgrc
154 154 $ hg commit -m 'fail' -d '4 0'
155 155 precommit hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
156 156 precommit.forbid hook: HG_PARENT1=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10
157 157 abort: precommit.forbid hook exited with status 1
158 158 [255]
159 159 $ hg -q tip
160 160 4:539e4b31b6dc
161 161
162 162 preupdate hook can prevent update
163 163
164 164 $ echo "preupdate = python \"$TESTDIR/printenv.py\" preupdate" >> .hg/hgrc
165 165 $ hg update 1
166 166 preupdate hook: HG_PARENT1=ab228980c14d
167 167 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
168 168
169 169 update hook
170 170
171 171 $ echo "update = python \"$TESTDIR/printenv.py\" update" >> .hg/hgrc
172 172 $ hg update
173 173 preupdate hook: HG_PARENT1=539e4b31b6dc
174 174 update hook: HG_ERROR=0 HG_PARENT1=539e4b31b6dc
175 175 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
176 176
177 177 pushkey hook
178 178
179 179 $ echo "pushkey = python \"$TESTDIR/printenv.py\" pushkey" >> .hg/hgrc
180 180 $ cd ../b
181 181 $ hg bookmark -r null foo
182 182 $ hg push -B foo ../a
183 183 pushing to ../a
184 184 searching for changes
185 185 no changes found
186 186 exporting bookmark foo
187 187 pushkey hook: HG_KEY=foo HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000 HG_RET=1
188 188 [1]
189 189 $ cd ../a
190 190
191 191 listkeys hook
192 192
193 193 $ echo "listkeys = python \"$TESTDIR/printenv.py\" listkeys" >> .hg/hgrc
194 194 $ hg bookmark -r null bar
195 195 $ cd ../b
196 196 $ hg pull -B bar ../a
197 197 pulling from ../a
198 198 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
199 199 no changes found
200 200 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
201 listkeys hook: HG_NAMESPACE=obsolete HG_VALUES={}
201 202 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
202 203 adding remote bookmark bar
203 204 importing bookmark bar
204 205 $ cd ../a
205 206
206 207 test that prepushkey can prevent incoming keys
207 208
208 209 $ echo "prepushkey = python \"$TESTDIR/printenv.py\" prepushkey.forbid 1" >> .hg/hgrc
209 210 $ cd ../b
210 211 $ hg bookmark -r null baz
211 212 $ hg push -B baz ../a
212 213 pushing to ../a
213 214 searching for changes
214 215 no changes found
215 216 listkeys hook: HG_NAMESPACE=phases HG_VALUES={'cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b': '1', 'publishing': 'True'}
216 217 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
217 218 listkeys hook: HG_NAMESPACE=bookmarks HG_VALUES={'bar': '0000000000000000000000000000000000000000', 'foo': '0000000000000000000000000000000000000000'}
218 219 exporting bookmark baz
219 220 prepushkey.forbid hook: HG_KEY=baz HG_NAMESPACE=bookmarks HG_NEW=0000000000000000000000000000000000000000
220 221 abort: prepushkey hook exited with status 1
221 222 [255]
222 223 $ cd ../a
223 224
224 225 test that prelistkeys can prevent listing keys
225 226
226 227 $ echo "prelistkeys = python \"$TESTDIR/printenv.py\" prelistkeys.forbid 1" >> .hg/hgrc
227 228 $ hg bookmark -r null quux
228 229 $ cd ../b
229 230 $ hg pull -B quux ../a
230 231 pulling from ../a
231 232 prelistkeys.forbid hook: HG_NAMESPACE=bookmarks
232 233 abort: prelistkeys hook exited with status 1
233 234 [255]
234 235 $ cd ../a
235 236
236 237 prechangegroup hook can prevent incoming changes
237 238
238 239 $ cd ../b
239 240 $ hg -q tip
240 241 3:07f3376c1e65
241 242 $ cat > .hg/hgrc <<EOF
242 243 > [hooks]
243 244 > prechangegroup.forbid = python "$TESTDIR/printenv.py" prechangegroup.forbid 1
244 245 > EOF
245 246 $ hg pull ../a
246 247 pulling from ../a
247 248 searching for changes
248 249 prechangegroup.forbid hook: HG_SOURCE=pull HG_URL=file:$TESTTMP/a
249 250 abort: prechangegroup.forbid hook exited with status 1
250 251 [255]
251 252
253 254 pretxnchangegroup hook can see incoming changes and can roll back the
254 255 transaction; afterwards the incoming changes are no longer there
254 255
255 256 $ cat > .hg/hgrc <<EOF
256 257 > [hooks]
257 258 > pretxnchangegroup.forbid0 = hg tip -q
258 259 > pretxnchangegroup.forbid1 = python "$TESTDIR/printenv.py" pretxnchangegroup.forbid 1
259 260 > EOF
260 261 $ hg pull ../a
261 262 pulling from ../a
262 263 searching for changes
263 264 adding changesets
264 265 adding manifests
265 266 adding file changes
266 267 added 1 changesets with 1 changes to 1 files
267 268 4:539e4b31b6dc
268 269 pretxnchangegroup.forbid hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_PENDING=$TESTTMP/b HG_SOURCE=pull HG_URL=file:$TESTTMP/a
269 270 transaction abort!
270 271 rollback completed
271 272 abort: pretxnchangegroup.forbid1 hook exited with status 1
272 273 [255]
273 274 $ hg -q tip
274 275 3:07f3376c1e65
275 276
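While the transaction is still pending, an in-process pretxnchangegroup hook can walk everything that came in: HG_NODE is the first incoming changeset, and all revisions from there up to tip belong to the same changegroup. A sketch:

    def showincoming(ui, repo, hooktype, node=None, **kwargs):
        # revisions repo[node] .. tip are the pending incoming changesets
        for rev in xrange(repo[node].rev(), len(repo)):
            ui.write('incoming: %s\n' % repo[rev])
        return False  # returning True would roll the transaction back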
276 277 outgoing hooks can see env vars
277 278
278 279 $ rm .hg/hgrc
279 280 $ cat > ../a/.hg/hgrc <<EOF
280 281 > [hooks]
281 282 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
282 283 > outgoing = python "$TESTDIR/printenv.py" outgoing
283 284 > EOF
284 285 $ hg pull ../a
285 286 pulling from ../a
286 287 searching for changes
287 288 preoutgoing hook: HG_SOURCE=pull
288 289 adding changesets
289 290 outgoing hook: HG_NODE=539e4b31b6dc99b3cfbaa6b53cbc1c1f9a1e3a10 HG_SOURCE=pull
290 291 adding manifests
291 292 adding file changes
292 293 added 1 changesets with 1 changes to 1 files
293 294 adding remote bookmark quux
294 295 (run 'hg update' to get a working copy)
295 296 $ hg rollback
296 297 repository tip rolled back to revision 3 (undo pull)
297 298
298 299 preoutgoing hook can prevent outgoing changes
299 300
300 301 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> ../a/.hg/hgrc
301 302 $ hg pull ../a
302 303 pulling from ../a
303 304 searching for changes
304 305 preoutgoing hook: HG_SOURCE=pull
305 306 preoutgoing.forbid hook: HG_SOURCE=pull
306 307 abort: preoutgoing.forbid hook exited with status 1
307 308 [255]
308 309
309 310 outgoing hooks work for local clones
310 311
311 312 $ cd ..
312 313 $ cat > a/.hg/hgrc <<EOF
313 314 > [hooks]
314 315 > preoutgoing = python "$TESTDIR/printenv.py" preoutgoing
315 316 > outgoing = python "$TESTDIR/printenv.py" outgoing
316 317 > EOF
317 318 $ hg clone a c
318 319 preoutgoing hook: HG_SOURCE=clone
319 320 outgoing hook: HG_NODE=0000000000000000000000000000000000000000 HG_SOURCE=clone
320 321 updating to branch default
321 322 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
322 323 $ rm -rf c
323 324
324 325 preoutgoing hook can prevent outgoing changes for local clones
325 326
326 327 $ echo "preoutgoing.forbid = python \"$TESTDIR/printenv.py\" preoutgoing.forbid 1" >> a/.hg/hgrc
327 328 $ hg clone a zzz
328 329 preoutgoing hook: HG_SOURCE=clone
329 330 preoutgoing.forbid hook: HG_SOURCE=clone
330 331 abort: preoutgoing.forbid hook exited with status 1
331 332 [255]
332 333
333 334 $ cd "$TESTTMP/b"
334 335
335 336 $ cat > hooktests.py <<EOF
336 337 > from mercurial import util
337 338 >
338 339 > uncallable = 0
339 340 >
340 341 > def printargs(args):
341 342 > args.pop('ui', None)
342 343 > args.pop('repo', None)
343 344 > a = list(args.items())
344 345 > a.sort()
345 346 > print 'hook args:'
346 347 > for k, v in a:
347 348 > print ' ', k, v
348 349 >
349 350 > def passhook(**args):
350 351 > printargs(args)
351 352 >
352 353 > def failhook(**args):
353 354 > printargs(args)
354 355 > return True
355 356 >
356 357 > class LocalException(Exception):
357 358 > pass
358 359 >
359 360 > def raisehook(**args):
360 361 > raise LocalException('exception from hook')
361 362 >
362 363 > def aborthook(**args):
363 364 > raise util.Abort('raise abort from hook')
364 365 >
365 366 > def brokenhook(**args):
366 367 > return 1 + {}
367 368 >
368 369 > def verbosehook(ui, **args):
369 370 > ui.note('verbose output from hook\n')
370 371 >
371 372 > def printtags(ui, repo, **args):
372 373 > print repo.tags().keys()
373 374 >
374 375 > class container:
375 376 > unreachable = 1
376 377 > EOF
377 378
378 379 test python hooks
379 380
380 381 #if windows
381 382 $ PYTHONPATH="$TESTTMP/b;$PYTHONPATH"
382 383 #else
383 384 $ PYTHONPATH="$TESTTMP/b:$PYTHONPATH"
384 385 #endif
385 386 $ export PYTHONPATH
386 387
387 388 $ echo '[hooks]' > ../a/.hg/hgrc
388 389 $ echo 'preoutgoing.broken = python:hooktests.brokenhook' >> ../a/.hg/hgrc
389 390 $ hg pull ../a 2>&1 | grep 'raised an exception'
390 391 error: preoutgoing.broken hook raised an exception: unsupported operand type(s) for +: 'int' and 'dict'
391 392
392 393 $ echo '[hooks]' > ../a/.hg/hgrc
393 394 $ echo 'preoutgoing.raise = python:hooktests.raisehook' >> ../a/.hg/hgrc
394 395 $ hg pull ../a 2>&1 | grep 'raised an exception'
395 396 error: preoutgoing.raise hook raised an exception: exception from hook
396 397
397 398 $ echo '[hooks]' > ../a/.hg/hgrc
398 399 $ echo 'preoutgoing.abort = python:hooktests.aborthook' >> ../a/.hg/hgrc
399 400 $ hg pull ../a
400 401 pulling from ../a
401 402 searching for changes
402 403 error: preoutgoing.abort hook failed: raise abort from hook
403 404 abort: raise abort from hook
404 405 [255]
405 406
406 407 $ echo '[hooks]' > ../a/.hg/hgrc
407 408 $ echo 'preoutgoing.fail = python:hooktests.failhook' >> ../a/.hg/hgrc
408 409 $ hg pull ../a
409 410 pulling from ../a
410 411 searching for changes
411 412 hook args:
412 413 hooktype preoutgoing
413 414 source pull
414 415 abort: preoutgoing.fail hook failed
415 416 [255]
416 417
417 418 $ echo '[hooks]' > ../a/.hg/hgrc
418 419 $ echo 'preoutgoing.uncallable = python:hooktests.uncallable' >> ../a/.hg/hgrc
419 420 $ hg pull ../a
420 421 pulling from ../a
421 422 searching for changes
422 423 abort: preoutgoing.uncallable hook is invalid ("hooktests.uncallable" is not callable)
423 424 [255]
424 425
425 426 $ echo '[hooks]' > ../a/.hg/hgrc
426 427 $ echo 'preoutgoing.nohook = python:hooktests.nohook' >> ../a/.hg/hgrc
427 428 $ hg pull ../a
428 429 pulling from ../a
429 430 searching for changes
430 431 abort: preoutgoing.nohook hook is invalid ("hooktests.nohook" is not defined)
431 432 [255]
432 433
433 434 $ echo '[hooks]' > ../a/.hg/hgrc
434 435 $ echo 'preoutgoing.nomodule = python:nomodule' >> ../a/.hg/hgrc
435 436 $ hg pull ../a
436 437 pulling from ../a
437 438 searching for changes
438 439 abort: preoutgoing.nomodule hook is invalid ("nomodule" not in a module)
439 440 [255]
440 441
441 442 $ echo '[hooks]' > ../a/.hg/hgrc
442 443 $ echo 'preoutgoing.badmodule = python:nomodule.nowhere' >> ../a/.hg/hgrc
443 444 $ hg pull ../a
444 445 pulling from ../a
445 446 searching for changes
446 447 abort: preoutgoing.badmodule hook is invalid (import of "nomodule" failed)
447 448 [255]
448 449
449 450 $ echo '[hooks]' > ../a/.hg/hgrc
450 451 $ echo 'preoutgoing.unreachable = python:hooktests.container.unreachable' >> ../a/.hg/hgrc
451 452 $ hg pull ../a
452 453 pulling from ../a
453 454 searching for changes
454 455 abort: preoutgoing.unreachable hook is invalid (import of "hooktests.container" failed)
455 456 [255]
456 457
457 458 $ echo '[hooks]' > ../a/.hg/hgrc
458 459 $ echo 'preoutgoing.pass = python:hooktests.passhook' >> ../a/.hg/hgrc
459 460 $ hg pull ../a
460 461 pulling from ../a
461 462 searching for changes
462 463 hook args:
463 464 hooktype preoutgoing
464 465 source pull
465 466 adding changesets
466 467 adding manifests
467 468 adding file changes
468 469 added 1 changesets with 1 changes to 1 files
469 470 adding remote bookmark quux
470 471 (run 'hg update' to get a working copy)
471 472
472 473 make sure --traceback works
473 474
474 475 $ echo '[hooks]' > .hg/hgrc
475 476 $ echo 'commit.abort = python:hooktests.aborthook' >> .hg/hgrc
476 477
477 478 $ echo aa > a
478 479 $ hg --traceback commit -d '0 0' -ma 2>&1 | grep '^Traceback'
479 480 Traceback (most recent call last):
480 481
481 482 $ cd ..
482 483 $ hg init c
483 484 $ cd c
484 485
485 486 $ cat > hookext.py <<EOF
486 487 > def autohook(**args):
487 488 > print "Automatically installed hook"
488 489 >
489 490 > def reposetup(ui, repo):
490 491 > repo.ui.setconfig("hooks", "commit.auto", autohook)
491 492 > EOF
492 493 $ echo '[extensions]' >> .hg/hgrc
493 494 $ echo 'hookext = hookext.py' >> .hg/hgrc
494 495
495 496 $ touch foo
496 497 $ hg add foo
497 498 $ hg ci -d '0 0' -m 'add foo'
498 499 Automatically installed hook
499 500 $ echo >> foo
500 501 $ hg ci --debug -d '0 0' -m 'change foo'
501 502 foo
502 503 calling hook commit.auto: <function autohook at *> (glob)
503 504 Automatically installed hook
504 505 committed changeset 1:52998019f6252a2b893452765fcb0a47351a5708
505 506
506 507 $ hg showconfig hooks
507 508 hooks.commit.auto=<function autohook at *> (glob)
508 509
509 510 test python hook configured with python:[file]:[hook] syntax
510 511
511 512 $ cd ..
512 513 $ mkdir d
513 514 $ cd d
514 515 $ hg init repo
515 516 $ mkdir hooks
516 517
517 518 $ cd hooks
518 519 $ cat > testhooks.py <<EOF
519 520 > def testhook(**args):
520 521 > print 'hook works'
521 522 > EOF
522 523 $ echo '[hooks]' > ../repo/.hg/hgrc
523 524 $ echo "pre-commit.test = python:`pwd`/testhooks.py:testhook" >> ../repo/.hg/hgrc
524 525
525 526 $ cd ../repo
526 527 $ hg commit -d '0 0'
527 528 hook works
528 529 nothing changed
529 530 [1]
530 531
531 532 $ cd ../../b
532 533
533 534 make sure --traceback works on hook import failure
534 535
535 536 $ cat > importfail.py <<EOF
536 537 > import somebogusmodule
537 538 > # dereference something in the module to force demandimport to load it
538 539 > somebogusmodule.whatever
539 540 > EOF
540 541
541 542 $ echo '[hooks]' > .hg/hgrc
542 543 $ echo 'precommit.importfail = python:importfail.whatever' >> .hg/hgrc
543 544
544 545 $ echo a >> a
545 546 $ hg --traceback commit -ma 2>&1 | egrep '^(exception|Traceback|ImportError)'
546 547 exception from first failed import attempt:
547 548 Traceback (most recent call last):
548 549 ImportError: No module named somebogusmodule
549 550 exception from second failed import attempt:
550 551 Traceback (most recent call last):
551 552 ImportError: No module named hgext_importfail
552 553 Traceback (most recent call last):
553 554
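The two tracebacks come from the two import strategies used for 'python:importfail.whatever': the module name is tried as given first, then with an 'hgext_' prefix (the scheme used for hooks shipped inside extensions), which matches the 'hgext_importfail' error above. Roughly:

    modname = 'importfail'
    try:
        obj = __import__(modname)
    except ImportError:
        # second attempt, hence the second traceback above
        obj = __import__('hgext_' + modname)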
554 555 Issue1827: Hooks Update & Commit not completely post operation
555 556
556 557 commit and update hooks should run after command completion
557 558
558 559 $ echo '[hooks]' > .hg/hgrc
559 560 $ echo 'commit = hg id' >> .hg/hgrc
560 561 $ echo 'update = hg id' >> .hg/hgrc
561 562 $ echo bb > a
562 563 $ hg ci -ma
563 564 223eafe2750c tip
564 565 $ hg up 0
565 566 cb9a9f314b8b
566 567 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
567 568
568 569 make sure --verbose (and --quiet/--debug etc.) are propagated to the local ui
569 570 that is passed to pre/post hooks
570 571
571 572 $ echo '[hooks]' > .hg/hgrc
572 573 $ echo 'pre-identify = python:hooktests.verbosehook' >> .hg/hgrc
573 574 $ hg id
574 575 cb9a9f314b8b
575 576 $ hg id --verbose
576 577 calling hook pre-identify: hooktests.verbosehook
577 578 verbose output from hook
578 579 cb9a9f314b8b
579 580
580 581 Ensure hooks can be prioritized
581 582
582 583 $ echo '[hooks]' > .hg/hgrc
583 584 $ echo 'pre-identify.a = python:hooktests.verbosehook' >> .hg/hgrc
584 585 $ echo 'pre-identify.b = python:hooktests.verbosehook' >> .hg/hgrc
585 586 $ echo 'priority.pre-identify.b = 1' >> .hg/hgrc
586 587 $ echo 'pre-identify.c = python:hooktests.verbosehook' >> .hg/hgrc
587 588 $ hg id --verbose
588 589 calling hook pre-identify.b: hooktests.verbosehook
589 590 verbose output from hook
590 591 calling hook pre-identify.a: hooktests.verbosehook
591 592 verbose output from hook
592 593 calling hook pre-identify.c: hooktests.verbosehook
593 594 verbose output from hook
594 595 cb9a9f314b8b
595 596
596 597 new tags must be visible in pretxncommit (issue3210)
597 598
598 599 $ echo 'pretxncommit.printtags = python:hooktests.printtags' >> .hg/hgrc
599 600 $ hg tag -f foo
600 601 ['a', 'foo', 'tip']
601 602
602 603 new commits must be visible in pretxnchangegroup (issue3428)
603 604
604 605 $ cd ..
605 606 $ hg init to
606 607 $ echo '[hooks]' >> to/.hg/hgrc
607 608 $ echo 'pretxnchangegroup = hg --traceback tip' >> to/.hg/hgrc
608 609 $ echo a >> to/a
609 610 $ hg --cwd to ci -Ama
610 611 adding a
611 612 $ hg clone to from
612 613 updating to branch default
613 614 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
614 615 $ echo aa >> from/a
615 616 $ hg --cwd from ci -mb
616 617 $ hg --cwd from push
617 618 pushing to $TESTTMP/to (glob)
618 619 searching for changes
619 620 adding changesets
620 621 adding manifests
621 622 adding file changes
622 623 added 1 changesets with 1 changes to 1 files
623 624 changeset: 1:9836a07b9b9d
624 625 tag: tip
625 626 user: test
626 627 date: Thu Jan 01 00:00:00 1970 +0000
627 628 summary: b
628 629
@@ -1,124 +1,128 b''
1 1 $ "$TESTDIR/hghave" serve || exit 80
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ echo a > a
6 6 $ hg ci -Ama -d '1123456789 0'
7 7 adding a
8 8 $ hg --config server.uncompressed=True serve -p $HGPORT -d --pid-file=hg.pid
9 9 $ cat hg.pid >> $DAEMON_PIDS
10 10 $ cd ..
11 11 $ "$TESTDIR/tinyproxy.py" $HGPORT1 localhost >proxy.log 2>&1 </dev/null &
12 12 $ while [ ! -f proxy.pid ]; do sleep 0; done
13 13 $ cat proxy.pid >> $DAEMON_PIDS
14 14
15 15 url for proxy, stream
16 16
17 17 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --uncompressed http://localhost:$HGPORT/ b
18 18 streaming all changes
19 19 3 files to transfer, 303 bytes of data
20 20 transferred * bytes in * seconds (*/sec) (glob)
21 21 updating to branch default
22 22 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 23 $ cd b
24 24 $ hg verify
25 25 checking changesets
26 26 checking manifests
27 27 crosschecking files in changesets and manifests
28 28 checking files
29 29 1 files, 1 changesets, 1 total revisions
30 30 $ cd ..
31 31
32 32 url for proxy, pull
33 33
34 34 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT/ b-pull
35 35 requesting all changes
36 36 adding changesets
37 37 adding manifests
38 38 adding file changes
39 39 added 1 changesets with 1 changes to 1 files
40 40 updating to branch default
41 41 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 42 $ cd b-pull
43 43 $ hg verify
44 44 checking changesets
45 45 checking manifests
46 46 crosschecking files in changesets and manifests
47 47 checking files
48 48 1 files, 1 changesets, 1 total revisions
49 49 $ cd ..
50 50
51 51 host:port for proxy
52 52
53 53 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ c
54 54 requesting all changes
55 55 adding changesets
56 56 adding manifests
57 57 adding file changes
58 58 added 1 changesets with 1 changes to 1 files
59 59 updating to branch default
60 60 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 61
62 62 proxy url with user name and password
63 63
64 64 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ d
65 65 requesting all changes
66 66 adding changesets
67 67 adding manifests
68 68 adding file changes
69 69 added 1 changesets with 1 changes to 1 files
70 70 updating to branch default
71 71 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 72
73 73 url with user name and password
74 74
75 75 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://user:passwd@localhost:$HGPORT/ e
76 76 requesting all changes
77 77 adding changesets
78 78 adding manifests
79 79 adding file changes
80 80 added 1 changesets with 1 changes to 1 files
81 81 updating to branch default
82 82 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 83
84 84 bad host:port for proxy
85 85
86 86 $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
87 87 abort: error: Connection refused
88 88 [255]
89 89
90 90 do not use the proxy if it is in the no list
91 91
92 92 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.no=localhost http://localhost:$HGPORT/ g
93 93 requesting all changes
94 94 adding changesets
95 95 adding manifests
96 96 adding file changes
97 97 added 1 changesets with 1 changes to 1 files
98 98 updating to branch default
99 99 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 100 $ cat proxy.log
101 101 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
102 102 * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - (glob)
103 103 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
104 104 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
105 105 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
106 106 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
107 107 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
108 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=obsolete (glob)
108 109 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
109 110 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
110 111 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
111 112 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
112 113 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
114 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=obsolete (glob)
113 115 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
114 116 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
115 117 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
116 118 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
117 119 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
120 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=obsolete (glob)
118 121 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
119 122 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
120 123 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
121 124 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
122 125 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
126 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=obsolete (glob)
123 127 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
124 128
@@ -1,281 +1,287 b''
1 1 Proper https client requires the built-in ssl from Python 2.6.
2 2
3 3 $ "$TESTDIR/hghave" serve ssl || exit 80
4 4
5 5 Certificates created with:
6 6 printf '.\n.\n.\n.\n.\nlocalhost\nhg@localhost\n' | \
7 7 openssl req -newkey rsa:512 -keyout priv.pem -nodes -x509 -days 9000 -out pub.pem
8 8 Can be dumped with:
9 9 openssl x509 -in pub.pem -text
10 10
11 11 $ cat << EOT > priv.pem
12 12 > -----BEGIN PRIVATE KEY-----
13 13 > MIIBVAIBADANBgkqhkiG9w0BAQEFAASCAT4wggE6AgEAAkEApjCWeYGrIa/Vo7LH
14 14 > aRF8ou0tbgHKE33Use/whCnKEUm34rDaXQd4lxxX6aDWg06n9tiVStAKTgQAHJY8
15 15 > j/xgSwIDAQABAkBxHC6+Qlf0VJXGlb6NL16yEVVTQxqDS6hA9zqu6TZjrr0YMfzc
16 16 > EGNIiZGt7HCBL0zO+cPDg/LeCZc6HQhf0KrhAiEAzlJq4hWWzvguWFIJWSoBeBUG
17 17 > MF1ACazQO7PYE8M0qfECIQDONHHP0SKZzz/ZwBZcAveC5K61f/v9hONFwbeYulzR
18 18 > +wIgc9SvbtgB/5Yzpp//4ZAEnR7oh5SClCvyB+KSx52K3nECICbhQphhoXmI10wy
19 19 > aMTellaq0bpNMHFDziqH9RsqAHhjAiEAgYGxfzkftt5IUUn/iFK89aaIpyrpuaAh
20 20 > HY8gUVkVRVs=
21 21 > -----END PRIVATE KEY-----
22 22 > EOT
23 23
24 24 $ cat << EOT > pub.pem
25 25 > -----BEGIN CERTIFICATE-----
26 26 > MIIBqzCCAVWgAwIBAgIJANAXFFyWjGnRMA0GCSqGSIb3DQEBBQUAMDExEjAQBgNV
27 27 > BAMMCWxvY2FsaG9zdDEbMBkGCSqGSIb3DQEJARYMaGdAbG9jYWxob3N0MB4XDTEw
28 28 > MTAxNDIwMzAxNFoXDTM1MDYwNTIwMzAxNFowMTESMBAGA1UEAwwJbG9jYWxob3N0
29 29 > MRswGQYJKoZIhvcNAQkBFgxoZ0Bsb2NhbGhvc3QwXDANBgkqhkiG9w0BAQEFAANL
30 30 > ADBIAkEApjCWeYGrIa/Vo7LHaRF8ou0tbgHKE33Use/whCnKEUm34rDaXQd4lxxX
31 31 > 6aDWg06n9tiVStAKTgQAHJY8j/xgSwIDAQABo1AwTjAdBgNVHQ4EFgQUE6sA+amm
32 32 > r24dGX0kpjxOgO45hzQwHwYDVR0jBBgwFoAUE6sA+ammr24dGX0kpjxOgO45hzQw
33 33 > DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAANBAFArvQFiAZJgQczRsbYlG1xl
34 34 > t+truk37w5B3m3Ick1ntRcQrqs+hf0CO1q6Squ144geYaQ8CDirSR92fICELI1c=
35 35 > -----END CERTIFICATE-----
36 36 > EOT
37 37 $ cat priv.pem pub.pem >> server.pem
38 38 $ PRIV=`pwd`/server.pem
39 39
40 40 $ cat << EOT > pub-other.pem
41 41 > -----BEGIN CERTIFICATE-----
42 42 > MIIBqzCCAVWgAwIBAgIJALwZS731c/ORMA0GCSqGSIb3DQEBBQUAMDExEjAQBgNV
43 43 > BAMMCWxvY2FsaG9zdDEbMBkGCSqGSIb3DQEJARYMaGdAbG9jYWxob3N0MB4XDTEw
44 44 > MTAxNDIwNDUxNloXDTM1MDYwNTIwNDUxNlowMTESMBAGA1UEAwwJbG9jYWxob3N0
45 45 > MRswGQYJKoZIhvcNAQkBFgxoZ0Bsb2NhbGhvc3QwXDANBgkqhkiG9w0BAQEFAANL
46 46 > ADBIAkEAsxsapLbHrqqUKuQBxdpK4G3m2LjtyrTSdpzzzFlecxd5yhNP6AyWrufo
47 47 > K4VMGo2xlu9xOo88nDSUNSKPuD09MwIDAQABo1AwTjAdBgNVHQ4EFgQUoIB1iMhN
48 48 > y868rpQ2qk9dHnU6ebswHwYDVR0jBBgwFoAUoIB1iMhNy868rpQ2qk9dHnU6ebsw
49 49 > DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAANBAJ544f125CsE7J2t55PdFaF6
50 50 > bBlNBb91FCywBgSjhBjf+GG3TNPwrPdc3yqeq+hzJiuInqbOBv9abmMyq8Wsoig=
51 51 > -----END CERTIFICATE-----
52 52 > EOT
53 53
54 54 pub.pem patched with other notBefore / notAfter:
55 55
56 56 $ cat << EOT > pub-not-yet.pem
57 57 > -----BEGIN CERTIFICATE-----
58 58 > MIIBqzCCAVWgAwIBAgIJANAXFFyWjGnRMA0GCSqGSIb3DQEBBQUAMDExEjAQBgNVBAMMCWxvY2Fs
59 59 > aG9zdDEbMBkGCSqGSIb3DQEJARYMaGdAbG9jYWxob3N0MB4XDTM1MDYwNTIwMzAxNFoXDTM1MDYw
60 60 > NTIwMzAxNFowMTESMBAGA1UEAwwJbG9jYWxob3N0MRswGQYJKoZIhvcNAQkBFgxoZ0Bsb2NhbGhv
61 61 > c3QwXDANBgkqhkiG9w0BAQEFAANLADBIAkEApjCWeYGrIa/Vo7LHaRF8ou0tbgHKE33Use/whCnK
62 62 > EUm34rDaXQd4lxxX6aDWg06n9tiVStAKTgQAHJY8j/xgSwIDAQABo1AwTjAdBgNVHQ4EFgQUE6sA
63 63 > +ammr24dGX0kpjxOgO45hzQwHwYDVR0jBBgwFoAUE6sA+ammr24dGX0kpjxOgO45hzQwDAYDVR0T
64 64 > BAUwAwEB/zANBgkqhkiG9w0BAQUFAANBAJXV41gWnkgC7jcpPpFRSUSZaxyzrXmD1CIqQf0WgVDb
65 65 > /12E0vR2DuZitgzUYtBaofM81aTtc0a2/YsrmqePGm0=
66 66 > -----END CERTIFICATE-----
67 67 > EOT
68 68 $ cat priv.pem pub-not-yet.pem > server-not-yet.pem
69 69
70 70 $ cat << EOT > pub-expired.pem
71 71 > -----BEGIN CERTIFICATE-----
72 72 > MIIBqzCCAVWgAwIBAgIJANAXFFyWjGnRMA0GCSqGSIb3DQEBBQUAMDExEjAQBgNVBAMMCWxvY2Fs
73 73 > aG9zdDEbMBkGCSqGSIb3DQEJARYMaGdAbG9jYWxob3N0MB4XDTEwMTAxNDIwMzAxNFoXDTEwMTAx
74 74 > NDIwMzAxNFowMTESMBAGA1UEAwwJbG9jYWxob3N0MRswGQYJKoZIhvcNAQkBFgxoZ0Bsb2NhbGhv
75 75 > c3QwXDANBgkqhkiG9w0BAQEFAANLADBIAkEApjCWeYGrIa/Vo7LHaRF8ou0tbgHKE33Use/whCnK
76 76 > EUm34rDaXQd4lxxX6aDWg06n9tiVStAKTgQAHJY8j/xgSwIDAQABo1AwTjAdBgNVHQ4EFgQUE6sA
77 77 > +ammr24dGX0kpjxOgO45hzQwHwYDVR0jBBgwFoAUE6sA+ammr24dGX0kpjxOgO45hzQwDAYDVR0T
78 78 > BAUwAwEB/zANBgkqhkiG9w0BAQUFAANBAJfk57DTRf2nUbYaMSlVAARxMNbFGOjQhAUtY400GhKt
79 79 > 2uiKCNGKXVXD3AHWe13yHc5KttzbHQStE5Nm/DlWBWQ=
80 80 > -----END CERTIFICATE-----
81 81 > EOT
82 82 $ cat priv.pem pub-expired.pem > server-expired.pem
83 83
84 84 $ hg init test
85 85 $ cd test
86 86 $ echo foo>foo
87 87 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
88 88 $ echo foo>foo.d/foo
89 89 $ echo bar>foo.d/bAr.hg.d/BaR
90 90 $ echo bar>foo.d/baR.d.hg/bAR
91 91 $ hg commit -A -m 1
92 92 adding foo
93 93 adding foo.d/bAr.hg.d/BaR
94 94 adding foo.d/baR.d.hg/bAR
95 95 adding foo.d/foo
96 96 $ hg serve -p $HGPORT -d --pid-file=../hg0.pid --certificate=$PRIV
97 97 $ cat ../hg0.pid >> $DAEMON_PIDS
98 98
99 99 cacert not found
100 100
101 101 $ hg in --config web.cacerts=no-such.pem https://localhost:$HGPORT/
102 102 abort: could not find web.cacerts: no-such.pem
103 103 [255]
104 104
105 105 Test server address cannot be reused
106 106
107 107 #if windows
108 108 $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
109 109 abort: cannot start server at ':$HGPORT': (glob)
110 110 [255]
111 111 #else
112 112 $ hg serve -p $HGPORT --certificate=$PRIV 2>&1
113 113 abort: cannot start server at ':$HGPORT': Address already in use
114 114 [255]
115 115 #endif
116 116 $ cd ..
117 117
118 118 clone via pull
119 119
120 120 $ hg clone https://localhost:$HGPORT/ copy-pull
121 121 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
122 122 requesting all changes
123 123 adding changesets
124 124 adding manifests
125 125 adding file changes
126 126 added 1 changesets with 4 changes to 4 files
127 127 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
128 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
128 129 updating to branch default
129 130 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 131 $ hg verify -R copy-pull
131 132 checking changesets
132 133 checking manifests
133 134 crosschecking files in changesets and manifests
134 135 checking files
135 136 4 files, 1 changesets, 4 total revisions
136 137 $ cd test
137 138 $ echo bar > bar
138 139 $ hg commit -A -d '1 0' -m 2
139 140 adding bar
140 141 $ cd ..
141 142
142 143 pull without cacert
143 144
144 145 $ cd copy-pull
145 146 $ echo '[hooks]' >> .hg/hgrc
146 147 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc
147 148 $ hg pull
148 149 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
149 150 pulling from https://localhost:$HGPORT/
150 151 searching for changes
151 152 adding changesets
152 153 adding manifests
153 154 adding file changes
154 155 added 1 changesets with 1 changes to 1 files
155 156 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
156 157 changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_URL=https://localhost:$HGPORT/
158 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
157 159 (run 'hg update' to get a working copy)
158 160 $ cd ..
159 161
160 162 cacert configured in local repo
161 163
162 164 $ cp copy-pull/.hg/hgrc copy-pull/.hg/hgrc.bu
163 165 $ echo "[web]" >> copy-pull/.hg/hgrc
164 166 $ echo "cacerts=`pwd`/pub.pem" >> copy-pull/.hg/hgrc
165 167 $ hg -R copy-pull pull --traceback
166 168 pulling from https://localhost:$HGPORT/
167 169 searching for changes
168 170 no changes found
169 171 $ mv copy-pull/.hg/hgrc.bu copy-pull/.hg/hgrc
170 172
171 173 cacert configured globally, also testing expansion of environment
172 174 variables in the filename
173 175
174 176 $ echo "[web]" >> $HGRCPATH
175 177 $ echo 'cacerts=$P/pub.pem' >> $HGRCPATH
176 178 $ P=`pwd` hg -R copy-pull pull
177 179 pulling from https://localhost:$HGPORT/
178 180 searching for changes
179 181 no changes found
180 182 $ P=`pwd` hg -R copy-pull pull --insecure
181 183 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
182 184 pulling from https://localhost:$HGPORT/
183 185 searching for changes
184 186 no changes found
187 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
185 188
186 189 cacert mismatch
187 190
188 191 $ hg -R copy-pull pull --config web.cacerts=pub.pem https://127.0.0.1:$HGPORT/
189 192 abort: 127.0.0.1 certificate error: certificate is for localhost
190 193 (configure hostfingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca or use --insecure to connect insecurely)
191 194 [255]
192 195 $ hg -R copy-pull pull --config web.cacerts=pub.pem https://127.0.0.1:$HGPORT/ --insecure
193 196 warning: 127.0.0.1 certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
194 197 pulling from https://127.0.0.1:$HGPORT/
195 198 searching for changes
196 199 no changes found
200 warning: 127.0.0.1 certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
197 201 $ hg -R copy-pull pull --config web.cacerts=pub-other.pem
198 202 abort: error: *:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed (glob)
199 203 [255]
200 204 $ hg -R copy-pull pull --config web.cacerts=pub-other.pem --insecure
201 205 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
202 206 pulling from https://localhost:$HGPORT/
203 207 searching for changes
204 208 no changes found
209 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
205 210
206 211 Test server cert which isn't valid yet
207 212
208 213 $ hg -R test serve -p $HGPORT1 -d --pid-file=hg1.pid --certificate=server-not-yet.pem
209 214 $ cat hg1.pid >> $DAEMON_PIDS
210 215 $ hg -R copy-pull pull --config web.cacerts=pub-not-yet.pem https://localhost:$HGPORT1/
211 216 abort: error: *:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed (glob)
212 217 [255]
213 218
214 219 Test server cert which no longer is valid
215 220
216 221 $ hg -R test serve -p $HGPORT2 -d --pid-file=hg2.pid --certificate=server-expired.pem
217 222 $ cat hg2.pid >> $DAEMON_PIDS
218 223 $ hg -R copy-pull pull --config web.cacerts=pub-expired.pem https://localhost:$HGPORT2/
219 224 abort: error: *:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed (glob)
220 225 [255]
221 226
222 227 Fingerprints
223 228
224 229 $ echo "[hostfingerprints]" >> copy-pull/.hg/hgrc
225 230 $ echo "localhost = 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca" >> copy-pull/.hg/hgrc
226 231 $ echo "127.0.0.1 = 914f1aff87249c09b6859b88b1906d30756491ca" >> copy-pull/.hg/hgrc
227 232
228 233 - works without cacerts
229 234 $ hg -R copy-pull id https://localhost:$HGPORT/ --config web.cacerts=
230 235 5fed3813f7f5
231 236
232 237 - fails when cert doesn't match hostname (port is ignored)
233 238 $ hg -R copy-pull id https://localhost:$HGPORT1/
234 239 abort: certificate for localhost has unexpected fingerprint 28:ff:71:bf:65:31:14:23:ad:62:92:b4:0e:31:99:18:fc:83:e3:9b
235 240 (check hostfingerprint configuration)
236 241 [255]
237 242
238 243 - ignores that certificate doesn't match hostname
239 244 $ hg -R copy-pull id https://127.0.0.1:$HGPORT/
240 245 5fed3813f7f5
241 246
242 247 $ while kill `cat hg1.pid` 2>/dev/null; do sleep 0; done
243 248
244 249 Prepare for connecting through proxy
245 250
246 251 $ "$TESTDIR/tinyproxy.py" $HGPORT1 localhost >proxy.log </dev/null 2>&1 &
247 252 $ while [ ! -f proxy.pid ]; do sleep 0; done
248 253 $ cat proxy.pid >> $DAEMON_PIDS
249 254
250 255 $ echo "[http_proxy]" >> copy-pull/.hg/hgrc
251 256 $ echo "always=True" >> copy-pull/.hg/hgrc
252 257 $ echo "[hostfingerprints]" >> copy-pull/.hg/hgrc
253 258 $ echo "localhost =" >> copy-pull/.hg/hgrc
254 259
255 260 Test unvalidated https through proxy
256 261
257 262 $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --insecure --traceback
258 263 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
259 264 pulling from https://localhost:$HGPORT/
260 265 searching for changes
261 266 no changes found
267 warning: localhost certificate with fingerprint 91:4f:1a:ff:87:24:9c:09:b6:85:9b:88:b1:90:6d:30:75:64:91:ca not verified (check hostfingerprints or web.cacerts config setting)
262 268
263 269 Test https with cacert and fingerprint through proxy
264 270
265 271 $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --config web.cacerts=pub.pem
266 272 pulling from https://localhost:$HGPORT/
267 273 searching for changes
268 274 no changes found
269 275 $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull https://127.0.0.1:$HGPORT/
270 276 pulling from https://127.0.0.1:$HGPORT/
271 277 searching for changes
272 278 no changes found
273 279
274 280 Test https with cert problems through proxy
275 281
276 282 $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --config web.cacerts=pub-other.pem
277 283 abort: error: *:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed (glob)
278 284 [255]
279 285 $ http_proxy=http://localhost:$HGPORT1/ hg -R copy-pull pull --config web.cacerts=pub-expired.pem https://localhost:$HGPORT2/
280 286 abort: error: *:SSL3_GET_SERVER_CERTIFICATE:certificate verify failed (glob)
281 287 [255]
@@ -1,61 +1,143 b''
1 1
2 2 $ mkcommit() {
3 3 > echo "$1" > "$1"
4 4 > hg add "$1"
5 5 > hg ci -m "add $1"
6 6 > }
7 7 $ getid() {
8 8 > hg id --debug -ir "desc('$1')"
9 9 > }
10 10
11 11
12 12 $ hg init tmpa
13 13 $ cd tmpa
14 14
15 15 Killing a single changeset without replacement
16 16
17 17 $ mkcommit kill_me
18 18 $ hg debugobsolete -d '0 0' `getid kill_me` -u babar
19 19 $ hg debugobsolete
20 20 97b7c2d76b1845ed3eb988cd612611e72406cef0 0 {'date': '0 0', 'user': 'babar'}
21 21 $ cd ..
22 22
23 23 Killing a single changeset with replacement
24 24
25 25 $ hg init tmpb
26 26 $ cd tmpb
27 27 $ mkcommit a
28 28 $ mkcommit b
29 29 $ mkcommit original_c
30 30 $ hg up "desc('b')"
31 31 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
32 32 $ mkcommit new_c
33 33 created new head
34 34 $ hg debugobsolete `getid original_c` `getid new_c` -d '56 12'
35 35 $ hg debugobsolete
36 36 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
37 37
38 38 do it again (it reads the obsstore before adding the new changeset)
39 39
40 40 $ hg up '.^'
41 41 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
42 42 $ mkcommit new_2_c
43 43 created new head
44 44 $ hg debugobsolete -d '1337 0' `getid new_c` `getid new_2_c`
45 45 $ hg debugobsolete
46 46 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
47 47 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
48 48
49 49 Register two markers with a missing node
50 50
51 51 $ hg up '.^'
52 52 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
53 53 $ mkcommit new_3_c
54 54 created new head
55 55 $ hg debugobsolete -d '1338 0' `getid new_2_c` 1337133713371337133713371337133713371337
56 56 $ hg debugobsolete -d '1339 0' 1337133713371337133713371337133713371337 `getid new_3_c`
57 57 $ hg debugobsolete
58 58 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
59 59 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
60 60 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
61 61 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
62
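Each line printed by 'hg debugobsolete' is one marker: precursor node, successor node(s), a flags field and a metadata dictionary. In Python terms, the first marker above corresponds to something like:

    marker = ('245bde4270cd1072a27757984f9cda8ba26f08ca',    # precursor
              ('cdbce2fbb16313928851e97e0d85413f3f7eb77f',), # successors
              0,                                             # flags
              {'date': '56 12', 'user': 'test'})             # metadata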
63 $ cd ..
64
65 Exchange Test
66 ============================
67
68 Destination repo does not have any data
69 ---------------------------------------
70
71 Try to pull markers
72
73 $ hg init tmpc
74 $ cd tmpc
75 $ hg pull ../tmpb
76 pulling from ../tmpb
77 requesting all changes
78 adding changesets
79 adding manifests
80 adding file changes
81 added 6 changesets with 6 changes to 6 files (+3 heads)
82 (run 'hg heads' to see heads, 'hg merge' to merge)
83 $ hg debugobsolete
84 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
85 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
86 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
87 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
88
89 $ cd ..
90
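These markers travel in the 'obsolete' pushkey namespace introduced by this changeset (it shows up in the listkeys output of the hooks test above); the value is the binary marker data, base85-encoded. A round-trip sketch with Mercurial's bundled base85 module (the payload is a stand-in, not real marker bytes):

    from mercurial import base85

    payload = 'raw obsstore bytes would go here'
    encoded = base85.b85encode(payload)   # what the wire would carry
    assert base85.b85decode(encoded) == payload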
91 Try to push markers
92
93 $ hg init tmpd
94 $ hg -R tmpb push tmpd
95 pushing to tmpd
96 searching for changes
97 adding changesets
98 adding manifests
99 adding file changes
100 added 6 changesets with 6 changes to 6 files (+3 heads)
101 $ hg -R tmpd debugobsolete
102 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
103 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
104 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
105 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
106
107
108 Destination repo has existing data
109 ---------------------------------------
110
111 On pull
112
113 $ hg init tmpe
114 $ cd tmpe
115 $ hg debugobsolete -d '1339 0' 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339
116 $ hg pull ../tmpb
117 pulling from ../tmpb
118 requesting all changes
119 adding changesets
120 adding manifests
121 adding file changes
122 added 6 changesets with 6 changes to 6 files (+3 heads)
123 (run 'hg heads' to see heads, 'hg merge' to merge)
124 $ hg debugobsolete
125 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
126 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
127 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
128 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
129 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
130
131 On push
132
133 $ hg push ../tmpc
134 pushing to ../tmpc
135 searching for changes
136 no changes found
137 [1]
138 $ hg -R ../tmpc debugobsolete
139 ca819180edb99ed25ceafb3e9584ac287e240b00 1337133713371337133713371337133713371337 0 {'date': '1338 0', 'user': 'test'}
140 cdbce2fbb16313928851e97e0d85413f3f7eb77f ca819180edb99ed25ceafb3e9584ac287e240b00 0 {'date': '1337 0', 'user': 'test'}
141 245bde4270cd1072a27757984f9cda8ba26f08ca cdbce2fbb16313928851e97e0d85413f3f7eb77f 0 {'date': '56 12', 'user': 'test'}
142 1337133713371337133713371337133713371337 5601fb93a350734d935195fee37f4054c529ff39 0 {'date': '1339 0', 'user': 'test'}
143 2448244824482448244824482448244824482448 1339133913391339133913391339133913391339 0 {'date': '1339 0', 'user': 'test'}
@@ -1,373 +1,374 b''
1 1
2 2
3 3 This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 creating 'remote' repo
6 6
7 7 $ hg init remote
8 8 $ cd remote
9 9 $ echo this > foo
10 10 $ echo this > fooO
11 11 $ hg ci -A -m "init" foo fooO
12 12 $ cat <<EOF > .hg/hgrc
13 13 > [server]
14 14 > uncompressed = True
15 15 >
16 16 > [hooks]
17 17 > changegroup = python "$TESTDIR/printenv.py" changegroup-in-remote 0 ../dummylog
18 18 > EOF
19 19 $ cd ..
20 20
21 21 repo not found error
22 22
23 23 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
24 24 remote: abort: there is no Mercurial repository here (.hg not found)!
25 25 abort: no suitable response from remote hg!
26 26 [255]
27 27
28 28 non-existent absolute path
29 29
30 30 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
31 31 remote: abort: there is no Mercurial repository here (.hg not found)!
32 32 abort: no suitable response from remote hg!
33 33 [255]
34 34
35 35 clone remote via stream
36 36
37 37 $ hg clone -e "python \"$TESTDIR/dummyssh\"" --uncompressed ssh://user@dummy/remote local-stream
38 38 streaming all changes
39 39 4 files to transfer, 392 bytes of data
40 40 transferred 392 bytes in * seconds (*/sec) (glob)
41 41 updating to branch default
42 42 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 43 $ cd local-stream
44 44 $ hg verify
45 45 checking changesets
46 46 checking manifests
47 47 crosschecking files in changesets and manifests
48 48 checking files
49 49 2 files, 1 changesets, 2 total revisions
50 50 $ cd ..
51 51
52 52 clone remote via pull
53 53
54 54 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
55 55 requesting all changes
56 56 adding changesets
57 57 adding manifests
58 58 adding file changes
59 59 added 1 changesets with 2 changes to 2 files
60 60 updating to branch default
61 61 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 62
63 63 verify
64 64
65 65 $ cd local
66 66 $ hg verify
67 67 checking changesets
68 68 checking manifests
69 69 crosschecking files in changesets and manifests
70 70 checking files
71 71 2 files, 1 changesets, 2 total revisions
72 72 $ echo '[hooks]' >> .hg/hgrc
73 73 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup-in-local 0 ../dummylog" >> .hg/hgrc
74 74
75 75 empty default pull
76 76
77 77 $ hg paths
78 78 default = ssh://user@dummy/remote
79 79 $ hg pull -e "python \"$TESTDIR/dummyssh\""
80 80 pulling from ssh://user@dummy/remote
81 81 searching for changes
82 82 no changes found
83 83
84 84 local change
85 85
86 86 $ echo bleah > foo
87 87 $ hg ci -m "add"
88 88
89 89 updating rc
90 90
91 91 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
92 92 $ echo "[ui]" >> .hg/hgrc
93 93 $ echo "ssh = python \"$TESTDIR/dummyssh\"" >> .hg/hgrc
94 94
95 95 find outgoing
96 96
97 97 $ hg out ssh://user@dummy/remote
98 98 comparing with ssh://user@dummy/remote
99 99 searching for changes
100 100 changeset: 1:a28a9d1a809c
101 101 tag: tip
102 102 user: test
103 103 date: Thu Jan 01 00:00:00 1970 +0000
104 104 summary: add
105 105
106 106
107 107 find incoming on the remote side
108 108
109 109 $ hg incoming -R ../remote -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
110 110 comparing with ssh://user@dummy/local
111 111 searching for changes
112 112 changeset: 1:a28a9d1a809c
113 113 tag: tip
114 114 user: test
115 115 date: Thu Jan 01 00:00:00 1970 +0000
116 116 summary: add
117 117
118 118
119 119 find incoming on the remote side (using absolute path)
120 120
121 121 $ hg incoming -R ../remote -e "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
122 122 comparing with ssh://user@dummy/$TESTTMP/local
123 123 searching for changes
124 124 changeset: 1:a28a9d1a809c
125 125 tag: tip
126 126 user: test
127 127 date: Thu Jan 01 00:00:00 1970 +0000
128 128 summary: add
129 129
130 130
131 131 push
132 132
133 133 $ hg push
134 134 pushing to ssh://user@dummy/remote
135 135 searching for changes
136 136 remote: adding changesets
137 137 remote: adding manifests
138 138 remote: adding file changes
139 139 remote: added 1 changesets with 1 changes to 1 files
140 140 $ cd ../remote
141 141
142 142 check remote tip
143 143
144 144 $ hg tip
145 145 changeset: 1:a28a9d1a809c
146 146 tag: tip
147 147 user: test
148 148 date: Thu Jan 01 00:00:00 1970 +0000
149 149 summary: add
150 150
151 151 $ hg verify
152 152 checking changesets
153 153 checking manifests
154 154 crosschecking files in changesets and manifests
155 155 checking files
156 156 2 files, 2 changesets, 3 total revisions
157 157 $ hg cat -r tip foo
158 158 bleah
159 159 $ echo z > z
160 160 $ hg ci -A -m z z
161 161 created new head
162 162
163 163 test pushkeys and bookmarks
164 164
165 165 $ cd ../local
166 166 $ hg debugpushkey --config ui.ssh="python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
167 167 bookmarks
168 168 phases
169 169 namespaces
170 obsolete
170 171 $ hg book foo -r 0
171 172 $ hg out -B
172 173 comparing with ssh://user@dummy/remote
173 174 searching for changed bookmarks
174 175 foo 1160648e36ce
175 176 $ hg push -B foo
176 177 pushing to ssh://user@dummy/remote
177 178 searching for changes
178 179 no changes found
179 180 exporting bookmark foo
180 181 [1]
181 182 $ hg debugpushkey --config ui.ssh="python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
182 183 foo 1160648e36cec0054048a7edc4110c6f84fde594
183 184 $ hg book -f foo
184 185 $ hg push --traceback
185 186 pushing to ssh://user@dummy/remote
186 187 searching for changes
187 188 no changes found
188 189 updating bookmark foo
189 190 [1]
190 191 $ hg book -d foo
191 192 $ hg in -B
192 193 comparing with ssh://user@dummy/remote
193 194 searching for changed bookmarks
194 195 foo a28a9d1a809c
195 196 $ hg book -f -r 0 foo
196 197 $ hg pull -B foo
197 198 pulling from ssh://user@dummy/remote
198 199 no changes found
199 200 updating bookmark foo
200 201 importing bookmark foo
201 202 $ hg book -d foo
202 203 $ hg push -B foo
203 204 pushing to ssh://user@dummy/remote
204 205 searching for changes
205 206 no changes found
206 207 deleting remote bookmark foo
207 208 [1]
208 209
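The namespaces listed by debugpushkey can also be queried programmatically; for a local repository the same data comes from repo.listkeys. A sketch (the path is a placeholder):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '../remote')
    print sorted(repo.listkeys('namespaces'))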
209 210 a bad, evil hook that prints to stdout
210 211
211 212 $ cat <<EOF > $TESTTMP/badhook
212 213 > import sys
213 214 > sys.stdout.write("KABOOM\n")
214 215 > EOF
215 216
216 217 $ echo '[hooks]' >> ../remote/.hg/hgrc
217 218 $ echo "changegroup.stdout = python $TESTTMP/badhook" >> ../remote/.hg/hgrc
218 219 $ echo r > r
219 220 $ hg ci -A -m z r
220 221
221 222 push should succeed even though it has an unexpected response
222 223
223 224 $ hg push
224 225 pushing to ssh://user@dummy/remote
225 226 searching for changes
226 227 note: unsynced remote changes!
227 228 remote: adding changesets
228 229 remote: adding manifests
229 230 remote: adding file changes
230 231 remote: added 1 changesets with 1 changes to 1 files
231 232 remote: KABOOM
232 233 $ hg -R ../remote heads
233 234 changeset: 3:1383141674ec
234 235 tag: tip
235 236 parent: 1:a28a9d1a809c
236 237 user: test
237 238 date: Thu Jan 01 00:00:00 1970 +0000
238 239 summary: z
239 240
240 241 changeset: 2:6c0482d977a3
241 242 parent: 0:1160648e36ce
242 243 user: test
243 244 date: Thu Jan 01 00:00:00 1970 +0000
244 245 summary: z
245 246
246 247
247 248 clone bookmarks
248 249
249 250 $ hg -R ../remote bookmark test
250 251 $ hg -R ../remote bookmarks
251 252 * test 2:6c0482d977a3
252 253 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
253 254 requesting all changes
254 255 adding changesets
255 256 adding manifests
256 257 adding file changes
257 258 added 4 changesets with 5 changes to 4 files (+1 heads)
258 259 updating to branch default
259 260 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
260 261 $ hg -R local-bookmarks bookmarks
261 262 test 2:6c0482d977a3
262 263
263 264 passwords in ssh urls are not supported
264 265 (we use a glob here because different Python versions give different
265 266 results)
266 267
267 268 $ hg push ssh://user:erroneouspwd@dummy/remote
268 269 pushing to ssh://user:*@dummy/remote (glob)
269 270 abort: password in URL not supported!
270 271 [255]
271 272
272 273 $ cd ..
273 274
274 275 hide outer repo
275 276 $ hg init
276 277
277 278 Test remote paths with spaces (issue2983):
278 279
279 280 $ hg init --ssh "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
280 281 $ hg -R 'a repo' tag tag
281 282 $ hg id --ssh "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
282 283 3fb238f49e8c
283 284
284 285 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
285 286 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
286 287 parameters:
287 288
288 289 $ cat > ssh.sh << EOF
289 290 > userhost="\$1"
290 291 > SSH_ORIGINAL_COMMAND="\$2"
291 292 > export SSH_ORIGINAL_COMMAND
292 293 > PYTHONPATH="$PYTHONPATH"
293 294 > export PYTHONPATH
294 295 > python "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
295 296 > EOF
296 297
297 298 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
298 299 3fb238f49e8c
299 300
300 301 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
301 302 remote: Illegal repository "$TESTTMP/a'repo" (glob)
302 303 abort: no suitable response from remote hg!
303 304 [255]
304 305
305 306 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
306 307 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
307 308 abort: no suitable response from remote hg!
308 309 [255]
309 310
310 311 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" python "$TESTDIR/../contrib/hg-ssh"
311 312 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
312 313 [255]
313 314
314 315 Test hg-ssh in read-only mode:
315 316
316 317 $ cat > ssh.sh << EOF
317 318 > userhost="\$1"
318 319 > SSH_ORIGINAL_COMMAND="\$2"
319 320 > export SSH_ORIGINAL_COMMAND
320 321 > PYTHONPATH="$PYTHONPATH"
321 322 > export PYTHONPATH
322 323 > python "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
323 324 > EOF
324 325
325 326 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
326 327 requesting all changes
327 328 adding changesets
328 329 adding manifests
329 330 adding file changes
330 331 added 4 changesets with 5 changes to 4 files (+1 heads)
331 332 updating to branch default
332 333 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
333 334
334 335 $ cd read-only-local
335 336 $ echo "baz" > bar
336 337 $ hg ci -A -m "unpushable commit" bar
337 338 $ hg push --ssh "sh ../ssh.sh"
338 339 pushing to ssh://user@dummy/*/remote (glob)
339 340 searching for changes
340 341 remote: Permission denied
341 342 remote: abort: prechangegroup.hg-ssh hook failed
342 343 remote: Permission denied
343 344 remote: abort: prepushkey.hg-ssh hook failed
344 345 abort: unexpected response: empty string
345 346 [255]
346 347
347 348 $ cd ..
348 349
349 350 $ cat dummylog
350 351 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
351 352 Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio
352 353 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
353 354 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
354 355 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
355 356 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
356 357 Got arguments 1:user@dummy 2:hg -R local serve --stdio
357 358 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
358 359 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
359 360 changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
360 361 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
361 362 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
362 363 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
363 364 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
364 365 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
365 366 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
366 367 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
367 368 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
368 369 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
369 370 changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
370 371 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
371 372 Got arguments 1:user@dummy 2:hg init 'a repo'
372 373 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
373 374 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio