# Provenance (recovered from the diff-viewer page this file was scraped from):
#   clone: fix copying bookmarks in uncompressed clones (issue4430)
#   Author: Durham Goode - changeset r23116:2dc6b791 (stable branch)
#   Diff hunk header: @@ -1,1792 +1,1793 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
class repofilecache(filecache):
    """filecache variant that always operates on the unfiltered repo.

    Repo-level filecache users implement logic that must not depend on
    repoview filtering, so every descriptor access is redirected to
    ``repo.unfiltered()``.
    """

    def __get__(self, repo, type=None):
        target = repo.unfiltered()
        return super(repofilecache, self).__get__(target, type)

    def __set__(self, repo, value):
        target = repo.unfiltered()
        return super(repofilecache, self).__set__(target, value)

    def __delete__(self, repo):
        target = repo.unfiltered()
        return super(repofilecache, self).__delete__(target)
class storecache(repofilecache):
    """repofilecache for files living under the store (.hg/store)."""

    def join(self, obj, fname):
        # store files are addressed through the store join, not .hg/ directly
        return obj.sjoin(fname)
39 39
class unfilteredpropertycache(propertycache):
    """propertycache that only ever computes/stores on the unfiltered repo.

    Access through a filtered view is forwarded to the attribute already
    cached on the unfiltered repository.
    """

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is not repo:
            # filtered view: read the value cached on the unfiltered repo
            return getattr(unfi, self.name)
        return super(unfilteredpropertycache, self).__get__(unfi)
48 48
class filteredpropertycache(propertycache):
    """propertycache whose value legitimately depends on repo filtering."""

    def cachevalue(self, obj, value):
        # bypass any __set__ redirection; cache directly on this (possibly
        # filtered) object
        object.__setattr__(obj, self.name, value)
55 55
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
59 59
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # always dispatch against the unfiltered repository
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2Y' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire level function happier. We need to build a proper object
115 115 # from it in local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests
    with restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 # a list of (ui, featureset) functions.
184 184 # only functions defined in module of enabled extensions are invoked
185 185 featuresetupfuncs = set()
186 186
187 187 def _baserequirements(self, create):
188 188 return self.requirements[:]
189 189
190 190 def __init__(self, baseui, path=None, create=False):
191 191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 192 self.wopener = self.wvfs
193 193 self.root = self.wvfs.base
194 194 self.path = self.wvfs.join(".hg")
195 195 self.origroot = path
196 196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 197 self.vfs = scmutil.vfs(self.path)
198 198 self.opener = self.vfs
199 199 self.baseui = baseui
200 200 self.ui = baseui.copy()
201 201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 202 # A list of callback to shape the phase if no data were found.
203 203 # Callback are in the form: func(repo, roots) --> processed root.
204 204 # This list it to be filled by extension during repo setup
205 205 self._phasedefaults = []
206 206 try:
207 207 self.ui.readconfig(self.join("hgrc"), self.root)
208 208 extensions.loadall(self.ui)
209 209 except IOError:
210 210 pass
211 211
212 212 if self.featuresetupfuncs:
213 213 self.supported = set(self._basesupported) # use private copy
214 214 extmods = set(m.__name__ for n, m
215 215 in extensions.extensions(self.ui))
216 216 for setupfunc in self.featuresetupfuncs:
217 217 if setupfunc.__module__ in extmods:
218 218 setupfunc(self.ui, self.supported)
219 219 else:
220 220 self.supported = self._basesupported
221 221
222 222 if not self.vfs.isdir():
223 223 if create:
224 224 if not self.wvfs.exists():
225 225 self.wvfs.makedirs()
226 226 self.vfs.makedir(notindexed=True)
227 227 requirements = self._baserequirements(create)
228 228 if self.ui.configbool('format', 'usestore', True):
229 229 self.vfs.mkdir("store")
230 230 requirements.append("store")
231 231 if self.ui.configbool('format', 'usefncache', True):
232 232 requirements.append("fncache")
233 233 if self.ui.configbool('format', 'dotencode', True):
234 234 requirements.append('dotencode')
235 235 # create an invalid changelog
236 236 self.vfs.append(
237 237 "00changelog.i",
238 238 '\0\0\0\2' # represents revlogv2
239 239 ' dummy changelog to prevent using the old repo layout'
240 240 )
241 241 if self.ui.configbool('format', 'generaldelta', False):
242 242 requirements.append("generaldelta")
243 243 requirements = set(requirements)
244 244 else:
245 245 raise error.RepoError(_("repository %s not found") % path)
246 246 elif create:
247 247 raise error.RepoError(_("repository %s already exists") % path)
248 248 else:
249 249 try:
250 250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 251 except IOError, inst:
252 252 if inst.errno != errno.ENOENT:
253 253 raise
254 254 requirements = set()
255 255
256 256 self.sharedpath = self.path
257 257 try:
258 258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 259 realpath=True)
260 260 s = vfs.base
261 261 if not vfs.exists():
262 262 raise error.RepoError(
263 263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 264 self.sharedpath = s
265 265 except IOError, inst:
266 266 if inst.errno != errno.ENOENT:
267 267 raise
268 268
269 269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 270 self.spath = self.store.path
271 271 self.svfs = self.store.vfs
272 272 self.sopener = self.svfs
273 273 self.sjoin = self.store.join
274 274 self.vfs.createmode = self.store.createmode
275 275 self._applyrequirements(requirements)
276 276 if create:
277 277 self._writerequirements()
278 278
279 279
280 280 self._branchcaches = {}
281 281 self.filterpats = {}
282 282 self._datafilters = {}
283 283 self._transref = self._lockref = self._wlockref = None
284 284
285 285 # A cache for various files under .hg/ that tracks file changes,
286 286 # (used by the filecache decorator)
287 287 #
288 288 # Maps a property name to its util.filecacheentry
289 289 self._filecache = {}
290 290
291 291 # hold sets of revision to be filtered
292 292 # should be cleared when something might have changed the filter value:
293 293 # - new changesets,
294 294 # - phase change,
295 295 # - new obsolescence marker,
296 296 # - working directory parent change,
297 297 # - bookmark changes
298 298 self.filteredrevcache = {}
299 299
300 300 def close(self):
301 301 pass
302 302
303 303 def _restrictcapabilities(self, caps):
304 304 # bundle2 is not ready for prime time, drop it unless explicitly
305 305 # required by the tests (or some brave tester)
306 306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 307 caps = set(caps)
308 308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 310 return caps
311 311
312 312 def _applyrequirements(self, requirements):
313 313 self.requirements = requirements
314 314 self.sopener.options = dict((r, 1) for r in requirements
315 315 if r in self.openerreqs)
316 316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 317 if chunkcachesize is not None:
318 318 self.sopener.options['chunkcachesize'] = chunkcachesize
319 319
320 320 def _writerequirements(self):
321 321 reqfile = self.opener("requires", "w")
322 322 for r in sorted(self.requirements):
323 323 reqfile.write("%s\n" % r)
324 324 reqfile.close()
325 325
326 326 def _checknested(self, path):
327 327 """Determine if path is a legal nested repository."""
328 328 if not path.startswith(self.root):
329 329 return False
330 330 subpath = path[len(self.root) + 1:]
331 331 normsubpath = util.pconvert(subpath)
332 332
333 333 # XXX: Checking against the current working copy is wrong in
334 334 # the sense that it can reject things like
335 335 #
336 336 # $ hg cat -r 10 sub/x.txt
337 337 #
338 338 # if sub/ is no longer a subrepository in the working copy
339 339 # parent revision.
340 340 #
341 341 # However, it can of course also allow things that would have
342 342 # been rejected before, such as the above cat command if sub/
343 343 # is a subrepository now, but was a normal directory before.
344 344 # The old path auditor would have rejected by mistake since it
345 345 # panics when it sees sub/.hg/.
346 346 #
347 347 # All in all, checking against the working copy seems sensible
348 348 # since we want to prevent access to nested repositories on
349 349 # the filesystem *now*.
350 350 ctx = self[None]
351 351 parts = util.splitpath(subpath)
352 352 while parts:
353 353 prefix = '/'.join(parts)
354 354 if prefix in ctx.substate:
355 355 if prefix == normsubpath:
356 356 return True
357 357 else:
358 358 sub = ctx.sub(prefix)
359 359 return sub.checknested(subpath[len(prefix) + 1:])
360 360 else:
361 361 parts.pop()
362 362 return False
363 363
364 364 def peer(self):
365 365 return localpeer(self) # not cached to avoid reference cycle
366 366
367 367 def unfiltered(self):
368 368 """Return unfiltered version of the repository
369 369
370 370 Intended to be overwritten by filtered repo."""
371 371 return self
372 372
373 373 def filtered(self, name):
374 374 """Return a filtered version of a repository"""
375 375 # build a new class with the mixin and the current class
376 376 # (possibly subclass of the repo)
377 377 class proxycls(repoview.repoview, self.unfiltered().__class__):
378 378 pass
379 379 return proxycls(self, name)
380 380
381 381 @repofilecache('bookmarks')
382 382 def _bookmarks(self):
383 383 return bookmarks.bmstore(self)
384 384
385 385 @repofilecache('bookmarks.current')
386 386 def _bookmarkcurrent(self):
387 387 return bookmarks.readcurrent(self)
388 388
389 389 def bookmarkheads(self, bookmark):
390 390 name = bookmark.split('@', 1)[0]
391 391 heads = []
392 392 for mark, n in self._bookmarks.iteritems():
393 393 if mark.split('@', 1)[0] == name:
394 394 heads.append(n)
395 395 return heads
396 396
397 397 @storecache('phaseroots')
398 398 def _phasecache(self):
399 399 return phases.phasecache(self, self._phasedefaults)
400 400
401 401 @storecache('obsstore')
402 402 def obsstore(self):
403 403 # read default format for new obsstore.
404 404 defaultformat = self.ui.configint('format', 'obsstore-version', None)
405 405 # rely on obsstore class default when possible.
406 406 kwargs = {}
407 407 if defaultformat is not None:
408 408 kwargs['defaultformat'] = defaultformat
409 409 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
410 410 store = obsolete.obsstore(self.sopener, readonly=readonly,
411 411 **kwargs)
412 412 if store and readonly:
413 413 # message is rare enough to not be translated
414 414 msg = 'obsolete feature not enabled but %i markers found!\n'
415 415 self.ui.warn(msg % len(list(store)))
416 416 return store
417 417
418 418 @storecache('00changelog.i')
419 419 def changelog(self):
420 420 c = changelog.changelog(self.sopener)
421 421 if 'HG_PENDING' in os.environ:
422 422 p = os.environ['HG_PENDING']
423 423 if p.startswith(self.root):
424 424 c.readpending('00changelog.i.a')
425 425 return c
426 426
427 427 @storecache('00manifest.i')
428 428 def manifest(self):
429 429 return manifest.manifest(self.sopener)
430 430
431 431 @repofilecache('dirstate')
432 432 def dirstate(self):
433 433 warned = [0]
434 434 def validate(node):
435 435 try:
436 436 self.changelog.rev(node)
437 437 return node
438 438 except error.LookupError:
439 439 if not warned[0]:
440 440 warned[0] = True
441 441 self.ui.warn(_("warning: ignoring unknown"
442 442 " working parent %s!\n") % short(node))
443 443 return nullid
444 444
445 445 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
446 446
447 447 def __getitem__(self, changeid):
448 448 if changeid is None:
449 449 return context.workingctx(self)
450 450 return context.changectx(self, changeid)
451 451
452 452 def __contains__(self, changeid):
453 453 try:
454 454 return bool(self.lookup(changeid))
455 455 except error.RepoLookupError:
456 456 return False
457 457
458 458 def __nonzero__(self):
459 459 return True
460 460
461 461 def __len__(self):
462 462 return len(self.changelog)
463 463
464 464 def __iter__(self):
465 465 return iter(self.changelog)
466 466
467 467 def revs(self, expr, *args):
468 468 '''Return a list of revisions matching the given revset'''
469 469 expr = revset.formatspec(expr, *args)
470 470 m = revset.match(None, expr)
471 471 return m(self, revset.spanset(self))
472 472
473 473 def set(self, expr, *args):
474 474 '''
475 475 Yield a context for each matching revision, after doing arg
476 476 replacement via revset.formatspec
477 477 '''
478 478 for r in self.revs(expr, *args):
479 479 yield self[r]
480 480
481 481 def url(self):
482 482 return 'file:' + self.root
483 483
484 484 def hook(self, name, throw=False, **args):
485 485 """Call a hook, passing this repo instance.
486 486
487 487 This a convenience method to aid invoking hooks. Extensions likely
488 488 won't call this unless they have registered a custom hook or are
489 489 replacing code that is expected to call a hook.
490 490 """
491 491 return hook.hook(self.ui, self, name, throw, **args)
492 492
493 493 @unfilteredmethod
494 494 def _tag(self, names, node, message, local, user, date, extra={},
495 495 editor=False):
496 496 if isinstance(names, str):
497 497 names = (names,)
498 498
499 499 branches = self.branchmap()
500 500 for name in names:
501 501 self.hook('pretag', throw=True, node=hex(node), tag=name,
502 502 local=local)
503 503 if name in branches:
504 504 self.ui.warn(_("warning: tag %s conflicts with existing"
505 505 " branch name\n") % name)
506 506
507 507 def writetags(fp, names, munge, prevtags):
508 508 fp.seek(0, 2)
509 509 if prevtags and prevtags[-1] != '\n':
510 510 fp.write('\n')
511 511 for name in names:
512 512 m = munge and munge(name) or name
513 513 if (self._tagscache.tagtypes and
514 514 name in self._tagscache.tagtypes):
515 515 old = self.tags().get(name, nullid)
516 516 fp.write('%s %s\n' % (hex(old), m))
517 517 fp.write('%s %s\n' % (hex(node), m))
518 518 fp.close()
519 519
520 520 prevtags = ''
521 521 if local:
522 522 try:
523 523 fp = self.opener('localtags', 'r+')
524 524 except IOError:
525 525 fp = self.opener('localtags', 'a')
526 526 else:
527 527 prevtags = fp.read()
528 528
529 529 # local tags are stored in the current charset
530 530 writetags(fp, names, None, prevtags)
531 531 for name in names:
532 532 self.hook('tag', node=hex(node), tag=name, local=local)
533 533 return
534 534
535 535 try:
536 536 fp = self.wfile('.hgtags', 'rb+')
537 537 except IOError, e:
538 538 if e.errno != errno.ENOENT:
539 539 raise
540 540 fp = self.wfile('.hgtags', 'ab')
541 541 else:
542 542 prevtags = fp.read()
543 543
544 544 # committed tags are stored in UTF-8
545 545 writetags(fp, names, encoding.fromlocal, prevtags)
546 546
547 547 fp.close()
548 548
549 549 self.invalidatecaches()
550 550
551 551 if '.hgtags' not in self.dirstate:
552 552 self[None].add(['.hgtags'])
553 553
554 554 m = matchmod.exact(self.root, '', ['.hgtags'])
555 555 tagnode = self.commit(message, user, date, extra=extra, match=m,
556 556 editor=editor)
557 557
558 558 for name in names:
559 559 self.hook('tag', node=hex(node), tag=name, local=local)
560 560
561 561 return tagnode
562 562
563 563 def tag(self, names, node, message, local, user, date, editor=False):
564 564 '''tag a revision with one or more symbolic names.
565 565
566 566 names is a list of strings or, when adding a single tag, names may be a
567 567 string.
568 568
569 569 if local is True, the tags are stored in a per-repository file.
570 570 otherwise, they are stored in the .hgtags file, and a new
571 571 changeset is committed with the change.
572 572
573 573 keyword arguments:
574 574
575 575 local: whether to store tags in non-version-controlled file
576 576 (default False)
577 577
578 578 message: commit message to use if committing
579 579
580 580 user: name of user to use if committing
581 581
582 582 date: date tuple to use if committing'''
583 583
584 584 if not local:
585 585 m = matchmod.exact(self.root, '', ['.hgtags'])
586 586 if util.any(self.status(match=m, unknown=True, ignored=True)):
587 587 raise util.Abort(_('working copy of .hgtags is changed'),
588 588 hint=_('please commit .hgtags manually'))
589 589
590 590 self.tags() # instantiate the cache
591 591 self._tag(names, node, message, local, user, date, editor=editor)
592 592
593 593 @filteredpropertycache
594 594 def _tagscache(self):
595 595 '''Returns a tagscache object that contains various tags related
596 596 caches.'''
597 597
598 598 # This simplifies its cache management by having one decorated
599 599 # function (this one) and the rest simply fetch things from it.
600 600 class tagscache(object):
601 601 def __init__(self):
602 602 # These two define the set of tags for this repository. tags
603 603 # maps tag name to node; tagtypes maps tag name to 'global' or
604 604 # 'local'. (Global tags are defined by .hgtags across all
605 605 # heads, and local tags are defined in .hg/localtags.)
606 606 # They constitute the in-memory cache of tags.
607 607 self.tags = self.tagtypes = None
608 608
609 609 self.nodetagscache = self.tagslist = None
610 610
611 611 cache = tagscache()
612 612 cache.tags, cache.tagtypes = self._findtags()
613 613
614 614 return cache
615 615
616 616 def tags(self):
617 617 '''return a mapping of tag to node'''
618 618 t = {}
619 619 if self.changelog.filteredrevs:
620 620 tags, tt = self._findtags()
621 621 else:
622 622 tags = self._tagscache.tags
623 623 for k, v in tags.iteritems():
624 624 try:
625 625 # ignore tags to unknown nodes
626 626 self.changelog.rev(v)
627 627 t[k] = v
628 628 except (error.LookupError, ValueError):
629 629 pass
630 630 return t
631 631
632 632 def _findtags(self):
633 633 '''Do the hard work of finding tags. Return a pair of dicts
634 634 (tags, tagtypes) where tags maps tag name to node, and tagtypes
635 635 maps tag name to a string like \'global\' or \'local\'.
636 636 Subclasses or extensions are free to add their own tags, but
637 637 should be aware that the returned dicts will be retained for the
638 638 duration of the localrepo object.'''
639 639
640 640 # XXX what tagtype should subclasses/extensions use? Currently
641 641 # mq and bookmarks add tags, but do not set the tagtype at all.
642 642 # Should each extension invent its own tag type? Should there
643 643 # be one tagtype for all such "virtual" tags? Or is the status
644 644 # quo fine?
645 645
646 646 alltags = {} # map tag name to (node, hist)
647 647 tagtypes = {}
648 648
649 649 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
650 650 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
651 651
652 652 # Build the return dicts. Have to re-encode tag names because
653 653 # the tags module always uses UTF-8 (in order not to lose info
654 654 # writing to the cache), but the rest of Mercurial wants them in
655 655 # local encoding.
656 656 tags = {}
657 657 for (name, (node, hist)) in alltags.iteritems():
658 658 if node != nullid:
659 659 tags[encoding.tolocal(name)] = node
660 660 tags['tip'] = self.changelog.tip()
661 661 tagtypes = dict([(encoding.tolocal(name), value)
662 662 for (name, value) in tagtypes.iteritems()])
663 663 return (tags, tagtypes)
664 664
665 665 def tagtype(self, tagname):
666 666 '''
667 667 return the type of the given tag. result can be:
668 668
669 669 'local' : a local tag
670 670 'global' : a global tag
671 671 None : tag does not exist
672 672 '''
673 673
674 674 return self._tagscache.tagtypes.get(tagname)
675 675
676 676 def tagslist(self):
677 677 '''return a list of tags ordered by revision'''
678 678 if not self._tagscache.tagslist:
679 679 l = []
680 680 for t, n in self.tags().iteritems():
681 681 l.append((self.changelog.rev(n), t, n))
682 682 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
683 683
684 684 return self._tagscache.tagslist
685 685
686 686 def nodetags(self, node):
687 687 '''return the tags associated with a node'''
688 688 if not self._tagscache.nodetagscache:
689 689 nodetagscache = {}
690 690 for t, n in self._tagscache.tags.iteritems():
691 691 nodetagscache.setdefault(n, []).append(t)
692 692 for tags in nodetagscache.itervalues():
693 693 tags.sort()
694 694 self._tagscache.nodetagscache = nodetagscache
695 695 return self._tagscache.nodetagscache.get(node, [])
696 696
697 697 def nodebookmarks(self, node):
698 698 marks = []
699 699 for bookmark, n in self._bookmarks.iteritems():
700 700 if n == node:
701 701 marks.append(bookmark)
702 702 return sorted(marks)
703 703
704 704 def branchmap(self):
705 705 '''returns a dictionary {branch: [branchheads]} with branchheads
706 706 ordered by increasing revision number'''
707 707 branchmap.updatecache(self)
708 708 return self._branchcaches[self.filtername]
709 709
710 710 def branchtip(self, branch):
711 711 '''return the tip node for a given branch'''
712 712 try:
713 713 return self.branchmap().branchtip(branch)
714 714 except KeyError:
715 715 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
716 716
717 717 def lookup(self, key):
718 718 return self[key].node()
719 719
720 720 def lookupbranch(self, key, remote=None):
721 721 repo = remote or self
722 722 if key in repo.branchmap():
723 723 return key
724 724
725 725 repo = (remote and remote.local()) and remote or self
726 726 return repo[key].branch()
727 727
728 728 def known(self, nodes):
729 729 nm = self.changelog.nodemap
730 730 pc = self._phasecache
731 731 result = []
732 732 for n in nodes:
733 733 r = nm.get(n)
734 734 resp = not (r is None or pc.phase(self, r) >= phases.secret)
735 735 result.append(resp)
736 736 return result
737 737
738 738 def local(self):
739 739 return self
740 740
741 741 def cancopy(self):
742 742 # so statichttprepo's override of local() works
743 743 if not self.local():
744 744 return False
745 745 if not self.ui.configbool('phases', 'publish', True):
746 746 return True
747 747 # if publishing we can't copy if there is filtered content
748 748 return not self.filtered('visible').changelog.filteredrevs
749 749
750 750 def join(self, f, *insidef):
751 751 return os.path.join(self.path, f, *insidef)
752 752
753 753 def wjoin(self, f, *insidef):
754 754 return os.path.join(self.root, f, *insidef)
755 755
756 756 def file(self, f):
757 757 if f[0] == '/':
758 758 f = f[1:]
759 759 return filelog.filelog(self.sopener, f)
760 760
761 761 def changectx(self, changeid):
762 762 return self[changeid]
763 763
764 764 def parents(self, changeid=None):
765 765 '''get list of changectxs for parents of changeid'''
766 766 return self[changeid].parents()
767 767
768 768 def setparents(self, p1, p2=nullid):
769 769 self.dirstate.beginparentchange()
770 770 copies = self.dirstate.setparents(p1, p2)
771 771 pctx = self[p1]
772 772 if copies:
773 773 # Adjust copy records, the dirstate cannot do it, it
774 774 # requires access to parents manifests. Preserve them
775 775 # only for entries added to first parent.
776 776 for f in copies:
777 777 if f not in pctx and copies[f] in pctx:
778 778 self.dirstate.copy(copies[f], f)
779 779 if p2 == nullid:
780 780 for f, s in sorted(self.dirstate.copies().items()):
781 781 if f not in pctx and s not in pctx:
782 782 self.dirstate.copy(None, f)
783 783 self.dirstate.endparentchange()
784 784
785 785 def filectx(self, path, changeid=None, fileid=None):
786 786 """changeid can be a changeset revision, node, or tag.
787 787 fileid can be a file revision or node."""
788 788 return context.filectx(self, path, changeid, fileid)
789 789
790 790 def getcwd(self):
791 791 return self.dirstate.getcwd()
792 792
793 793 def pathto(self, f, cwd=None):
794 794 return self.dirstate.pathto(f, cwd)
795 795
796 796 def wfile(self, f, mode='r'):
797 797 return self.wopener(f, mode)
798 798
799 799 def _link(self, f):
800 800 return self.wvfs.islink(f)
801 801
802 802 def _loadfilter(self, filter):
803 803 if filter not in self.filterpats:
804 804 l = []
805 805 for pat, cmd in self.ui.configitems(filter):
806 806 if cmd == '!':
807 807 continue
808 808 mf = matchmod.match(self.root, '', [pat])
809 809 fn = None
810 810 params = cmd
811 811 for name, filterfn in self._datafilters.iteritems():
812 812 if cmd.startswith(name):
813 813 fn = filterfn
814 814 params = cmd[len(name):].lstrip()
815 815 break
816 816 if not fn:
817 817 fn = lambda s, c, **kwargs: util.filter(s, c)
818 818 # Wrap old filters not supporting keyword arguments
819 819 if not inspect.getargspec(fn)[2]:
820 820 oldfn = fn
821 821 fn = lambda s, c, **kwargs: oldfn(s, c)
822 822 l.append((mf, fn, params))
823 823 self.filterpats[filter] = l
824 824 return self.filterpats[filter]
825 825
826 826 def _filter(self, filterpats, filename, data):
827 827 for mf, fn, cmd in filterpats:
828 828 if mf(filename):
829 829 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
830 830 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
831 831 break
832 832
833 833 return data
834 834
835 835 @unfilteredpropertycache
836 836 def _encodefilterpats(self):
837 837 return self._loadfilter('encode')
838 838
839 839 @unfilteredpropertycache
840 840 def _decodefilterpats(self):
841 841 return self._loadfilter('decode')
842 842
843 843 def adddatafilter(self, name, filter):
844 844 self._datafilters[name] = filter
845 845
846 846 def wread(self, filename):
847 847 if self._link(filename):
848 848 data = self.wvfs.readlink(filename)
849 849 else:
850 850 data = self.wopener.read(filename)
851 851 return self._filter(self._encodefilterpats, filename, data)
852 852
853 853 def wwrite(self, filename, data, flags):
854 854 data = self._filter(self._decodefilterpats, filename, data)
855 855 if 'l' in flags:
856 856 self.wopener.symlink(data, filename)
857 857 else:
858 858 self.wopener.write(filename, data)
859 859 if 'x' in flags:
860 860 self.wvfs.setflags(filename, False, True)
861 861
862 862 def wwritedata(self, filename, data):
863 863 return self._filter(self._decodefilterpats, filename, data)
864 864
865 865 def transaction(self, desc, report=None):
866 866 tr = self._transref and self._transref() or None
867 867 if tr and tr.running():
868 868 return tr.nest()
869 869
870 870 # abort here if the journal already exists
871 871 if self.svfs.exists("journal"):
872 872 raise error.RepoError(
873 873 _("abandoned transaction found"),
874 874 hint=_("run 'hg recover' to clean up transaction"))
875 875
876 876 def onclose():
877 877 self.store.write(self._transref())
878 878
879 879 self._writejournal(desc)
880 880 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
881 881 rp = report and report or self.ui.warn
882 882 tr = transaction.transaction(rp, self.sopener,
883 883 "journal",
884 884 aftertrans(renames),
885 885 self.store.createmode,
886 886 onclose)
887 887 self._transref = weakref.ref(tr)
888 888 return tr
889 889
890 890 def _journalfiles(self):
891 891 return ((self.svfs, 'journal'),
892 892 (self.vfs, 'journal.dirstate'),
893 893 (self.vfs, 'journal.branch'),
894 894 (self.vfs, 'journal.desc'),
895 895 (self.vfs, 'journal.bookmarks'),
896 896 (self.svfs, 'journal.phaseroots'))
897 897
898 898 def undofiles(self):
899 899 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
900 900
    def _writejournal(self, desc):
        # Snapshot non-store state (dirstate, branch, bookmarks) and the
        # phaseroots into journal.* files so a rollback can restore them
        # alongside the store.
        self.opener.write("journal.dirstate",
                          self.opener.tryread("dirstate"))
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # journal.desc records the pre-transaction length and description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))
        self.opener.write("journal.bookmarks",
                          self.opener.tryread("bookmarks"))
        self.sopener.write("journal.phaseroots",
                          self.sopener.tryread("phaseroots"))
912 912
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back, False
        otherwise. Takes the store lock for the duration.
        """
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, "journal",
                                     self.ui.warn)
                # rollback wrote straight to disk; drop in-memory caches
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
927 927
928 928 def rollback(self, dryrun=False, force=False):
929 929 wlock = lock = None
930 930 try:
931 931 wlock = self.wlock()
932 932 lock = self.lock()
933 933 if self.svfs.exists("undo"):
934 934 return self._rollback(dryrun, force)
935 935 else:
936 936 self.ui.warn(_("no rollback information available\n"))
937 937 return 1
938 938 finally:
939 939 release(lock, wlock)
940 940
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        """Undo the last transaction using the undo.* files.

        Returns 0 on success (including dry runs). Restores bookmarks and
        phaseroots, and - only when a dirstate parent was stripped - the
        dirstate and branch as well.
        """
        ui = self.ui
        try:
            # undo.desc holds "<oldlen>\n<desc>[\n<detail>]"
            args = self.opener.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        # refuse to roll back a commit from under a different checkout
        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        transaction.rollback(self.sopener, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # only restore the dirstate if one of its parents was stripped
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.opener.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            self.dirstate.invalidate()
            parents = tuple([p.rev() for p in self.parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1006 1006
1007 1007 def invalidatecaches(self):
1008 1008
1009 1009 if '_tagscache' in vars(self):
1010 1010 # can't use delattr on proxy
1011 1011 del self.__dict__['_tagscache']
1012 1012
1013 1013 self.unfiltered()._branchcaches.clear()
1014 1014 self.invalidatevolatilesets()
1015 1015
1016 1016 def invalidatevolatilesets(self):
1017 1017 self.filteredrevcache.clear()
1018 1018 obsolete.clearobscaches(self)
1019 1019
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # drop every cached attribute of the dirstate object itself,
            # then drop the repo's cached dirstate property
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1036 1036
1037 1037 def invalidate(self):
1038 1038 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1039 1039 for k in self._filecache:
1040 1040 # dirstate is invalidated separately in invalidatedirstate()
1041 1041 if k == 'dirstate':
1042 1042 continue
1043 1043
1044 1044 try:
1045 1045 delattr(unfiltered, k)
1046 1046 except AttributeError:
1047 1047 pass
1048 1048 self.invalidatecaches()
1049 1049 self.store.invalidatecaches()
1050 1050
1051 1051 def invalidateall(self):
1052 1052 '''Fully invalidates both store and non-store parts, causing the
1053 1053 subsequent operation to reread any outside changes.'''
1054 1054 # extension should hook this to invalidate its caches
1055 1055 self.invalidate()
1056 1056 self.invalidatedirstate()
1057 1057
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire lockname via vfs and return the lock object.

        If wait is true, block when the lock is already held (warning the
        user, with a 600 second default timeout from ui.timeout);
        otherwise re-raise LockHeld. acquirefn, if given, runs after the
        lock is obtained; releasefn is passed through to the lock.
        """
        try:
            # first attempt: non-blocking (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1074 1074
1075 1075 def _afterlock(self, callback):
1076 1076 """add a callback to the current repository lock.
1077 1077
1078 1078 The callback will be executed on lock release."""
1079 1079 l = self._lockref and self._lockref()
1080 1080 if l:
1081 1081 l.postrelease.append(callback)
1082 1082 else:
1083 1083 callback()
1084 1084
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-enter the lock we already hold
            l.lock()
            return l

        def unlock():
            # on release, refresh cached file stats for store filecaches
            # (dirstate is managed by wlock instead)
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        # acquiring the lock invalidates store caches (self.invalidate)
        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1104 1104
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the lock we already hold
            l.lock()
            return l

        def unlock():
            # on release: discard a pending parent change, otherwise flush
            # the dirstate; then refresh its filecache entry
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1127 1127
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parent manifests; linkrev is the changelog revision being created.
        Appends fname to changelist when the file changed, and returns the
        new filelog node (or the reusable parent node when unchanged).
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1208 1208
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record visited directories so explicit-pattern checking below
            # can tell directories from missing files
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise util.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    if wctx.sub(s).dirty(True):
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise util.Abort(
                                _("uncommitted changes in subrepo %s") % s,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(status.modified + status.added + status.removed)

                for f in match.files():
                    f = self.dirstate.normalize(f)
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in status.deleted:
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            cctx = context.workingctx(self, text, user, date, extra, status)

            # nothing to commit: no files, not a merge, not a branch close,
            # same branch as the first parent
            if (not force and not extra.get("close") and not merge
                and not cctx.files()
                and wctx.branch() == wctx.p1().branch()):
                return None

            if merge and cctx.deleted():
                raise util.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate(self)
            for f in status.modified:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
        finally:
            wlock.release()

        # run the commit hook only once the wlock has been released
        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            self.hook("commit", node=node, parent1=parent1, parent2=parent2)
        self._afterlock(commithook)
        return ret
1376 1376
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        When error is true, IOErrors while committing individual files are
        re-raised even for missing files. Returns the new changelog node.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            # a None context means the file is gone
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1461 1461
1462 1462 @unfilteredmethod
1463 1463 def destroying(self):
1464 1464 '''Inform the repository that nodes are about to be destroyed.
1465 1465 Intended for use by strip and rollback, so there's a common
1466 1466 place for anything that has to be done before destroying history.
1467 1467
1468 1468 This is mostly useful for saving state that is in memory and waiting
1469 1469 to be flushed when the current lock is released. Because a call to
1470 1470 destroyed is imminent, the repo will be invalidated causing those
1471 1471 changes to stay in memory (waiting for the next unlock), or vanish
1472 1472 completely.
1473 1473 '''
1474 1474 # When using the same lock to commit and strip, the phasecache is left
1475 1475 # dirty after committing. Then when we strip, the repo is invalidated,
1476 1476 # causing those changes to disappear.
1477 1477 if '_phasecache' in vars(self):
1478 1478 self._phasecache.write()
1479 1479
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1513 1513
1514 1514 def walk(self, match, node=None):
1515 1515 '''
1516 1516 walk recursively through the directory tree or a given
1517 1517 changeset, finding all files matched by the match
1518 1518 function
1519 1519 '''
1520 1520 return self[node].walk(match)
1521 1521
1522 1522 def status(self, node1='.', node2=None, match=None,
1523 1523 ignored=False, clean=False, unknown=False,
1524 1524 listsubrepos=False):
1525 1525 '''a convenience method that calls node1.status(node2)'''
1526 1526 return self[node1].status(node2, match, ignored, clean, unknown,
1527 1527 listsubrepos)
1528 1528
1529 1529 def heads(self, start=None):
1530 1530 heads = self.changelog.heads(start)
1531 1531 # sort the output in rev descending order
1532 1532 return sorted(heads, key=self.changelog.rev, reverse=True)
1533 1533
1534 1534 def branchheads(self, branch=None, start=None, closed=False):
1535 1535 '''return a (possibly filtered) list of heads for the given branch
1536 1536
1537 1537 Heads are returned in topological order, from newest to oldest.
1538 1538 If branch is None, use the dirstate branch.
1539 1539 If start is not None, return only heads reachable from start.
1540 1540 If closed is True, return heads that are marked as closed as well.
1541 1541 '''
1542 1542 if branch is None:
1543 1543 branch = self[None].branch()
1544 1544 branches = self.branchmap()
1545 1545 if branch not in branches:
1546 1546 return []
1547 1547 # the cache returns heads ordered lowest to highest
1548 1548 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1549 1549 if start is not None:
1550 1550 # filter out the heads that cannot be reached from startrev
1551 1551 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1552 1552 bheads = [h for h in bheads if h in fbheads]
1553 1553 return bheads
1554 1554
1555 1555 def branches(self, nodes):
1556 1556 if not nodes:
1557 1557 nodes = [self.changelog.tip()]
1558 1558 b = []
1559 1559 for n in nodes:
1560 1560 t = n
1561 1561 while True:
1562 1562 p = self.changelog.parents(n)
1563 1563 if p[1] != nullid or p[0] == nullid:
1564 1564 b.append((t, n, p[0], p[1]))
1565 1565 break
1566 1566 n = p[0]
1567 1567 return b
1568 1568
1569 1569 def between(self, pairs):
1570 1570 r = []
1571 1571
1572 1572 for top, bottom in pairs:
1573 1573 n, l, i = top, [], 0
1574 1574 f = 1
1575 1575
1576 1576 while n != bottom and n != nullid:
1577 1577 p = self.changelog.parents(n)[0]
1578 1578 if i == f:
1579 1579 l.append(n)
1580 1580 f = f * 2
1581 1581 n = p
1582 1582 i += 1
1583 1583
1584 1584 r.append(l)
1585 1585
1586 1586 return r
1587 1587
1588 1588 def checkpush(self, pushop):
1589 1589 """Extensions can override this function if additional checks have
1590 1590 to be performed before pushing, or call it if they override push
1591 1591 command.
1592 1592 """
1593 1593 pass
1594 1594
1595 1595 @unfilteredpropertycache
1596 1596 def prepushoutgoinghooks(self):
1597 1597 """Return util.hooks consists of "(repo, remote, outgoing)"
1598 1598 functions, which are called before pushing changesets.
1599 1599 """
1600 1600 return util.hooks()
1601 1601
    def stream_in(self, remote, requirements):
        """Perform a streaming clone by copying raw store files from remote.

        requirements is updated in place with the remote's format
        requirements and written out. Returns len(self.heads()) + 1,
        mirroring pull's "heads + 1" convention.
        """
        lock = self.lock()
        try:
            # Save remote branchmap. We will use it later
            # to speed up branchcache creation
            rbranchmap = None
            if remote.capable("branchmap"):
                rbranchmap = remote.branchmap()

            fp = remote.stream_out()
            # first line: status code (0 = ok)
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            # second line: "<file count> <total bytes>"
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            handled_bytes = 0
            self.ui.progress(_('clone'), 0, total=total_bytes)
            start = time.time()

            tr = self.transaction(_('clone'))
            try:
                for i in xrange(total_files):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if self.ui.debugflag:
                        self.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    ofp = self.sopener(store.decodedir(name), 'w')
                    for chunk in util.filechunkiter(fp, limit=size):
                        handled_bytes += len(chunk)
                        self.ui.progress(_('clone'), handled_bytes,
                                         total=total_bytes)
                        ofp.write(chunk)
                    ofp.close()
                tr.close()
            finally:
                tr.release()

            # Writing straight to files circumvented the inmemory caches
            self.invalidate()

            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.progress(_('clone'), None)
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements +
            #                    new format-related
            # requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            if rbranchmap:
                # seed the local branch cache from the remote branchmap
                rbheads = []
                for bheads in rbranchmap.itervalues():
                    rbheads.extend(bheads)

                if rbheads:
                    rtiprev = max((int(self.changelog.rev(node))
                                   for node in rbheads))
                    cache = branchmap.branchcache(rbranchmap,
                                                  self[rtiprev].node(),
                                                  rtiprev)
                    # Try to stick it as low as possible
                    # filter above served are unlikely to be fetch from a clone
                    for candidate in ('base', 'immutable', 'served'):
                        rview = self.filtered(candidate)
                        if cache.validfor(rview):
                            self._branchcaches[candidate] = cache
                            cache.write(rview)
                            break
            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()
1704 1704
1705 1705 def clone(self, remote, heads=[], stream=False):
1706 1706 '''clone remote repository.
1707 1707
1708 1708 keyword arguments:
1709 1709 heads: list of revs to clone (forces use of pull)
1710 1710 stream: use streaming clone if possible'''
1711 1711
1712 1712 # now, all clients that can request uncompressed clones can
1713 1713 # read repo formats supported by all servers that can serve
1714 1714 # them.
1715 1715
1716 1716 # if revlog format changes, client will have to check version
1717 1717 # and format flags on "stream" capability, and use
1718 1718 # uncompressed only if compatible.
1719 1719
1720 1720 if not stream:
1721 1721 # if the server explicitly prefers to stream (for fast LANs)
1722 1722 stream = remote.capable('stream-preferred')
1723 1723
1724 1724 if stream and not heads:
1725 1725 # 'stream' means remote revlog format is revlogv1 only
1726 1726 if remote.capable('stream'):
1727 return self.stream_in(remote, set(('revlogv1',)))
1728 # otherwise, 'streamreqs' contains the remote revlog format
1729 streamreqs = remote.capable('streamreqs')
1730 if streamreqs:
1731 streamreqs = set(streamreqs.split(','))
1732 # if we support it, stream in and adjust our requirements
1733 if not streamreqs - self.supportedformats:
1734 return self.stream_in(remote, streamreqs)
1727 self.stream_in(remote, set(('revlogv1',)))
1728 else:
1729 # otherwise, 'streamreqs' contains the remote revlog format
1730 streamreqs = remote.capable('streamreqs')
1731 if streamreqs:
1732 streamreqs = set(streamreqs.split(','))
1733 # if we support it, stream in and adjust our requirements
1734 if not streamreqs - self.supportedformats:
1735 self.stream_in(remote, streamreqs)
1735 1736
1736 1737 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1737 1738 try:
1738 1739 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1739 1740 ret = exchange.pull(self, remote, heads).cgresult
1740 1741 finally:
1741 1742 self.ui.restoreconfig(quiet)
1742 1743 return ret
1743 1744
1744 1745 def pushkey(self, namespace, key, old, new):
1745 1746 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1746 1747 old=old, new=new)
1747 1748 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1748 1749 ret = pushkey.push(self, namespace, key, old, new)
1749 1750 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1750 1751 ret=ret)
1751 1752 return ret
1752 1753
1753 1754 def listkeys(self, namespace):
1754 1755 self.hook('prelistkeys', throw=True, namespace=namespace)
1755 1756 self.ui.debug('listing keys for "%s"\n' % namespace)
1756 1757 values = pushkey.list(self, namespace)
1757 1758 self.hook('listkeys', namespace=namespace, values=values)
1758 1759 return values
1759 1760
1760 1761 def debugwireargs(self, one, two, three=None, four=None, five=None):
1761 1762 '''used to test argument passing over the wire'''
1762 1763 return "%s %s %s %s %s" % (one, two, three, four, five)
1763 1764
1764 1765 def savecommitmessage(self, text):
1765 1766 fp = self.opener('last-message.txt', 'wb')
1766 1767 try:
1767 1768 fp.write(text)
1768 1769 finally:
1769 1770 fp.close()
1770 1771 return self.pathto(fp.name[len(self.root) + 1:])
1771 1772
1772 1773 # used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) journal file.

    Implemented as a plain closure (used to avoid circular references so
    destructors work). Renames of files that do not yet exist are ignored.
    """
    pending = [tuple(t) for t in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return renameall
1782 1783
def undoname(fn):
    """Map a journal file path to its undo counterpart (journal* -> undo*)."""
    directory, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1787 1788
def instance(ui, path, create):
    # repo factory: open (or create) a local repository at the given path,
    # after stripping any file:// URL prefix via util.urllocalpath
    return localrepository(ui, util.urllocalpath(path), create)
1790 1791
def islocal(path):
    # local repositories are, by definition, always local
    return True
@@ -1,123 +1,128 b''
1 1 #require serve
2 2
3 3 $ hg init a
4 4 $ cd a
5 5 $ echo a > a
6 6 $ hg ci -Ama -d '1123456789 0'
7 7 adding a
8 8 $ hg --config server.uncompressed=True serve -p $HGPORT -d --pid-file=hg.pid
9 9 $ cat hg.pid >> $DAEMON_PIDS
10 10 $ cd ..
11 11 $ "$TESTDIR/tinyproxy.py" $HGPORT1 localhost >proxy.log 2>&1 </dev/null &
12 12 $ while [ ! -f proxy.pid ]; do sleep 0; done
13 13 $ cat proxy.pid >> $DAEMON_PIDS
14 14
15 15 url for proxy, stream
16 16
17 17 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone --uncompressed http://localhost:$HGPORT/ b
18 18 streaming all changes
19 19 3 files to transfer, 303 bytes of data
20 20 transferred * bytes in * seconds (*/sec) (glob)
21 searching for changes
22 no changes found
21 23 updating to branch default
22 24 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
23 25 $ cd b
24 26 $ hg verify
25 27 checking changesets
26 28 checking manifests
27 29 crosschecking files in changesets and manifests
28 30 checking files
29 31 1 files, 1 changesets, 1 total revisions
30 32 $ cd ..
31 33
32 34 url for proxy, pull
33 35
34 36 $ http_proxy=http://localhost:$HGPORT1/ hg --config http_proxy.always=True clone http://localhost:$HGPORT/ b-pull
35 37 requesting all changes
36 38 adding changesets
37 39 adding manifests
38 40 adding file changes
39 41 added 1 changesets with 1 changes to 1 files
40 42 updating to branch default
41 43 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
42 44 $ cd b-pull
43 45 $ hg verify
44 46 checking changesets
45 47 checking manifests
46 48 crosschecking files in changesets and manifests
47 49 checking files
48 50 1 files, 1 changesets, 1 total revisions
49 51 $ cd ..
50 52
51 53 host:port for proxy
52 54
53 55 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ c
54 56 requesting all changes
55 57 adding changesets
56 58 adding manifests
57 59 adding file changes
58 60 added 1 changesets with 1 changes to 1 files
59 61 updating to branch default
60 62 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 63
62 64 proxy url with user name and password
63 65
64 66 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ d
65 67 requesting all changes
66 68 adding changesets
67 69 adding manifests
68 70 adding file changes
69 71 added 1 changesets with 1 changes to 1 files
70 72 updating to branch default
71 73 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
72 74
73 75 url with user name and password
74 76
75 77 $ http_proxy=http://user:passwd@localhost:$HGPORT1 hg clone --config http_proxy.always=True http://user:passwd@localhost:$HGPORT/ e
76 78 requesting all changes
77 79 adding changesets
78 80 adding manifests
79 81 adding file changes
80 82 added 1 changesets with 1 changes to 1 files
81 83 updating to branch default
82 84 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
83 85
84 86 bad host:port for proxy
85 87
86 88 $ http_proxy=localhost:$HGPORT2 hg clone --config http_proxy.always=True http://localhost:$HGPORT/ f
87 89 abort: error: Connection refused
88 90 [255]
89 91
90 92 do not use the proxy if it is in the no list
91 93
92 94 $ http_proxy=localhost:$HGPORT1 hg clone --config http_proxy.no=localhost http://localhost:$HGPORT/ g
93 95 requesting all changes
94 96 adding changesets
95 97 adding manifests
96 98 adding file changes
97 99 added 1 changesets with 1 changes to 1 files
98 100 updating to branch default
99 101 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 102 $ cat proxy.log
101 103 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
102 104 * - - [*] "GET http://localhost:$HGPORT/?cmd=branchmap HTTP/1.1" - - (glob)
103 105 * - - [*] "GET http://localhost:$HGPORT/?cmd=stream_out HTTP/1.1" - - (glob)
106 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
107 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
108 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
104 109 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
105 110 *- - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
106 111 *- - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
107 112 *- - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
108 113 *- - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
109 114 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
110 115 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
111 116 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
112 117 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
113 118 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
114 119 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
115 120 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
116 121 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
117 122 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
118 123 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
119 124 * - - [*] "GET http://localhost:$HGPORT/?cmd=capabilities HTTP/1.1" - - (glob)
120 125 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=bookmarks (glob)
121 126 * - - [*] "GET http://localhost:$HGPORT/?cmd=batch HTTP/1.1" - - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D (glob)
122 127 * - - [*] "GET http://localhost:$HGPORT/?cmd=getbundle HTTP/1.1" - - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=83180e7845de420a1bb46896fd5fe05294f8d629 (glob)
123 128 * - - [*] "GET http://localhost:$HGPORT/?cmd=listkeys HTTP/1.1" - - x-hgarg-1:namespace=phases (glob)
@@ -1,306 +1,313 b''
1 1 #require serve
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo foo>foo
6 6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 7 $ echo foo>foo.d/foo
8 8 $ echo bar>foo.d/bAr.hg.d/BaR
9 9 $ echo bar>foo.d/baR.d.hg/bAR
10 10 $ hg commit -A -m 1
11 11 adding foo
12 12 adding foo.d/bAr.hg.d/BaR
13 13 adding foo.d/baR.d.hg/bAR
14 14 adding foo.d/foo
15 15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 16 $ hg --config server.uncompressed=False serve -p $HGPORT1 -d --pid-file=../hg2.pid
17 17
18 18 Test server address cannot be reused
19 19
20 20 #if windows
21 21 $ hg serve -p $HGPORT1 2>&1
22 22 abort: cannot start server at ':$HGPORT1': * (glob)
23 23 [255]
24 24 #else
25 25 $ hg serve -p $HGPORT1 2>&1
26 26 abort: cannot start server at ':$HGPORT1': Address already in use
27 27 [255]
28 28 #endif
29 29 $ cd ..
30 30 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
31 31
32 32 clone via stream
33 33
34 34 $ hg clone --uncompressed http://localhost:$HGPORT/ copy 2>&1
35 35 streaming all changes
36 36 6 files to transfer, 606 bytes of data
37 37 transferred * bytes in * seconds (*/sec) (glob)
38 searching for changes
39 no changes found
38 40 updating to branch default
39 41 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
40 42 $ hg verify -R copy
41 43 checking changesets
42 44 checking manifests
43 45 crosschecking files in changesets and manifests
44 46 checking files
45 47 4 files, 1 changesets, 4 total revisions
46 48
47 49 try to clone via stream, should use pull instead
48 50
49 51 $ hg clone --uncompressed http://localhost:$HGPORT1/ copy2
50 52 requesting all changes
51 53 adding changesets
52 54 adding manifests
53 55 adding file changes
54 56 added 1 changesets with 4 changes to 4 files
55 57 updating to branch default
56 58 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 59
58 60 clone via pull
59 61
60 62 $ hg clone http://localhost:$HGPORT1/ copy-pull
61 63 requesting all changes
62 64 adding changesets
63 65 adding manifests
64 66 adding file changes
65 67 added 1 changesets with 4 changes to 4 files
66 68 updating to branch default
67 69 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 70 $ hg verify -R copy-pull
69 71 checking changesets
70 72 checking manifests
71 73 crosschecking files in changesets and manifests
72 74 checking files
73 75 4 files, 1 changesets, 4 total revisions
74 76 $ cd test
75 77 $ echo bar > bar
76 78 $ hg commit -A -d '1 0' -m 2
77 79 adding bar
78 80 $ cd ..
79 81
80 82 clone over http with --update
81 83
82 84 $ hg clone http://localhost:$HGPORT1/ updated --update 0
83 85 requesting all changes
84 86 adding changesets
85 87 adding manifests
86 88 adding file changes
87 89 added 2 changesets with 5 changes to 5 files
88 90 updating to branch default
89 91 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 92 $ hg log -r . -R updated
91 93 changeset: 0:8b6053c928fe
92 94 user: test
93 95 date: Thu Jan 01 00:00:00 1970 +0000
94 96 summary: 1
95 97
96 98 $ rm -rf updated
97 99
98 100 incoming via HTTP
99 101
100 102 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
101 103 adding changesets
102 104 adding manifests
103 105 adding file changes
104 106 added 1 changesets with 4 changes to 4 files
105 107 updating to branch default
106 108 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 109 $ cd partial
108 110 $ touch LOCAL
109 111 $ hg ci -qAm LOCAL
110 112 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
111 113 comparing with http://localhost:$HGPORT1/
112 114 searching for changes
113 115 2
114 116 $ cd ..
115 117
116 118 pull
117 119
118 120 $ cd copy-pull
119 121 $ echo '[hooks]' >> .hg/hgrc
120 122 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup" >> .hg/hgrc
121 123 $ hg pull
122 124 pulling from http://localhost:$HGPORT1/
123 125 searching for changes
124 126 adding changesets
125 127 adding manifests
126 128 adding file changes
127 129 added 1 changesets with 1 changes to 1 files
128 130 changegroup hook: HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d HG_SOURCE=pull HG_URL=http://localhost:$HGPORT1/
129 131 (run 'hg update' to get a working copy)
130 132 $ cd ..
131 133
132 134 clone from invalid URL
133 135
134 136 $ hg clone http://localhost:$HGPORT/bad
135 137 abort: HTTP Error 404: Not Found
136 138 [255]
137 139
138 140 test http authentication
139 141 + use the same server to test server side streaming preference
140 142
141 143 $ cd test
142 144 $ cat << EOT > userpass.py
143 145 > import base64
144 146 > from mercurial.hgweb import common
145 147 > def perform_authentication(hgweb, req, op):
146 148 > auth = req.env.get('HTTP_AUTHORIZATION')
147 149 > if not auth:
148 150 > raise common.ErrorResponse(common.HTTP_UNAUTHORIZED, 'who',
149 151 > [('WWW-Authenticate', 'Basic Realm="mercurial"')])
150 152 > if base64.b64decode(auth.split()[1]).split(':', 1) != ['user', 'pass']:
151 153 > raise common.ErrorResponse(common.HTTP_FORBIDDEN, 'no')
152 154 > def extsetup():
153 155 > common.permhooks.insert(0, perform_authentication)
154 156 > EOT
155 157 $ hg --config extensions.x=userpass.py serve -p $HGPORT2 -d --pid-file=pid \
156 158 > --config server.preferuncompressed=True \
157 159 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
158 160 $ cat pid >> $DAEMON_PIDS
159 161
160 162 $ cat << EOF > get_pass.py
161 163 > import getpass
162 164 > def newgetpass(arg):
163 165 > return "pass"
164 166 > getpass.getpass = newgetpass
165 167 > EOF
166 168
167 169 #if python243
168 170 $ hg id http://localhost:$HGPORT2/
169 171 abort: http authorization required for http://localhost:$HGPORT2/
170 172 [255]
171 173 $ hg id http://localhost:$HGPORT2/
172 174 abort: http authorization required for http://localhost:$HGPORT2/
173 175 [255]
174 176 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
175 177 http authorization required for http://localhost:$HGPORT2/
176 178 realm: mercurial
177 179 user: user
178 180 password: 5fed3813f7f5
179 181 $ hg id http://user:pass@localhost:$HGPORT2/
180 182 5fed3813f7f5
181 183 #endif
182 184 $ echo '[auth]' >> .hg/hgrc
183 185 $ echo 'l.schemes=http' >> .hg/hgrc
184 186 $ echo 'l.prefix=lo' >> .hg/hgrc
185 187 $ echo 'l.username=user' >> .hg/hgrc
186 188 $ echo 'l.password=pass' >> .hg/hgrc
187 189 $ hg id http://localhost:$HGPORT2/
188 190 5fed3813f7f5
189 191 $ hg id http://localhost:$HGPORT2/
190 192 5fed3813f7f5
191 193 $ hg id http://user@localhost:$HGPORT2/
192 194 5fed3813f7f5
193 195 #if python243
194 196 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
195 197 streaming all changes
196 198 7 files to transfer, 916 bytes of data
197 199 transferred * bytes in * seconds (*/sec) (glob)
200 searching for changes
201 no changes found
198 202 updating to branch default
199 203 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
200 204
201 205 $ hg id http://user2@localhost:$HGPORT2/
202 206 abort: http authorization required for http://localhost:$HGPORT2/
203 207 [255]
204 208 $ hg id http://user:pass2@localhost:$HGPORT2/
205 209 abort: HTTP Error 403: no
206 210 [255]
207 211
208 212 $ hg -R dest tag -r tip top
209 213 $ hg -R dest push http://user:pass@localhost:$HGPORT2/
210 214 pushing to http://user:***@localhost:$HGPORT2/
211 215 searching for changes
212 216 remote: adding changesets
213 217 remote: adding manifests
214 218 remote: adding file changes
215 219 remote: added 1 changesets with 1 changes to 1 files
216 220 $ hg rollback -q
217 221
218 222 $ cut -c38- ../access.log
219 223 "GET /?cmd=capabilities HTTP/1.1" 200 -
220 224 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
221 225 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
222 226 "GET /?cmd=capabilities HTTP/1.1" 200 -
223 227 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
224 228 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
225 229 "GET /?cmd=capabilities HTTP/1.1" 200 -
226 230 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
227 231 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
228 232 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
229 233 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
230 234 "GET /?cmd=capabilities HTTP/1.1" 200 -
231 235 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
232 236 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
233 237 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
234 238 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
235 239 "GET /?cmd=capabilities HTTP/1.1" 200 -
236 240 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
237 241 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
238 242 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
239 243 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
240 244 "GET /?cmd=capabilities HTTP/1.1" 200 -
241 245 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
242 246 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
243 247 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
244 248 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
245 249 "GET /?cmd=capabilities HTTP/1.1" 200 -
246 250 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
247 251 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
248 252 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces
249 253 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
250 254 "GET /?cmd=capabilities HTTP/1.1" 200 -
251 255 "GET /?cmd=branchmap HTTP/1.1" 200 -
252 256 "GET /?cmd=stream_out HTTP/1.1" 401 -
253 257 "GET /?cmd=stream_out HTTP/1.1" 200 -
258 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
259 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d
260 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
254 261 "GET /?cmd=capabilities HTTP/1.1" 200 -
255 262 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
256 263 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
257 264 "GET /?cmd=capabilities HTTP/1.1" 200 -
258 265 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip
259 266 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces
260 267 "GET /?cmd=listkeys HTTP/1.1" 403 - x-hgarg-1:namespace=namespaces
261 268 "GET /?cmd=capabilities HTTP/1.1" 200 -
262 269 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872
263 270 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=phases
264 271 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
265 272 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
266 273 "GET /?cmd=branchmap HTTP/1.1" 200 -
267 274 "GET /?cmd=branchmap HTTP/1.1" 200 -
268 275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks
269 276 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524
270 277 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases
271 278
272 279 #endif
273 280 $ cd ..
274 281
275 282 clone of serve with repo in root and unserved subrepo (issue2970)
276 283
277 284 $ hg --cwd test init sub
278 285 $ echo empty > test/sub/empty
279 286 $ hg --cwd test/sub add empty
280 287 $ hg --cwd test/sub commit -qm 'add empty'
281 288 $ hg --cwd test/sub tag -r 0 something
282 289 $ echo sub = sub > test/.hgsub
283 290 $ hg --cwd test add .hgsub
284 291 $ hg --cwd test commit -qm 'add subrepo'
285 292 $ hg clone http://localhost:$HGPORT noslash-clone
286 293 requesting all changes
287 294 adding changesets
288 295 adding manifests
289 296 adding file changes
290 297 added 3 changesets with 7 changes to 7 files
291 298 updating to branch default
292 299 abort: HTTP Error 404: Not Found
293 300 [255]
294 301 $ hg clone http://localhost:$HGPORT/ slash-clone
295 302 requesting all changes
296 303 adding changesets
297 304 adding manifests
298 305 adding file changes
299 306 added 3 changesets with 7 changes to 7 files
300 307 updating to branch default
301 308 abort: HTTP Error 404: Not Found
302 309 [255]
303 310
304 311 check error log
305 312
306 313 $ cat error.log
@@ -1,431 +1,451 b''
1 1
2 2
3 3 This test tries to exercise the ssh functionality with a dummy script
4 4
5 5 creating 'remote' repo
6 6
7 7 $ hg init remote
8 8 $ cd remote
9 9 $ echo this > foo
10 10 $ echo this > fooO
11 11 $ hg ci -A -m "init" foo fooO
12 12 $ cat <<EOF > .hg/hgrc
13 13 > [server]
14 14 > uncompressed = True
15 15 >
16 16 > [hooks]
17 17 > changegroup = python "$TESTDIR/printenv.py" changegroup-in-remote 0 ../dummylog
18 18 > EOF
19 19 $ cd ..
20 20
21 21 repo not found error
22 22
23 23 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/nonexistent local
24 24 remote: abort: there is no Mercurial repository here (.hg not found)!
25 25 abort: no suitable response from remote hg!
26 26 [255]
27 27
28 28 non-existent absolute path
29 29
30 30 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy//`pwd`/nonexistent local
31 31 remote: abort: there is no Mercurial repository here (.hg not found)!
32 32 abort: no suitable response from remote hg!
33 33 [255]
34 34
35 35 clone remote via stream
36 36
37 37 $ hg clone -e "python \"$TESTDIR/dummyssh\"" --uncompressed ssh://user@dummy/remote local-stream
38 38 streaming all changes
39 39 4 files to transfer, 392 bytes of data
40 40 transferred 392 bytes in * seconds (*/sec) (glob)
41 searching for changes
42 no changes found
41 43 updating to branch default
42 44 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
43 45 $ cd local-stream
44 46 $ hg verify
45 47 checking changesets
46 48 checking manifests
47 49 crosschecking files in changesets and manifests
48 50 checking files
49 51 2 files, 1 changesets, 2 total revisions
50 52 $ cd ..
51 53
54 clone bookmarks via stream
55
56 $ hg -R local-stream book mybook
57 $ hg clone -e "python \"$TESTDIR/dummyssh\"" --uncompressed ssh://user@dummy/local-stream stream2
58 streaming all changes
59 4 files to transfer, 392 bytes of data
60 transferred 392 bytes in * seconds (* KB/sec) (glob)
61 searching for changes
62 no changes found
63 updating to branch default
64 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
65 $ cd stream2
66 $ hg book
67 mybook 0:1160648e36ce
68 $ cd ..
69 $ rm -rf local-stream stream2
70
52 71 clone remote via pull
53 72
54 73 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local
55 74 requesting all changes
56 75 adding changesets
57 76 adding manifests
58 77 adding file changes
59 78 added 1 changesets with 2 changes to 2 files
60 79 updating to branch default
61 80 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
62 81
63 82 verify
64 83
65 84 $ cd local
66 85 $ hg verify
67 86 checking changesets
68 87 checking manifests
69 88 crosschecking files in changesets and manifests
70 89 checking files
71 90 2 files, 1 changesets, 2 total revisions
72 91 $ echo '[hooks]' >> .hg/hgrc
73 92 $ echo "changegroup = python \"$TESTDIR/printenv.py\" changegroup-in-local 0 ../dummylog" >> .hg/hgrc
74 93
75 94 empty default pull
76 95
77 96 $ hg paths
78 97 default = ssh://user@dummy/remote
79 98 $ hg pull -e "python \"$TESTDIR/dummyssh\""
80 99 pulling from ssh://user@dummy/remote
81 100 searching for changes
82 101 no changes found
83 102
84 103 local change
85 104
86 105 $ echo bleah > foo
87 106 $ hg ci -m "add"
88 107
89 108 updating rc
90 109
91 110 $ echo "default-push = ssh://user@dummy/remote" >> .hg/hgrc
92 111 $ echo "[ui]" >> .hg/hgrc
93 112 $ echo "ssh = python \"$TESTDIR/dummyssh\"" >> .hg/hgrc
94 113
95 114 find outgoing
96 115
97 116 $ hg out ssh://user@dummy/remote
98 117 comparing with ssh://user@dummy/remote
99 118 searching for changes
100 119 changeset: 1:a28a9d1a809c
101 120 tag: tip
102 121 user: test
103 122 date: Thu Jan 01 00:00:00 1970 +0000
104 123 summary: add
105 124
106 125
107 126 find incoming on the remote side
108 127
109 128 $ hg incoming -R ../remote -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/local
110 129 comparing with ssh://user@dummy/local
111 130 searching for changes
112 131 changeset: 1:a28a9d1a809c
113 132 tag: tip
114 133 user: test
115 134 date: Thu Jan 01 00:00:00 1970 +0000
116 135 summary: add
117 136
118 137
119 138 find incoming on the remote side (using absolute path)
120 139
121 140 $ hg incoming -R ../remote -e "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/`pwd`"
122 141 comparing with ssh://user@dummy/$TESTTMP/local
123 142 searching for changes
124 143 changeset: 1:a28a9d1a809c
125 144 tag: tip
126 145 user: test
127 146 date: Thu Jan 01 00:00:00 1970 +0000
128 147 summary: add
129 148
130 149
131 150 push
132 151
133 152 $ hg push
134 153 pushing to ssh://user@dummy/remote
135 154 searching for changes
136 155 remote: adding changesets
137 156 remote: adding manifests
138 157 remote: adding file changes
139 158 remote: added 1 changesets with 1 changes to 1 files
140 159 $ cd ../remote
141 160
142 161 check remote tip
143 162
144 163 $ hg tip
145 164 changeset: 1:a28a9d1a809c
146 165 tag: tip
147 166 user: test
148 167 date: Thu Jan 01 00:00:00 1970 +0000
149 168 summary: add
150 169
151 170 $ hg verify
152 171 checking changesets
153 172 checking manifests
154 173 crosschecking files in changesets and manifests
155 174 checking files
156 175 2 files, 2 changesets, 3 total revisions
157 176 $ hg cat -r tip foo
158 177 bleah
159 178 $ echo z > z
160 179 $ hg ci -A -m z z
161 180 created new head
162 181
163 182 test pushkeys and bookmarks
164 183
165 184 $ cd ../local
166 185 $ hg debugpushkey --config ui.ssh="python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote namespaces
167 186 bookmarks
168 187 namespaces
169 188 phases
170 189 $ hg book foo -r 0
171 190 $ hg out -B
172 191 comparing with ssh://user@dummy/remote
173 192 searching for changed bookmarks
174 193 foo 1160648e36ce
175 194 $ hg push -B foo
176 195 pushing to ssh://user@dummy/remote
177 196 searching for changes
178 197 no changes found
179 198 exporting bookmark foo
180 199 [1]
181 200 $ hg debugpushkey --config ui.ssh="python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote bookmarks
182 201 foo 1160648e36cec0054048a7edc4110c6f84fde594
183 202 $ hg book -f foo
184 203 $ hg push --traceback
185 204 pushing to ssh://user@dummy/remote
186 205 searching for changes
187 206 no changes found
188 207 updating bookmark foo
189 208 [1]
190 209 $ hg book -d foo
191 210 $ hg in -B
192 211 comparing with ssh://user@dummy/remote
193 212 searching for changed bookmarks
194 213 foo a28a9d1a809c
195 214 $ hg book -f -r 0 foo
196 215 $ hg pull -B foo
197 216 pulling from ssh://user@dummy/remote
198 217 no changes found
199 218 updating bookmark foo
200 219 $ hg book -d foo
201 220 $ hg push -B foo
202 221 pushing to ssh://user@dummy/remote
203 222 searching for changes
204 223 no changes found
205 224 deleting remote bookmark foo
206 225 [1]
207 226
208 227 a bad, evil hook that prints to stdout
209 228
210 229 $ cat <<EOF > $TESTTMP/badhook
211 230 > import sys
212 231 > sys.stdout.write("KABOOM\n")
213 232 > EOF
214 233
215 234 $ echo '[hooks]' >> ../remote/.hg/hgrc
216 235 $ echo "changegroup.stdout = python $TESTTMP/badhook" >> ../remote/.hg/hgrc
217 236 $ echo r > r
218 237 $ hg ci -A -m z r
219 238
220 239 push should succeed even though it has an unexpected response
221 240
222 241 $ hg push
223 242 pushing to ssh://user@dummy/remote
224 243 searching for changes
225 244 remote has heads on branch 'default' that are not known locally: 6c0482d977a3
226 245 remote: adding changesets
227 246 remote: adding manifests
228 247 remote: adding file changes
229 248 remote: added 1 changesets with 1 changes to 1 files
230 249 remote: KABOOM
231 250 $ hg -R ../remote heads
232 251 changeset: 3:1383141674ec
233 252 tag: tip
234 253 parent: 1:a28a9d1a809c
235 254 user: test
236 255 date: Thu Jan 01 00:00:00 1970 +0000
237 256 summary: z
238 257
239 258 changeset: 2:6c0482d977a3
240 259 parent: 0:1160648e36ce
241 260 user: test
242 261 date: Thu Jan 01 00:00:00 1970 +0000
243 262 summary: z
244 263
245 264
246 265 clone bookmarks
247 266
248 267 $ hg -R ../remote bookmark test
249 268 $ hg -R ../remote bookmarks
250 269 * test 2:6c0482d977a3
251 270 $ hg clone -e "python \"$TESTDIR/dummyssh\"" ssh://user@dummy/remote local-bookmarks
252 271 requesting all changes
253 272 adding changesets
254 273 adding manifests
255 274 adding file changes
256 275 added 4 changesets with 5 changes to 4 files (+1 heads)
257 276 updating to branch default
258 277 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
259 278 $ hg -R local-bookmarks bookmarks
260 279 test 2:6c0482d977a3
261 280
262 281 passwords in ssh urls are not supported
263 282 (we use a glob here because different Python versions give different
264 283 results here)
265 284
266 285 $ hg push ssh://user:erroneouspwd@dummy/remote
267 286 pushing to ssh://user:*@dummy/remote (glob)
268 287 abort: password in URL not supported!
269 288 [255]
270 289
271 290 $ cd ..
272 291
273 292 hide outer repo
274 293 $ hg init
275 294
276 295 Test remote paths with spaces (issue2983):
277 296
278 297 $ hg init --ssh "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
279 298 $ touch "$TESTTMP/a repo/test"
280 299 $ hg -R 'a repo' commit -A -m "test"
281 300 adding test
282 301 $ hg -R 'a repo' tag tag
283 302 $ hg id --ssh "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
284 303 73649e48688a
285 304
286 305 $ hg id --ssh "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo#noNoNO"
287 306 abort: unknown revision 'noNoNO'!
288 307 [255]
289 308
290 309 Test (non-)escaping of remote paths with spaces when cloning (issue3145):
291 310
292 311 $ hg clone --ssh "python \"$TESTDIR/dummyssh\"" "ssh://user@dummy/a repo"
293 312 destination directory: a repo
294 313 abort: destination 'a repo' is not empty
295 314 [255]
296 315
297 316 Test hg-ssh using a helper script that will restore PYTHONPATH (which might
298 317 have been cleared by a hg.exe wrapper) and invoke hg-ssh with the right
299 318 parameters:
300 319
301 320 $ cat > ssh.sh << EOF
302 321 > userhost="\$1"
303 322 > SSH_ORIGINAL_COMMAND="\$2"
304 323 > export SSH_ORIGINAL_COMMAND
305 324 > PYTHONPATH="$PYTHONPATH"
306 325 > export PYTHONPATH
307 326 > python "$TESTDIR/../contrib/hg-ssh" "$TESTTMP/a repo"
308 327 > EOF
309 328
310 329 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a repo"
311 330 73649e48688a
312 331
313 332 $ hg id --ssh "sh ssh.sh" "ssh://user@dummy/a'repo"
314 333 remote: Illegal repository "$TESTTMP/a'repo" (glob)
315 334 abort: no suitable response from remote hg!
316 335 [255]
317 336
318 337 $ hg id --ssh "sh ssh.sh" --remotecmd hacking "ssh://user@dummy/a'repo"
319 338 remote: Illegal command "hacking -R 'a'\''repo' serve --stdio"
320 339 abort: no suitable response from remote hg!
321 340 [255]
322 341
323 342 $ SSH_ORIGINAL_COMMAND="'hg' -R 'a'repo' serve --stdio" python "$TESTDIR/../contrib/hg-ssh"
324 343 Illegal command "'hg' -R 'a'repo' serve --stdio": No closing quotation
325 344 [255]
326 345
327 346 Test hg-ssh in read-only mode:
328 347
329 348 $ cat > ssh.sh << EOF
330 349 > userhost="\$1"
331 350 > SSH_ORIGINAL_COMMAND="\$2"
332 351 > export SSH_ORIGINAL_COMMAND
333 352 > PYTHONPATH="$PYTHONPATH"
334 353 > export PYTHONPATH
335 354 > python "$TESTDIR/../contrib/hg-ssh" --read-only "$TESTTMP/remote"
336 355 > EOF
337 356
338 357 $ hg clone --ssh "sh ssh.sh" "ssh://user@dummy/$TESTTMP/remote" read-only-local
339 358 requesting all changes
340 359 adding changesets
341 360 adding manifests
342 361 adding file changes
343 362 added 4 changesets with 5 changes to 4 files (+1 heads)
344 363 updating to branch default
345 364 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
346 365
347 366 $ cd read-only-local
348 367 $ echo "baz" > bar
349 368 $ hg ci -A -m "unpushable commit" bar
350 369 $ hg push --ssh "sh ../ssh.sh"
351 370 pushing to ssh://user@dummy/*/remote (glob)
352 371 searching for changes
353 372 remote: Permission denied
354 373 remote: abort: prechangegroup.hg-ssh hook failed
355 374 remote: Permission denied
356 375 remote: abort: prepushkey.hg-ssh hook failed
357 376 abort: unexpected response: empty string
358 377 [255]
359 378
360 379 $ cd ..
361 380
362 381 stderr from remote commands should be printed before stdout from local code (issue4336)
363 382
364 383 $ hg clone remote stderr-ordering
365 384 updating to branch default
366 385 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
367 386 $ cd stderr-ordering
368 387 $ cat >> localwrite.py << EOF
369 388 > from mercurial import exchange, extensions
370 389 >
371 390 > def wrappedpush(orig, repo, *args, **kwargs):
372 391 > res = orig(repo, *args, **kwargs)
373 392 > repo.ui.write('local stdout\n')
374 393 > return res
375 394 >
376 395 > def extsetup(ui):
377 396 > extensions.wrapfunction(exchange, 'push', wrappedpush)
378 397 > EOF
379 398
380 399 $ cat >> .hg/hgrc << EOF
381 400 > [paths]
382 401 > default-push = ssh://user@dummy/remote
383 402 > [ui]
384 403 > ssh = python "$TESTDIR/dummyssh"
385 404 > [extensions]
386 405 > localwrite = localwrite.py
387 406 > EOF
388 407
389 408 $ echo localwrite > foo
390 409 $ hg commit -m 'testing localwrite'
391 410 $ hg push
392 411 pushing to ssh://user@dummy/remote
393 412 searching for changes
394 413 remote: adding changesets
395 414 remote: adding manifests
396 415 remote: adding file changes
397 416 remote: added 1 changesets with 1 changes to 1 files
398 417 remote: KABOOM
399 418 local stdout
400 419
401 420 $ cd ..
402 421
403 422 $ cat dummylog
404 423 Got arguments 1:user@dummy 2:hg -R nonexistent serve --stdio
405 424 Got arguments 1:user@dummy 2:hg -R /$TESTTMP/nonexistent serve --stdio
406 425 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
426 Got arguments 1:user@dummy 2:hg -R local-stream serve --stdio
407 427 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
408 428 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
409 429 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
410 430 Got arguments 1:user@dummy 2:hg -R local serve --stdio
411 431 Got arguments 1:user@dummy 2:hg -R $TESTTMP/local serve --stdio
412 432 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
413 433 changegroup-in-remote hook: HG_NODE=a28a9d1a809cab7d4e2fde4bee738a9ede948b60 HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
414 434 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
415 435 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
416 436 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
417 437 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
418 438 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
419 439 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
420 440 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
421 441 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
422 442 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
423 443 changegroup-in-remote hook: HG_NODE=1383141674ec756a6056f6a9097618482fe0f4a6 HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
424 444 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
425 445 Got arguments 1:user@dummy 2:hg init 'a repo'
426 446 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
427 447 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
428 448 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
429 449 Got arguments 1:user@dummy 2:hg -R 'a repo' serve --stdio
430 450 Got arguments 1:user@dummy 2:hg -R remote serve --stdio
431 451 changegroup-in-remote hook: HG_NODE=65c38f4125f9602c8db4af56530cc221d93b8ef8 HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
General Comments 0
You need to be logged in to leave comments. Login now