##// END OF EJS Templates
merge with stable
Yuya Nishihara -
r40675:43752021 merge default
parent child Browse files
Show More
@@ -1,986 +1,987
1 1 # phabricator.py - simple Phabricator integration
2 2 #
3 3 # Copyright 2017 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """simple Phabricator integration (EXPERIMENTAL)
8 8
9 9 This extension provides a ``phabsend`` command which sends a stack of
10 10 changesets to Phabricator, and a ``phabread`` command which prints a stack of
11 11 revisions in a format suitable for :hg:`import`, and a ``phabupdate`` command
12 12 to update statuses in batch.
13 13
14 14 By default, Phabricator requires ``Test Plan`` which might prevent some
15 15 changeset from being sent. The requirement could be disabled by changing
16 16 ``differential.require-test-plan-field`` config server side.
17 17
18 18 Config::
19 19
20 20 [phabricator]
21 21 # Phabricator URL
22 22 url = https://phab.example.com/
23 23
24 24 # Repo callsign. If a repo has a URL https://$HOST/diffusion/FOO, then its
25 25 # callsign is "FOO".
26 26 callsign = FOO
27 27
28 28 # curl command to use. If not set (default), use builtin HTTP library to
29 29 # communicate. If set, use the specified curl command. This could be useful
30 30 # if you need to specify advanced options that is not easily supported by
31 31 # the internal library.
32 32 curlcmd = curl --connect-timeout 2 --retry 3 --silent
33 33
34 34 [auth]
35 35 example.schemes = https
36 36 example.prefix = phab.example.com
37 37
38 38 # API token. Get it from https://$HOST/conduit/login/
39 39 example.phabtoken = cli-xxxxxxxxxxxxxxxxxxxxxxxxxxxx
40 40 """
41 41
42 42 from __future__ import absolute_import
43 43
44 44 import itertools
45 45 import json
46 46 import operator
47 47 import re
48 48
49 49 from mercurial.node import bin, nullid
50 50 from mercurial.i18n import _
51 51 from mercurial import (
52 52 cmdutil,
53 53 context,
54 54 encoding,
55 55 error,
56 56 httpconnection as httpconnectionmod,
57 57 mdiff,
58 58 obsutil,
59 59 parser,
60 60 patch,
61 61 registrar,
62 62 scmutil,
63 63 smartset,
64 64 tags,
65 65 templateutil,
66 66 url as urlmod,
67 67 util,
68 68 )
69 69 from mercurial.utils import (
70 70 procutil,
71 71 stringutil,
72 72 )
73 73
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

# Command and config registration tables, populated by the registrar
# decorators/calls below.
cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

# developer config: phabricator.batchsize
# How many Differential Revision ids to prefetch per conduit call when
# resolving ancestors (see querydrev).
configitem(b'phabricator', b'batchsize',
           default=12,
)
# Repository callsign on the Phabricator server; "FOO" for a repo served
# at https://$HOST/diffusion/FOO.
configitem(b'phabricator', b'callsign',
           default=None,
)
# External curl command used instead of the builtin HTTP library when set.
configitem(b'phabricator', b'curlcmd',
           default=None,
)
# developer config: phabricator.repophid
# Repository PHID; normally resolved from the callsign and cached at runtime
# by getrepophid().
configitem(b'phabricator', b'repophid',
           default=None,
)
# Base URL of the Phabricator installation.
configitem(b'phabricator', b'url',
           default=None,
)
# Whether phabsend asks for confirmation before uploading.
configitem(b'phabsend', b'confirm',
           default=False,
)

# Labels used with ui.label() for colorized command output.
colortable = {
    b'phabricator.action.created': b'green',
    b'phabricator.action.skipped': b'magenta',
    b'phabricator.action.updated': b'magenta',
    b'phabricator.desc': b'',
    b'phabricator.drev': b'bold',
    b'phabricator.node': b'',
}

# Extra flag appended by vcrcommand() so tests can record/replay HTTP
# traffic through the third-party ``vcr`` package.
_VCR_FLAGS = [
    (b'', b'test-vcr', b'',
     _(b'Path to a vcr file. If nonexistent, will record a new vcr transcript'
       b', otherwise will mock all http requests using the specified vcr file.'
       b' (ADVANCED)'
     )),
]
123 123
def vcrcommand(name, flags, spec):
    """variant of @command that accepts an extra --test-vcr option

    When --test-vcr is given, all HTTP traffic performed by the wrapped
    command is recorded to (or replayed from) the named cassette file via
    the third-party ``vcr`` package; otherwise the command runs unchanged.
    """
    fullflags = flags + _VCR_FLAGS
    def decorate(fn):
        def inner(*args, **kwargs):
            cassette = kwargs.pop(r'test_vcr', None)
            if not cassette:
                # No cassette requested: run the command directly.
                return fn(*args, **kwargs)
            import hgdemandimport
            # vcr does dynamic imports that fight demandimport; disable it
            # while the library is loaded and while the cassette is active.
            with hgdemandimport.deactivated():
                import vcr as vcrmod
                import vcr.stubs as stubs
                recorder = vcrmod.VCR(
                    serializer=r'json',
                    custom_patches=[
                        (urlmod, 'httpconnection', stubs.VCRHTTPConnection),
                        (urlmod, 'httpsconnection',
                         stubs.VCRHTTPSConnection),
                    ])
                with recorder.use_cassette(cassette):
                    return fn(*args, **kwargs)
        inner.__name__ = fn.__name__
        inner.__doc__ = fn.__doc__
        return command(name, fullflags, spec)(inner)
    return decorate
147 148
def urlencodenested(params):
    """like urlencode, but works with nested parameters.

    For example, if params is {'a': ['b', 'c'], 'd': {'e': 'f'}}, it will be
    flattened to {'a[0]': 'b', 'a[1]': 'c', 'd[e]': 'f'} and then passed to
    urlencode. Note: the encoding is consistent with PHP's http_build_query.
    """
    flat = util.sortdict()
    def flatten(prefix, value):
        # Only exact list/dict types recurse; everything else is a leaf.
        itemize = {list: enumerate, dict: lambda d: d.items()}.get(type(value))
        if itemize is None:
            flat[prefix] = value
            return
        for key, sub in itemize(value):
            if prefix:
                flatten(b'%s[%s]' % (prefix, key), sub)
            else:
                # Top level: keys are used verbatim, without brackets.
                flatten(key, sub)
    flatten(b'', params)
    return util.urlreq.urlencode(flat)
168 169
def readurltoken(repo):
    """return conduit url, token and make sure they exist

    Currently read from [auth] config section. In the future, it might
    make sense to read from .arcconfig and .arcrc as well.
    """
    ui = repo.ui
    url = ui.config(b'phabricator', b'url')
    if not url:
        raise error.Abort(_(b'config %s.%s is required')
                          % (b'phabricator', b'url'))

    token = None
    res = httpconnectionmod.readauthforuri(ui, url, util.url(url).user)
    if res:
        group, auth = res
        ui.debug(b"using auth.%s.* for authentication\n" % group)
        token = auth.get(b'phabtoken')

    if not token:
        raise error.Abort(_(b'Can\'t find conduit token associated to %s')
                          % (url,))

    return url, token
195 196
def callconduit(repo, name, params):
    """call Conduit API, params is a dict. return json.loads result, or None"""
    host, token = readurltoken(repo)
    url, authinfo = util.url(b'/'.join([host, b'api', name])).authinfo()
    # Log before the token is merged in, so the token never hits debug output.
    repo.ui.debug(b'Conduit Call: %s %s\n' % (url, params))
    allparams = params.copy()
    allparams[b'api.token'] = token
    data = urlencodenested(allparams)
    curlcmd = repo.ui.config(b'phabricator', b'curlcmd')
    if curlcmd:
        # Shell out to the user-configured curl, feeding the form data on
        # stdin via "-d @-".
        sin, sout = procutil.popen2(b'%s -d @- %s'
                                    % (curlcmd, procutil.shellquote(url)))
        sin.write(data)
        sin.close()
        body = sout.read()
    else:
        # Builtin HTTP path, honoring [auth] credentials.
        urlopener = urlmod.opener(repo.ui, authinfo)
        request = util.urlreq.request(url, data=data)
        body = urlopener.open(request).read()
    repo.ui.debug(b'Conduit Response: %s\n' % body)
    parsed = json.loads(body)
    if parsed.get(r'error_code'):
        raise error.Abort(_(b'Conduit Error (%s): %s')
                          % (parsed[r'error_code'], parsed[r'error_info']))
    return parsed[r'result']
222 223
@vcrcommand(b'debugcallconduit', [], _(b'METHOD'))
def debugcallconduit(ui, repo, name):
    """call Conduit API

    Call parameters are read from stdin as a JSON blob. Result will be written
    to stdout as a JSON blob.
    """
    # stdin (JSON) -> conduit -> pretty-printed JSON on stdout
    result = callconduit(repo, name, json.loads(ui.fin.read()))
    s = json.dumps(result, sort_keys=True, indent=2,
                   separators=(b',', b': '))
    ui.write(b'%s\n' % s)
234 235
def getrepophid(repo):
    """given callsign, return repository PHID or None"""
    ui = repo.ui
    # developer config: phabricator.repophid
    cached = ui.config(b'phabricator', b'repophid')
    if cached:
        return cached
    callsign = ui.config(b'phabricator', b'callsign')
    if not callsign:
        return None
    query = callconduit(repo, b'diffusion.repository.search',
                        {b'constraints': {b'callsigns': [callsign]}})
    data = query[r'data']
    if not data:
        return None
    repophid = encoding.strtolocal(data[0][r'phid'])
    # Cache the resolved PHID in the config for the rest of this run.
    ui.setconfig(b'phabricator', b'repophid', repophid)
    return repophid
251 252
252 253 _differentialrevisiontagre = re.compile(b'\AD([1-9][0-9]*)\Z')
253 254 _differentialrevisiondescre = re.compile(
254 255 b'^Differential Revision:\s*(?P<url>(?:.*)D(?P<id>[1-9][0-9]*))$', re.M)
255 256
def getoldnodedrevmap(repo, nodelist):
    """find previous nodes that has been sent to Phabricator

    return {node: (oldnode, Differential diff, Differential Revision ID)}
    for node in nodelist with known previous sent versions, or associated
    Differential Revision IDs. ``oldnode`` and ``Differential diff`` could
    be ``None``.

    Examines commit messages like "Differential Revision:" to get the
    association information.

    If such commit message line is not found, examines all precursors and their
    tags. Tags with format like "D1234" are considered a match and the node
    with that tag, and the number after "D" (ex. 1234) will be returned.

    The ``old node``, if not None, is guaranteed to be the last diff of
    corresponding Differential Revision, and exist in the repo.
    """
    # Fail fast if conduit URL/token are not configured (readurltoken aborts).
    url, token = readurltoken(repo)
    unfi = repo.unfiltered()
    nodemap = unfi.changelog.nodemap

    result = {} # {node: (oldnode?, lastdiff?, drev)}
    toconfirm = {} # {node: (force, {precnode}, drev)}
    for node in nodelist:
        ctx = unfi[node]
        # For tags like "D123", put them into "toconfirm" to verify later
        precnodes = list(obsutil.allpredecessors(unfi.obsstore, [node]))
        for n in precnodes:
            if n in nodemap:
                for tag in unfi.nodetags(n):
                    m = _differentialrevisiontagre.match(tag)
                    if m:
                        toconfirm[node] = (0, set(precnodes), int(m.group(1)))
                        # NOTE(review): this `continue` only skips to the
                        # next tag of the same node; presumably a break out
                        # to the next node was intended -- confirm upstream.
                        continue

        # Check commit message
        m = _differentialrevisiondescre.search(ctx.description())
        if m:
            # force=1: the commit message itself names the Differential
            # Revision, so skip the precursor-overlap check below.
            toconfirm[node] = (1, set(precnodes), int(m.group(b'id')))

    # Double check if tags are genuine by collecting all old nodes from
    # Phabricator, and expect precursors overlap with it.
    if toconfirm:
        drevs = [drev for force, precs, drev in toconfirm.values()]
        alldiffs = callconduit(unfi, b'differential.querydiffs',
                               {b'revisionIDs': drevs})
        # Node recorded in a diff's hg:meta metadata (via getdiffmeta), or
        # None if the diff carries no node.
        getnode = lambda d: bin(encoding.unitolocal(
            getdiffmeta(d).get(r'node', b''))) or None
        for newnode, (force, precset, drev) in toconfirm.items():
            diffs = [d for d in alldiffs.values()
                     if int(d[r'revisionID']) == drev]

            # "precursors" as known by Phabricator
            phprecset = set(getnode(d) for d in diffs)

            # Ignore if precursors (Phabricator and local repo) do not overlap,
            # and force is not set (when commit message says nothing)
            if not force and not bool(phprecset & precset):
                tagname = b'D%d' % drev
                # Tagging nullid locally removes the stale "D123" tag.
                tags.tag(repo, tagname, nullid, message=None, user=None,
                         date=None, local=True)
                unfi.ui.warn(_(b'D%s: local tag removed - does not match '
                               b'Differential history\n') % drev)
                continue

            # Find the last node using Phabricator metadata, and make sure it
            # exists in the repo
            oldnode = lastdiff = None
            if diffs:
                lastdiff = max(diffs, key=lambda d: int(d[r'id']))
                oldnode = getnode(lastdiff)
                if oldnode and oldnode not in nodemap:
                    oldnode = None

            result[newnode] = (oldnode, lastdiff, drev)

    return result
334 335
def getdiff(ctx, diffopts):
    """plain-text diff without header (user, commit message, etc)"""
    buf = util.stringio()
    chunks = patch.diffui(ctx.repo(), ctx.p1().node(), ctx.node(),
                          None, opts=diffopts)
    # diffui yields (chunk, label) pairs; labels are for terminal color only.
    for chunk, _label in chunks:
        buf.write(chunk)
    return buf.getvalue()
342 343
def creatediff(ctx):
    """create a Differential Diff"""
    repo = ctx.repo()
    repophid = getrepophid(repo)
    # Upload the raw git-style patch (full context) through the
    # "differential.createrawdiff" conduit API.
    params = {b'diff': getdiff(ctx, mdiff.diffopts(git=True, context=32767))}
    if repophid:
        # Associate the diff with the repository when the PHID is known.
        params[b'repositoryPHID'] = repophid
    diff = callconduit(repo, b'differential.createrawdiff', params)
    if not diff:
        raise error.Abort(_(b'cannot create diff for %s') % ctx)
    return diff
355 356
def writediffproperties(ctx, diff):
    """write metadata to diff so patches could be applied losslessly"""
    # "hg:meta": enough commit metadata for phabread to reconstruct a patch.
    hgmeta = json.dumps({
        b'user': ctx.user(),
        b'date': b'%d %d' % ctx.date(),
        b'node': ctx.hex(),
        b'parent': ctx.p1().hex(),
    })
    # "local:commits": the arc-compatible per-commit metadata.
    localcommits = json.dumps({
        ctx.hex(): {
            b'author': stringutil.person(ctx.user()),
            b'authorEmail': stringutil.email(ctx.user()),
            b'time': ctx.date()[0],
        },
    })
    # Two setdiffproperty calls, in the same order as before: hg:meta first,
    # then local:commits.
    for propname, propdata in [(b'hg:meta', hgmeta),
                               (b'local:commits', localcommits)]:
        callconduit(ctx.repo(), b'differential.setdiffproperty', {
            b'diff_id': diff[r'id'],
            b'name': propname,
            b'data': propdata,
        })
382 383
def createdifferentialrevision(ctx, revid=None, parentrevid=None, oldnode=None,
                               olddiff=None, actions=None):
    """create or update a Differential Revision

    If revid is None, create a new Differential Revision, otherwise update
    revid. If parentrevid is not None, set it as a dependency.

    If oldnode is not None, check if the patch content (without commit message
    and metadata) has changed before creating another diff.

    If actions is not None, they will be appended to the transaction.

    Returns (revision, diff) where ``revision`` is the conduit response of
    "differential.revision.edit" and ``diff`` is the (new or reused) diff.
    """
    repo = ctx.repo()
    if oldnode:
        # Compare raw patch text (full context) against the previously sent
        # node to decide whether a fresh diff upload is needed at all.
        diffopts = mdiff.diffopts(git=True, context=32767)
        oldctx = repo.unfiltered()[oldnode]
        neednewdiff = (getdiff(ctx, diffopts) != getdiff(oldctx, diffopts))
    else:
        neednewdiff = True

    transactions = []
    if neednewdiff:
        diff = creatediff(ctx)
        transactions.append({b'type': b'update', b'value': diff[r'phid']})
    else:
        # Even if we don't need to upload a new diff because the patch content
        # does not change. We might still need to update its metadata so
        # pushers could know the correct node metadata.
        assert olddiff
        diff = olddiff
        writediffproperties(ctx, diff)

    # Use a temporary summary to set dependency. There might be better ways but
    # I cannot find them for now. But do not do that if we are updating an
    # existing revision (revid is not None) since that introduces visible
    # churns (someone edited "Summary" twice) on the web page.
    if parentrevid and revid is None:
        summary = b'Depends on D%s' % parentrevid
        transactions += [{b'type': b'summary', b'value': summary},
                         {b'type': b'summary', b'value': b' '}]

    if actions:
        transactions += actions

    # Parse commit message and update related fields.
    desc = ctx.description()
    info = callconduit(repo, b'differential.parsecommitmessage',
                       {b'corpus': desc})
    # Only these three parsed fields are forwarded to the revision edit.
    for k, v in info[r'fields'].items():
        if k in [b'title', b'summary', b'testPlan']:
            transactions.append({b'type': k, b'value': v})

    params = {b'transactions': transactions}
    if revid is not None:
        # Update an existing Differential Revision
        params[b'objectIdentifier'] = revid

    revision = callconduit(repo, b'differential.revision.edit', params)
    if not revision:
        raise error.Abort(_(b'cannot create revision for %s') % ctx)

    return revision, diff
445 446
def userphids(repo, names):
    """convert user names to PHIDs"""
    result = callconduit(repo, b'user.search',
                         {b'constraints': {b'usernames': names}})
    data = result[r'data']
    # The API silently drops unknown usernames rather than erroring, so
    # detect any that were not resolved and abort explicitly.
    found = set(entry[r'fields'][r'username'] for entry in data)
    missing = set(names) - found
    if missing:
        raise error.Abort(_(b'unknown username: %s')
                          % b' '.join(sorted(missing)))
    return [entry[r'phid'] for entry in data]
459 460
@vcrcommand(b'phabsend',
            [(b'r', b'rev', [], _(b'revisions to send'), _(b'REV')),
             (b'', b'amend', True, _(b'update commit messages')),
             (b'', b'reviewer', [], _(b'specify reviewers')),
             (b'', b'confirm', None, _(b'ask for confirmation before sending'))],
            _(b'REV [OPTIONS]'))
def phabsend(ui, repo, *revs, **opts):
    """upload changesets to Phabricator

    If there are multiple revisions specified, they will be send as a stack
    with a linear dependencies relationship using the order specified by the
    revset.

    For the first time uploading changesets, local tags will be created to
    maintain the association. After the first time, phabsend will check
    obsstore and tags information so it can figure out whether to update an
    existing Differential Revision, or create a new one.

    If --amend is set, update commit messages so they have the
    ``Differential Revision`` URL, remove related tags. This is similar to what
    arcanist will do, and is more desired in author-push workflows. Otherwise,
    use local tags to record the ``Differential Revision`` association.

    The --confirm option lets you confirm changesets before sending them. You
    can also add following to your configuration file to make it default
    behaviour::

        [phabsend]
        confirm = true

    phabsend will check obsstore and the above association to decide whether to
    update an existing Differential Revision, or create a new one.
    """
    revs = list(revs) + opts.get(b'rev', [])
    revs = scmutil.revrange(repo, revs)

    if not revs:
        raise error.Abort(_(b'phabsend requires at least one changeset'))
    if opts.get(b'amend'):
        # Rewriting commits below requires no unfinished operation in flight.
        cmdutil.checkunfinished(repo)

    # {newnode: (oldnode, olddiff, olddrev}
    oldmap = getoldnodedrevmap(repo, [repo[r].node() for r in revs])

    # Confirmation comes from either the config knob or the CLI flag.
    confirm = ui.configbool(b'phabsend', b'confirm')
    confirm |= bool(opts.get(b'confirm'))
    if confirm:
        confirmed = _confirmbeforesend(repo, revs, oldmap)
        if not confirmed:
            raise error.Abort(_(b'phabsend cancelled'))

    actions = []
    reviewers = opts.get(b'reviewer', [])
    if reviewers:
        phids = userphids(repo, reviewers)
        actions.append({b'type': b'reviewers.add', b'value': phids})

    drevids = [] # [int]
    diffmap = {} # {newnode: diff}

    # Send patches one by one so we know their Differential Revision IDs and
    # can provide dependency relationship
    lastrevid = None
    for rev in revs:
        ui.debug(b'sending rev %d\n' % rev)
        ctx = repo[rev]

        # Get Differential Revision ID
        oldnode, olddiff, revid = oldmap.get(ctx.node(), (None, None, None))
        if oldnode != ctx.node() or opts.get(b'amend'):
            # Create or update Differential Revision
            revision, diff = createdifferentialrevision(
                ctx, revid, lastrevid, oldnode, olddiff, actions)
            diffmap[ctx.node()] = diff
            newrevid = int(revision[r'object'][r'id'])
            if revid:
                action = b'updated'
            else:
                action = b'created'

            # Create a local tag to note the association, if commit message
            # does not have it already
            m = _differentialrevisiondescre.search(ctx.description())
            if not m or int(m.group(b'id')) != newrevid:
                tagname = b'D%d' % newrevid
                tags.tag(repo, tagname, ctx.node(), message=None, user=None,
                         date=None, local=True)
        else:
            # Nothing changed. But still set "newrevid" so the next revision
            # could depend on this one.
            newrevid = revid
            action = b'skipped'

        # One status line per changeset: "D123 - created - 1:abc: desc".
        actiondesc = ui.label(
            {b'created': _(b'created'),
             b'skipped': _(b'skipped'),
             b'updated': _(b'updated')}[action],
            b'phabricator.action.%s' % action)
        drevdesc = ui.label(b'D%s' % newrevid, b'phabricator.drev')
        nodedesc = ui.label(bytes(ctx), b'phabricator.node')
        desc = ui.label(ctx.description().split(b'\n')[0], b'phabricator.desc')
        ui.write(_(b'%s - %s - %s: %s\n') % (drevdesc, actiondesc, nodedesc,
                                             desc))
        drevids.append(newrevid)
        lastrevid = newrevid

    # Update commit messages and remove tags
    if opts.get(b'amend'):
        unfi = repo.unfiltered()
        drevs = callconduit(repo, b'differential.query', {b'ids': drevids})
        with repo.wlock(), repo.lock(), repo.transaction(b'phabsend'):
            # Remember the working directory parent so it can be moved to
            # its rewritten successor after cleanup.
            wnode = unfi[b'.'].node()
            mapping = {} # {oldnode: [newnode]}
            for i, rev in enumerate(revs):
                old = unfi[rev]
                drevid = drevids[i]
                drev = [d for d in drevs if int(d[r'id']) == drevid][0]
                newdesc = getdescfromdrev(drev)
                newdesc = encoding.unitolocal(newdesc)
                # Make sure commit message contain "Differential Revision"
                if old.description() != newdesc:
                    # Parents may themselves have been rewritten earlier in
                    # this loop; follow the mapping so the stack stays linear.
                    parents = [
                        mapping.get(old.p1().node(), (old.p1(),))[0],
                        mapping.get(old.p2().node(), (old.p2(),))[0],
                    ]
                    new = context.metadataonlyctx(
                        repo, old, parents=parents, text=newdesc,
                        user=old.user(), date=old.date(), extra=old.extra())

                    newnode = new.commit()

                    mapping[old.node()] = [newnode]
                    # Update diff property
                    writediffproperties(unfi[newnode], diffmap[old.node()])
                # Remove local tags since it's no longer necessary
                tagname = b'D%d' % drevid
                if tagname in repo.tags():
                    tags.tag(repo, tagname, nullid, message=None, user=None,
                             date=None, local=True)
            scmutil.cleanupnodes(repo, mapping, b'phabsend', fixphase=True)
            if wnode in mapping:
                unfi.setparents(mapping[wnode][0])
602 603
# Map from "hg:meta" keys to header understood by "hg import". The order is
# consistent with "hg export" output.
# NOTE(review): b'Parent ' keeps a trailing space deliberately -- presumably
# to match "hg export" header spacing; confirm before changing.
_metanamemap = util.sortdict([(r'user', b'User'), (r'date', b'Date'),
                              (r'node', b'Node ID'), (r'parent', b'Parent ')])
607 608
def _confirmbeforesend(repo, revs, oldmap):
    """show the changesets about to be sent and prompt for confirmation

    Returns True when the user answers Yes, False otherwise.
    """
    url, token = readurltoken(repo)
    ui = repo.ui
    for rev in revs:
        ctx = repo[rev]
        desc = ctx.description().splitlines()[0]
        oldnode, olddiff, drevid = oldmap.get(ctx.node(), (None, None, None))
        # Existing Differential Revision id, or "NEW" for a first send.
        if drevid:
            drevdesc = ui.label(b'D%s' % drevid, b'phabricator.drev')
        else:
            drevdesc = ui.label(_(b'NEW'), b'phabricator.drev')

        nodedesc = ui.label(bytes(ctx), b'phabricator.node')
        descdesc = ui.label(desc, b'phabricator.desc')
        ui.write(_(b'%s - %s: %s\n') % (drevdesc, nodedesc, descdesc))

    # promptchoice returns the choice index: 0 for Yes, 1 for No.
    return not ui.promptchoice(_(b'Send the above changes to %s (yn)?'
                                 b'$$ &Yes $$ &No') % url)
630 631
# Differential Revision status names recognized in drev specs, already in the
# normalized form produced by _getstatusname (lower-case, spaces removed).
_knownstatusnames = {b'accepted', b'needsreview', b'needsrevision', b'closed',
                     b'abandoned'}
633 634
634 635 def _getstatusname(drev):
635 636 """get normalized status name from a Differential Revision"""
636 637 return drev[r'statusName'].replace(b' ', b'').lower()
637 638
# Small language to specify differential revisions. Support symbols: (), :X,
# +, and -.

# Grammar table consumed by mercurial.parser.parser(); see _parse below.
_elements = {
    # token-type: binding-strength, primary, prefix, infix, suffix
    b'(': (12, None, (b'group', 1, b')'), None, None),
    b':': (8, None, (b'ancestors', 8), None, None),
    b'&': (5, None, None, (b'and_', 5), None),
    b'+': (4, None, None, (b'add', 4), None),
    b'-': (4, None, None, (b'sub', 4), None),
    b')': (0, None, None, None, None),
    b'symbol': (0, b'symbol', None, None, None),
    b'end': (0, None, None, None, None),
}
652 653
def _tokenize(text):
    """tokenize a drev spec, yielding (token-type, value, position) tuples

    Token types are keys of _elements; a final (b'end', None, pos) token is
    always emitted. Spaces separate tokens but produce none themselves.

    NOTE(review): iterating a memoryview slice yields ints on Python 3; this
    relies on Python 2 bytes semantics -- confirm before porting.
    """
    view = memoryview(text) # zero-copy slice
    special = b'():+-& '
    pos = 0
    length = len(text)
    while pos < length:
        # The longest run of non-special characters forms one symbol token.
        symbol = b''.join(itertools.takewhile(lambda ch: ch not in special,
                                              view[pos:]))
        if symbol:
            yield (b'symbol', symbol, pos)
            pos += len(symbol)
        else: # special char, ignore space
            if text[pos] != b' ':
                yield (text[pos], None, pos)
            pos += 1
    yield (b'end', None, pos)
669 670
def _parse(text):
    """parse a drev spec string into a parsed tree (nested tuples)

    Raises ParseError if the whole input could not be consumed.
    """
    tree, pos = parser.parser(_elements).parse(_tokenize(text))
    if pos != len(text):
        raise error.ParseError(b'invalid token', pos)
    return tree
675 676
676 677 def _parsedrev(symbol):
677 678 """str -> int or None, ex. 'D45' -> 45; '12' -> 12; 'x' -> None"""
678 679 if symbol.startswith(b'D') and symbol[1:].isdigit():
679 680 return int(symbol[1:])
680 681 if symbol.isdigit():
681 682 return int(symbol)
682 683
def _prefetchdrevs(tree):
    """return ({single-drev-id}, {ancestor-drev-id}) to prefetch"""
    drevs = set()
    ancestordrevs = set()
    op = tree[0]
    if op == b'symbol':
        # A bare symbol contributes a single id when it parses as one.
        drevid = _parsedrev(tree[1])
        if drevid:
            drevs.add(drevid)
    elif op == b'ancestors':
        # ":X" marks X both as a plain id and as an ancestor root.
        sub, subancestors = _prefetchdrevs(tree[1])
        drevs |= sub
        ancestordrevs |= sub
        ancestordrevs |= subancestors
    else:
        # Operators (and_/add/sub/group): union over all operands.
        for operand in tree[1:]:
            sub, subancestors = _prefetchdrevs(operand)
            drevs |= sub
            ancestordrevs |= subancestors
    return drevs, ancestordrevs
703 704
def querydrev(repo, spec):
    """return a list of "Differential Revision" dicts

    spec is a string using a simple query language, see docstring in phabread
    for details.

    A "Differential Revision dict" looks like:

        {
            "id": "2",
            "phid": "PHID-DREV-672qvysjcczopag46qty",
            "title": "example",
            "uri": "https://phab.example.com/D2",
            "dateCreated": "1499181406",
            "dateModified": "1499182103",
            "authorPHID": "PHID-USER-tv3ohwc4v4jeu34otlye",
            "status": "0",
            "statusName": "Needs Review",
            "properties": [],
            "branch": null,
            "summary": "",
            "testPlan": "",
            "lineCount": "2",
            "activeDiffPHID": "PHID-DIFF-xoqnjkobbm6k4dk6hi72",
            "diffs": [
              "3",
              "4",
            ],
            "commits": [],
            "reviewers": [],
            "ccs": [],
            "hashes": [],
            "auxiliary": {
              "phabricator:projects": [],
              "phabricator:depends-on": [
                "PHID-DREV-gbapp366kutjebt7agcd"
              ]
            },
            "repositoryPHID": "PHID-REPO-hub2hx62ieuqeheznasv",
            "sourcePath": null
        }
    """
    def fetch(params):
        """params -> single drev or None"""
        # A fetch is keyed by the first id or phid requested.
        key = (params.get(r'ids') or params.get(r'phids') or [None])[0]
        if key in prefetched:
            return prefetched[key]
        drevs = callconduit(repo, b'differential.query', params)
        # Fill prefetched with the result
        for drev in drevs:
            # Index each result under both its phid and its numeric id.
            prefetched[drev[r'phid']] = drev
            prefetched[int(drev[r'id'])] = drev
        if key not in prefetched:
            raise error.Abort(_(b'cannot get Differential Revision %r')
                              % params)
        return prefetched[key]

    def getstack(topdrevids):
        """given a top, get a stack from the bottom, [id] -> [id]"""
        visited = set()
        result = []
        queue = [{r'ids': [i]} for i in topdrevids]
        while queue:
            params = queue.pop()
            drev = fetch(params)
            if drev[r'id'] in visited:
                continue
            visited.add(drev[r'id'])
            result.append(int(drev[r'id']))
            # Follow "depends-on" edges toward the bottom of the stack.
            auxiliary = drev.get(r'auxiliary', {})
            depends = auxiliary.get(r'phabricator:depends-on', [])
            for phid in depends:
                queue.append({b'phids': [phid]})
        result.reverse()
        return smartset.baseset(result)

    # Initialize prefetch cache
    prefetched = {} # {id or phid: drev}

    tree = _parse(spec)
    drevs, ancestordrevs = _prefetchdrevs(tree)

    # developer config: phabricator.batchsize
    batchsize = repo.ui.configint(b'phabricator', b'batchsize')

    # Prefetch Differential Revisions in batch
    tofetch = set(drevs)
    for r in ancestordrevs:
        # Speculatively fetch a window of ids below each ancestor root so
        # getstack mostly hits the cache.
        tofetch.update(range(max(1, r - batchsize), r + 1))
    if drevs:
        fetch({r'ids': list(tofetch)})
    validids = sorted(set(getstack(list(ancestordrevs))) | set(drevs))

    # Walk through the tree, return smartsets
    def walk(tree):
        op = tree[0]
        if op == b'symbol':
            drev = _parsedrev(tree[1])
            if drev:
                return smartset.baseset([drev])
            elif tree[1] in _knownstatusnames:
                # Status-name symbol: filter the prefetched valid ids.
                drevs = [r for r in validids
                         if _getstatusname(prefetched[r]) == tree[1]]
                return smartset.baseset(drevs)
            else:
                raise error.Abort(_(b'unknown symbol: %s') % tree[1])
        elif op in {b'and_', b'add', b'sub'}:
            assert len(tree) == 3
            # Set operations map directly onto the operator module.
            return getattr(operator, op)(walk(tree[1]), walk(tree[2]))
        elif op == b'group':
            return walk(tree[1])
        elif op == b'ancestors':
            return getstack(walk(tree[1]))
        else:
            raise error.ProgrammingError(b'illegal tree: %r' % tree)

    return [prefetched[r] for r in walk(tree)]
821 822
def getdescfromdrev(drev):
    """get description (commit message) from "Differential Revision"

    This is similar to differential.getcommitmessage API. But we only care
    about limited fields: title, summary, test plan, and URL.
    """
    parts = [drev[r'title'], drev[r'summary'].rstrip()]
    testplan = drev[r'testPlan'].rstrip()
    if testplan:
        parts.append(b'Test Plan:\n%s' % testplan)
    parts.append(b'Differential Revision: %s' % drev[r'uri'])
    # Empty sections are dropped; the rest are separated by blank lines.
    return b'\n\n'.join(p for p in parts if p)
835 836
def getdiffmeta(diff):
    """get commit metadata (date, node, user, p1) from a diff object

    The metadata could be "hg:meta", sent by phabsend, like:

        "properties": {
          "hg:meta": {
            "date": "1499571514 25200",
            "node": "98c08acae292b2faf60a279b4189beb6cff1414d",
            "user": "Foo Bar <foo@example.com>",
            "parent": "6d0abad76b30e4724a37ab8721d630394070fe16"
          }
        }

    Or converted from "local:commits", sent by "arc", like:

        "properties": {
          "local:commits": {
            "98c08acae292b2faf60a279b4189beb6cff1414d": {
              "author": "Foo Bar",
              "time": 1499546314,
              "branch": "default",
              "tag": "",
              "commit": "98c08acae292b2faf60a279b4189beb6cff1414d",
              "rev": "98c08acae292b2faf60a279b4189beb6cff1414d",
              "local": "1000",
              "parents": ["6d0abad76b30e4724a37ab8721d630394070fe16"],
              "summary": "...",
              "message": "...",
              "authorEmail": "foo@example.com"
            }
          }
        }

    Note: metadata extracted from "local:commits" will lose time zone
    information.
    """
    props = diff.get(r'properties') or {}
    meta = props.get(r'hg:meta')
    if not meta and props.get(r'local:commits'):
        # Fall back to arc's metadata; synthesize an hg:meta-shaped dict.
        # The timezone is unknown there, hence the hard-coded "0" offset.
        commit = sorted(props[r'local:commits'].values())[0]
        meta = {
            r'date': r'%d 0' % commit[r'time'],
            r'node': commit[r'rev'],
            r'user': r'%s <%s>' % (commit[r'author'], commit[r'authorEmail']),
        }
        parents = commit.get(r'parents', ())
        if len(parents) >= 1:
            meta[r'parent'] = parents[0]
    return meta or {}
885 886
def readpatch(repo, drevs, write):
    """generate plain-text patch readable by 'hg import'

    write is usually ui.write. drevs is what "querydrev" returns, results of
    "differential.query".
    """
    # Prefetch hg:meta property for all diffs
    diffids = sorted(set(max(int(v) for v in drev[r'diffs']) for drev in drevs))
    diffs = callconduit(repo, b'differential.querydiffs', {b'ids': diffids})

    # Generate patch for each drev
    for drev in drevs:
        repo.ui.note(_(b'reading D%s\n') % drev[r'id'])

        diffid = max(int(v) for v in drev[r'diffs'])
        body = callconduit(repo, b'differential.getrawdiff',
                           {b'diffID': diffid})
        desc = getdescfromdrev(drev)

        # Try to preserve metadata from hg:meta property. Write hg patch
        # headers that can be read by the "import" command. See patchheadermap
        # and extract in mercurial/patch.py for supported headers.
        meta = getdiffmeta(diffs[str(diffid)])
        headerlines = [b'# HG changeset patch']
        headerlines.extend(b'# %s %s' % (_metanamemap[k], meta[k])
                           for k in _metanamemap.keys() if k in meta)
        header = b''.join(l + b'\n' for l in headerlines)

        content = b'%s%s\n%s' % (header, desc, body)
        write(encoding.unitolocal(content))
916 917
@vcrcommand(b'phabread',
            [(b'', b'stack', False, _(b'read dependencies'))],
            _(b'DREVSPEC [OPTIONS]'))
def phabread(ui, repo, spec, **opts):
    """print patches from Phabricator suitable for importing

    DREVSPEC could be a Differential Revision identity, like ``D123``, or just
    the number ``123``. It could also have common operators like ``+``, ``-``,
    ``&``, ``(``, ``)`` for complex queries. Prefix ``:`` could be used to
    select a stack.

    ``abandoned``, ``accepted``, ``closed``, ``needsreview``, ``needsrevision``
    could be used to filter patches by status. For performance reason, they
    only represent a subset of non-status selections and cannot be used alone.

    For example, ``:D6+8-(2+D4)`` selects a stack up to D6, plus D8 and exclude
    D2 and D4. ``:D9 & needsreview`` selects "Needs Review" revisions in a
    stack up to D9.

    If --stack is given, follow dependencies information and read all patches.
    It is equivalent to the ``:`` operator.
    """
    # --stack is sugar for the ancestors (":") operator.
    if opts.get(b'stack'):
        spec = b':(%s)' % spec
    readpatch(repo, querydrev(repo, spec), ui.write)
943 944
@vcrcommand(b'phabupdate',
            [(b'', b'accept', False, _(b'accept revisions')),
             (b'', b'reject', False, _(b'reject revisions')),
             (b'', b'abandon', False, _(b'abandon revisions')),
             (b'', b'reclaim', False, _(b'reclaim revisions')),
             (b'm', b'comment', b'', _(b'comment on the last revision')),
             ], _(b'DREVSPEC [OPTIONS]'))
def phabupdate(ui, repo, spec, **opts):
    """update Differential Revision in batch

    DREVSPEC selects revisions. See :hg:`help phabread` for its usage.
    """
    # The status-changing flags are mutually exclusive.
    flags = [name for name in b'accept reject abandon reclaim'.split()
             if opts.get(name)]
    if len(flags) > 1:
        raise error.Abort(_(b'%s cannot be used together') % b', '.join(flags))

    actions = [{b'type': flag, b'value': b'true'} for flag in flags]

    drevs = querydrev(repo, spec)
    lastidx = len(drevs) - 1
    for idx, drev in enumerate(drevs):
        # The comment, if any, only goes on the last revision of the batch.
        if idx == lastidx and opts.get(b'comment'):
            actions.append({b'type': b'comment', b'value': opts[b'comment']})
        if actions:
            callconduit(repo, b'differential.revision.edit',
                        {b'objectIdentifier': drev[r'phid'],
                         b'transactions': actions})
972 973
973 974 templatekeyword = registrar.templatekeyword()
974 975
@templatekeyword(b'phabreview', requires={b'ctx'})
def template_review(context, mapping):
    """:phabreview: Object describing the review for this changeset.
    Has attributes `url` and `id`.
    """
    ctx = context.resource(mapping, b'ctx')
    m = _differentialrevisiondescre.search(ctx.description())
    if m:
        # Use %-formatting: b"D{}".format(...) fails on Python 3 because
        # bytes objects have no .format() method (PEP 461 added % for bytes).
        return templateutil.hybriddict({
            b'url': m.group(b'url'),
            b'id': b'D%s' % m.group(b'id'),
        })
@@ -1,1229 +1,1229
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 from __future__ import absolute_import
10 10
11 11 import errno
12 12 import hashlib
13 13 import os
14 14 import shutil
15 15 import stat
16 16
17 17 from .i18n import _
18 18 from .node import (
19 19 nullid,
20 20 )
21 21
22 22 from . import (
23 23 bookmarks,
24 24 bundlerepo,
25 25 cacheutil,
26 26 cmdutil,
27 27 destutil,
28 28 discovery,
29 29 error,
30 30 exchange,
31 31 extensions,
32 32 httppeer,
33 33 localrepo,
34 34 lock,
35 35 logcmdutil,
36 36 logexchange,
37 37 merge as mergemod,
38 38 narrowspec,
39 39 node,
40 40 phases,
41 41 scmutil,
42 42 sshpeer,
43 43 statichttprepo,
44 44 ui as uimod,
45 45 unionrepo,
46 46 url,
47 47 util,
48 48 verify as verifymod,
49 49 vfs as vfsmod,
50 50 )
51 51
# Convenience alias: callers use hg.release() to release a series of locks.
release = lock.release

# shared features
# Feature name recorded when bookmarks are shared between repositories
# (see the 'shareditems' createopt in share() below).
sharedbookmarks = 'bookmarks'
56 56
def _local(path):
    """Return the repository module to use for a local *path*:
    ``bundlerepo`` when the path names a file (a bundle), ``localrepo``
    otherwise."""
    path = util.expandpath(util.urllocalpath(path))
    # A conditional expression replaces the fragile `cond and a or b`
    # idiom (which breaks if the middle operand is ever falsy).
    return bundlerepo if os.path.isfile(path) else localrepo
60 60
def addbranchrevs(lrepo, other, branches, revs):
    """Expand branch names from a URL fragment into revisions.

    ``branches`` is the ``(hashbranch, branches)`` pair produced by
    ``parseurl``.  Returns ``(revs, checkout)`` where ``revs`` is the list of
    revisions to operate on (or None) and ``checkout`` is the revision to
    check out afterwards (or None).
    """
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        # No branch requested: pass the revs through unchanged.
        x = revs or None
        if revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        # Old server without branchmap support: we cannot expand branch
        # names, so treat the fragment as a plain revision.
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]

    with peer.commandexecutor() as e:
        branchmap = e.callcommand('branchmap', {}).result()

    def primary(branch):
        # Append all heads of *branch* to revs (newest head first); return
        # False when the remote does not know the branch.
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        # The fragment may name either a branch or a plain revision; fall
        # back to treating it as a revision when it is not a known branch.
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]
103 103
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''
    u = util.url(path)
    if u.fragment:
        # Strip the #branch fragment off the URL and remember it.
        branch, u.fragment = u.fragment, None
    else:
        branch = None
    return bytes(u), (branch, branches or [])
113 113
# Map of URL scheme -> handler.  A handler is either a module providing an
# ``instance()`` function or a callable returning such a module (see
# _peerlookup for how both forms are resolved).
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}
123 123
def _peerlookup(path):
    """Return the scheme handler for *path* (see the ``schemes`` table)."""
    scheme = util.url(path).scheme or 'file'
    handler = schemes.get(scheme) or schemes['file']
    try:
        # Handlers registered as callables (e.g. _local) pick the module.
        return handler(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(handler, 'instance'):
            raise
        return handler
136 136
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if not isinstance(repo, bytes):
        # Already a repo/peer object: ask it directly.
        return repo.local()
    try:
        return _peerlookup(repo).islocal(repo)
    except AttributeError:
        # Handler has no islocal(): treat as remote.
        return False
145 145
def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if not pathurl.islocal():
        return url.open(ui, path)
    return util.posixfile(pathurl.localpath(), 'rb')
153 153
# a list of (ui, repo) functions called for wire peer initialization
# (invoked by _peerorrepo on every newly created non-local peer)
wirepeersetupfuncs = []
156 156
def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None, createopts=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents,
                                     createopts=createopts)
    # Prefer the created object's own ui when it has one.
    ui = getattr(obj, "ui", ui)
    if ui.configbool('devel', 'debug.extensions'):
        log = lambda msg, *values: ui.debug('debug.extensions: ',
            msg % values, label='debug.extensions')
    else:
        # Logging disabled: swallow all arguments.
        log = lambda *a, **kw: None
    for f in presetupfuncs or []:
        f(ui, obj)
    log('- executing reposetup hooks\n')
    with util.timedcm('all reposetup') as allreposetupstats:
        for name, module in extensions.extensions(ui):
            log(' - running reposetup for %s\n' % (name,))
            hook = getattr(module, 'reposetup', None)
            if hook:
                with util.timedcm('reposetup %r', name) as stats:
                    hook(ui, obj)
                log(' > reposetup for %s took %s\n', name, stats)
    log('> all reposetup took %s\n', allreposetupstats)
    if not obj.local():
        # Wire peers get their own post-creation setup functions.
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj
184 184
def repository(ui, path='', create=False, presetupfuncs=None, intents=None,
               createopts=None):
    """return a repository object for the specified path"""
    obj = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                      intents=intents, createopts=createopts)
    repo = obj.local()
    if repo:
        return repo.filtered('visible')
    raise error.Abort(_("repository '%s' is not local") %
                      (path or obj.url()))
195 195
def peer(uiorrepo, opts, path, create=False, intents=None, createopts=None):
    '''return a repository peer for the specified path'''
    remui = remoteui(uiorrepo, opts)
    obj = _peerorrepo(remui, path, create, intents=intents,
                      createopts=createopts)
    return obj.peer()
201 201
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    # The last path component of the source becomes the destination name.
    return os.path.basename(os.path.normpath(path)) if path else ''
222 222
def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        # Not a shared repository.
        return None

    # Reuse a previously cached source repository when present.
    cached = getattr(repo, 'srcrepo', None)
    if cached:
        return cached

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo
240 240
def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        # source is a path/URL: open the repository and resolve any #branch
        # fragment into a checkout revision.
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        # source is already a repo/peer object.
        srcrepo = source.local()
        checkout = None

    shareditems = set()
    if bookmarks:
        shareditems.add(sharedbookmarks)

    r = repository(ui, dest, create=True, createopts={
        'sharedrepo': srcrepo,
        'sharedrelative': relative,
        'shareditems': shareditems,
    })

    postshare(srcrepo, r, defaultpath=defaultpath)
    # Reopen after postshare() has written the destination's configuration.
    r = repository(ui, dest)
    _postshareupdate(r, update, checkout=checkout)
    return r
276 276
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.

    Returns a new repository object representing the unshared repository.

    The passed repository object is not usable after this function is
    called.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        # Materialize the shared store into this repository's own .hg.
        destlock = copystore(ui, repo, repo.path)

        # Retire the sharedpath pointer (kept as '.old' rather than deleted).
        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # Removing share changes some fundamental properties of the repo instance.
    # So we instantiate a new repo object and operate on it rather than
    # try to keep the existing repo usable.
    newrepo = repository(repo.baseui, repo.root, create=False)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = newrepo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

    # Make any further use of the old repo object fail loudly.
    localrepo.poisonrepository(repo)

    return newrepo
323 323
def postshare(sourcerepo, destrepo, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if not default:
        return
    # Record the default pull/push path in the new repo's hgrc.
    hgrc = ('[paths]\n'
            'default = %s\n') % default
    destrepo.vfs.write('hgrc', util.tonativeeol(hgrc))
338 338
def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        # A specific revision was requested; it overrides ``checkout``.
        checkout = update
    # Try the requested revision first, then fall back to 'default'/'tip'.
    for rev in (checkout, 'default', 'tip'):
        if rev is None:
            continue
        try:
            uprev = repo.lookup(rev)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)
359 359
def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        # hardlink=None lets util.copyfiles determine on first use whether
        # hardlinking works between the two locations — TODO confirm.
        hardlink = None
        topic = _('linking') if hardlink else _('copying')
        with ui.makeprogress(topic, unit=_('files')) as progress:
            num = 0
            srcpublishing = srcrepo.publishing()
            srcvfs = vfsmod.vfs(srcrepo.sharedpath)
            dstvfs = vfsmod.vfs(destpath)
            for f in srcrepo.store.copylist():
                # Phase data of a publishing repo is not copied: everything
                # there is public.
                if srcpublishing and f.endswith('phaseroots'):
                    continue
                dstbase = os.path.dirname(f)
                if dstbase and not dstvfs.exists(dstbase):
                    dstvfs.mkdir(dstbase)
                if srcvfs.exists(f):
                    if f.endswith('data'):
                        # 'dstbase' may be empty (e.g. revlog format 0)
                        lockfile = os.path.join(dstbase, "lock")
                        # lock to avoid premature writing to the target
                        destlock = lock.lock(dstvfs, lockfile)
                    hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                                 hardlink, progress)
                    num += n
            if hardlink:
                ui.debug("linked %d files\n" % num)
            else:
                ui.debug("copied %d files\n" % num)
        return destlock
    except: # re-raises
        # On any failure, release the destination lock before propagating.
        release(destlock)
        raise
397 397
def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    # Resolve symbolic revision names on the source peer up front.
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    destrepo = share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
                     defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)
468 468
# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srccache = srcrepo.vfs.join('cache/%s' % fname)
    dstcache = os.path.join(dstcachedir, fname)
    if not os.path.exists(srccache):
        return
    if not os.path.exists(dstcachedir):
        os.mkdir(dstcachedir)
    util.copyfile(srccache, dstcache)
479 479
480 480 def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
481 481 update=True, stream=False, branch=None, shareopts=None,
482 482 storeincludepats=None, storeexcludepats=None, depth=None):
483 483 """Make a copy of an existing repository.
484 484
485 485 Create a copy of an existing repository in a new directory. The
486 486 source and destination are URLs, as passed to the repository
487 487 function. Returns a pair of repository peers, the source and
488 488 newly created destination.
489 489
490 490 The location of the source is added to the new repository's
491 491 .hg/hgrc file, as the default to be used for future pulls and
492 492 pushes.
493 493
494 494 If an exception is raised, the partly cloned/updated destination
495 495 repository will be deleted.
496 496
497 497 Arguments:
498 498
499 499 source: repository object or URL
500 500
501 501 dest: URL of destination repository to create (defaults to base
502 502 name of source repository)
503 503
504 504 pull: always pull from source repository, even in local case or if the
505 505 server prefers streaming
506 506
507 507 stream: stream raw data uncompressed from repository (fast over
508 508 LAN, slow over WAN)
509 509
510 510 revs: revision to clone up to (implies pull=True)
511 511
512 512 update: update working directory after clone completes, if
513 513 destination is local repository (True means update to default rev,
514 514 anything else is treated as a revision)
515 515
516 516 branch: branches to clone
517 517
518 518 shareopts: dict of options to control auto sharing behavior. The "pool" key
519 519 activates auto sharing mode and defines the directory for stores. The
520 520 "mode" key determines how to construct the directory name of the shared
521 521 repository. "identity" means the name is derived from the node of the first
522 522 changeset in the repository. "remote" means the name is derived from the
523 523 remote's path/URL. Defaults to "identity."
524 524
525 525 storeincludepats and storeexcludepats: sets of file patterns to include and
526 526 exclude in the repository copy, respectively. If not defined, all files
527 527 will be included (a "full" clone). Otherwise a "narrow" clone containing
528 528 only the requested files will be performed. If ``storeincludepats`` is not
529 529 defined but ``storeexcludepats`` is, ``storeincludepats`` is assumed to be
530 530 ``path:.``. If both are empty sets, no files will be cloned.
531 531 """
532 532
533 533 if isinstance(source, bytes):
534 534 origsource = ui.expandpath(source)
535 535 source, branches = parseurl(origsource, branch)
536 536 srcpeer = peer(ui, peeropts, source)
537 537 else:
538 538 srcpeer = source.peer() # in case we were called with a localrepo
539 539 branches = (None, branch or [])
540 540 origsource = source = srcpeer.url()
541 541 revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)
542 542
543 543 if dest is None:
544 544 dest = defaultdest(source)
545 545 if dest:
546 546 ui.status(_("destination directory: %s\n") % dest)
547 547 else:
548 548 dest = ui.expandpath(dest)
549 549
550 550 dest = util.urllocalpath(dest)
551 551 source = util.urllocalpath(source)
552 552
553 553 if not dest:
554 554 raise error.Abort(_("empty destination path is not valid"))
555 555
556 556 destvfs = vfsmod.vfs(dest, expandpath=True)
557 557 if destvfs.lexists():
558 558 if not destvfs.isdir():
559 559 raise error.Abort(_("destination '%s' already exists") % dest)
560 560 elif destvfs.listdir():
561 561 raise error.Abort(_("destination '%s' is not empty") % dest)
562 562
563 563 createopts = {}
564 564 narrow = False
565 565
566 566 if storeincludepats is not None:
567 567 narrowspec.validatepatterns(storeincludepats)
568 568 narrow = True
569 569
570 570 if storeexcludepats is not None:
571 571 narrowspec.validatepatterns(storeexcludepats)
572 572 narrow = True
573 573
574 574 if narrow:
575 575 # Include everything by default if only exclusion patterns defined.
576 576 if storeexcludepats and not storeincludepats:
577 577 storeincludepats = {'path:.'}
578 578
579 579 createopts['narrowfiles'] = True
580 580
581 581 if depth:
582 582 createopts['shallowfilestore'] = True
583 583
584 584 if srcpeer.capable(b'lfs-serve'):
585 585 # Repository creation honors the config if it disabled the extension, so
586 586 # we can't just announce that lfs will be enabled. This check avoids
587 587 # saying that lfs will be enabled, and then saying it's an unknown
588 588 # feature. The lfs creation option is set in either case so that a
589 589 # requirement is added. If the extension is explicitly disabled but the
590 590 # requirement is set, the clone aborts early, before transferring any
591 591 # data.
592 592 createopts['lfs'] = True
593 593
594 594 if extensions.disabledext('lfs'):
595 595 ui.status(_('(remote is using large file support (lfs), but it is '
596 596 'explicitly disabled in the local configuration)\n'))
597 597 else:
598 598 ui.status(_('(remote is using large file support (lfs); lfs will '
599 599 'be enabled for this repository)\n'))
600 600
601 601 shareopts = shareopts or {}
602 602 sharepool = shareopts.get('pool')
603 603 sharenamemode = shareopts.get('mode')
604 604 if sharepool and islocal(dest):
605 605 sharepath = None
606 606 if sharenamemode == 'identity':
607 607 # Resolve the name from the initial changeset in the remote
608 608 # repository. This returns nullid when the remote is empty. It
609 609 # raises RepoLookupError if revision 0 is filtered or otherwise
610 610 # not available. If we fail to resolve, sharing is not enabled.
611 611 try:
612 612 with srcpeer.commandexecutor() as e:
613 613 rootnode = e.callcommand('lookup', {
614 614 'key': '0',
615 615 }).result()
616 616
617 617 if rootnode != node.nullid:
618 618 sharepath = os.path.join(sharepool, node.hex(rootnode))
619 619 else:
620 620 ui.status(_('(not using pooled storage: '
621 621 'remote appears to be empty)\n'))
622 622 except error.RepoLookupError:
623 623 ui.status(_('(not using pooled storage: '
624 624 'unable to resolve identity of remote)\n'))
625 625 elif sharenamemode == 'remote':
626 626 sharepath = os.path.join(
627 627 sharepool, node.hex(hashlib.sha1(source).digest()))
628 628 else:
629 629 raise error.Abort(_('unknown share naming mode: %s') %
630 630 sharenamemode)
631 631
632 632 # TODO this is a somewhat arbitrary restriction.
633 633 if narrow:
634 634 ui.status(_('(pooled storage not supported for narrow clones)\n'))
635 635 sharepath = None
636 636
637 637 if sharepath:
638 638 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
639 639 dest, pull=pull, rev=revs, update=update,
640 640 stream=stream)
641 641
642 642 srclock = destlock = cleandir = None
643 643 srcrepo = srcpeer.local()
644 644 try:
645 645 abspath = origsource
646 646 if islocal(origsource):
647 647 abspath = os.path.abspath(util.urllocalpath(origsource))
648 648
649 649 if islocal(dest):
650 650 cleandir = dest
651 651
652 652 copy = False
653 653 if (srcrepo and srcrepo.cancopy() and islocal(dest)
654 654 and not phases.hassecret(srcrepo)):
655 655 copy = not pull and not revs
656 656
657 657 # TODO this is a somewhat arbitrary restriction.
658 658 if narrow:
659 659 copy = False
660 660
661 661 if copy:
662 662 try:
663 663 # we use a lock here because if we race with commit, we
664 664 # can end up with extra data in the cloned revlogs that's
665 665 # not pointed to by changesets, thus causing verify to
666 666 # fail
667 667 srclock = srcrepo.lock(wait=False)
668 668 except error.LockError:
669 669 copy = False
670 670
671 671 if copy:
672 672 srcrepo.hook('preoutgoing', throw=True, source='clone')
673 673 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
674 674 if not os.path.exists(dest):
675 675 util.makedirs(dest)
676 676 else:
677 677 # only clean up directories we create ourselves
678 678 cleandir = hgdir
679 679 try:
680 680 destpath = hgdir
681 681 util.makedir(destpath, notindexed=True)
682 682 except OSError as inst:
683 683 if inst.errno == errno.EEXIST:
684 684 cleandir = None
685 685 raise error.Abort(_("destination '%s' already exists")
686 686 % dest)
687 687 raise
688 688
689 689 destlock = copystore(ui, srcrepo, destpath)
690 690 # copy bookmarks over
691 691 srcbookmarks = srcrepo.vfs.join('bookmarks')
692 692 dstbookmarks = os.path.join(destpath, 'bookmarks')
693 693 if os.path.exists(srcbookmarks):
694 694 util.copyfile(srcbookmarks, dstbookmarks)
695 695
696 696 dstcachedir = os.path.join(destpath, 'cache')
697 697 for cache in cacheutil.cachetocopy(srcrepo):
698 698 _copycache(srcrepo, dstcachedir, cache)
699 699
700 700 # we need to re-init the repo after manually copying the data
701 701 # into it
702 702 destpeer = peer(srcrepo, peeropts, dest)
703 703 srcrepo.hook('outgoing', source='clone',
704 704 node=node.hex(node.nullid))
705 705 else:
706 706 try:
707 707 # only pass ui when no srcrepo
708 708 destpeer = peer(srcrepo or ui, peeropts, dest, create=True,
709 709 createopts=createopts)
710 710 except OSError as inst:
711 711 if inst.errno == errno.EEXIST:
712 712 cleandir = None
713 713 raise error.Abort(_("destination '%s' already exists")
714 714 % dest)
715 715 raise
716 716
717 717 if revs:
718 718 if not srcpeer.capable('lookup'):
719 719 raise error.Abort(_("src repository does not support "
720 720 "revision lookup and so doesn't "
721 721 "support clone by revision"))
722 722
723 723 # TODO this is batchable.
724 724 remoterevs = []
725 725 for rev in revs:
726 726 with srcpeer.commandexecutor() as e:
727 727 remoterevs.append(e.callcommand('lookup', {
728 728 'key': rev,
729 729 }).result())
730 730 revs = remoterevs
731 731
732 732 checkout = revs[0]
733 733 else:
734 734 revs = None
735 735 local = destpeer.local()
736 736 if local:
737 737 if narrow:
738 738 with local.lock():
739 739 local.setnarrowpats(storeincludepats, storeexcludepats)
740 740
741 741 u = util.url(abspath)
742 742 defaulturl = bytes(u)
743 743 local.ui.setconfig('paths', 'default', defaulturl, 'clone')
744 744 if not stream:
745 745 if pull:
746 746 stream = False
747 747 else:
748 748 stream = None
749 749 # internal config: ui.quietbookmarkmove
750 750 overrides = {('ui', 'quietbookmarkmove'): True}
751 751 with local.ui.configoverride(overrides, 'clone'):
752 752 exchange.pull(local, srcpeer, revs,
753 753 streamclonerequested=stream,
754 754 includepats=storeincludepats,
755 755 excludepats=storeexcludepats,
756 756 depth=depth)
757 757 elif srcrepo:
758 758 # TODO lift restriction once exchange.push() accepts narrow
759 759 # push.
760 760 if narrow:
761 761 raise error.Abort(_('narrow clone not available for '
762 762 'remote destinations'))
763 763
764 764 exchange.push(srcrepo, destpeer, revs=revs,
765 765 bookmarks=srcrepo._bookmarks.keys())
766 766 else:
767 767 raise error.Abort(_("clone from remote to remote not supported")
768 768 )
769 769
770 770 cleandir = None
771 771
772 772 destrepo = destpeer.local()
773 773 if destrepo:
774 774 template = uimod.samplehgrcs['cloned']
775 775 u = util.url(abspath)
776 776 u.passwd = None
777 777 defaulturl = bytes(u)
778 778 destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
779 779 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
780 780
781 781 if ui.configbool('experimental', 'remotenames'):
782 782 logexchange.pullremotenames(destrepo, srcpeer)
783 783
784 784 if update:
785 785 if update is not True:
786 786 with srcpeer.commandexecutor() as e:
787 787 checkout = e.callcommand('lookup', {
788 788 'key': update,
789 789 }).result()
790 790
791 791 uprev = None
792 792 status = None
793 793 if checkout is not None:
794 794 # Some extensions (at least hg-git and hg-subversion) have
795 795 # a peer.lookup() implementation that returns a name instead
796 796 # of a nodeid. We work around it here until we've figured
797 797 # out a better solution.
798 798 if len(checkout) == 20 and checkout in destrepo:
799 799 uprev = checkout
800 800 elif scmutil.isrevsymbol(destrepo, checkout):
801 801 uprev = scmutil.revsymbol(destrepo, checkout).node()
802 802 else:
803 803 if update is not True:
804 804 try:
805 805 uprev = destrepo.lookup(update)
806 806 except error.RepoLookupError:
807 807 pass
808 808 if uprev is None:
809 809 try:
810 810 uprev = destrepo._bookmarks['@']
811 811 update = '@'
812 812 bn = destrepo[uprev].branch()
813 813 if bn == 'default':
814 814 status = _("updating to bookmark @\n")
815 815 else:
816 816 status = (_("updating to bookmark @ on branch %s\n")
817 817 % bn)
818 818 except KeyError:
819 819 try:
820 820 uprev = destrepo.branchtip('default')
821 821 except error.RepoLookupError:
822 822 uprev = destrepo.lookup('tip')
823 823 if not status:
824 824 bn = destrepo[uprev].branch()
825 825 status = _("updating to branch %s\n") % bn
826 826 destrepo.ui.status(status)
827 827 _update(destrepo, uprev)
828 828 if update in destrepo._bookmarks:
829 829 bookmarks.activate(destrepo, update)
830 830 finally:
831 831 release(srclock, destlock)
832 832 if cleandir is not None:
833 833 shutil.rmtree(cleandir, True)
834 834 if srcpeer is not None:
835 835 srcpeer.close()
836 836 return srcpeer, destpeer
837 837
def _showstats(repo, stats, quietempty=False):
    """Report update/merge statistics on ``repo.ui``.

    ``stats`` is the stats object returned by mergemod.update.  When
    ``quietempty`` is true and the stats are empty, nothing is printed.
    """
    if quietempty and stats.isempty():
        return
    counts = (stats.updatedcount, stats.mergedcount,
              stats.removedcount, stats.unresolvedcount)
    msg = _("%d files updated, %d files merged, "
            "%d files removed, %d files unresolved\n")
    repo.ui.status(msg % counts)
845 845
def updaterepo(repo, node, overwrite, updatecheck=None):
    """Check out ``node`` in the working directory of ``repo``.

    With ``overwrite`` set, local changes are clobbered; otherwise they
    are merged into the destination.  Returns the stats object produced
    by mergemod.update (see pydoc mercurial.merge.applyupdates).
    """
    return mergemod.update(repo, node,
                           branchmerge=False,
                           force=overwrite,
                           updatecheck=updatecheck,
                           labels=['working copy', 'destination'])
855 855
def update(repo, node, quietempty=False, updatecheck=None):
    """Update the working directory to ``node`` and report statistics.

    Returns True when unresolved file merges remain, False otherwise.
    """
    mergestats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, mergestats, quietempty)
    if mergestats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return mergestats.unresolvedcount > 0

# naming conflict in clone()
_update = update
866 866
def clean(repo, node, show_stats=True, quietempty=False):
    """Forcibly switch the working directory to ``node``, clobbering changes.

    Also removes any leftover graft state.  Returns True when unresolved
    file merges remain.
    """
    mergestats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, mergestats, quietempty)
    return mergestats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean
877 877
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    Beyond file contents this also handles the bookmark state: the active
    bookmark might be advanced, activated or deactivated.

    Arguments:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
              changes into destination
    * noconflict: check that the update does not result in file merges

    This returns whether conflict is detected at updating or not.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # not configured, or an invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            # no explicit destination: let destutil choose one
            checkout, movemarkfrom, brev = destutil.destupdate(repo,
                                                               clean=clean)
            warndest = True

        if clean:
            hasconflict = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            hasconflict = _update(repo, checkout, updatecheck=updatecheck)

        if not hasconflict and movemarkfrom:
            wnode = repo['.'].node()
            if movemarkfrom == wnode:
                pass  # no-op update
            elif bookmarks.update(repo, [movemarkfrom], wnode):
                mark = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % mark)
            else:
                # this can happen with a non-linear update
                mark = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % mark)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                mark = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % mark)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                mark = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % mark)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return hasconflict
949 949
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return true if any
    unresolved conflicts."""
    if abort:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts; go back to the first local parent
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)
    else:
        stats = mergemod.update(repo, node, branchmerge=True, force=force,
                                mergeforce=mergeforce, labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0
978 978
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.

    ``displaychlist`` gets called with (remoterepo, incomingchangesetlist,
    displayer) parameters, and is supposed to contain only code that can't
    be unified between the two commands.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    # pull the incoming changesets into a temporary bundle repo
    other, chlist, cleanup = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanup()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes
1009 1009
def incoming(ui, repo, source, opts):
    """Show changesets in ``source`` not present in ``repo``.

    Returns 0 when incoming changes were found, 1 otherwise (subrepo
    results are folded in via min()).
    """
    def subreporecurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.incoming(ui, source, opts))
        return ret

    def display(other, chlist, displayer):
        limit = logcmdutil.getlimit(opts)
        if opts.get('newest_first'):
            chlist.reverse()
        shown = 0
        for n in chlist:
            if limit is not None and shown >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            shown += 1
            displayer.show(other[n])

    return _incoming(display, subreporecurse, ui, repo, source, opts)
1034 1034
def _outgoing(ui, repo, dest, opts):
    """Compute changesets in ``repo`` missing from ``dest``.

    Returns (missing nodes, destination peer).  Aborts when no default
    destination is configured.
    """
    path = ui.paths.getpath(dest, default=('default-push', 'default'))
    if not path:
        raise error.Abort(_('default repository not configured!'),
                          hint=_("see 'hg help config.paths'"))
    dest = path.pushloc or path.loc
    branches = path.branch, opts.get('branch') or []

    ui.status(_('comparing with %s\n') % util.hidepassword(dest))
    revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]

    other = peer(repo, opts, dest)
    outgoing = discovery.findcommonoutgoing(repo, other, revs,
                                            force=opts.get('force'))
    missing = outgoing.missing
    if not missing:
        scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
    return missing, other
1055 1055
def outgoing(ui, repo, dest, opts):
    """Show changesets in ``repo`` not present in ``dest``.

    Returns 0 when outgoing changes were found, 1 otherwise (subrepo
    results are folded in via min()).
    """
    def recurse():
        ret = 1
        if opts.get('subrepos'):
            ctx = repo[None]
            for subpath in sorted(ctx.substate):
                sub = ctx.sub(subpath)
                ret = min(ret, sub.outgoing(ui, dest, opts))
        return ret

    limit = logcmdutil.getlimit(opts)
    o, other = _outgoing(ui, repo, dest, opts)
    if not o:
        cmdutil.outgoinghooks(ui, repo, other, opts, o)
        return recurse()

    if opts.get('newest_first'):
        o.reverse()
    ui.pager('outgoing')
    displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
    shown = 0
    for n in o:
        if limit is not None and shown >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        shown += 1
        displayer.show(repo[n])
    displayer.close()
    cmdutil.outgoinghooks(ui, repo, other, opts, o)
    recurse()
    return 0 # exit code is zero since we found outgoing changes
1089 1089
def verify(repo):
    """verify the consistency of a repository"""
    ret = verifymod.verify(repo)

    # Broken subrepo references in hidden csets don't seem worth worrying
    # about, since they can't be pushed/pulled, and --hidden can be used if
    # they are a concern.

    # pathto() is needed for -R case
    revs = repo.revs("filelog(%s)",
                     util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
    if revs:
        repo.ui.status(_('checking subrepo links\n'))
        for rev in revs:
            ctx = repo[rev]
            try:
                for subpath in ctx.substate:
                    try:
                        ret = (ctx.sub(subpath, allowcreate=False).verify()
                               or ret)
                    except error.RepoError as e:
                        repo.ui.warn(('%d: %s\n') % (rev, e))
            except Exception:
                repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
                             node.short(ctx.node()))

    return ret
1118 1118
def remoteui(src, opts):
    'build a remote ui from ui or repo and opts'
    if util.safehasattr(src, 'baseui'):
        # looks like a repository: drop repo-specific config, then copy
        # target options from the repo's ui
        dst = src.baseui.copy()
        src = src.ui
    else:
        # assume it's a global ui object: keep all global options
        dst = src.copy()

    # copy ssh-specific options
    for opt in ('ssh', 'remotecmd'):
        value = opts.get(opt) or src.config('ui', opt)
        if value:
            dst.setconfig("ui", opt, value, 'copied')

    # copy bundle-specific options
    root = src.config('bundle', 'mainreporoot')
    if root:
        dst.setconfig('bundle', 'mainreporoot', root, 'copied')

    # copy selected local settings to the remote ui
    for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
        for key, val in src.configitems(sect):
            dst.setconfig(sect, key, val, 'copied')
    cacerts = src.config('web', 'cacerts')
    if cacerts:
        dst.setconfig('web', 'cacerts', util.expandpath(cacerts), 'copied')

    return dst
1147 1147
# Files of interest
# Used to check if the repository has changed looking at mtime and size of
# these files.  Entries marked "!" can change content without changing size.
foi = [
    ('spath', '00changelog.i'),
    ('spath', 'phaseroots'),   # ! phase can change content at the same size
    ('spath', 'obsstore'),
    ('path', 'bookmarks'),     # ! bookmark can change content at the same size
]
1156 1156
class cachedlocalrepo(object):
    """Holds a localrepository that can be cached and reused."""

    def __init__(self, repo):
        """Create a new cached repo from an existing repo.

        We assume the passed in repo was recently created. If the
        repo has changed between when it was created and when it was
        turned into a cache, it may not refresh properly.
        """
        assert isinstance(repo, localrepo.localrepository)
        self._repo = repo
        self._state, self.mtime = self._repostate()
        self._filtername = repo.filtername

    def fetch(self):
        """Refresh (if necessary) and return a repository.

        If the cached instance is out of date, it will be recreated
        automatically and returned.

        Returns a tuple of the repo and a boolean indicating whether a new
        repo instance was created.
        """
        # We compare the mtimes and sizes of some well-known files to
        # determine if the repo changed. This is not precise, as mtimes
        # are susceptible to clock skew and imprecise filesystems and
        # file content can change while maintaining the same size.
        state, mtime = self._repostate()
        if state == self._state:
            # nothing of interest changed on disk: reuse the instance
            return self._repo, False

        fresh = repository(self._repo.baseui, self._repo.url())
        if self._filtername:
            self._repo = fresh.filtered(self._filtername)
        else:
            self._repo = fresh.unfiltered()
        self._state = state
        self.mtime = mtime
        return self._repo, True

    def _repostate(self):
        """Return ((mtime, size) tuples for the files of interest, max mtime)."""
        state = []
        maxmtime = -1
        for attr, fname in foi:
            prefix = getattr(self._repo, attr)
            path = os.path.join(prefix, fname)
            try:
                st = os.stat(path)
            except OSError:
                # file is absent: fall back to the containing directory
                st = os.stat(prefix)
            state.append((st[stat.ST_MTIME], st.st_size))
            maxmtime = max(maxmtime, st[stat.ST_MTIME])
        return tuple(state), maxmtime

    def copy(self):
        """Obtain a copy of this class instance.

        A new localrepository instance is obtained. The new instance should be
        completely independent of the original.
        """
        repo = repository(self._repo.baseui, self._repo.origroot)
        if self._filtername:
            repo = repo.filtered(self._filtername)
        else:
            repo = repo.unfiltered()
        dupe = cachedlocalrepo(repo)
        dupe._state = self._state
        dupe.mtime = self.mtime
        return dupe
@@ -1,453 +1,454
1 1 # verify.py - repository integrity checking for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 nullid,
15 15 short,
16 16 )
17 17
18 18 from . import (
19 19 error,
20 20 pycompat,
21 21 revlog,
22 22 util,
23 23 )
24 24
def verify(repo):
    """Check the integrity of *repo* while holding its lock."""
    with repo.lock():
        checker = verifier(repo)
        return checker.verify()
28 28
29 29 def _normpath(f):
30 30 # under hg < 2.4, convert didn't sanitize paths properly, so a
31 31 # converted repo may contain repeated slashes
32 32 while '//' in f:
33 33 f = f.replace('//', '/')
34 34 return f
35 35
class verifier(object):
    """Stateful integrity checker for a local repository."""

    def __init__(self, repo):
        # work on the unfiltered view so hidden changesets are checked too
        self.repo = repo.unfiltered()
        self.ui = repo.ui
        self.match = repo.narrowmatch()
        # accumulated results
        self.badrevs = set()
        self.errors = 0
        self.warnings = 0
        self.refersmf = False
        self.fncachewarned = False
        self.warnorphanstorefiles = True
        # cached repository facts
        self.havecl = len(repo.changelog) > 0
        self.havemf = len(repo.manifestlog.getstorage(b'')) > 0
        self.revlogv1 = repo.changelog.version != revlog.REVLOGV0
        self.lrugetctx = util.lrucachefunc(repo.__getitem__)
        # developer config: verify.skipflags
        self.skipflags = repo.ui.configint('verify', 'skipflags')
53 53
def warn(self, msg):
    """Count *msg* as a warning and report it on the ui."""
    self.warnings += 1
    self.ui.warn(msg + "\n")
57 57
def err(self, linkrev, msg, filename=None):
    """Count an integrity error for *linkrev* (may be None) and report it."""
    if linkrev is None:
        linkrev = '?'
    else:
        # remember the damaged changeset so verify() can report the first one
        self.badrevs.add(linkrev)
        linkrev = "%d" % linkrev
    msg = "%s: %s" % (linkrev, msg)
    if filename:
        msg = "%s@%s" % (filename, msg)
    self.ui.warn(" " + msg + "\n")
    self.errors += 1
69 69
def exc(self, linkrev, msg, inst, filename=None):
    """Record an error caused by exception *inst*."""
    # fall back to repr() when the exception stringifies to nothing
    fmsg = pycompat.bytestr(inst) or pycompat.byterepr(inst)
    self.err(linkrev, "%s: %s" % (msg, fmsg), filename)
75 75
def checklog(self, obj, name, linkrev):
    """Basic sanity checks on revlog *obj*: size bookkeeping and format."""
    if not len(obj) and (self.havecl or self.havemf):
        self.err(linkrev, _("empty or missing %s") % name)
        return

    datadiff, indexdiff = obj.checksize()
    if datadiff:
        self.err(None, _("data length off by %d bytes") % datadiff, name)
    if indexdiff:
        self.err(None, _("index contains %d extra bytes") % indexdiff, name)

    # warn when a revlog's format disagrees with the changelog's
    if obj.version != revlog.REVLOGV0:
        if not self.revlogv1:
            self.warn(_("warning: `%s' uses revlog format 1") % name)
    elif self.revlogv1:
        self.warn(_("warning: `%s' uses revlog format 0") % name)
92 92
def checkentry(self, obj, i, node, seen, linkrevs, f):
    """Sanity-check revision *i* (*node*) of revlog *obj*.

    Verifies the linkrev against the expected *linkrevs*, checks both
    parents and duplicate nodes, records *node* in *seen*, and returns
    the linkrev (or None when it cannot be trusted).
    """
    lr = obj.linkrev(obj.rev(node))
    if lr < 0 or (self.havecl and lr not in linkrevs):
        if lr < 0 or lr >= len(self.repo.changelog):
            msg = _("rev %d points to nonexistent changeset %d")
        else:
            msg = _("rev %d points to unexpected changeset %d")
        self.err(None, msg % (i, lr), f)
        if linkrevs:
            if f and len(linkrevs) > 1:
                try:
                    # attempt to filter down to real linkrevs
                    linkrevs = [l for l in linkrevs
                                if self.lrugetctx(l)[f].filenode() == node]
                except Exception:
                    pass
            self.warn(_(" (expected %s)") % " ".join
                      (map(pycompat.bytestr, linkrevs)))
        lr = None  # can't be trusted

    try:
        p1, p2 = obj.parents(node)
        if p1 not in seen and p1 != nullid:
            self.err(lr, _("unknown parent 1 %s of %s") %
                     (short(p1), short(node)), f)
        if p2 not in seen and p2 != nullid:
            self.err(lr, _("unknown parent 2 %s of %s") %
                     (short(p2), short(node)), f)
    except Exception as inst:
        self.exc(lr, _("checking parents of %s") % short(node), inst, f)

    if node in seen:
        self.err(lr, _("duplicate revision %d (%d)") % (i, seen[node]), f)
    seen[node] = i
    return lr
128 128
def verify(self):
    """Run the full verification.

    Returns 1 when damaged changesets were found, None otherwise.
    """
    repo = self.repo
    ui = repo.ui

    if not repo.url().startswith('file:'):
        raise error.Abort(_("cannot verify bundle or remote repos"))

    if os.path.exists(repo.sjoin("journal")):
        ui.warn(_("abandoned transaction found - run hg recover\n"))

    if ui.verbose or not self.revlogv1:
        ui.status(_("repository uses revlog format %d\n") %
                  (self.revlogv1 and 1 or 0))

    # the four phases: changelog, manifests, cross-check, filelogs
    mflinkrevs, filelinkrevs = self._verifychangelog()
    filenodes = self._verifymanifest(mflinkrevs)
    del mflinkrevs
    self._crosscheckfiles(filelinkrevs, filenodes)
    totalfiles, filerevisions = self._verifyfiles(filenodes, filelinkrevs)

    ui.status(_("checked %d changesets with %d changes to %d files\n") %
              (len(repo.changelog), filerevisions, totalfiles))
    if self.warnings:
        ui.warn(_("%d warnings encountered!\n") % self.warnings)
    if self.fncachewarned:
        ui.warn(_('hint: run "hg debugrebuildfncache" to recover from '
                  'corrupt fncache\n'))
    if self.errors:
        ui.warn(_("%d integrity errors encountered!\n") % self.errors)
        if self.badrevs:
            ui.warn(_("(first damaged changeset appears to be %d)\n")
                    % min(self.badrevs))
        return 1
166 166
def _verifychangelog(self):
    """Check every changelog entry.

    Returns (mflinkrevs, filelinkrevs): manifest node -> [linkrevs] and
    normalized file path -> [linkrevs], for the later phases.
    """
    ui = self.ui
    repo = self.repo
    match = self.match
    cl = repo.changelog

    ui.status(_("checking changesets\n"))
    mflinkrevs = {}
    filelinkrevs = {}
    seen = {}
    self.checklog(cl, "changelog", 0)
    progress = ui.makeprogress(_('checking'), unit=_('changesets'),
                               total=len(repo))
    for rev in repo:
        progress.update(rev)
        n = cl.node(rev)
        self.checkentry(cl, rev, n, seen, [rev], "changelog")

        try:
            changes = cl.read(n)
            if changes[0] != nullid:
                mflinkrevs.setdefault(changes[0], []).append(rev)
                self.refersmf = True
            for f in changes[3]:
                if match(f):
                    filelinkrevs.setdefault(_normpath(f), []).append(rev)
        except Exception as inst:
            self.refersmf = True
            self.exc(rev, _("unpacking changeset %s") % short(n), inst)
    progress.complete()
    return mflinkrevs, filelinkrevs
198 198
def _verifymanifest(self, mflinkrevs, dir="", storefiles=None,
                    subdirprogress=None):
    """Check the manifest revlog for *dir* (the root manifest when empty).

    Recurses into tree-manifest subdirectories and returns a mapping of
    file path -> {filenode: linkrev} consumed by _verifyfiles().
    """
    repo = self.repo
    ui = self.ui
    match = self.match
    mfl = self.repo.manifestlog
    mf = mfl.getstorage(dir)

    if not dir:
        self.ui.status(_("checking manifests\n"))

    filenodes = {}
    subdirnodes = {}
    seen = {}
    label = "manifest"
    if dir:
        label = dir
        revlogfiles = mf.files()
        storefiles.difference_update(revlogfiles)
        if subdirprogress: # should be true since we're in a subdirectory
            subdirprogress.increment()
    if self.refersmf:
        # Do not check manifest if there are only changelog entries with
        # null manifests.
        self.checklog(mf, label, 0)
    progress = ui.makeprogress(_('checking'), unit=_('manifests'),
                               total=len(mf))
    for i in mf:
        if not dir:
            progress.update(i)
        n = mf.node(i)
        lr = self.checkentry(mf, i, n, seen, mflinkrevs.get(n, []), label)
        if n in mflinkrevs:
            del mflinkrevs[n]
        elif dir:
            self.err(lr, _("%s not in parent-directory manifest") %
                     short(n), label)
        else:
            self.err(lr, _("%s not in changesets") % short(n), label)

        try:
            mfdelta = mfl.get(dir, n).readdelta(shallow=True)
            for f, fn, fl in mfdelta.iterentries():
                if not f:
                    self.err(lr, _("entry without name in manifest"))
                elif f == "/dev/null": # ignore this in very old repos
                    continue
                fullpath = dir + _normpath(f)
                if fl == 't':
                    # tree-manifest entry: record for the recursive pass
                    if not match.visitdir(fullpath):
                        continue
                    subdirnodes.setdefault(fullpath + '/', {}).setdefault(
                        fn, []).append(lr)
                else:
                    if not match(fullpath):
                        continue
                    filenodes.setdefault(fullpath, {}).setdefault(fn, lr)
        except Exception as inst:
            self.exc(lr, _("reading delta %s") % short(n), inst, label)
    if not dir:
        progress.complete()

    if self.havemf:
        # every entry left in mflinkrevs is a changeset referring to a
        # manifest revision we never saw
        for c, m in sorted([(c, m) for m in mflinkrevs
                            for c in mflinkrevs[m]]):
            if dir:
                self.err(c, _("parent-directory manifest refers to unknown "
                              "revision %s") % short(m), label)
            else:
                self.err(c, _("changeset refers to unknown revision %s") %
                         short(m), label)

    if not dir and subdirnodes:
        self.ui.status(_("checking directory manifests\n"))
        storefiles = set()
        subdirs = set()
        revlogv1 = self.revlogv1
        for f, f2, size in repo.store.datafiles():
            if not f:
                self.err(None, _("cannot decode filename '%s'") % f2)
            elif (size > 0 or not revlogv1) and f.startswith('meta/'):
                storefiles.add(_normpath(f))
                subdirs.add(os.path.dirname(f))
        subdirprogress = ui.makeprogress(_('checking'), unit=_('manifests'),
                                         total=len(subdirs))

    for subdir, linkrevs in subdirnodes.iteritems():
        subdirfilenodes = self._verifymanifest(linkrevs, subdir, storefiles,
                                               subdirprogress)
        for f, onefilenodes in subdirfilenodes.iteritems():
            filenodes.setdefault(f, {}).update(onefilenodes)

    if not dir and subdirnodes:
        subdirprogress.complete()
        if self.warnorphanstorefiles:
            for f in sorted(storefiles):
                self.warn(_("warning: orphan data file '%s'") % f)

    return filenodes
298 298
def _crosscheckfiles(self, filelinkrevs, filenodes):
    """Cross-check the changelog's file list against the manifests.

    Reports files mentioned in changesets but absent from any manifest,
    and files present in manifests but never mentioned by a changeset.
    """
    repo = self.repo
    ui = self.ui
    ui.status(_("crosschecking files in changesets and manifests\n"))

    total = len(filelinkrevs) + len(filenodes)
    # the pasted diff hunk contained both the old and the new makeprogress
    # call; keep only the post-change form (with unit=_('files'))
    progress = ui.makeprogress(_('crosschecking'), unit=_('files'),
                               total=total)
    if self.havemf:
        for f in sorted(filelinkrevs):
            progress.increment()
            if f not in filenodes:
                lr = filelinkrevs[f][0]
                self.err(lr, _("in changeset but not in manifest"), f)

    if self.havecl:
        for f in sorted(filenodes):
            progress.increment()
            if f not in filelinkrevs:
                try:
                    fl = repo.file(f)
                    lr = min([fl.linkrev(fl.rev(n)) for n in filenodes[f]])
                except Exception:
                    lr = None
                self.err(lr, _("in manifest but not in changeset"), f)

    progress.complete()
325 326
def _verifyfiles(self, filenodes, filelinkrevs):
    """Check every filelog against the collected expectations.

    Returns (number of files checked, total file revisions seen).
    """
    repo = self.repo
    ui = self.ui
    lrugetctx = self.lrugetctx
    revlogv1 = self.revlogv1
    havemf = self.havemf
    ui.status(_("checking files\n"))

    storefiles = set()
    for f, f2, size in repo.store.datafiles():
        if not f:
            self.err(None, _("cannot decode filename '%s'") % f2)
        elif (size > 0 or not revlogv1) and f.startswith('data/'):
            storefiles.add(_normpath(f))

    state = {
        # TODO this assumes revlog storage for changelog.
        'expectedversion': self.repo.changelog.version & 0xFFFF,
        'skipflags': self.skipflags,
        # experimental config: censor.policy
        'erroroncensored': ui.config('censor', 'policy') == 'abort',
    }

    files = sorted(set(filenodes) | set(filelinkrevs))
    revisions = 0
    progress = ui.makeprogress(_('checking'), unit=_('files'),
                               total=len(files))
    for i, f in enumerate(files):
        progress.update(i, item=f)
        try:
            linkrevs = filelinkrevs[f]
        except KeyError:
            # in manifest but not in changelog
            linkrevs = []

        if linkrevs:
            lr = linkrevs[0]
        else:
            lr = None

        try:
            fl = repo.file(f)
        except error.StorageError as e:
            self.err(lr, _("broken revlog! (%s)") % e, f)
            continue

        for ff in fl.files():
            try:
                storefiles.remove(ff)
            except KeyError:
                if self.warnorphanstorefiles:
                    self.warn(_(" warning: revlog '%s' not in fncache!") %
                              ff)
                    self.fncachewarned = True

        if not len(fl) and (self.havecl or self.havemf):
            self.err(lr, _("empty or missing %s") % f)
        else:
            # Guard against implementations not setting this.
            state['skipread'] = set()
            for problem in fl.verifyintegrity(state):
                if problem.node is not None:
                    linkrev = fl.linkrev(fl.rev(problem.node))
                else:
                    linkrev = None

                if problem.warning:
                    self.warn(problem.warning)
                elif problem.error:
                    self.err(linkrev if linkrev is not None else lr,
                             problem.error, f)
                else:
                    raise error.ProgrammingError(
                        'problem instance does not set warning or error '
                        'attribute: %s' % problem.msg)

        seen = {}
        for i in fl:
            revisions += 1
            n = fl.node(i)
            lr = self.checkentry(fl, i, n, seen, linkrevs, f)
            if f in filenodes:
                if havemf and n not in filenodes[f]:
                    self.err(lr, _("%s not in manifests") % (short(n)), f)
                else:
                    del filenodes[f][n]

            if n in state['skipread']:
                continue

            # check renames
            try:
                # This requires resolving fulltext (at least on revlogs). We
                # may want ``verifyintegrity()`` to pass a set of nodes with
                # rename metadata as an optimization.
                rp = fl.renamed(n)
                if rp:
                    if lr is not None and ui.verbose:
                        ctx = lrugetctx(lr)
                        if not any(rp[0] in pctx for pctx in ctx.parents()):
                            self.warn(_("warning: copy source of '%s' not"
                                        " in parents of %s") % (f, ctx))
                    fl2 = repo.file(rp[0])
                    if not len(fl2):
                        self.err(lr, _("empty or missing copy source "
                                       "revlog %s:%s") % (rp[0], short(rp[1])), f)
                    elif rp[1] == nullid:
                        ui.note(_("warning: %s@%s: copy source"
                                  " revision is nullid %s:%s\n")
                                % (f, lr, rp[0], short(rp[1])))
                    else:
                        fl2.rev(rp[1])
            except Exception as inst:
                self.exc(lr, _("checking rename of %s") % short(n), inst, f)

        # cross-check: whatever is left in filenodes[f] was promised by a
        # manifest but never appeared in the filelog
        if f in filenodes:
            fns = [(v, k) for k, v in filenodes[f].iteritems()]
            for lr, node in sorted(fns):
                self.err(lr, _("manifest refers to unknown revision %s") %
                         short(node), f)
    progress.complete()

    if self.warnorphanstorefiles:
        for f in sorted(storefiles):
            self.warn(_("warning: orphan data file '%s'") % f)

    return len(files), revisions
@@ -1,1296 +1,1296
1 1 #testcases sshv1 sshv2
2 2
3 3 #if sshv2
4 4 $ cat >> $HGRCPATH << EOF
5 5 > [experimental]
6 6 > sshpeer.advertise-v2 = true
7 7 > sshserver.support-v2 = true
8 8 > EOF
9 9 #endif
10 10
11 11 Prepare repo a:
12 12
13 13 $ hg init a
14 14 $ cd a
15 15 $ echo a > a
16 16 $ hg add a
17 17 $ hg commit -m test
18 18 $ echo first line > b
19 19 $ hg add b
20 20
21 21 Create a non-inlined filelog:
22 22
23 23 $ "$PYTHON" -c 'open("data1", "wb").write(b"".join(b"%d\n" % x for x in range(10000)))'
24 24 $ for j in 0 1 2 3 4 5 6 7 8 9; do
25 25 > cat data1 >> b
26 26 > hg commit -m test
27 27 > done
28 28
29 29 List files in store/data (should show a 'b.d'):
30 30
31 31 #if reporevlogstore
32 32 $ for i in .hg/store/data/*; do
33 33 > echo $i
34 34 > done
35 35 .hg/store/data/a.i
36 36 .hg/store/data/b.d
37 37 .hg/store/data/b.i
38 38 #endif
39 39
40 40 Trigger branchcache creation:
41 41
42 42 $ hg branches
43 43 default 10:a7949464abda
44 44 $ ls .hg/cache
45 45 branch2-served
46 46 checkisexec (execbit !)
47 47 checklink (symlink !)
48 48 checklink-target (symlink !)
49 49 checknoexec (execbit !)
50 50 manifestfulltextcache (reporevlogstore !)
51 51 rbc-names-v1
52 52 rbc-revs-v1
53 53
54 54 Default operation:
55 55
56 56 $ hg clone . ../b
57 57 updating to branch default
58 58 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
59 59 $ cd ../b
60 60
61 61 Ensure branchcache got copied over:
62 62
63 63 $ ls .hg/cache
64 64 branch2-served
65 65 checkisexec (execbit !)
66 66 checklink (symlink !)
67 67 checklink-target (symlink !)
68 68 rbc-names-v1
69 69 rbc-revs-v1
70 70
71 71 $ cat a
72 72 a
73 73 $ hg verify
74 74 checking changesets
75 75 checking manifests
76 76 crosschecking files in changesets and manifests
77 77 checking files
78 78 checked 11 changesets with 11 changes to 2 files
79 79
80 80 Invalid dest '' must abort:
81 81
82 82 $ hg clone . ''
83 83 abort: empty destination path is not valid
84 84 [255]
85 85
86 86 No update, with debug option:
87 87
88 88 #if hardlink
89 89 $ hg --debug clone -U . ../c --config progress.debug=true
90 linking: 1
91 linking: 2
92 linking: 3
93 linking: 4
94 linking: 5
95 linking: 6
96 linking: 7
97 linking: 8
90 linking: 1 files
91 linking: 2 files
92 linking: 3 files
93 linking: 4 files
94 linking: 5 files
95 linking: 6 files
96 linking: 7 files
97 linking: 8 files
98 98 linked 8 files (reporevlogstore !)
99 linking: 9 (reposimplestore !)
100 linking: 10 (reposimplestore !)
101 linking: 11 (reposimplestore !)
102 linking: 12 (reposimplestore !)
103 linking: 13 (reposimplestore !)
104 linking: 14 (reposimplestore !)
105 linking: 15 (reposimplestore !)
106 linking: 16 (reposimplestore !)
107 linking: 17 (reposimplestore !)
108 linking: 18 (reposimplestore !)
99 linking: 9 files (reposimplestore !)
100 linking: 10 files (reposimplestore !)
101 linking: 11 files (reposimplestore !)
102 linking: 12 files (reposimplestore !)
103 linking: 13 files (reposimplestore !)
104 linking: 14 files (reposimplestore !)
105 linking: 15 files (reposimplestore !)
106 linking: 16 files (reposimplestore !)
107 linking: 17 files (reposimplestore !)
108 linking: 18 files (reposimplestore !)
109 109 linked 18 files (reposimplestore !)
110 110 #else
111 111 $ hg --debug clone -U . ../c --config progress.debug=true
112 linking: 1
113 copying: 2
114 copying: 3
115 copying: 4
116 copying: 5
117 copying: 6
118 copying: 7
119 copying: 8
112 linking: 1 files
113 copying: 2 files
114 copying: 3 files
115 copying: 4 files
116 copying: 5 files
117 copying: 6 files
118 copying: 7 files
119 copying: 8 files
120 120 copied 8 files (reporevlogstore !)
121 copying: 9 (reposimplestore !)
122 copying: 10 (reposimplestore !)
123 copying: 11 (reposimplestore !)
124 copying: 12 (reposimplestore !)
125 copying: 13 (reposimplestore !)
126 copying: 14 (reposimplestore !)
127 copying: 15 (reposimplestore !)
128 copying: 16 (reposimplestore !)
129 copying: 17 (reposimplestore !)
130 copying: 18 (reposimplestore !)
121 copying: 9 files (reposimplestore !)
122 copying: 10 files (reposimplestore !)
123 copying: 11 files (reposimplestore !)
124 copying: 12 files (reposimplestore !)
125 copying: 13 files (reposimplestore !)
126 copying: 14 files (reposimplestore !)
127 copying: 15 files (reposimplestore !)
128 copying: 16 files (reposimplestore !)
129 copying: 17 files (reposimplestore !)
130 copying: 18 files (reposimplestore !)
131 131 copied 18 files (reposimplestore !)
132 132 #endif
133 133 $ cd ../c
134 134
135 135 Ensure branchcache got copied over:
136 136
137 137 $ ls .hg/cache
138 138 branch2-served
139 139 rbc-names-v1
140 140 rbc-revs-v1
141 141
142 142 $ cat a 2>/dev/null || echo "a not present"
143 143 a not present
144 144 $ hg verify
145 145 checking changesets
146 146 checking manifests
147 147 crosschecking files in changesets and manifests
148 148 checking files
149 149 checked 11 changesets with 11 changes to 2 files
150 150
151 151 Default destination:
152 152
153 153 $ mkdir ../d
154 154 $ cd ../d
155 155 $ hg clone ../a
156 156 destination directory: a
157 157 updating to branch default
158 158 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
159 159 $ cd a
160 160 $ hg cat a
161 161 a
162 162 $ cd ../..
163 163
164 164 Check that we drop the 'file:' from the path before writing the .hgrc:
165 165
166 166 $ hg clone file:a e
167 167 updating to branch default
168 168 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 169 $ grep 'file:' e/.hg/hgrc
170 170 [1]
171 171
172 172 Check that path aliases are expanded:
173 173
174 174 $ hg clone -q -U --config 'paths.foobar=a#0' foobar f
175 175 $ hg -R f showconfig paths.default
176 176 $TESTTMP/a#0
177 177
178 178 Use --pull:
179 179
180 180 $ hg clone --pull a g
181 181 requesting all changes
182 182 adding changesets
183 183 adding manifests
184 184 adding file changes
185 185 added 11 changesets with 11 changes to 2 files
186 186 new changesets acb14030fe0a:a7949464abda
187 187 updating to branch default
188 188 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
189 189 $ hg -R g verify
190 190 checking changesets
191 191 checking manifests
192 192 crosschecking files in changesets and manifests
193 193 checking files
194 194 checked 11 changesets with 11 changes to 2 files
195 195
196 196 Invalid dest '' with --pull must abort (issue2528):
197 197
198 198 $ hg clone --pull a ''
199 199 abort: empty destination path is not valid
200 200 [255]
201 201
202 202 Clone to '.':
203 203
204 204 $ mkdir h
205 205 $ cd h
206 206 $ hg clone ../a .
207 207 updating to branch default
208 208 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
209 209 $ cd ..
210 210
211 211
212 212 *** Tests for option -u ***
213 213
214 214 Adding some more history to repo a:
215 215
216 216 $ cd a
217 217 $ hg tag ref1
218 218 $ echo the quick brown fox >a
219 219 $ hg ci -m "hacked default"
220 220 $ hg up ref1
221 221 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
222 222 $ hg branch stable
223 223 marked working directory as branch stable
224 224 (branches are permanent and global, did you want a bookmark?)
225 225 $ echo some text >a
226 226 $ hg ci -m "starting branch stable"
227 227 $ hg tag ref2
228 228 $ echo some more text >a
229 229 $ hg ci -m "another change for branch stable"
230 230 $ hg up ref2
231 231 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
232 232 $ hg parents
233 233 changeset: 13:e8ece76546a6
234 234 branch: stable
235 235 tag: ref2
236 236 parent: 10:a7949464abda
237 237 user: test
238 238 date: Thu Jan 01 00:00:00 1970 +0000
239 239 summary: starting branch stable
240 240
241 241
242 242 Repo a has two heads:
243 243
244 244 $ hg heads
245 245 changeset: 15:0aae7cf88f0d
246 246 branch: stable
247 247 tag: tip
248 248 user: test
249 249 date: Thu Jan 01 00:00:00 1970 +0000
250 250 summary: another change for branch stable
251 251
252 252 changeset: 12:f21241060d6a
253 253 user: test
254 254 date: Thu Jan 01 00:00:00 1970 +0000
255 255 summary: hacked default
256 256
257 257
258 258 $ cd ..
259 259
260 260
261 261 Testing --noupdate with --updaterev (must abort):
262 262
263 263 $ hg clone --noupdate --updaterev 1 a ua
264 264 abort: cannot specify both --noupdate and --updaterev
265 265 [255]
266 266
267 267
268 268 Testing clone -u:
269 269
270 270 $ hg clone -u . a ua
271 271 updating to branch stable
272 272 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
273 273
274 274 Repo ua has both heads:
275 275
276 276 $ hg -R ua heads
277 277 changeset: 15:0aae7cf88f0d
278 278 branch: stable
279 279 tag: tip
280 280 user: test
281 281 date: Thu Jan 01 00:00:00 1970 +0000
282 282 summary: another change for branch stable
283 283
284 284 changeset: 12:f21241060d6a
285 285 user: test
286 286 date: Thu Jan 01 00:00:00 1970 +0000
287 287 summary: hacked default
288 288
289 289
290 290 Same revision checked out in repo a and ua:
291 291
292 292 $ hg -R a parents --template "{node|short}\n"
293 293 e8ece76546a6
294 294 $ hg -R ua parents --template "{node|short}\n"
295 295 e8ece76546a6
296 296
297 297 $ rm -r ua
298 298
299 299
300 300 Testing clone --pull -u:
301 301
302 302 $ hg clone --pull -u . a ua
303 303 requesting all changes
304 304 adding changesets
305 305 adding manifests
306 306 adding file changes
307 307 added 16 changesets with 16 changes to 3 files (+1 heads)
308 308 new changesets acb14030fe0a:0aae7cf88f0d
309 309 updating to branch stable
310 310 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
311 311
312 312 Repo ua has both heads:
313 313
314 314 $ hg -R ua heads
315 315 changeset: 15:0aae7cf88f0d
316 316 branch: stable
317 317 tag: tip
318 318 user: test
319 319 date: Thu Jan 01 00:00:00 1970 +0000
320 320 summary: another change for branch stable
321 321
322 322 changeset: 12:f21241060d6a
323 323 user: test
324 324 date: Thu Jan 01 00:00:00 1970 +0000
325 325 summary: hacked default
326 326
327 327
328 328 Same revision checked out in repo a and ua:
329 329
330 330 $ hg -R a parents --template "{node|short}\n"
331 331 e8ece76546a6
332 332 $ hg -R ua parents --template "{node|short}\n"
333 333 e8ece76546a6
334 334
335 335 $ rm -r ua
336 336
337 337
338 338 Testing clone -u <branch>:
339 339
340 340 $ hg clone -u stable a ua
341 341 updating to branch stable
342 342 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
343 343
344 344 Repo ua has both heads:
345 345
346 346 $ hg -R ua heads
347 347 changeset: 15:0aae7cf88f0d
348 348 branch: stable
349 349 tag: tip
350 350 user: test
351 351 date: Thu Jan 01 00:00:00 1970 +0000
352 352 summary: another change for branch stable
353 353
354 354 changeset: 12:f21241060d6a
355 355 user: test
356 356 date: Thu Jan 01 00:00:00 1970 +0000
357 357 summary: hacked default
358 358
359 359
360 360 Branch 'stable' is checked out:
361 361
362 362 $ hg -R ua parents
363 363 changeset: 15:0aae7cf88f0d
364 364 branch: stable
365 365 tag: tip
366 366 user: test
367 367 date: Thu Jan 01 00:00:00 1970 +0000
368 368 summary: another change for branch stable
369 369
370 370
371 371 $ rm -r ua
372 372
373 373
374 374 Testing default checkout:
375 375
376 376 $ hg clone a ua
377 377 updating to branch default
378 378 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
379 379
380 380 Repo ua has both heads:
381 381
382 382 $ hg -R ua heads
383 383 changeset: 15:0aae7cf88f0d
384 384 branch: stable
385 385 tag: tip
386 386 user: test
387 387 date: Thu Jan 01 00:00:00 1970 +0000
388 388 summary: another change for branch stable
389 389
390 390 changeset: 12:f21241060d6a
391 391 user: test
392 392 date: Thu Jan 01 00:00:00 1970 +0000
393 393 summary: hacked default
394 394
395 395
396 396 Branch 'default' is checked out:
397 397
398 398 $ hg -R ua parents
399 399 changeset: 12:f21241060d6a
400 400 user: test
401 401 date: Thu Jan 01 00:00:00 1970 +0000
402 402 summary: hacked default
403 403
404 404 Test clone with a branch named "@" (issue3677)
405 405
406 406 $ hg -R ua branch @
407 407 marked working directory as branch @
408 408 $ hg -R ua commit -m 'created branch @'
409 409 $ hg clone ua atbranch
410 410 updating to branch default
411 411 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
412 412 $ hg -R atbranch heads
413 413 changeset: 16:798b6d97153e
414 414 branch: @
415 415 tag: tip
416 416 parent: 12:f21241060d6a
417 417 user: test
418 418 date: Thu Jan 01 00:00:00 1970 +0000
419 419 summary: created branch @
420 420
421 421 changeset: 15:0aae7cf88f0d
422 422 branch: stable
423 423 user: test
424 424 date: Thu Jan 01 00:00:00 1970 +0000
425 425 summary: another change for branch stable
426 426
427 427 changeset: 12:f21241060d6a
428 428 user: test
429 429 date: Thu Jan 01 00:00:00 1970 +0000
430 430 summary: hacked default
431 431
432 432 $ hg -R atbranch parents
433 433 changeset: 12:f21241060d6a
434 434 user: test
435 435 date: Thu Jan 01 00:00:00 1970 +0000
436 436 summary: hacked default
437 437
438 438
439 439 $ rm -r ua atbranch
440 440
441 441
442 442 Testing #<branch>:
443 443
444 444 $ hg clone -u . a#stable ua
445 445 adding changesets
446 446 adding manifests
447 447 adding file changes
448 448 added 14 changesets with 14 changes to 3 files
449 449 new changesets acb14030fe0a:0aae7cf88f0d
450 450 updating to branch stable
451 451 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
452 452
453 453 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
454 454
455 455 $ hg -R ua heads
456 456 changeset: 13:0aae7cf88f0d
457 457 branch: stable
458 458 tag: tip
459 459 user: test
460 460 date: Thu Jan 01 00:00:00 1970 +0000
461 461 summary: another change for branch stable
462 462
463 463 changeset: 10:a7949464abda
464 464 user: test
465 465 date: Thu Jan 01 00:00:00 1970 +0000
466 466 summary: test
467 467
468 468
469 469 Same revision checked out in repo a and ua:
470 470
471 471 $ hg -R a parents --template "{node|short}\n"
472 472 e8ece76546a6
473 473 $ hg -R ua parents --template "{node|short}\n"
474 474 e8ece76546a6
475 475
476 476 $ rm -r ua
477 477
478 478
479 479 Testing -u -r <branch>:
480 480
481 481 $ hg clone -u . -r stable a ua
482 482 adding changesets
483 483 adding manifests
484 484 adding file changes
485 485 added 14 changesets with 14 changes to 3 files
486 486 new changesets acb14030fe0a:0aae7cf88f0d
487 487 updating to branch stable
488 488 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
489 489
490 490 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
491 491
492 492 $ hg -R ua heads
493 493 changeset: 13:0aae7cf88f0d
494 494 branch: stable
495 495 tag: tip
496 496 user: test
497 497 date: Thu Jan 01 00:00:00 1970 +0000
498 498 summary: another change for branch stable
499 499
500 500 changeset: 10:a7949464abda
501 501 user: test
502 502 date: Thu Jan 01 00:00:00 1970 +0000
503 503 summary: test
504 504
505 505
506 506 Same revision checked out in repo a and ua:
507 507
508 508 $ hg -R a parents --template "{node|short}\n"
509 509 e8ece76546a6
510 510 $ hg -R ua parents --template "{node|short}\n"
511 511 e8ece76546a6
512 512
513 513 $ rm -r ua
514 514
515 515
516 516 Testing -r <branch>:
517 517
518 518 $ hg clone -r stable a ua
519 519 adding changesets
520 520 adding manifests
521 521 adding file changes
522 522 added 14 changesets with 14 changes to 3 files
523 523 new changesets acb14030fe0a:0aae7cf88f0d
524 524 updating to branch stable
525 525 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
526 526
527 527 Repo ua has branch 'stable' and 'default' (was changed in fd511e9eeea6):
528 528
529 529 $ hg -R ua heads
530 530 changeset: 13:0aae7cf88f0d
531 531 branch: stable
532 532 tag: tip
533 533 user: test
534 534 date: Thu Jan 01 00:00:00 1970 +0000
535 535 summary: another change for branch stable
536 536
537 537 changeset: 10:a7949464abda
538 538 user: test
539 539 date: Thu Jan 01 00:00:00 1970 +0000
540 540 summary: test
541 541
542 542
543 543 Branch 'stable' is checked out:
544 544
545 545 $ hg -R ua parents
546 546 changeset: 13:0aae7cf88f0d
547 547 branch: stable
548 548 tag: tip
549 549 user: test
550 550 date: Thu Jan 01 00:00:00 1970 +0000
551 551 summary: another change for branch stable
552 552
553 553
554 554 $ rm -r ua
555 555
556 556
557 557 Issue2267: Error in 1.6 hg.py: TypeError: 'NoneType' object is not
558 558 iterable in addbranchrevs()
559 559
560 560 $ cat <<EOF > simpleclone.py
561 561 > from mercurial import hg, ui as uimod
562 562 > myui = uimod.ui.load()
563 563 > repo = hg.repository(myui, b'a')
564 564 > hg.clone(myui, {}, repo, dest=b"ua")
565 565 > EOF
566 566
567 567 $ "$PYTHON" simpleclone.py
568 568 updating to branch default
569 569 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
570 570
571 571 $ rm -r ua
572 572
573 573 $ cat <<EOF > branchclone.py
574 574 > from mercurial import extensions, hg, ui as uimod
575 575 > myui = uimod.ui.load()
576 576 > extensions.loadall(myui)
577 577 > repo = hg.repository(myui, b'a')
578 578 > hg.clone(myui, {}, repo, dest=b"ua", branch=[b"stable",])
579 579 > EOF
580 580
581 581 $ "$PYTHON" branchclone.py
582 582 adding changesets
583 583 adding manifests
584 584 adding file changes
585 585 added 14 changesets with 14 changes to 3 files
586 586 new changesets acb14030fe0a:0aae7cf88f0d
587 587 updating to branch stable
588 588 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
589 589 $ rm -r ua
590 590
591 591
592 592 Test clone with special '@' bookmark:
593 593 $ cd a
594 594 $ hg bookmark -r a7949464abda @ # branch point of stable from default
595 595 $ hg clone . ../i
596 596 updating to bookmark @
597 597 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
598 598 $ hg id -i ../i
599 599 a7949464abda
600 600 $ rm -r ../i
601 601
602 602 $ hg bookmark -f -r stable @
603 603 $ hg bookmarks
604 604 @ 15:0aae7cf88f0d
605 605 $ hg clone . ../i
606 606 updating to bookmark @ on branch stable
607 607 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
608 608 $ hg id -i ../i
609 609 0aae7cf88f0d
610 610 $ cd "$TESTTMP"
611 611
612 612
613 613 Testing failures:
614 614
615 615 $ mkdir fail
616 616 $ cd fail
617 617
618 618 No local source
619 619
620 620 $ hg clone a b
621 621 abort: repository a not found!
622 622 [255]
623 623
624 624 No remote source
625 625
626 626 #if windows
627 627 $ hg clone http://$LOCALIP:3121/a b
628 628 abort: error: * (glob)
629 629 [255]
630 630 #else
631 631 $ hg clone http://$LOCALIP:3121/a b
632 632 abort: error: *refused* (glob)
633 633 [255]
634 634 #endif
635 635 $ rm -rf b # work around bug with http clone
636 636
637 637
638 638 #if unix-permissions no-root
639 639
640 640 Inaccessible source
641 641
642 642 $ mkdir a
643 643 $ chmod 000 a
644 644 $ hg clone a b
645 645 abort: Permission denied: *$TESTTMP/fail/a/.hg* (glob)
646 646 [255]
647 647
648 648 Inaccessible destination
649 649
650 650 $ hg init b
651 651 $ cd b
652 652 $ hg clone . ../a
653 653 abort: Permission denied: *../a* (glob)
654 654 [255]
655 655 $ cd ..
656 656 $ chmod 700 a
657 657 $ rm -r a b
658 658
659 659 #endif
660 660
661 661
662 662 #if fifo
663 663
664 664 Source of wrong type
665 665
666 666 $ mkfifo a
667 667 $ hg clone a b
668 668 abort: $ENOTDIR$: *$TESTTMP/fail/a/.hg* (glob)
669 669 [255]
670 670 $ rm a
671 671
672 672 #endif
673 673
674 674 Default destination, same directory
675 675
676 676 $ hg init q
677 677 $ hg clone q
678 678 destination directory: q
679 679 abort: destination 'q' is not empty
680 680 [255]
681 681
682 682 destination directory not empty
683 683
684 684 $ mkdir a
685 685 $ echo stuff > a/a
686 686 $ hg clone q a
687 687 abort: destination 'a' is not empty
688 688 [255]
689 689
690 690
691 691 #if unix-permissions no-root
692 692
693 693 leave existing directory in place after clone failure
694 694
695 695 $ hg init c
696 696 $ cd c
697 697 $ echo c > c
698 698 $ hg commit -A -m test
699 699 adding c
700 700 $ chmod -rx .hg/store/data
701 701 $ cd ..
702 702 $ mkdir d
703 703 $ hg clone c d 2> err
704 704 [255]
705 705 $ test -d d
706 706 $ test -d d/.hg
707 707 [1]
708 708
709 709 re-enable perm to allow deletion
710 710
711 711 $ chmod +rx c/.hg/store/data
712 712
713 713 #endif
714 714
715 715 $ cd ..
716 716
717 717 Test clone from the repository in (emulated) revlog format 0 (issue4203):
718 718
719 719 $ mkdir issue4203
720 720 $ mkdir -p src/.hg
721 721 $ echo foo > src/foo
722 722 $ hg -R src add src/foo
723 723 $ hg -R src commit -m '#0'
724 724 $ hg -R src log -q
725 725 0:e1bab28bca43
726 726 $ hg clone -U -q src dst
727 727 $ hg -R dst log -q
728 728 0:e1bab28bca43
729 729
730 730 Create repositories to test auto sharing functionality
731 731
732 732 $ cat >> $HGRCPATH << EOF
733 733 > [extensions]
734 734 > share=
735 735 > EOF
736 736
737 737 $ hg init empty
738 738 $ hg init source1a
739 739 $ cd source1a
740 740 $ echo initial1 > foo
741 741 $ hg -q commit -A -m initial
742 742 $ echo second > foo
743 743 $ hg commit -m second
744 744 $ cd ..
745 745
746 746 $ hg init filteredrev0
747 747 $ cd filteredrev0
748 748 $ cat >> .hg/hgrc << EOF
749 749 > [experimental]
750 750 > evolution.createmarkers=True
751 751 > EOF
752 752 $ echo initial1 > foo
753 753 $ hg -q commit -A -m initial0
754 754 $ hg -q up -r null
755 755 $ echo initial2 > foo
756 756 $ hg -q commit -A -m initial1
757 757 $ hg debugobsolete c05d5c47a5cf81401869999f3d05f7d699d2b29a e082c1832e09a7d1e78b7fd49a592d372de854c8
758 758 obsoleted 1 changesets
759 759 $ cd ..
760 760
761 761 $ hg -q clone --pull source1a source1b
762 762 $ cd source1a
763 763 $ hg bookmark bookA
764 764 $ echo 1a > foo
765 765 $ hg commit -m 1a
766 766 $ cd ../source1b
767 767 $ hg -q up -r 0
768 768 $ echo head1 > foo
769 769 $ hg commit -m head1
770 770 created new head
771 771 $ hg bookmark head1
772 772 $ hg -q up -r 0
773 773 $ echo head2 > foo
774 774 $ hg commit -m head2
775 775 created new head
776 776 $ hg bookmark head2
777 777 $ hg -q up -r 0
778 778 $ hg branch branch1
779 779 marked working directory as branch branch1
780 780 (branches are permanent and global, did you want a bookmark?)
781 781 $ echo branch1 > foo
782 782 $ hg commit -m branch1
783 783 $ hg -q up -r 0
784 784 $ hg branch branch2
785 785 marked working directory as branch branch2
786 786 $ echo branch2 > foo
787 787 $ hg commit -m branch2
788 788 $ cd ..
789 789 $ hg init source2
790 790 $ cd source2
791 791 $ echo initial2 > foo
792 792 $ hg -q commit -A -m initial2
793 793 $ echo second > foo
794 794 $ hg commit -m second
795 795 $ cd ..
796 796
797 797 Clone with auto share from an empty repo should not result in share
798 798
799 799 $ mkdir share
800 800 $ hg --config share.pool=share clone empty share-empty
801 801 (not using pooled storage: remote appears to be empty)
802 802 updating to branch default
803 803 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
804 804 $ ls share
805 805 $ test -d share-empty/.hg/store
806 806 $ test -f share-empty/.hg/sharedpath
807 807 [1]
808 808
809 809 Clone with auto share from a repo with filtered revision 0 should not result in share
810 810
811 811 $ hg --config share.pool=share clone filteredrev0 share-filtered
812 812 (not using pooled storage: unable to resolve identity of remote)
813 813 requesting all changes
814 814 adding changesets
815 815 adding manifests
816 816 adding file changes
817 817 added 1 changesets with 1 changes to 1 files
818 818 new changesets e082c1832e09
819 819 updating to branch default
820 820 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
821 821
822 822 Clone from repo with content should result in shared store being created
823 823
824 824 $ hg --config share.pool=share clone source1a share-dest1a
825 825 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
826 826 requesting all changes
827 827 adding changesets
828 828 adding manifests
829 829 adding file changes
830 830 added 3 changesets with 3 changes to 1 files
831 831 new changesets b5f04eac9d8f:e5bfe23c0b47
832 832 searching for changes
833 833 no changes found
834 834 adding remote bookmark bookA
835 835 updating working directory
836 836 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
837 837
838 838 The shared repo should have been created
839 839
840 840 $ ls share
841 841 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
842 842
843 843 The destination should point to it
844 844
845 845 $ cat share-dest1a/.hg/sharedpath; echo
846 846 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
847 847
848 848 The destination should have bookmarks
849 849
850 850 $ hg -R share-dest1a bookmarks
851 851 bookA 2:e5bfe23c0b47
852 852
853 853 The default path should be the remote, not the share
854 854
855 855 $ hg -R share-dest1a config paths.default
856 856 $TESTTMP/source1a
857 857
858 858 Clone with existing share dir should result in pull + share
859 859
860 860 $ hg --config share.pool=share clone source1b share-dest1b
861 861 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
862 862 searching for changes
863 863 adding changesets
864 864 adding manifests
865 865 adding file changes
866 866 added 4 changesets with 4 changes to 1 files (+4 heads)
867 867 adding remote bookmark head1
868 868 adding remote bookmark head2
869 869 new changesets 4a8dc1ab4c13:6bacf4683960
870 870 updating working directory
871 871 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
872 872
873 873 $ ls share
874 874 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
875 875
876 876 $ cat share-dest1b/.hg/sharedpath; echo
877 877 $TESTTMP/share/b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1/.hg
878 878
879 879 We only get bookmarks from the remote, not everything in the share
880 880
881 881 $ hg -R share-dest1b bookmarks
882 882 head1 3:4a8dc1ab4c13
883 883 head2 4:99f71071f117
884 884
885 885 Default path should be source, not share.
886 886
887 887 $ hg -R share-dest1b config paths.default
888 888 $TESTTMP/source1b
889 889
890 890 Checked out revision should be head of default branch
891 891
892 892 $ hg -R share-dest1b log -r .
893 893 changeset: 4:99f71071f117
894 894 bookmark: head2
895 895 parent: 0:b5f04eac9d8f
896 896 user: test
897 897 date: Thu Jan 01 00:00:00 1970 +0000
898 898 summary: head2
899 899
900 900
901 901 Clone from unrelated repo should result in new share
902 902
903 903 $ hg --config share.pool=share clone source2 share-dest2
904 904 (sharing from new pooled repository 22aeff664783fd44c6d9b435618173c118c3448e)
905 905 requesting all changes
906 906 adding changesets
907 907 adding manifests
908 908 adding file changes
909 909 added 2 changesets with 2 changes to 1 files
910 910 new changesets 22aeff664783:63cf6c3dba4a
911 911 searching for changes
912 912 no changes found
913 913 updating working directory
914 914 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
915 915
916 916 $ ls share
917 917 22aeff664783fd44c6d9b435618173c118c3448e
918 918 b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1
919 919
920 920 remote naming mode works as advertised
921 921
922 922 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1a share-remote1a
923 923 (sharing from new pooled repository 195bb1fcdb595c14a6c13e0269129ed78f6debde)
924 924 requesting all changes
925 925 adding changesets
926 926 adding manifests
927 927 adding file changes
928 928 added 3 changesets with 3 changes to 1 files
929 929 new changesets b5f04eac9d8f:e5bfe23c0b47
930 930 searching for changes
931 931 no changes found
932 932 adding remote bookmark bookA
933 933 updating working directory
934 934 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
935 935
936 936 $ ls shareremote
937 937 195bb1fcdb595c14a6c13e0269129ed78f6debde
938 938
939 939 $ hg --config share.pool=shareremote --config share.poolnaming=remote clone source1b share-remote1b
940 940 (sharing from new pooled repository c0d4f83847ca2a873741feb7048a45085fd47c46)
941 941 requesting all changes
942 942 adding changesets
943 943 adding manifests
944 944 adding file changes
945 945 added 6 changesets with 6 changes to 1 files (+4 heads)
946 946 new changesets b5f04eac9d8f:6bacf4683960
947 947 searching for changes
948 948 no changes found
949 949 adding remote bookmark head1
950 950 adding remote bookmark head2
951 951 updating working directory
952 952 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
953 953
954 954 $ ls shareremote
955 955 195bb1fcdb595c14a6c13e0269129ed78f6debde
956 956 c0d4f83847ca2a873741feb7048a45085fd47c46
957 957
958 958 request to clone a single revision is respected in sharing mode
959 959
960 960 $ hg --config share.pool=sharerevs clone -r 4a8dc1ab4c13 source1b share-1arev
961 961 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
962 962 adding changesets
963 963 adding manifests
964 964 adding file changes
965 965 added 2 changesets with 2 changes to 1 files
966 966 new changesets b5f04eac9d8f:4a8dc1ab4c13
967 967 no changes found
968 968 adding remote bookmark head1
969 969 updating working directory
970 970 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
971 971
972 972 $ hg -R share-1arev log -G
973 973 @ changeset: 1:4a8dc1ab4c13
974 974 | bookmark: head1
975 975 | tag: tip
976 976 | user: test
977 977 | date: Thu Jan 01 00:00:00 1970 +0000
978 978 | summary: head1
979 979 |
980 980 o changeset: 0:b5f04eac9d8f
981 981 user: test
982 982 date: Thu Jan 01 00:00:00 1970 +0000
983 983 summary: initial
984 984
985 985
986 986 making another clone should only pull down requested rev
987 987
988 988 $ hg --config share.pool=sharerevs clone -r 99f71071f117 source1b share-1brev
989 989 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
990 990 searching for changes
991 991 adding changesets
992 992 adding manifests
993 993 adding file changes
994 994 added 1 changesets with 1 changes to 1 files (+1 heads)
995 995 adding remote bookmark head1
996 996 adding remote bookmark head2
997 997 new changesets 99f71071f117
998 998 updating working directory
999 999 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1000 1000
1001 1001 $ hg -R share-1brev log -G
1002 1002 @ changeset: 2:99f71071f117
1003 1003 | bookmark: head2
1004 1004 | tag: tip
1005 1005 | parent: 0:b5f04eac9d8f
1006 1006 | user: test
1007 1007 | date: Thu Jan 01 00:00:00 1970 +0000
1008 1008 | summary: head2
1009 1009 |
1010 1010 | o changeset: 1:4a8dc1ab4c13
1011 1011 |/ bookmark: head1
1012 1012 | user: test
1013 1013 | date: Thu Jan 01 00:00:00 1970 +0000
1014 1014 | summary: head1
1015 1015 |
1016 1016 o changeset: 0:b5f04eac9d8f
1017 1017 user: test
1018 1018 date: Thu Jan 01 00:00:00 1970 +0000
1019 1019 summary: initial
1020 1020
1021 1021
1022 1022 Request to clone a single branch is respected in sharing mode
1023 1023
1024 1024 $ hg --config share.pool=sharebranch clone -b branch1 source1b share-1bbranch1
1025 1025 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1026 1026 adding changesets
1027 1027 adding manifests
1028 1028 adding file changes
1029 1029 added 2 changesets with 2 changes to 1 files
1030 1030 new changesets b5f04eac9d8f:5f92a6c1a1b1
1031 1031 no changes found
1032 1032 updating working directory
1033 1033 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1034 1034
1035 1035 $ hg -R share-1bbranch1 log -G
1036 1036 o changeset: 1:5f92a6c1a1b1
1037 1037 | branch: branch1
1038 1038 | tag: tip
1039 1039 | user: test
1040 1040 | date: Thu Jan 01 00:00:00 1970 +0000
1041 1041 | summary: branch1
1042 1042 |
1043 1043 @ changeset: 0:b5f04eac9d8f
1044 1044 user: test
1045 1045 date: Thu Jan 01 00:00:00 1970 +0000
1046 1046 summary: initial
1047 1047
1048 1048
1049 1049 $ hg --config share.pool=sharebranch clone -b branch2 source1b share-1bbranch2
1050 1050 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1051 1051 searching for changes
1052 1052 adding changesets
1053 1053 adding manifests
1054 1054 adding file changes
1055 1055 added 1 changesets with 1 changes to 1 files (+1 heads)
1056 1056 new changesets 6bacf4683960
1057 1057 updating working directory
1058 1058 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1059 1059
1060 1060 $ hg -R share-1bbranch2 log -G
1061 1061 o changeset: 2:6bacf4683960
1062 1062 | branch: branch2
1063 1063 | tag: tip
1064 1064 | parent: 0:b5f04eac9d8f
1065 1065 | user: test
1066 1066 | date: Thu Jan 01 00:00:00 1970 +0000
1067 1067 | summary: branch2
1068 1068 |
1069 1069 | o changeset: 1:5f92a6c1a1b1
1070 1070 |/ branch: branch1
1071 1071 | user: test
1072 1072 | date: Thu Jan 01 00:00:00 1970 +0000
1073 1073 | summary: branch1
1074 1074 |
1075 1075 @ changeset: 0:b5f04eac9d8f
1076 1076 user: test
1077 1077 date: Thu Jan 01 00:00:00 1970 +0000
1078 1078 summary: initial
1079 1079
1080 1080
1081 1081 -U is respected in share clone mode
1082 1082
1083 1083 $ hg --config share.pool=share clone -U source1a share-1anowc
1084 1084 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1085 1085 searching for changes
1086 1086 no changes found
1087 1087 adding remote bookmark bookA
1088 1088
1089 1089 $ ls share-1anowc
1090 1090
1091 1091 Test that auto sharing doesn't cause failure of "hg clone local remote"
1092 1092
1093 1093 $ cd $TESTTMP
1094 1094 $ hg -R a id -r 0
1095 1095 acb14030fe0a
1096 1096 $ hg id -R remote -r 0
1097 1097 abort: repository remote not found!
1098 1098 [255]
1099 1099 $ hg --config share.pool=share -q clone -e "\"$PYTHON\" \"$TESTDIR/dummyssh\"" a ssh://user@dummy/remote
1100 1100 $ hg -R remote id -r 0
1101 1101 acb14030fe0a
1102 1102
1103 1103 Cloning into pooled storage doesn't race (issue5104)
1104 1104
1105 1105 $ HGPOSTLOCKDELAY=2.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace1 > race1.log 2>&1 &
1106 1106 $ HGPRELOCKDELAY=1.0 hg --config share.pool=racepool --config extensions.lockdelay=$TESTDIR/lockdelay.py clone source1a share-destrace2 > race2.log 2>&1
1107 1107 $ wait
1108 1108
1109 1109 $ hg -R share-destrace1 log -r tip
1110 1110 changeset: 2:e5bfe23c0b47
1111 1111 bookmark: bookA
1112 1112 tag: tip
1113 1113 user: test
1114 1114 date: Thu Jan 01 00:00:00 1970 +0000
1115 1115 summary: 1a
1116 1116
1117 1117
1118 1118 $ hg -R share-destrace2 log -r tip
1119 1119 changeset: 2:e5bfe23c0b47
1120 1120 bookmark: bookA
1121 1121 tag: tip
1122 1122 user: test
1123 1123 date: Thu Jan 01 00:00:00 1970 +0000
1124 1124 summary: 1a
1125 1125
1126 1126 One repo should be new, the other should be shared from the pool. We
1127 1127 don't care which is which, so we just make sure we always print the
1128 1128 one containing "new pooled" first, then the one containing "existing
1129 1129 pooled".
1130 1130
1131 1131 $ (grep 'new pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1132 1132 (sharing from new pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1133 1133 requesting all changes
1134 1134 adding changesets
1135 1135 adding manifests
1136 1136 adding file changes
1137 1137 added 3 changesets with 3 changes to 1 files
1138 1138 new changesets b5f04eac9d8f:e5bfe23c0b47
1139 1139 searching for changes
1140 1140 no changes found
1141 1141 adding remote bookmark bookA
1142 1142 updating working directory
1143 1143 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1144 1144
1145 1145 $ (grep 'existing pooled' race1.log > /dev/null && cat race1.log || cat race2.log) | grep -v lock
1146 1146 (sharing from existing pooled repository b5f04eac9d8f7a6a9fcb070243cccea7dc5ea0c1)
1147 1147 searching for changes
1148 1148 no changes found
1149 1149 adding remote bookmark bookA
1150 1150 updating working directory
1151 1151 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1152 1152
1153 1153 SEC: check for unsafe ssh url
1154 1154
1155 1155 $ cat >> $HGRCPATH << EOF
1156 1156 > [ui]
1157 1157 > ssh = sh -c "read l; read l; read l"
1158 1158 > EOF
1159 1159
1160 1160 $ hg clone 'ssh://-oProxyCommand=touch${IFS}owned/path'
1161 1161 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1162 1162 [255]
1163 1163 $ hg clone 'ssh://%2DoProxyCommand=touch${IFS}owned/path'
1164 1164 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch${IFS}owned/path'
1165 1165 [255]
1166 1166 $ hg clone 'ssh://fakehost|touch%20owned/path'
1167 1167 abort: no suitable response from remote hg!
1168 1168 [255]
1169 1169 $ hg clone 'ssh://fakehost%7Ctouch%20owned/path'
1170 1170 abort: no suitable response from remote hg!
1171 1171 [255]
1172 1172
1173 1173 $ hg clone 'ssh://-oProxyCommand=touch owned%20foo@example.com/nonexistent/path'
1174 1174 abort: potentially unsafe url: 'ssh://-oProxyCommand=touch owned foo@example.com/nonexistent/path'
1175 1175 [255]
1176 1176
1177 1177 #if windows
1178 1178 $ hg clone "ssh://%26touch%20owned%20/" --debug
1179 1179 running sh -c "read l; read l; read l" "&touch owned " "hg -R . serve --stdio"
1180 1180 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1181 1181 sending hello command
1182 1182 sending between command
1183 1183 abort: no suitable response from remote hg!
1184 1184 [255]
1185 1185 $ hg clone "ssh://example.com:%26touch%20owned%20/" --debug
1186 1186 running sh -c "read l; read l; read l" -p "&touch owned " example.com "hg -R . serve --stdio"
1187 1187 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1188 1188 sending hello command
1189 1189 sending between command
1190 1190 abort: no suitable response from remote hg!
1191 1191 [255]
1192 1192 #else
1193 1193 $ hg clone "ssh://%3btouch%20owned%20/" --debug
1194 1194 running sh -c "read l; read l; read l" ';touch owned ' 'hg -R . serve --stdio'
1195 1195 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1196 1196 sending hello command
1197 1197 sending between command
1198 1198 abort: no suitable response from remote hg!
1199 1199 [255]
1200 1200 $ hg clone "ssh://example.com:%3btouch%20owned%20/" --debug
1201 1201 running sh -c "read l; read l; read l" -p ';touch owned ' example.com 'hg -R . serve --stdio'
1202 1202 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1203 1203 sending hello command
1204 1204 sending between command
1205 1205 abort: no suitable response from remote hg!
1206 1206 [255]
1207 1207 #endif
1208 1208
1209 1209 $ hg clone "ssh://v-alid.example.com/" --debug
1210 1210 running sh -c "read l; read l; read l" v-alid\.example\.com ['"]hg -R \. serve --stdio['"] (re)
1211 1211 sending upgrade request: * proto=exp-ssh-v2-0003 (glob) (sshv2 !)
1212 1212 sending hello command
1213 1213 sending between command
1214 1214 abort: no suitable response from remote hg!
1215 1215 [255]
1216 1216
1217 1217 We should not have created a file named owned - if it exists, the
1218 1218 attack succeeded.
1219 1219 $ if test -f owned; then echo 'you got owned'; fi
1220 1220
1221 1221 Cloning without fsmonitor enabled does not print a warning for small repos
1222 1222
1223 1223 $ hg clone a fsmonitor-default
1224 1224 updating to bookmark @ on branch stable
1225 1225 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1226 1226
1227 1227 Lower the warning threshold to simulate a large repo
1228 1228
1229 1229 $ cat >> $HGRCPATH << EOF
1230 1230 > [fsmonitor]
1231 1231 > warn_update_file_count = 2
1232 1232 > EOF
1233 1233
1234 1234 We should see a warning about no fsmonitor on supported platforms
1235 1235
1236 1236 #if linuxormacos no-fsmonitor
1237 1237 $ hg clone a nofsmonitor
1238 1238 updating to bookmark @ on branch stable
1239 1239 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1240 1240 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1241 1241 #else
1242 1242 $ hg clone a nofsmonitor
1243 1243 updating to bookmark @ on branch stable
1244 1244 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1245 1245 #endif
1246 1246
1247 1247 We should not see warning about fsmonitor when it is enabled
1248 1248
1249 1249 #if fsmonitor
1250 1250 $ hg clone a fsmonitor-enabled
1251 1251 updating to bookmark @ on branch stable
1252 1252 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1253 1253 #endif
1254 1254
1255 1255 We can disable the fsmonitor warning
1256 1256
1257 1257 $ hg --config fsmonitor.warn_when_unused=false clone a fsmonitor-disable-warning
1258 1258 updating to bookmark @ on branch stable
1259 1259 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1260 1260
1261 1261 Loaded fsmonitor but disabled in config should still print warning
1262 1262
1263 1263 #if linuxormacos fsmonitor
1264 1264 $ hg --config fsmonitor.mode=off clone a fsmonitor-mode-off
1265 1265 updating to bookmark @ on branch stable
1266 1266 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (fsmonitor !)
1267 1267 3 files updated, 0 files merged, 0 files removed, 0 files unresolved
1268 1268 #endif
1269 1269
1270 1270 Warning not printed if working directory isn't empty
1271 1271
1272 1272 $ hg -q clone a fsmonitor-update
1273 1273 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor") (?)
1274 1274 $ cd fsmonitor-update
1275 1275 $ hg up acb14030fe0a
1276 1276 1 files updated, 0 files merged, 2 files removed, 0 files unresolved
1277 1277 (leaving bookmark @)
1278 1278 $ hg up cf0fe1914066
1279 1279 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
1280 1280
1281 1281 `hg update` from null revision also prints
1282 1282
1283 1283 $ hg up null
1284 1284 0 files updated, 0 files merged, 2 files removed, 0 files unresolved
1285 1285
1286 1286 #if linuxormacos no-fsmonitor
1287 1287 $ hg up cf0fe1914066
1288 1288 (warning: large working directory being used without fsmonitor enabled; enable fsmonitor to improve performance; see "hg help -e fsmonitor")
1289 1289 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1290 1290 #else
1291 1291 $ hg up cf0fe1914066
1292 1292 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
1293 1293 #endif
1294 1294
1295 1295 $ cd ..
1296 1296
@@ -1,432 +1,432
1 1 #require hardlink reporevlogstore
2 2
3 3 $ cat > nlinks.py <<EOF
4 4 > from __future__ import print_function
5 5 > import sys
6 6 > from mercurial import util
7 7 > for f in sorted(sys.stdin.readlines()):
8 8 > f = f[:-1]
9 9 > print(util.nlinks(f), f)
10 10 > EOF
11 11
12 12 $ nlinksdir()
13 13 > {
14 14 > find "$@" -type f | "$PYTHON" $TESTTMP/nlinks.py
15 15 > }
16 16
17 17 Some implementations of cp can't create hardlinks (replaces 'cp -al' on Linux):
18 18
19 19 $ cat > linkcp.py <<EOF
20 20 > from __future__ import absolute_import
21 21 > import sys
22 22 > from mercurial import pycompat, util
23 23 > util.copyfiles(pycompat.fsencode(sys.argv[1]),
24 24 > pycompat.fsencode(sys.argv[2]), hardlink=True)
25 25 > EOF
26 26
27 27 $ linkcp()
28 28 > {
29 29 > "$PYTHON" $TESTTMP/linkcp.py $1 $2
30 30 > }
31 31
32 32 Prepare repo r1:
33 33
34 34 $ hg init r1
35 35 $ cd r1
36 36
37 37 $ echo c1 > f1
38 38 $ hg add f1
39 39 $ hg ci -m0
40 40
41 41 $ mkdir d1
42 42 $ cd d1
43 43 $ echo c2 > f2
44 44 $ hg add f2
45 45 $ hg ci -m1
46 46 $ cd ../..
47 47
48 48 $ nlinksdir r1/.hg/store
49 49 1 r1/.hg/store/00changelog.i
50 50 1 r1/.hg/store/00manifest.i
51 51 1 r1/.hg/store/data/d1/f2.i
52 52 1 r1/.hg/store/data/f1.i
53 53 1 r1/.hg/store/fncache (repofncache !)
54 54 1 r1/.hg/store/phaseroots
55 55 1 r1/.hg/store/undo
56 56 1 r1/.hg/store/undo.backup.fncache (repofncache !)
57 57 1 r1/.hg/store/undo.backupfiles
58 58 1 r1/.hg/store/undo.phaseroots
59 59
60 60
61 61 Create hardlinked clone r2:
62 62
63 63 $ hg clone -U --debug r1 r2 --config progress.debug=true
64 linking: 1
65 linking: 2
66 linking: 3
67 linking: 4
68 linking: 5
69 linking: 6
70 linking: 7
64 linking: 1 files
65 linking: 2 files
66 linking: 3 files
67 linking: 4 files
68 linking: 5 files
69 linking: 6 files
70 linking: 7 files
71 71 linked 7 files
72 72
73 73 Create non-hardlinked clone r3:
74 74
75 75 $ hg clone --pull r1 r3
76 76 requesting all changes
77 77 adding changesets
78 78 adding manifests
79 79 adding file changes
80 80 added 2 changesets with 2 changes to 2 files
81 81 new changesets 40d85e9847f2:7069c422939c
82 82 updating to branch default
83 83 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
84 84
85 85
86 86 Repos r1 and r2 should now contain hardlinked files:
87 87
88 88 $ nlinksdir r1/.hg/store
89 89 2 r1/.hg/store/00changelog.i
90 90 2 r1/.hg/store/00manifest.i
91 91 2 r1/.hg/store/data/d1/f2.i
92 92 2 r1/.hg/store/data/f1.i
93 93 2 r1/.hg/store/fncache (repofncache !)
94 94 1 r1/.hg/store/phaseroots
95 95 1 r1/.hg/store/undo
96 96 1 r1/.hg/store/undo.backup.fncache (repofncache !)
97 97 1 r1/.hg/store/undo.backupfiles
98 98 1 r1/.hg/store/undo.phaseroots
99 99
100 100 $ nlinksdir r2/.hg/store
101 101 2 r2/.hg/store/00changelog.i
102 102 2 r2/.hg/store/00manifest.i
103 103 2 r2/.hg/store/data/d1/f2.i
104 104 2 r2/.hg/store/data/f1.i
105 105 2 r2/.hg/store/fncache (repofncache !)
106 106
107 107 Repo r3 should not be hardlinked:
108 108
109 109 $ nlinksdir r3/.hg/store
110 110 1 r3/.hg/store/00changelog.i
111 111 1 r3/.hg/store/00manifest.i
112 112 1 r3/.hg/store/data/d1/f2.i
113 113 1 r3/.hg/store/data/f1.i
114 114 1 r3/.hg/store/fncache (repofncache !)
115 115 1 r3/.hg/store/phaseroots
116 116 1 r3/.hg/store/undo
117 117 1 r3/.hg/store/undo.backupfiles
118 118 1 r3/.hg/store/undo.phaseroots
119 119
120 120
121 121 Create a non-inlined filelog in r3:
122 122
123 123 $ cd r3/d1
124 124 >>> f = open('data1', 'wb')
125 125 >>> for x in range(10000):
126 126 ... f.write(b"%d\n" % x) and None
127 127 >>> f.close()
128 128 $ for j in 0 1 2 3 4 5 6 7 8 9; do
129 129 > cat data1 >> f2
130 130 > hg commit -m$j
131 131 > done
132 132 $ cd ../..
133 133
134 134 $ nlinksdir r3/.hg/store
135 135 1 r3/.hg/store/00changelog.i
136 136 1 r3/.hg/store/00manifest.i
137 137 1 r3/.hg/store/data/d1/f2.d
138 138 1 r3/.hg/store/data/d1/f2.i
139 139 1 r3/.hg/store/data/f1.i
140 140 1 r3/.hg/store/fncache (repofncache !)
141 141 1 r3/.hg/store/phaseroots
142 142 1 r3/.hg/store/undo
143 143 1 r3/.hg/store/undo.backup.fncache (repofncache !)
144 144 1 r3/.hg/store/undo.backup.phaseroots
145 145 1 r3/.hg/store/undo.backupfiles
146 146 1 r3/.hg/store/undo.phaseroots
147 147
148 148 Push to repo r1 should break up most hardlinks in r2:
149 149
150 150 $ hg -R r2 verify
151 151 checking changesets
152 152 checking manifests
153 153 crosschecking files in changesets and manifests
154 154 checking files
155 155 checked 2 changesets with 2 changes to 2 files
156 156
157 157 $ cd r3
158 158 $ hg push
159 159 pushing to $TESTTMP/r1
160 160 searching for changes
161 161 adding changesets
162 162 adding manifests
163 163 adding file changes
164 164 added 10 changesets with 10 changes to 1 files
165 165
166 166 $ cd ..
167 167
168 168 $ nlinksdir r2/.hg/store
169 169 1 r2/.hg/store/00changelog.i
170 170 1 r2/.hg/store/00manifest.i
171 171 1 r2/.hg/store/data/d1/f2.i
172 172 2 r2/.hg/store/data/f1.i
173 173 [12] r2/\.hg/store/fncache (re) (repofncache !)
174 174
175 175 #if hardlink-whitelisted repofncache
176 176 $ nlinksdir r2/.hg/store/fncache
177 177 2 r2/.hg/store/fncache
178 178 #endif
179 179
180 180 $ hg -R r2 verify
181 181 checking changesets
182 182 checking manifests
183 183 crosschecking files in changesets and manifests
184 184 checking files
185 185 checked 2 changesets with 2 changes to 2 files
186 186
187 187
188 188 $ cd r1
189 189 $ hg up
190 190 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
191 191
192 192 Committing a change to f1 in r1 must break up hardlink f1.i in r2:
193 193
194 194 $ echo c1c1 >> f1
195 195 $ hg ci -m00
196 196 $ cd ..
197 197
198 198 $ nlinksdir r2/.hg/store
199 199 1 r2/.hg/store/00changelog.i
200 200 1 r2/.hg/store/00manifest.i
201 201 1 r2/.hg/store/data/d1/f2.i
202 202 1 r2/.hg/store/data/f1.i
203 203 [12] r2/\.hg/store/fncache (re) (repofncache !)
204 204
205 205 #if hardlink-whitelisted repofncache
206 206 $ nlinksdir r2/.hg/store/fncache
207 207 2 r2/.hg/store/fncache
208 208 #endif
209 209
210 210 Create a file whose exec permissions we will change
211 211 $ cd r3
212 212 $ echo "echo hello world" > f3
213 213 $ hg add f3
214 214 $ hg ci -mf3
215 215 $ cd ..
216 216
217 217 $ cd r3
218 218 $ hg tip --template '{rev}:{node|short}\n'
219 219 12:d3b77733a28a
220 220 $ echo bla > f1
221 221 $ chmod +x f3
222 222 $ hg ci -m1
223 223 $ cd ..
224 224
225 225 Create hardlinked copy r4 of r3 (on Linux, we would call 'cp -al'):
226 226
227 227 $ linkcp r3 r4
228 228
229 229 'checklink' is produced by hardlinking a symlink, which is undefined whether
230 230 the symlink should be followed or not. It does behave differently on Linux and
231 231 BSD. Just remove it so the test pass on both platforms.
232 232
233 233 $ rm -f r4/.hg/cache/checklink
234 234
235 235 r4 has hardlinks in the working dir (not just inside .hg):
236 236
237 237 $ nlinksdir r4
238 238 2 r4/.hg/00changelog.i
239 239 2 r4/.hg/branch
240 240 2 r4/.hg/cache/branch2-base
241 241 2 r4/.hg/cache/branch2-served
242 242 2 r4/.hg/cache/checkisexec (execbit !)
243 243 ? r4/.hg/cache/checklink-target (glob) (symlink !)
244 244 2 r4/.hg/cache/checknoexec (execbit !)
245 245 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
246 246 2 r4/.hg/cache/rbc-names-v1
247 247 2 r4/.hg/cache/rbc-revs-v1
248 248 2 r4/.hg/dirstate
249 249 2 r4/.hg/fsmonitor.state (fsmonitor !)
250 250 2 r4/.hg/hgrc
251 251 2 r4/.hg/last-message.txt
252 252 2 r4/.hg/requires
253 253 2 r4/.hg/store/00changelog.i
254 254 2 r4/.hg/store/00manifest.i
255 255 2 r4/.hg/store/data/d1/f2.d
256 256 2 r4/.hg/store/data/d1/f2.i
257 257 2 r4/.hg/store/data/f1.i
258 258 2 r4/.hg/store/data/f3.i
259 259 2 r4/.hg/store/fncache (repofncache !)
260 260 2 r4/.hg/store/phaseroots
261 261 2 r4/.hg/store/undo
262 262 2 r4/.hg/store/undo.backup.fncache (repofncache !)
263 263 2 r4/.hg/store/undo.backup.phaseroots
264 264 2 r4/.hg/store/undo.backupfiles
265 265 2 r4/.hg/store/undo.phaseroots
266 266 [24] r4/\.hg/undo\.backup\.dirstate (re)
267 267 2 r4/.hg/undo.bookmarks
268 268 2 r4/.hg/undo.branch
269 269 2 r4/.hg/undo.desc
270 270 [24] r4/\.hg/undo\.dirstate (re)
271 271 2 r4/d1/data1
272 272 2 r4/d1/f2
273 273 2 r4/f1
274 274 2 r4/f3
275 275
276 276 Update back to revision 12 in r4 should break hardlink of file f1 and f3:
277 277 #if hardlink-whitelisted
278 278 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
279 279 4 r4/.hg/undo.backup.dirstate
280 280 4 r4/.hg/undo.dirstate
281 281 #endif
282 282
283 283
284 284 $ hg -R r4 up 12
285 285 2 files updated, 0 files merged, 0 files removed, 0 files unresolved (execbit !)
286 286 1 files updated, 0 files merged, 0 files removed, 0 files unresolved (no-execbit !)
287 287
288 288 $ nlinksdir r4
289 289 2 r4/.hg/00changelog.i
290 290 1 r4/.hg/branch
291 291 2 r4/.hg/cache/branch2-base
292 292 2 r4/.hg/cache/branch2-served
293 293 2 r4/.hg/cache/checkisexec (execbit !)
294 294 2 r4/.hg/cache/checklink-target (symlink !)
295 295 2 r4/.hg/cache/checknoexec (execbit !)
296 296 2 r4/.hg/cache/manifestfulltextcache (reporevlogstore !)
297 297 2 r4/.hg/cache/rbc-names-v1
298 298 2 r4/.hg/cache/rbc-revs-v1
299 299 1 r4/.hg/dirstate
300 300 1 r4/.hg/fsmonitor.state (fsmonitor !)
301 301 2 r4/.hg/hgrc
302 302 2 r4/.hg/last-message.txt
303 303 2 r4/.hg/requires
304 304 2 r4/.hg/store/00changelog.i
305 305 2 r4/.hg/store/00manifest.i
306 306 2 r4/.hg/store/data/d1/f2.d
307 307 2 r4/.hg/store/data/d1/f2.i
308 308 2 r4/.hg/store/data/f1.i
309 309 2 r4/.hg/store/data/f3.i
310 310 2 r4/.hg/store/fncache
311 311 2 r4/.hg/store/phaseroots
312 312 2 r4/.hg/store/undo
313 313 2 r4/.hg/store/undo.backup.fncache (repofncache !)
314 314 2 r4/.hg/store/undo.backup.phaseroots
315 315 2 r4/.hg/store/undo.backupfiles
316 316 2 r4/.hg/store/undo.phaseroots
317 317 [24] r4/\.hg/undo\.backup\.dirstate (re)
318 318 2 r4/.hg/undo.bookmarks
319 319 2 r4/.hg/undo.branch
320 320 2 r4/.hg/undo.desc
321 321 [24] r4/\.hg/undo\.dirstate (re)
322 322 2 r4/d1/data1
323 323 2 r4/d1/f2
324 324 1 r4/f1
325 325 1 r4/f3 (execbit !)
326 326 2 r4/f3 (no-execbit !)
327 327
328 328 #if hardlink-whitelisted
329 329 $ nlinksdir r4/.hg/undo.backup.dirstate r4/.hg/undo.dirstate
330 330 4 r4/.hg/undo.backup.dirstate
331 331 4 r4/.hg/undo.dirstate
332 332 #endif
333 333
334 334 Test hardlinking outside hg:
335 335
336 336 $ mkdir x
337 337 $ echo foo > x/a
338 338
339 339 $ linkcp x y
340 340 $ echo bar >> y/a
341 341
342 342 No diff if hardlink:
343 343
344 344 $ diff x/a y/a
345 345
346 346 Test mq hardlinking:
347 347
348 348 $ echo "[extensions]" >> $HGRCPATH
349 349 $ echo "mq=" >> $HGRCPATH
350 350
351 351 $ hg init a
352 352 $ cd a
353 353
354 354 $ hg qimport -n foo - << EOF
355 355 > # HG changeset patch
356 356 > # Date 1 0
357 357 > diff -r 2588a8b53d66 a
358 358 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
359 359 > +++ b/a Wed Jul 23 15:54:29 2008 +0200
360 360 > @@ -0,0 +1,1 @@
361 361 > +a
362 362 > EOF
363 363 adding foo to series file
364 364
365 365 $ hg qpush
366 366 applying foo
367 367 now at: foo
368 368
369 369 $ cd ..
370 370 $ linkcp a b
371 371 $ cd b
372 372
373 373 $ hg qimport -n bar - << EOF
374 374 > # HG changeset patch
375 375 > # Date 2 0
376 376 > diff -r 2588a8b53d66 a
377 377 > --- /dev/null Thu Jan 01 00:00:00 1970 +0000
378 378 > +++ b/b Wed Jul 23 15:54:29 2008 +0200
379 379 > @@ -0,0 +1,1 @@
380 380 > +b
381 381 > EOF
382 382 adding bar to series file
383 383
384 384 $ hg qpush
385 385 applying bar
386 386 now at: bar
387 387
388 388 $ cat .hg/patches/status
389 389 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
390 390 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c:bar
391 391
392 392 $ cat .hg/patches/series
393 393 foo
394 394 bar
395 395
396 396 $ cat ../a/.hg/patches/status
397 397 430ed4828a74fa4047bc816a25500f7472ab4bfe:foo
398 398
399 399 $ cat ../a/.hg/patches/series
400 400 foo
401 401
402 402 Test tags hardlinking:
403 403
404 404 $ hg qdel -r qbase:qtip
405 405 patch foo finalized without changeset message
406 406 patch bar finalized without changeset message
407 407
408 408 $ hg tag -l lfoo
409 409 $ hg tag foo
410 410
411 411 $ cd ..
412 412 $ linkcp b c
413 413 $ cd c
414 414
415 415 $ hg tag -l -r 0 lbar
416 416 $ hg tag -r 0 bar
417 417
418 418 $ cat .hgtags
419 419 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
420 420 430ed4828a74fa4047bc816a25500f7472ab4bfe bar
421 421
422 422 $ cat .hg/localtags
423 423 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
424 424 430ed4828a74fa4047bc816a25500f7472ab4bfe lbar
425 425
426 426 $ cat ../b/.hgtags
427 427 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c foo
428 428
429 429 $ cat ../b/.hg/localtags
430 430 4e7abb4840c46a910f6d7b4d3c3fc7e5209e684c lfoo
431 431
432 432 $ cd ..
@@ -1,167 +1,167
1 1 test --time
2 2
3 3 $ hg --time help -q help 2>&1 | grep time > /dev/null
4 4 $ hg init a
5 5 $ cd a
6 6
7 7 Function to check that statprof ran
8 8 $ statprofran () {
9 9 > egrep 'Sample count:|No samples recorded' > /dev/null
10 10 > }
11 11
12 12 test --profile
13 13
14 14 $ hg st --profile 2>&1 | statprofran
15 15
16 16 Abbreviated version
17 17
18 18 $ hg st --prof 2>&1 | statprofran
19 19
20 20 In alias
21 21
22 22 $ hg --config "alias.profst=status --profile" profst 2>&1 | statprofran
23 23
24 24 #if lsprof
25 25
26 26 $ prof='hg --config profiling.type=ls --profile'
27 27
28 28 $ $prof st 2>../out
29 29 $ grep CallCount ../out > /dev/null || cat ../out
30 30
31 31 $ $prof --config profiling.output=../out st
32 32 $ grep CallCount ../out > /dev/null || cat ../out
33 33
34 34 $ $prof --config profiling.output=blackbox --config extensions.blackbox= st
35 35 $ grep CallCount .hg/blackbox.log > /dev/null || cat .hg/blackbox.log
36 36
37 37 $ $prof --config profiling.format=text st 2>../out
38 38 $ grep CallCount ../out > /dev/null || cat ../out
39 39
40 40 $ echo "[profiling]" >> $HGRCPATH
41 41 $ echo "format=kcachegrind" >> $HGRCPATH
42 42
43 43 $ $prof st 2>../out
44 44 $ grep 'events: Ticks' ../out > /dev/null || cat ../out
45 45
46 46 $ $prof --config profiling.output=../out st
47 47 $ grep 'events: Ticks' ../out > /dev/null || cat ../out
48 48
49 49 #endif
50 50
51 51 #if lsprof serve
52 52
53 53 Profiling of HTTP requests works
54 54
55 55 $ $prof --config profiling.format=text --config profiling.output=../profile.log serve -d -p $HGPORT --pid-file ../hg.pid -A ../access.log
56 56 $ cat ../hg.pid >> $DAEMON_PIDS
57 57 $ hg -q clone -U http://localhost:$HGPORT ../clone
58 58
59 59 A single profile is logged because file logging doesn't append
60 60 $ grep CallCount ../profile.log | wc -l
61 61 \s*1 (re)
62 62
63 63 #endif
64 64
65 65 Install an extension that can sleep and guarantee a profiler has time to run
66 66
67 67 $ cat >> sleepext.py << EOF
68 68 > import time
69 69 > from mercurial import registrar
70 70 > cmdtable = {}
71 71 > command = registrar.command(cmdtable)
72 72 > @command(b'sleep', [], b'hg sleep')
73 73 > def sleep(ui, *args, **kwargs):
74 74 > time.sleep(0.1)
75 75 > EOF
76 76
77 77 $ cat >> $HGRCPATH << EOF
78 78 > [extensions]
79 79 > sleep = `pwd`/sleepext.py
80 80 > EOF
81 81
82 82 statistical profiler works
83 83
84 84 $ hg --profile sleep 2>../out
85 85 $ cat ../out | statprofran
86 86
87 87 Various statprof formatters work
88 88
89 89 $ hg --profile --config profiling.statformat=byline sleep 2>../out || cat ../out
90 90 $ head -n 3 ../out
91 91 % cumulative self
92 92 time seconds seconds name
93 * sleepext.py:*:sleep (glob)
93 * sleepext.py:*:sleep (glob)
94 94 $ cat ../out | statprofran
95 95
96 96 $ hg --profile --config profiling.statformat=bymethod sleep 2>../out || cat ../out
97 97 $ head -n 1 ../out
98 98 % cumulative self
99 99 $ cat ../out | statprofran
100 100
101 101 $ hg --profile --config profiling.statformat=hotpath sleep 2>../out || cat ../out
102 102 $ cat ../out | statprofran
103 103
104 104 $ hg --profile --config profiling.statformat=json sleep 2>../out || cat ../out
105 105 $ cat ../out
106 106 \[\[-?\d+.* (re)
107 107
108 108 statprof can be used as a standalone module
109 109
110 110 $ "$PYTHON" -m mercurial.statprof hotpath
111 111 must specify --file to load
112 112 [1]
113 113
114 114 $ cd ..
115 115
116 116 #if no-chg
117 117 profiler extension could be loaded before other extensions
118 118
119 119 $ cat > fooprof.py <<EOF
120 120 > from __future__ import absolute_import
121 121 > import contextlib
122 122 > import sys
123 123 > @contextlib.contextmanager
124 124 > def profile(ui, fp):
125 125 > print('fooprof: start profile')
126 126 > sys.stdout.flush()
127 127 > yield
128 128 > print('fooprof: end profile')
129 129 > sys.stdout.flush()
130 130 > def extsetup(ui):
131 131 > ui.write(b'fooprof: loaded\n')
132 132 > EOF
133 133
134 134 $ cat > otherextension.py <<EOF
135 135 > from __future__ import absolute_import
136 136 > def extsetup(ui):
137 137 > ui.write(b'otherextension: loaded\n')
138 138 > EOF
139 139
140 140 $ hg init b
141 141 $ cd b
142 142 $ cat >> .hg/hgrc <<EOF
143 143 > [extensions]
144 144 > other = $TESTTMP/otherextension.py
145 145 > fooprof = $TESTTMP/fooprof.py
146 146 > EOF
147 147
148 148 $ hg root
149 149 otherextension: loaded
150 150 fooprof: loaded
151 151 $TESTTMP/b
152 152 $ HGPROF=fooprof hg root --profile
153 153 fooprof: loaded
154 154 fooprof: start profile
155 155 otherextension: loaded
156 156 $TESTTMP/b
157 157 fooprof: end profile
158 158
159 159 $ HGPROF=other hg root --profile 2>&1 | head -n 2
160 160 otherextension: loaded
161 161 unrecognized profiler 'other' - ignored
162 162
163 163 $ HGPROF=unknown hg root --profile 2>&1 | head -n 1
164 164 unrecognized profiler 'unknown' - ignored
165 165
166 166 $ cd ..
167 167 #endif
@@ -1,1470 +1,1473
1 1 $ . $TESTDIR/wireprotohelpers.sh
2 2
3 3 $ cat >> $HGRCPATH << EOF
4 4 > [extensions]
5 5 > blackbox =
6 6 > [blackbox]
7 7 > track = simplecache
8 8 > EOF
9 9
10 10 $ hg init server
11 11 $ enablehttpv2 server
12 12 $ cd server
13 13 $ cat >> .hg/hgrc << EOF
14 14 > [server]
15 15 > compressionengines = zlib
16 16 > [extensions]
17 17 > simplecache = $TESTDIR/wireprotosimplecache.py
18 18 > [simplecache]
19 19 > cacheapi = true
20 20 > EOF
21 21
22 22 $ echo a0 > a
23 23 $ echo b0 > b
24 24 $ hg -q commit -A -m 'commit 0'
25 25 $ echo a1 > a
26 26 $ hg commit -m 'commit 1'
27 27
28 28 $ hg --debug debugindex -m
29 29 rev linkrev nodeid p1 p2
30 30 0 0 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000 0000000000000000000000000000000000000000
31 31 1 1 a988fb43583e871d1ed5750ee074c6d840bbbfc8 992f4779029a3df8d0666d00bb924f69634e2641 0000000000000000000000000000000000000000
32 32
33 33 $ hg --config simplecache.redirectsfile=redirects.py serve -p $HGPORT -d --pid-file hg.pid -E error.log
34 34 $ cat hg.pid > $DAEMON_PIDS
35 35
36 36 $ cat > redirects.py << EOF
37 37 > [
38 38 > {
39 39 > b'name': b'target-a',
40 40 > b'protocol': b'http',
41 41 > b'snirequired': False,
42 42 > b'tlsversions': [b'1.2', b'1.3'],
43 43 > b'uris': [b'http://example.com/'],
44 44 > },
45 45 > ]
46 46 > EOF
47 47
48 48 Redirect targets advertised when configured
49 49
50 50 $ sendhttpv2peerhandshake << EOF
51 51 > command capabilities
52 52 > EOF
53 53 creating http peer for wire protocol version 2
54 54 s> GET /?cmd=capabilities HTTP/1.1\r\n
55 55 s> Accept-Encoding: identity\r\n
56 56 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
57 57 s> x-hgproto-1: cbor\r\n
58 58 s> x-hgupgrade-1: exp-http-v2-0003\r\n
59 59 s> accept: application/mercurial-0.1\r\n
60 60 s> host: $LOCALIP:$HGPORT\r\n (glob)
61 61 s> user-agent: Mercurial debugwireproto\r\n
62 62 s> \r\n
63 63 s> makefile('rb', None)
64 64 s> HTTP/1.1 200 OK\r\n
65 65 s> Server: testing stub value\r\n
66 66 s> Date: $HTTP_DATE$\r\n
67 67 s> Content-Type: application/mercurial-cbor\r\n
68 68 s> Content-Length: 2259\r\n
69 69 s> \r\n
70 70 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\
xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
71 (remote redirect target target-a is compatible)
71 (remote redirect target target-a is compatible) (tls1.2 !)
72 (remote redirect target target-a requires unsupported TLS versions: 1.2, 1.3) (no-tls1.2 !)
72 73 sending capabilities command
73 74 s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
74 75 s> Accept-Encoding: identity\r\n
75 76 s> accept: application/mercurial-exp-framing-0006\r\n
76 77 s> content-type: application/mercurial-exp-framing-0006\r\n
77 s> content-length: 111\r\n
78 s> content-length: 111\r\n (tls1.2 !)
79 s> content-length: 102\r\n (no-tls1.2 !)
78 80 s> host: $LOCALIP:$HGPORT\r\n (glob)
79 81 s> user-agent: Mercurial debugwireproto\r\n
80 82 s> \r\n
81 s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
83 s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a (tls1.2 !)
84 s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80 (no-tls1.2 !)
82 85 s> makefile('rb', None)
83 86 s> HTTP/1.1 200 OK\r\n
84 87 s> Server: testing stub value\r\n
85 88 s> Date: $HTTP_DATE$\r\n
86 89 s> Content-Type: application/mercurial-exp-framing-0006\r\n
87 90 s> Transfer-Encoding: chunked\r\n
88 91 s> \r\n
89 92 s> 11\r\n
90 93 s> \t\x00\x00\x01\x00\x02\x01\x92
91 94 s> Hidentity
92 95 s> \r\n
93 96 received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
94 97 s> 13\r\n
95 98 s> \x0b\x00\x00\x01\x00\x02\x041
96 99 s> \xa1FstatusBok
97 100 s> \r\n
98 101 received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
99 102 s> 6d1\r\n
100 103 s> \xc9\x06\x00\x01\x00\x02\x041
101 104 s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrep
oformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa5DnameHtarget-aHprotocolDhttpKsnirequired\xf4Ktlsversions\x82C1.2C1.3Duris\x81Shttp://example.com/
102 105 s> \r\n
103 106 received frame(size=1737; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
104 107 s> 8\r\n
105 108 s> \x00\x00\x00\x01\x00\x02\x002
106 109 s> \r\n
107 110 s> 0\r\n
108 111 s> \r\n
109 112 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
110 113 response: gen[
111 114 {
112 115 b'commands': {
113 116 b'branchmap': {
114 117 b'args': {},
115 118 b'permissions': [
116 119 b'pull'
117 120 ]
118 121 },
119 122 b'capabilities': {
120 123 b'args': {},
121 124 b'permissions': [
122 125 b'pull'
123 126 ]
124 127 },
125 128 b'changesetdata': {
126 129 b'args': {
127 130 b'fields': {
128 131 b'default': set([]),
129 132 b'required': False,
130 133 b'type': b'set',
131 134 b'validvalues': set([
132 135 b'bookmarks',
133 136 b'parents',
134 137 b'phase',
135 138 b'revision'
136 139 ])
137 140 },
138 141 b'revisions': {
139 142 b'required': True,
140 143 b'type': b'list'
141 144 }
142 145 },
143 146 b'permissions': [
144 147 b'pull'
145 148 ]
146 149 },
147 150 b'filedata': {
148 151 b'args': {
149 152 b'fields': {
150 153 b'default': set([]),
151 154 b'required': False,
152 155 b'type': b'set',
153 156 b'validvalues': set([
154 157 b'linknode',
155 158 b'parents',
156 159 b'revision'
157 160 ])
158 161 },
159 162 b'haveparents': {
160 163 b'default': False,
161 164 b'required': False,
162 165 b'type': b'bool'
163 166 },
164 167 b'nodes': {
165 168 b'required': True,
166 169 b'type': b'list'
167 170 },
168 171 b'path': {
169 172 b'required': True,
170 173 b'type': b'bytes'
171 174 }
172 175 },
173 176 b'permissions': [
174 177 b'pull'
175 178 ]
176 179 },
177 180 b'filesdata': {
178 181 b'args': {
179 182 b'fields': {
180 183 b'default': set([]),
181 184 b'required': False,
182 185 b'type': b'set',
183 186 b'validvalues': set([
184 187 b'firstchangeset',
185 188 b'linknode',
186 189 b'parents',
187 190 b'revision'
188 191 ])
189 192 },
190 193 b'haveparents': {
191 194 b'default': False,
192 195 b'required': False,
193 196 b'type': b'bool'
194 197 },
195 198 b'pathfilter': {
196 199 b'default': None,
197 200 b'required': False,
198 201 b'type': b'dict'
199 202 },
200 203 b'revisions': {
201 204 b'required': True,
202 205 b'type': b'list'
203 206 }
204 207 },
205 208 b'permissions': [
206 209 b'pull'
207 210 ],
208 211 b'recommendedbatchsize': 50000
209 212 },
210 213 b'heads': {
211 214 b'args': {
212 215 b'publiconly': {
213 216 b'default': False,
214 217 b'required': False,
215 218 b'type': b'bool'
216 219 }
217 220 },
218 221 b'permissions': [
219 222 b'pull'
220 223 ]
221 224 },
222 225 b'known': {
223 226 b'args': {
224 227 b'nodes': {
225 228 b'default': [],
226 229 b'required': False,
227 230 b'type': b'list'
228 231 }
229 232 },
230 233 b'permissions': [
231 234 b'pull'
232 235 ]
233 236 },
234 237 b'listkeys': {
235 238 b'args': {
236 239 b'namespace': {
237 240 b'required': True,
238 241 b'type': b'bytes'
239 242 }
240 243 },
241 244 b'permissions': [
242 245 b'pull'
243 246 ]
244 247 },
245 248 b'lookup': {
246 249 b'args': {
247 250 b'key': {
248 251 b'required': True,
249 252 b'type': b'bytes'
250 253 }
251 254 },
252 255 b'permissions': [
253 256 b'pull'
254 257 ]
255 258 },
256 259 b'manifestdata': {
257 260 b'args': {
258 261 b'fields': {
259 262 b'default': set([]),
260 263 b'required': False,
261 264 b'type': b'set',
262 265 b'validvalues': set([
263 266 b'parents',
264 267 b'revision'
265 268 ])
266 269 },
267 270 b'haveparents': {
268 271 b'default': False,
269 272 b'required': False,
270 273 b'type': b'bool'
271 274 },
272 275 b'nodes': {
273 276 b'required': True,
274 277 b'type': b'list'
275 278 },
276 279 b'tree': {
277 280 b'required': True,
278 281 b'type': b'bytes'
279 282 }
280 283 },
281 284 b'permissions': [
282 285 b'pull'
283 286 ],
284 287 b'recommendedbatchsize': 100000
285 288 },
286 289 b'pushkey': {
287 290 b'args': {
288 291 b'key': {
289 292 b'required': True,
290 293 b'type': b'bytes'
291 294 },
292 295 b'namespace': {
293 296 b'required': True,
294 297 b'type': b'bytes'
295 298 },
296 299 b'new': {
297 300 b'required': True,
298 301 b'type': b'bytes'
299 302 },
300 303 b'old': {
301 304 b'required': True,
302 305 b'type': b'bytes'
303 306 }
304 307 },
305 308 b'permissions': [
306 309 b'push'
307 310 ]
308 311 },
309 312 b'rawstorefiledata': {
310 313 b'args': {
311 314 b'files': {
312 315 b'required': True,
313 316 b'type': b'list'
314 317 },
315 318 b'pathfilter': {
316 319 b'default': None,
317 320 b'required': False,
318 321 b'type': b'list'
319 322 }
320 323 },
321 324 b'permissions': [
322 325 b'pull'
323 326 ]
324 327 }
325 328 },
326 329 b'framingmediatypes': [
327 330 b'application/mercurial-exp-framing-0006'
328 331 ],
329 332 b'pathfilterprefixes': set([
330 333 b'path:',
331 334 b'rootfilesin:'
332 335 ]),
333 336 b'rawrepoformats': [
334 337 b'generaldelta',
335 338 b'revlogv1'
336 339 ],
337 340 b'redirect': {
338 341 b'hashes': [
339 342 b'sha256',
340 343 b'sha1'
341 344 ],
342 345 b'targets': [
343 346 {
344 347 b'name': b'target-a',
345 348 b'protocol': b'http',
346 349 b'snirequired': False,
347 350 b'tlsversions': [
348 351 b'1.2',
349 352 b'1.3'
350 353 ],
351 354 b'uris': [
352 355 b'http://example.com/'
353 356 ]
354 357 }
355 358 ]
356 359 }
357 360 }
358 361 ]
359 362 (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
360 363
361 364 Unknown protocol is filtered from compatible targets
362 365
363 366 $ cat > redirects.py << EOF
364 367 > [
365 368 > {
366 369 > b'name': b'target-a',
367 370 > b'protocol': b'http',
368 371 > b'uris': [b'http://example.com/'],
369 372 > },
370 373 > {
371 374 > b'name': b'target-b',
372 375 > b'protocol': b'unknown',
373 376 > b'uris': [b'unknown://example.com/'],
374 377 > },
375 378 > ]
376 379 > EOF
377 380
378 381 $ sendhttpv2peerhandshake << EOF
379 382 > command capabilities
380 383 > EOF
381 384 creating http peer for wire protocol version 2
382 385 s> GET /?cmd=capabilities HTTP/1.1\r\n
383 386 s> Accept-Encoding: identity\r\n
384 387 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
385 388 s> x-hgproto-1: cbor\r\n
386 389 s> x-hgupgrade-1: exp-http-v2-0003\r\n
387 390 s> accept: application/mercurial-0.1\r\n
388 391 s> host: $LOCALIP:$HGPORT\r\n (glob)
389 392 s> user-agent: Mercurial debugwireproto\r\n
390 393 s> \r\n
391 394 s> makefile('rb', None)
392 395 s> HTTP/1.1 200 OK\r\n
393 396 s> Server: testing stub value\r\n
394 397 s> Date: $HTTP_DATE$\r\n
395 398 s> Content-Type: application/mercurial-cbor\r\n
396 399 s> Content-Length: 2286\r\n
397 400 s> \r\n
398 401 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixe
s\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
399 402 (remote redirect target target-a is compatible)
400 403 (remote redirect target target-b uses unsupported protocol: unknown)
401 404 sending capabilities command
402 405 s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
403 406 s> Accept-Encoding: identity\r\n
404 407 s> accept: application/mercurial-exp-framing-0006\r\n
405 408 s> content-type: application/mercurial-exp-framing-0006\r\n
406 409 s> content-length: 111\r\n
407 410 s> host: $LOCALIP:$HGPORT\r\n (glob)
408 411 s> user-agent: Mercurial debugwireproto\r\n
409 412 s> \r\n
410 413 s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81HidentityC\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81Htarget-a
411 414 s> makefile('rb', None)
412 415 s> HTTP/1.1 200 OK\r\n
413 416 s> Server: testing stub value\r\n
414 417 s> Date: $HTTP_DATE$\r\n
415 418 s> Content-Type: application/mercurial-exp-framing-0006\r\n
416 419 s> Transfer-Encoding: chunked\r\n
417 420 s> \r\n
418 421 s> 11\r\n
419 422 s> \t\x00\x00\x01\x00\x02\x01\x92
420 423 s> Hidentity
421 424 s> \r\n
422 425 received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
423 426 s> 13\r\n
424 427 s> \x0b\x00\x00\x01\x00\x02\x041
425 428 s> \xa1FstatusBok
426 429 s> \r\n
427 430 received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
428 431 s> 6ec\r\n
429 432 s> \xe4\x06\x00\x01\x00\x02\x041
430 433 s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrep
oformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x82\xa3DnameHtarget-aHprotocolDhttpDuris\x81Shttp://example.com/\xa3DnameHtarget-bHprotocolGunknownDuris\x81Vunknown://example.com/
431 434 s> \r\n
432 435 received frame(size=1764; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
433 436 s> 8\r\n
434 437 s> \x00\x00\x00\x01\x00\x02\x002
435 438 s> \r\n
436 439 s> 0\r\n
437 440 s> \r\n
438 441 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
439 442 response: gen[
440 443 {
441 444 b'commands': {
442 445 b'branchmap': {
443 446 b'args': {},
444 447 b'permissions': [
445 448 b'pull'
446 449 ]
447 450 },
448 451 b'capabilities': {
449 452 b'args': {},
450 453 b'permissions': [
451 454 b'pull'
452 455 ]
453 456 },
454 457 b'changesetdata': {
455 458 b'args': {
456 459 b'fields': {
457 460 b'default': set([]),
458 461 b'required': False,
459 462 b'type': b'set',
460 463 b'validvalues': set([
461 464 b'bookmarks',
462 465 b'parents',
463 466 b'phase',
464 467 b'revision'
465 468 ])
466 469 },
467 470 b'revisions': {
468 471 b'required': True,
469 472 b'type': b'list'
470 473 }
471 474 },
472 475 b'permissions': [
473 476 b'pull'
474 477 ]
475 478 },
476 479 b'filedata': {
477 480 b'args': {
478 481 b'fields': {
479 482 b'default': set([]),
480 483 b'required': False,
481 484 b'type': b'set',
482 485 b'validvalues': set([
483 486 b'linknode',
484 487 b'parents',
485 488 b'revision'
486 489 ])
487 490 },
488 491 b'haveparents': {
489 492 b'default': False,
490 493 b'required': False,
491 494 b'type': b'bool'
492 495 },
493 496 b'nodes': {
494 497 b'required': True,
495 498 b'type': b'list'
496 499 },
497 500 b'path': {
498 501 b'required': True,
499 502 b'type': b'bytes'
500 503 }
501 504 },
502 505 b'permissions': [
503 506 b'pull'
504 507 ]
505 508 },
506 509 b'filesdata': {
507 510 b'args': {
508 511 b'fields': {
509 512 b'default': set([]),
510 513 b'required': False,
511 514 b'type': b'set',
512 515 b'validvalues': set([
513 516 b'firstchangeset',
514 517 b'linknode',
515 518 b'parents',
516 519 b'revision'
517 520 ])
518 521 },
519 522 b'haveparents': {
520 523 b'default': False,
521 524 b'required': False,
522 525 b'type': b'bool'
523 526 },
524 527 b'pathfilter': {
525 528 b'default': None,
526 529 b'required': False,
527 530 b'type': b'dict'
528 531 },
529 532 b'revisions': {
530 533 b'required': True,
531 534 b'type': b'list'
532 535 }
533 536 },
534 537 b'permissions': [
535 538 b'pull'
536 539 ],
537 540 b'recommendedbatchsize': 50000
538 541 },
539 542 b'heads': {
540 543 b'args': {
541 544 b'publiconly': {
542 545 b'default': False,
543 546 b'required': False,
544 547 b'type': b'bool'
545 548 }
546 549 },
547 550 b'permissions': [
548 551 b'pull'
549 552 ]
550 553 },
551 554 b'known': {
552 555 b'args': {
553 556 b'nodes': {
554 557 b'default': [],
555 558 b'required': False,
556 559 b'type': b'list'
557 560 }
558 561 },
559 562 b'permissions': [
560 563 b'pull'
561 564 ]
562 565 },
563 566 b'listkeys': {
564 567 b'args': {
565 568 b'namespace': {
566 569 b'required': True,
567 570 b'type': b'bytes'
568 571 }
569 572 },
570 573 b'permissions': [
571 574 b'pull'
572 575 ]
573 576 },
574 577 b'lookup': {
575 578 b'args': {
576 579 b'key': {
577 580 b'required': True,
578 581 b'type': b'bytes'
579 582 }
580 583 },
581 584 b'permissions': [
582 585 b'pull'
583 586 ]
584 587 },
585 588 b'manifestdata': {
586 589 b'args': {
587 590 b'fields': {
588 591 b'default': set([]),
589 592 b'required': False,
590 593 b'type': b'set',
591 594 b'validvalues': set([
592 595 b'parents',
593 596 b'revision'
594 597 ])
595 598 },
596 599 b'haveparents': {
597 600 b'default': False,
598 601 b'required': False,
599 602 b'type': b'bool'
600 603 },
601 604 b'nodes': {
602 605 b'required': True,
603 606 b'type': b'list'
604 607 },
605 608 b'tree': {
606 609 b'required': True,
607 610 b'type': b'bytes'
608 611 }
609 612 },
610 613 b'permissions': [
611 614 b'pull'
612 615 ],
613 616 b'recommendedbatchsize': 100000
614 617 },
615 618 b'pushkey': {
616 619 b'args': {
617 620 b'key': {
618 621 b'required': True,
619 622 b'type': b'bytes'
620 623 },
621 624 b'namespace': {
622 625 b'required': True,
623 626 b'type': b'bytes'
624 627 },
625 628 b'new': {
626 629 b'required': True,
627 630 b'type': b'bytes'
628 631 },
629 632 b'old': {
630 633 b'required': True,
631 634 b'type': b'bytes'
632 635 }
633 636 },
634 637 b'permissions': [
635 638 b'push'
636 639 ]
637 640 },
638 641 b'rawstorefiledata': {
639 642 b'args': {
640 643 b'files': {
641 644 b'required': True,
642 645 b'type': b'list'
643 646 },
644 647 b'pathfilter': {
645 648 b'default': None,
646 649 b'required': False,
647 650 b'type': b'list'
648 651 }
649 652 },
650 653 b'permissions': [
651 654 b'pull'
652 655 ]
653 656 }
654 657 },
655 658 b'framingmediatypes': [
656 659 b'application/mercurial-exp-framing-0006'
657 660 ],
658 661 b'pathfilterprefixes': set([
659 662 b'path:',
660 663 b'rootfilesin:'
661 664 ]),
662 665 b'rawrepoformats': [
663 666 b'generaldelta',
664 667 b'revlogv1'
665 668 ],
666 669 b'redirect': {
667 670 b'hashes': [
668 671 b'sha256',
669 672 b'sha1'
670 673 ],
671 674 b'targets': [
672 675 {
673 676 b'name': b'target-a',
674 677 b'protocol': b'http',
675 678 b'uris': [
676 679 b'http://example.com/'
677 680 ]
678 681 },
679 682 {
680 683 b'name': b'target-b',
681 684 b'protocol': b'unknown',
682 685 b'uris': [
683 686 b'unknown://example.com/'
684 687 ]
685 688 }
686 689 ]
687 690 }
688 691 }
689 692 ]
690 693 (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
691 694
692 695 Missing SNI support filters targets that require SNI
693 696
694 697 $ cat > nosni.py << EOF
695 698 > from mercurial import sslutil
696 699 > sslutil.hassni = False
697 700 > EOF
698 701 $ cat >> $HGRCPATH << EOF
699 702 > [extensions]
700 703 > nosni=`pwd`/nosni.py
701 704 > EOF
702 705
703 706 $ cat > redirects.py << EOF
704 707 > [
705 708 > {
706 709 > b'name': b'target-bad-tls',
707 710 > b'protocol': b'https',
708 711 > b'uris': [b'https://example.com/'],
709 712 > b'snirequired': True,
710 713 > },
711 714 > ]
712 715 > EOF
713 716
714 717 $ sendhttpv2peerhandshake << EOF
715 718 > command capabilities
716 719 > EOF
717 720 creating http peer for wire protocol version 2
718 721 s> GET /?cmd=capabilities HTTP/1.1\r\n
719 722 s> Accept-Encoding: identity\r\n
720 723 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
721 724 s> x-hgproto-1: cbor\r\n
722 725 s> x-hgupgrade-1: exp-http-v2-0003\r\n
723 726 s> accept: application/mercurial-0.1\r\n
724 727 s> host: $LOCALIP:$HGPORT\r\n (glob)
725 728 s> user-agent: Mercurial debugwireproto\r\n
726 729 s> \r\n
727 730 s> makefile('rb', None)
728 731 s> HTTP/1.1 200 OK\r\n
729 732 s> Server: testing stub value\r\n
730 733 s> Date: $HTTP_DATE$\r\n
731 734 s> Content-Type: application/mercurial-cbor\r\n
732 735 s> Content-Length: 2246\r\n
733 736 s> \r\n
734 737 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixe
s\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
735 738 (redirect target target-bad-tls requires SNI, which is unsupported)
736 739 sending capabilities command
737 740 s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
738 741 s> Accept-Encoding: identity\r\n
739 742 s> accept: application/mercurial-exp-framing-0006\r\n
740 743 s> content-type: application/mercurial-exp-framing-0006\r\n
741 744 s> content-length: 102\r\n
742 745 s> host: $LOCALIP:$HGPORT\r\n (glob)
743 746 s> user-agent: Mercurial debugwireproto\r\n
744 747 s> \r\n
745 748 s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
746 749 s> makefile('rb', None)
747 750 s> HTTP/1.1 200 OK\r\n
748 751 s> Server: testing stub value\r\n
749 752 s> Date: $HTTP_DATE$\r\n
750 753 s> Content-Type: application/mercurial-exp-framing-0006\r\n
751 754 s> Transfer-Encoding: chunked\r\n
752 755 s> \r\n
753 756 s> 11\r\n
754 757 s> \t\x00\x00\x01\x00\x02\x01\x92
755 758 s> Hidentity
756 759 s> \r\n
757 760 received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
758 761 s> 13\r\n
759 762 s> \x0b\x00\x00\x01\x00\x02\x041
760 763 s> \xa1FstatusBok
761 764 s> \r\n
762 765 received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
763 766 s> 6c4\r\n
764 767 s> \xbc\x06\x00\x01\x00\x02\x041
765 768 s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrep
oformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKsnirequired\xf5Duris\x81Thttps://example.com/
766 769 s> \r\n
767 770 received frame(size=1724; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
768 771 s> 8\r\n
769 772 s> \x00\x00\x00\x01\x00\x02\x002
770 773 s> \r\n
771 774 s> 0\r\n
772 775 s> \r\n
773 776 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
774 777 response: gen[
775 778 {
776 779 b'commands': {
777 780 b'branchmap': {
778 781 b'args': {},
779 782 b'permissions': [
780 783 b'pull'
781 784 ]
782 785 },
783 786 b'capabilities': {
784 787 b'args': {},
785 788 b'permissions': [
786 789 b'pull'
787 790 ]
788 791 },
789 792 b'changesetdata': {
790 793 b'args': {
791 794 b'fields': {
792 795 b'default': set([]),
793 796 b'required': False,
794 797 b'type': b'set',
795 798 b'validvalues': set([
796 799 b'bookmarks',
797 800 b'parents',
798 801 b'phase',
799 802 b'revision'
800 803 ])
801 804 },
802 805 b'revisions': {
803 806 b'required': True,
804 807 b'type': b'list'
805 808 }
806 809 },
807 810 b'permissions': [
808 811 b'pull'
809 812 ]
810 813 },
811 814 b'filedata': {
812 815 b'args': {
813 816 b'fields': {
814 817 b'default': set([]),
815 818 b'required': False,
816 819 b'type': b'set',
817 820 b'validvalues': set([
818 821 b'linknode',
819 822 b'parents',
820 823 b'revision'
821 824 ])
822 825 },
823 826 b'haveparents': {
824 827 b'default': False,
825 828 b'required': False,
826 829 b'type': b'bool'
827 830 },
828 831 b'nodes': {
829 832 b'required': True,
830 833 b'type': b'list'
831 834 },
832 835 b'path': {
833 836 b'required': True,
834 837 b'type': b'bytes'
835 838 }
836 839 },
837 840 b'permissions': [
838 841 b'pull'
839 842 ]
840 843 },
841 844 b'filesdata': {
842 845 b'args': {
843 846 b'fields': {
844 847 b'default': set([]),
845 848 b'required': False,
846 849 b'type': b'set',
847 850 b'validvalues': set([
848 851 b'firstchangeset',
849 852 b'linknode',
850 853 b'parents',
851 854 b'revision'
852 855 ])
853 856 },
854 857 b'haveparents': {
855 858 b'default': False,
856 859 b'required': False,
857 860 b'type': b'bool'
858 861 },
859 862 b'pathfilter': {
860 863 b'default': None,
861 864 b'required': False,
862 865 b'type': b'dict'
863 866 },
864 867 b'revisions': {
865 868 b'required': True,
866 869 b'type': b'list'
867 870 }
868 871 },
869 872 b'permissions': [
870 873 b'pull'
871 874 ],
872 875 b'recommendedbatchsize': 50000
873 876 },
874 877 b'heads': {
875 878 b'args': {
876 879 b'publiconly': {
877 880 b'default': False,
878 881 b'required': False,
879 882 b'type': b'bool'
880 883 }
881 884 },
882 885 b'permissions': [
883 886 b'pull'
884 887 ]
885 888 },
886 889 b'known': {
887 890 b'args': {
888 891 b'nodes': {
889 892 b'default': [],
890 893 b'required': False,
891 894 b'type': b'list'
892 895 }
893 896 },
894 897 b'permissions': [
895 898 b'pull'
896 899 ]
897 900 },
898 901 b'listkeys': {
899 902 b'args': {
900 903 b'namespace': {
901 904 b'required': True,
902 905 b'type': b'bytes'
903 906 }
904 907 },
905 908 b'permissions': [
906 909 b'pull'
907 910 ]
908 911 },
909 912 b'lookup': {
910 913 b'args': {
911 914 b'key': {
912 915 b'required': True,
913 916 b'type': b'bytes'
914 917 }
915 918 },
916 919 b'permissions': [
917 920 b'pull'
918 921 ]
919 922 },
920 923 b'manifestdata': {
921 924 b'args': {
922 925 b'fields': {
923 926 b'default': set([]),
924 927 b'required': False,
925 928 b'type': b'set',
926 929 b'validvalues': set([
927 930 b'parents',
928 931 b'revision'
929 932 ])
930 933 },
931 934 b'haveparents': {
932 935 b'default': False,
933 936 b'required': False,
934 937 b'type': b'bool'
935 938 },
936 939 b'nodes': {
937 940 b'required': True,
938 941 b'type': b'list'
939 942 },
940 943 b'tree': {
941 944 b'required': True,
942 945 b'type': b'bytes'
943 946 }
944 947 },
945 948 b'permissions': [
946 949 b'pull'
947 950 ],
948 951 b'recommendedbatchsize': 100000
949 952 },
950 953 b'pushkey': {
951 954 b'args': {
952 955 b'key': {
953 956 b'required': True,
954 957 b'type': b'bytes'
955 958 },
956 959 b'namespace': {
957 960 b'required': True,
958 961 b'type': b'bytes'
959 962 },
960 963 b'new': {
961 964 b'required': True,
962 965 b'type': b'bytes'
963 966 },
964 967 b'old': {
965 968 b'required': True,
966 969 b'type': b'bytes'
967 970 }
968 971 },
969 972 b'permissions': [
970 973 b'push'
971 974 ]
972 975 },
973 976 b'rawstorefiledata': {
974 977 b'args': {
975 978 b'files': {
976 979 b'required': True,
977 980 b'type': b'list'
978 981 },
979 982 b'pathfilter': {
980 983 b'default': None,
981 984 b'required': False,
982 985 b'type': b'list'
983 986 }
984 987 },
985 988 b'permissions': [
986 989 b'pull'
987 990 ]
988 991 }
989 992 },
990 993 b'framingmediatypes': [
991 994 b'application/mercurial-exp-framing-0006'
992 995 ],
993 996 b'pathfilterprefixes': set([
994 997 b'path:',
995 998 b'rootfilesin:'
996 999 ]),
997 1000 b'rawrepoformats': [
998 1001 b'generaldelta',
999 1002 b'revlogv1'
1000 1003 ],
1001 1004 b'redirect': {
1002 1005 b'hashes': [
1003 1006 b'sha256',
1004 1007 b'sha1'
1005 1008 ],
1006 1009 b'targets': [
1007 1010 {
1008 1011 b'name': b'target-bad-tls',
1009 1012 b'protocol': b'https',
1010 1013 b'snirequired': True,
1011 1014 b'uris': [
1012 1015 b'https://example.com/'
1013 1016 ]
1014 1017 }
1015 1018 ]
1016 1019 }
1017 1020 }
1018 1021 ]
1019 1022 (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
1020 1023
1021 1024 $ cat >> $HGRCPATH << EOF
1022 1025 > [extensions]
1023 1026 > nosni=!
1024 1027 > EOF
1025 1028
1026 1029 Unknown tls value is filtered from compatible targets
1027 1030
1028 1031 $ cat > redirects.py << EOF
1029 1032 > [
1030 1033 > {
1031 1034 > b'name': b'target-bad-tls',
1032 1035 > b'protocol': b'https',
1033 1036 > b'uris': [b'https://example.com/'],
1034 1037 > b'tlsversions': [b'42', b'39'],
1035 1038 > },
1036 1039 > ]
1037 1040 > EOF
1038 1041
1039 1042 $ sendhttpv2peerhandshake << EOF
1040 1043 > command capabilities
1041 1044 > EOF
1042 1045 creating http peer for wire protocol version 2
1043 1046 s> GET /?cmd=capabilities HTTP/1.1\r\n
1044 1047 s> Accept-Encoding: identity\r\n
1045 1048 s> vary: X-HgProto-1,X-HgUpgrade-1\r\n
1046 1049 s> x-hgproto-1: cbor\r\n
1047 1050 s> x-hgupgrade-1: exp-http-v2-0003\r\n
1048 1051 s> accept: application/mercurial-0.1\r\n
1049 1052 s> host: $LOCALIP:$HGPORT\r\n (glob)
1050 1053 s> user-agent: Mercurial debugwireproto\r\n
1051 1054 s> \r\n
1052 1055 s> makefile('rb', None)
1053 1056 s> HTTP/1.1 200 OK\r\n
1054 1057 s> Server: testing stub value\r\n
1055 1058 s> Date: $HTTP_DATE$\r\n
1056 1059 s> Content-Type: application/mercurial-cbor\r\n
1057 1060 s> Content-Length: 2252\r\n
1058 1061 s> \r\n
1059 1062 s> \xa3GapibaseDapi/Dapis\xa1Pexp-http-v2-0003\xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefi
xes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawrepoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/Nv1capabilitiesY\x01\xd3batch branchmap $USUAL_BUNDLE2_CAPS$ changegroupsubset compression=$BUNDLE2_COMPRESSIONS$ getbundle httpheader=1024 httpmediatype=0.1rx,0.1tx,0.2tx known lookup pushkey streamreqs=generaldelta,revlogv1 unbundle=HG10GZ,HG10BZ,HG10UN unbundlehash
1060 1063 (remote redirect target target-bad-tls requires unsupported TLS versions: 39, 42)
1061 1064 sending capabilities command
1062 1065 s> POST /api/exp-http-v2-0003/ro/capabilities HTTP/1.1\r\n
1063 1066 s> Accept-Encoding: identity\r\n
1064 1067 s> accept: application/mercurial-exp-framing-0006\r\n
1065 1068 s> content-type: application/mercurial-exp-framing-0006\r\n
1066 1069 s> content-length: 102\r\n
1067 1070 s> host: $LOCALIP:$HGPORT\r\n (glob)
1068 1071 s> user-agent: Mercurial debugwireproto\r\n
1069 1072 s> \r\n
1070 1073 s> \x1c\x00\x00\x01\x00\x01\x01\x82\xa1Pcontentencodings\x81Hidentity:\x00\x00\x01\x00\x01\x00\x11\xa2DnameLcapabilitiesHredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x80
1071 1074 s> makefile('rb', None)
1072 1075 s> HTTP/1.1 200 OK\r\n
1073 1076 s> Server: testing stub value\r\n
1074 1077 s> Date: $HTTP_DATE$\r\n
1075 1078 s> Content-Type: application/mercurial-exp-framing-0006\r\n
1076 1079 s> Transfer-Encoding: chunked\r\n
1077 1080 s> \r\n
1078 1081 s> 11\r\n
1079 1082 s> \t\x00\x00\x01\x00\x02\x01\x92
1080 1083 s> Hidentity
1081 1084 s> \r\n
1082 1085 received frame(size=9; request=1; stream=2; streamflags=stream-begin; type=stream-settings; flags=eos)
1083 1086 s> 13\r\n
1084 1087 s> \x0b\x00\x00\x01\x00\x02\x041
1085 1088 s> \xa1FstatusBok
1086 1089 s> \r\n
1087 1090 received frame(size=11; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
1088 1091 s> 6ca\r\n
1089 1092 s> \xc2\x06\x00\x01\x00\x02\x041
1090 1093 s> \xa5Hcommands\xacIbranchmap\xa2Dargs\xa0Kpermissions\x81DpullLcapabilities\xa2Dargs\xa0Kpermissions\x81DpullMchangesetdata\xa2Dargs\xa2Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84IbookmarksGparentsEphaseHrevisionIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullHfiledata\xa2Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x83HlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDpath\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullIfilesdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x84NfirstchangesetHlinknodeGparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDdictIrevisions\xa2Hrequired\xf5DtypeDlistKpermissions\x81DpullTrecommendedbatchsize\x19\xc3PEheads\xa2Dargs\xa1Jpubliconly\xa3Gdefault\xf4Hrequired\xf4DtypeDboolKpermissions\x81DpullEknown\xa2Dargs\xa1Enodes\xa3Gdefault\x80Hrequired\xf4DtypeDlistKpermissions\x81DpullHlistkeys\xa2Dargs\xa1Inamespace\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullFlookup\xa2Dargs\xa1Ckey\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullLmanifestdata\xa3Dargs\xa4Ffields\xa4Gdefault\xd9\x01\x02\x80Hrequired\xf4DtypeCsetKvalidvalues\xd9\x01\x02\x82GparentsHrevisionKhaveparents\xa3Gdefault\xf4Hrequired\xf4DtypeDboolEnodes\xa2Hrequired\xf5DtypeDlistDtree\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpullTrecommendedbatchsize\x1a\x00\x01\x86\xa0Gpushkey\xa2Dargs\xa4Ckey\xa2Hrequired\xf5DtypeEbytesInamespace\xa2Hrequired\xf5DtypeEbytesCnew\xa2Hrequired\xf5DtypeEbytesCold\xa2Hrequired\xf5DtypeEbytesKpermissions\x81DpushPrawstorefiledata\xa2Dargs\xa2Efiles\xa2Hrequired\xf5DtypeDlistJpathfilter\xa3Gdefault\xf6Hrequired\xf4DtypeDlistKpermissions\x81DpullQframingmediatypes\x81X&application/mercurial-exp-framing-0006Rpathfilterprefixes\xd9\x01\x02\x82Epath:Lrootfilesin:Nrawr
epoformats\x82LgeneraldeltaHrevlogv1Hredirect\xa2Fhashes\x82Fsha256Dsha1Gtargets\x81\xa4DnameNtarget-bad-tlsHprotocolEhttpsKtlsversions\x82B42B39Duris\x81Thttps://example.com/
1091 1094 s> \r\n
1092 1095 received frame(size=1730; request=1; stream=2; streamflags=encoded; type=command-response; flags=continuation)
1093 1096 s> 8\r\n
1094 1097 s> \x00\x00\x00\x01\x00\x02\x002
1095 1098 s> \r\n
1096 1099 s> 0\r\n
1097 1100 s> \r\n
1098 1101 received frame(size=0; request=1; stream=2; streamflags=; type=command-response; flags=eos)
1099 1102 response: gen[
1100 1103 {
1101 1104 b'commands': {
1102 1105 b'branchmap': {
1103 1106 b'args': {},
1104 1107 b'permissions': [
1105 1108 b'pull'
1106 1109 ]
1107 1110 },
1108 1111 b'capabilities': {
1109 1112 b'args': {},
1110 1113 b'permissions': [
1111 1114 b'pull'
1112 1115 ]
1113 1116 },
1114 1117 b'changesetdata': {
1115 1118 b'args': {
1116 1119 b'fields': {
1117 1120 b'default': set([]),
1118 1121 b'required': False,
1119 1122 b'type': b'set',
1120 1123 b'validvalues': set([
1121 1124 b'bookmarks',
1122 1125 b'parents',
1123 1126 b'phase',
1124 1127 b'revision'
1125 1128 ])
1126 1129 },
1127 1130 b'revisions': {
1128 1131 b'required': True,
1129 1132 b'type': b'list'
1130 1133 }
1131 1134 },
1132 1135 b'permissions': [
1133 1136 b'pull'
1134 1137 ]
1135 1138 },
1136 1139 b'filedata': {
1137 1140 b'args': {
1138 1141 b'fields': {
1139 1142 b'default': set([]),
1140 1143 b'required': False,
1141 1144 b'type': b'set',
1142 1145 b'validvalues': set([
1143 1146 b'linknode',
1144 1147 b'parents',
1145 1148 b'revision'
1146 1149 ])
1147 1150 },
1148 1151 b'haveparents': {
1149 1152 b'default': False,
1150 1153 b'required': False,
1151 1154 b'type': b'bool'
1152 1155 },
1153 1156 b'nodes': {
1154 1157 b'required': True,
1155 1158 b'type': b'list'
1156 1159 },
1157 1160 b'path': {
1158 1161 b'required': True,
1159 1162 b'type': b'bytes'
1160 1163 }
1161 1164 },
1162 1165 b'permissions': [
1163 1166 b'pull'
1164 1167 ]
1165 1168 },
1166 1169 b'filesdata': {
1167 1170 b'args': {
1168 1171 b'fields': {
1169 1172 b'default': set([]),
1170 1173 b'required': False,
1171 1174 b'type': b'set',
1172 1175 b'validvalues': set([
1173 1176 b'firstchangeset',
1174 1177 b'linknode',
1175 1178 b'parents',
1176 1179 b'revision'
1177 1180 ])
1178 1181 },
1179 1182 b'haveparents': {
1180 1183 b'default': False,
1181 1184 b'required': False,
1182 1185 b'type': b'bool'
1183 1186 },
1184 1187 b'pathfilter': {
1185 1188 b'default': None,
1186 1189 b'required': False,
1187 1190 b'type': b'dict'
1188 1191 },
1189 1192 b'revisions': {
1190 1193 b'required': True,
1191 1194 b'type': b'list'
1192 1195 }
1193 1196 },
1194 1197 b'permissions': [
1195 1198 b'pull'
1196 1199 ],
1197 1200 b'recommendedbatchsize': 50000
1198 1201 },
1199 1202 b'heads': {
1200 1203 b'args': {
1201 1204 b'publiconly': {
1202 1205 b'default': False,
1203 1206 b'required': False,
1204 1207 b'type': b'bool'
1205 1208 }
1206 1209 },
1207 1210 b'permissions': [
1208 1211 b'pull'
1209 1212 ]
1210 1213 },
1211 1214 b'known': {
1212 1215 b'args': {
1213 1216 b'nodes': {
1214 1217 b'default': [],
1215 1218 b'required': False,
1216 1219 b'type': b'list'
1217 1220 }
1218 1221 },
1219 1222 b'permissions': [
1220 1223 b'pull'
1221 1224 ]
1222 1225 },
1223 1226 b'listkeys': {
1224 1227 b'args': {
1225 1228 b'namespace': {
1226 1229 b'required': True,
1227 1230 b'type': b'bytes'
1228 1231 }
1229 1232 },
1230 1233 b'permissions': [
1231 1234 b'pull'
1232 1235 ]
1233 1236 },
1234 1237 b'lookup': {
1235 1238 b'args': {
1236 1239 b'key': {
1237 1240 b'required': True,
1238 1241 b'type': b'bytes'
1239 1242 }
1240 1243 },
1241 1244 b'permissions': [
1242 1245 b'pull'
1243 1246 ]
1244 1247 },
1245 1248 b'manifestdata': {
1246 1249 b'args': {
1247 1250 b'fields': {
1248 1251 b'default': set([]),
1249 1252 b'required': False,
1250 1253 b'type': b'set',
1251 1254 b'validvalues': set([
1252 1255 b'parents',
1253 1256 b'revision'
1254 1257 ])
1255 1258 },
1256 1259 b'haveparents': {
1257 1260 b'default': False,
1258 1261 b'required': False,
1259 1262 b'type': b'bool'
1260 1263 },
1261 1264 b'nodes': {
1262 1265 b'required': True,
1263 1266 b'type': b'list'
1264 1267 },
1265 1268 b'tree': {
1266 1269 b'required': True,
1267 1270 b'type': b'bytes'
1268 1271 }
1269 1272 },
1270 1273 b'permissions': [
1271 1274 b'pull'
1272 1275 ],
1273 1276 b'recommendedbatchsize': 100000
1274 1277 },
1275 1278 b'pushkey': {
1276 1279 b'args': {
1277 1280 b'key': {
1278 1281 b'required': True,
1279 1282 b'type': b'bytes'
1280 1283 },
1281 1284 b'namespace': {
1282 1285 b'required': True,
1283 1286 b'type': b'bytes'
1284 1287 },
1285 1288 b'new': {
1286 1289 b'required': True,
1287 1290 b'type': b'bytes'
1288 1291 },
1289 1292 b'old': {
1290 1293 b'required': True,
1291 1294 b'type': b'bytes'
1292 1295 }
1293 1296 },
1294 1297 b'permissions': [
1295 1298 b'push'
1296 1299 ]
1297 1300 },
1298 1301 b'rawstorefiledata': {
1299 1302 b'args': {
1300 1303 b'files': {
1301 1304 b'required': True,
1302 1305 b'type': b'list'
1303 1306 },
1304 1307 b'pathfilter': {
1305 1308 b'default': None,
1306 1309 b'required': False,
1307 1310 b'type': b'list'
1308 1311 }
1309 1312 },
1310 1313 b'permissions': [
1311 1314 b'pull'
1312 1315 ]
1313 1316 }
1314 1317 },
1315 1318 b'framingmediatypes': [
1316 1319 b'application/mercurial-exp-framing-0006'
1317 1320 ],
1318 1321 b'pathfilterprefixes': set([
1319 1322 b'path:',
1320 1323 b'rootfilesin:'
1321 1324 ]),
1322 1325 b'rawrepoformats': [
1323 1326 b'generaldelta',
1324 1327 b'revlogv1'
1325 1328 ],
1326 1329 b'redirect': {
1327 1330 b'hashes': [
1328 1331 b'sha256',
1329 1332 b'sha1'
1330 1333 ],
1331 1334 b'targets': [
1332 1335 {
1333 1336 b'name': b'target-bad-tls',
1334 1337 b'protocol': b'https',
1335 1338 b'tlsversions': [
1336 1339 b'42',
1337 1340 b'39'
1338 1341 ],
1339 1342 b'uris': [
1340 1343 b'https://example.com/'
1341 1344 ]
1342 1345 }
1343 1346 ]
1344 1347 }
1345 1348 }
1346 1349 ]
1347 1350 (sent 2 HTTP requests and * bytes; received * bytes in responses) (glob)
1348 1351
1349 1352 Set up the server to issue content redirects to its built-in API server.
1350 1353
1351 1354 $ cat > redirects.py << EOF
1352 1355 > [
1353 1356 > {
1354 1357 > b'name': b'local',
1355 1358 > b'protocol': b'http',
1356 1359 > b'uris': [b'http://example.com/'],
1357 1360 > },
1358 1361 > ]
1359 1362 > EOF
1360 1363
1361 1364 Request to eventual cache URL should return 404 (validating the cache server works)
1362 1365
1363 1366 $ sendhttpraw << EOF
1364 1367 > httprequest GET api/simplecache/missingkey
1365 1368 > user-agent: test
1366 1369 > EOF
1367 1370 using raw connection to peer
1368 1371 s> GET /api/simplecache/missingkey HTTP/1.1\r\n
1369 1372 s> Accept-Encoding: identity\r\n
1370 1373 s> user-agent: test\r\n
1371 1374 s> host: $LOCALIP:$HGPORT\r\n (glob)
1372 1375 s> \r\n
1373 1376 s> makefile('rb', None)
1374 1377 s> HTTP/1.1 404 Not Found\r\n
1375 1378 s> Server: testing stub value\r\n
1376 1379 s> Date: $HTTP_DATE$\r\n
1377 1380 s> Content-Type: text/plain\r\n
1378 1381 s> Content-Length: 22\r\n
1379 1382 s> \r\n
1380 1383 s> key not found in cache
1381 1384
1382 1385 Send a cacheable request
1383 1386
1384 1387 $ sendhttpv2peer << EOF
1385 1388 > command manifestdata
1386 1389 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
1387 1390 > tree eval:b''
1388 1391 > fields eval:[b'parents']
1389 1392 > EOF
1390 1393 creating http peer for wire protocol version 2
1391 1394 sending manifestdata command
1392 1395 response: gen[
1393 1396 {
1394 1397 b'totalitems': 1
1395 1398 },
1396 1399 {
1397 1400 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
1398 1401 b'parents': [
1399 1402 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
1400 1403 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
1401 1404 ]
1402 1405 }
1403 1406 ]
1404 1407
1405 1408 Cached entry should be available on server
1406 1409
1407 1410 $ sendhttpraw << EOF
1408 1411 > httprequest GET api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c
1409 1412 > user-agent: test
1410 1413 > EOF
1411 1414 using raw connection to peer
1412 1415 s> GET /api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c HTTP/1.1\r\n
1413 1416 s> Accept-Encoding: identity\r\n
1414 1417 s> user-agent: test\r\n
1415 1418 s> host: $LOCALIP:$HGPORT\r\n (glob)
1416 1419 s> \r\n
1417 1420 s> makefile('rb', None)
1418 1421 s> HTTP/1.1 200 OK\r\n
1419 1422 s> Server: testing stub value\r\n
1420 1423 s> Date: $HTTP_DATE$\r\n
1421 1424 s> Content-Type: application/mercurial-cbor\r\n
1422 1425 s> Content-Length: 91\r\n
1423 1426 s> \r\n
1424 1427 s> \xa1Jtotalitems\x01\xa2DnodeT\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&AGparents\x82T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00T\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
1425 1428 cbor> [
1426 1429 {
1427 1430 b'totalitems': 1
1428 1431 },
1429 1432 {
1430 1433 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
1431 1434 b'parents': [
1432 1435 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
1433 1436 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
1434 1437 ]
1435 1438 }
1436 1439 ]
1437 1440
1438 1441 2nd request should result in content redirect response
1439 1442
1440 1443 $ sendhttpv2peer << EOF
1441 1444 > command manifestdata
1442 1445 > nodes eval:[b'\x99\x2f\x47\x79\x02\x9a\x3d\xf8\xd0\x66\x6d\x00\xbb\x92\x4f\x69\x63\x4e\x26\x41']
1443 1446 > tree eval:b''
1444 1447 > fields eval:[b'parents']
1445 1448 > EOF
1446 1449 creating http peer for wire protocol version 2
1447 1450 sending manifestdata command
1448 1451 response: gen[
1449 1452 {
1450 1453 b'totalitems': 1
1451 1454 },
1452 1455 {
1453 1456 b'node': b'\x99/Gy\x02\x9a=\xf8\xd0fm\x00\xbb\x92OicN&A',
1454 1457 b'parents': [
1455 1458 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00',
1456 1459 b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
1457 1460 ]
1458 1461 }
1459 1462 ]
1460 1463
1461 1464 $ cat error.log
1462 1465 $ killdaemons.py
1463 1466
1464 1467 $ cat .hg/blackbox.log
1465 1468 *> cacher constructed for manifestdata (glob)
1466 1469 *> cache miss for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
1467 1470 *> storing cache entry for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
1468 1471 *> cacher constructed for manifestdata (glob)
1469 1472 *> cache hit for 47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
1470 1473 *> sending content redirect for 47abb8efa5f01b8964d74917793ad2464db0fa2c to http://*:$HGPORT/api/simplecache/47abb8efa5f01b8964d74917793ad2464db0fa2c (glob)
General Comments 0
You need to be logged in to leave comments. Login now