stream-clone: filter possible missing requirements using all supported one...
marmoute
r49522:6d2ddea0 stable
@@ -1,920 +1,920 @@
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import os
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .pycompat import open
17 17 from .interfaces import repository
18 18 from . import (
19 19 bookmarks,
20 20 cacheutil,
21 21 error,
22 22 narrowspec,
23 23 phases,
24 24 pycompat,
25 25 requirements as requirementsmod,
26 26 scmutil,
27 27 store,
28 28 util,
29 29 )
30 30 from .utils import (
31 31 stringutil,
32 32 )
33 33
34 34
35 35 def canperformstreamclone(pullop, bundle2=False):
36 36 """Whether it is possible to perform a streaming clone as part of pull.
37 37
38 38 ``bundle2`` will cause the function to consider stream clone through
39 39 bundle2 and only through bundle2.
40 40
41 41 Returns a tuple of (supported, requirements). ``supported`` is True if
42 42 streaming clone is supported and False otherwise. ``requirements`` is
43 43 a set of repo requirements from the remote, or ``None`` if stream clone
44 44 isn't supported.
45 45 """
46 46 repo = pullop.repo
47 47 remote = pullop.remote
48 48
49 49 bundle2supported = False
50 50 if pullop.canusebundle2:
51 51 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
52 52 bundle2supported = True
53 53 # else
54 54 # Server doesn't support bundle2 stream clone or doesn't support
55 55 # the versions we support. Fall back and possibly allow legacy.
56 56
57 57 # Ensures legacy code path uses available bundle2.
58 58 if bundle2supported and not bundle2:
59 59 return False, None
60 60 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
61 61 elif bundle2 and not bundle2supported:
62 62 return False, None
63 63
64 64 # Streaming clone only works on empty repositories.
65 65 if len(repo):
66 66 return False, None
67 67
68 68 # Streaming clone only works if all data is being requested.
69 69 if pullop.heads:
70 70 return False, None
71 71
72 72 streamrequested = pullop.streamclonerequested
73 73
74 74 # If we don't have a preference, let the server decide for us. This
75 75 # likely only comes into play in LANs.
76 76 if streamrequested is None:
77 77 # The server can advertise whether to prefer streaming clone.
78 78 streamrequested = remote.capable(b'stream-preferred')
79 79
80 80 if not streamrequested:
81 81 return False, None
82 82
83 83 # In order for stream clone to work, the client has to support all the
84 84 # requirements advertised by the server.
85 85 #
86 86 # The server advertises its requirements via the "stream" and "streamreqs"
87 87 # capability. "stream" (a value-less capability) is advertised if and only
88 88 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
89 89 # is advertised and contains a comma-delimited list of requirements.
90 90 requirements = set()
91 91 if remote.capable(b'stream'):
92 92 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
93 93 else:
94 94 streamreqs = remote.capable(b'streamreqs')
95 95 # This is weird and shouldn't happen with modern servers.
96 96 if not streamreqs:
97 97 pullop.repo.ui.warn(
98 98 _(
99 99 b'warning: stream clone requested but server has them '
100 100 b'disabled\n'
101 101 )
102 102 )
103 103 return False, None
104 104
105 105 streamreqs = set(streamreqs.split(b','))
106 106 # Server requires something we don't support. Bail.
107 missingreqs = streamreqs - repo.supportedformats
107 missingreqs = streamreqs - repo.supported
108 108 if missingreqs:
109 109 pullop.repo.ui.warn(
110 110 _(
111 111 b'warning: stream clone requested but client is missing '
112 112 b'requirements: %s\n'
113 113 )
114 114 % b', '.join(sorted(missingreqs))
115 115 )
116 116 pullop.repo.ui.warn(
117 117 _(
118 118 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
119 119 b'for more information)\n'
120 120 )
121 121 )
122 122 return False, None
123 123 requirements = streamreqs
124 124
125 125 return True, requirements
126 126
127 127
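The functional change in this revision is the right-hand side of the subtraction above: the server's advertised stream requirements are now filtered against `repo.supported` (everything the client understands) instead of the narrower `repo.supportedformats`. A minimal sketch, with hypothetical requirement sets, of why the old check could refuse a clone the client can actually perform:

# Hypothetical requirement sets, for illustration only.
supportedformats = {b'revlogv1', b'generaldelta', b'sparserevlog'}
supported = supportedformats | {b'store', b'fncache', b'dotencode'}
streamreqs = {b'revlogv1', b'generaldelta', b'store'}

# Old check: b'store' looks missing and the stream clone is rejected.
assert streamreqs - supportedformats == {b'store'}
# New check: nothing is missing and the stream clone proceeds.
assert streamreqs - supported == set()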
128 128 def maybeperformlegacystreamclone(pullop):
129 129 """Possibly perform a legacy stream clone operation.
130 130
131 131 Legacy stream clones are performed as part of pull but before all other
132 132 operations.
133 133
134 134 A legacy stream clone will not be performed if a bundle2 stream clone is
135 135 supported.
136 136 """
137 137 from . import localrepo
138 138
139 139 supported, requirements = canperformstreamclone(pullop)
140 140
141 141 if not supported:
142 142 return
143 143
144 144 repo = pullop.repo
145 145 remote = pullop.remote
146 146
147 147 # Save remote branchmap. We will use it later to speed up branchcache
148 148 # creation.
149 149 rbranchmap = None
150 150 if remote.capable(b'branchmap'):
151 151 with remote.commandexecutor() as e:
152 152 rbranchmap = e.callcommand(b'branchmap', {}).result()
153 153
154 154 repo.ui.status(_(b'streaming all changes\n'))
155 155
156 156 with remote.commandexecutor() as e:
157 157 fp = e.callcommand(b'stream_out', {}).result()
158 158
159 159 # TODO strictly speaking, this code should all be inside the context
160 160 # manager because the context manager is supposed to ensure all wire state
161 161 # is flushed when exiting. But the legacy peers don't do this, so it
162 162 # doesn't matter.
163 163 l = fp.readline()
164 164 try:
165 165 resp = int(l)
166 166 except ValueError:
167 167 raise error.ResponseError(
168 168 _(b'unexpected response from remote server:'), l
169 169 )
170 170 if resp == 1:
171 171 raise error.Abort(_(b'operation forbidden by server'))
172 172 elif resp == 2:
173 173 raise error.Abort(_(b'locking the remote repository failed'))
174 174 elif resp != 0:
175 175 raise error.Abort(_(b'the server sent an unknown error code'))
176 176
177 177 l = fp.readline()
178 178 try:
179 179 filecount, bytecount = map(int, l.split(b' ', 1))
180 180 except (ValueError, TypeError):
181 181 raise error.ResponseError(
182 182 _(b'unexpected response from remote server:'), l
183 183 )
184 184
185 185 with repo.lock():
186 186 consumev1(repo, fp, filecount, bytecount)
187 187
188 188 # new requirements = old non-format requirements +
189 189 # new format-related remote requirements
190 190 # requirements from the streamed-in repository
191 191 repo.requirements = requirements | (
192 192 repo.requirements - repo.supportedformats
193 193 )
194 194 repo.svfs.options = localrepo.resolvestorevfsoptions(
195 195 repo.ui, repo.requirements, repo.features
196 196 )
197 197 scmutil.writereporequirements(repo)
198 198
199 199 if rbranchmap:
200 200 repo._branchcaches.replace(repo, rbranchmap)
201 201
202 202 repo.invalidate()
203 203
204 204
205 205 def allowservergeneration(repo):
206 206 """Whether streaming clones are allowed from the server."""
207 207 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
208 208 return False
209 209
210 210 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
211 211 return False
212 212
213 213 # The way stream clone works makes it impossible to hide secret changesets.
214 214 # So don't allow this by default.
215 215 secret = phases.hassecret(repo)
216 216 if secret:
217 217 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
218 218
219 219 return True
220 220
221 221
222 222 # This is its own function so extensions can override it.
223 223 def _walkstreamfiles(repo, matcher=None):
224 224 return repo.store.walk(matcher)
225 225
226 226
227 227 def generatev1(repo):
228 228 """Emit content for version 1 of a streaming clone.
229 229
230 230 This returns a 3-tuple of (file count, byte size, data iterator).
231 231
232 232 The data iterator consists of N entries for each file being transferred.
233 233 Each file entry starts as a line with the file name and integer size
234 234 delimited by a null byte.
235 235
236 236 The raw file data follows. Following the raw file data is the next file
237 237 entry, or EOF.
238 238
239 239 When used on the wire protocol, an additional line indicating protocol
240 240 success will be prepended to the stream. This function is not responsible
241 241 for adding it.
242 242
243 243 This function will obtain a repository lock to ensure a consistent view of
244 244 the store is captured. It therefore may raise LockError.
245 245 """
246 246 entries = []
247 247 total_bytes = 0
248 248 # Get consistent snapshot of repo, lock during scan.
249 249 with repo.lock():
250 250 repo.ui.debug(b'scanning\n')
251 251 for file_type, name, size in _walkstreamfiles(repo):
252 252 if size:
253 253 entries.append((name, size))
254 254 total_bytes += size
255 255 _test_sync_point_walk_1(repo)
256 256 _test_sync_point_walk_2(repo)
257 257
258 258 repo.ui.debug(
259 259 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
260 260 )
261 261
262 262 svfs = repo.svfs
263 263 debugflag = repo.ui.debugflag
264 264
265 265 def emitrevlogdata():
266 266 for name, size in entries:
267 267 if debugflag:
268 268 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
269 269 # partially encode name over the wire for backwards compat
270 270 yield b'%s\0%d\n' % (store.encodedir(name), size)
271 271 # auditing at this stage is both pointless (paths are already
272 272 # trusted by the local repo) and expensive
273 273 with svfs(name, b'rb', auditpath=False) as fp:
274 274 if size <= 65536:
275 275 yield fp.read(size)
276 276 else:
277 277 for chunk in util.filechunkiter(fp, limit=size):
278 278 yield chunk
279 279
280 280 return len(entries), total_bytes, emitrevlogdata()
281 281
282 282
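As a rough sketch of consuming the framing that ``generatev1()`` documents above, a reader only needs the file count and a binary stream; the function below is illustrative and not part of the module (names on the wire are store-encoded, see ``store.encodedir``/``decodedir``):

def iter_v1_entries(fp, filecount):
    """Yield (name, data) pairs from a version 1 stream (sketch)."""
    for _ in range(filecount):
        header = fp.readline()                 # b'<name>\0<size>\n'
        name, size = header.rstrip(b'\n').split(b'\0', 1)
        yield name, fp.read(int(size))         # raw store file contents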
283 283 def generatev1wireproto(repo):
284 284 """Emit content for version 1 of streaming clone suitable for the wire.
285 285
286 286 This is the data output from ``generatev1()`` with 2 header lines. The
287 287 first line indicates overall success. The 2nd contains the file count and
288 288 byte size of payload.
289 289
290 290 The success line contains "0" for success, "1" for stream generation not
291 291 allowed, and "2" for error locking the repository (possibly indicating
292 292 a permissions error for the server process).
293 293 """
294 294 if not allowservergeneration(repo):
295 295 yield b'1\n'
296 296 return
297 297
298 298 try:
299 299 filecount, bytecount, it = generatev1(repo)
300 300 except error.LockError:
301 301 yield b'2\n'
302 302 return
303 303
304 304 # Indicates successful response.
305 305 yield b'0\n'
306 306 yield b'%d %d\n' % (filecount, bytecount)
307 307 for chunk in it:
308 308 yield chunk
309 309
310 310
311 311 def generatebundlev1(repo, compression=b'UN'):
312 312 """Emit content for version 1 of a stream clone bundle.
313 313
314 314 The first 4 bytes of the output ("HGS1") denote this as stream clone
315 315 bundle version 1.
316 316
317 317 The next 2 bytes indicate the compression type. Only "UN" is currently
318 318 supported.
319 319
320 320 The next 16 bytes are two 64-bit big endian unsigned integers indicating
321 321 file count and byte count, respectively.
322 322
323 323 The next 2 bytes are a 16-bit big endian unsigned short declaring the length
324 324 of the requirements string, including a trailing \0. The following N bytes
325 325 are the requirements string, which is ASCII containing a comma-delimited
326 326 list of repo requirements that are needed to support the data.
327 327
328 328 The remaining content is the output of ``generatev1()`` (which may be
329 329 compressed in the future).
330 330
331 331 Returns a tuple of (requirements, data generator).
332 332 """
333 333 if compression != b'UN':
334 334 raise ValueError(b'we do not support the compression argument yet')
335 335
336 336 requirements = repo.requirements & repo.supportedformats
337 337 requires = b','.join(sorted(requirements))
338 338
339 339 def gen():
340 340 yield b'HGS1'
341 341 yield compression
342 342
343 343 filecount, bytecount, it = generatev1(repo)
344 344 repo.ui.status(
345 345 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
346 346 )
347 347
348 348 yield struct.pack(b'>QQ', filecount, bytecount)
349 349 yield struct.pack(b'>H', len(requires) + 1)
350 350 yield requires + b'\0'
351 351
352 352 # This is where we'll add compression in the future.
353 353 assert compression == b'UN'
354 354
355 355 progress = repo.ui.makeprogress(
356 356 _(b'bundle'), total=bytecount, unit=_(b'bytes')
357 357 )
358 358 progress.update(0)
359 359
360 360 for chunk in it:
361 361 progress.increment(step=len(chunk))
362 362 yield chunk
363 363
364 364 progress.complete()
365 365
366 366 return requirements, gen()
367 367
368 368
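To make the header layout in the docstring concrete, the fixed-size part of a v1 stream clone bundle can be assembled with plain ``struct`` calls; the counts and the requirements string below are made up for illustration:

import struct

requires = b'generaldelta,revlogv1'            # hypothetical requirements
header = b'HGS1'                               # magic: stream clone bundle v1
header += b'UN'                                # compression: uncompressed only
header += struct.pack(b'>QQ', 6, 606)          # file count, byte count
header += struct.pack(b'>H', len(requires) + 1)
header += requires + b'\0'                     # NUL-terminated requirements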
369 369 def consumev1(repo, fp, filecount, bytecount):
370 370 """Apply the contents from version 1 of a streaming clone file handle.
371 371
372 372 This takes the output from "stream_out" and applies it to the specified
373 373 repository.
374 374
375 375 Like "stream_out," the status line added by the wire protocol is not
376 376 handled by this function.
377 377 """
378 378 with repo.lock():
379 379 repo.ui.status(
380 380 _(b'%d files to transfer, %s of data\n')
381 381 % (filecount, util.bytecount(bytecount))
382 382 )
383 383 progress = repo.ui.makeprogress(
384 384 _(b'clone'), total=bytecount, unit=_(b'bytes')
385 385 )
386 386 progress.update(0)
387 387 start = util.timer()
388 388
389 389 # TODO: get rid of (potential) inconsistency
390 390 #
391 391 # If transaction is started and any @filecache property is
392 392 # changed at this point, it causes inconsistency between
393 393 # in-memory cached property and streamclone-ed file on the
394 394 # disk. Nested transaction prevents transaction scope "clone"
395 395 # below from writing in-memory changes out at the end of it,
396 396 # even though in-memory changes are discarded at the end of it
397 397 # regardless of transaction nesting.
398 398 #
399 399 # But transaction nesting can't be simply prohibited, because
400 400 # nesting occurs also in ordinary case (e.g. enabling
401 401 # clonebundles).
402 402
403 403 with repo.transaction(b'clone'):
404 404 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
405 405 for i in pycompat.xrange(filecount):
406 406 # XXX doesn't support '\n' or '\r' in filenames
407 407 l = fp.readline()
408 408 try:
409 409 name, size = l.split(b'\0', 1)
410 410 size = int(size)
411 411 except (ValueError, TypeError):
412 412 raise error.ResponseError(
413 413 _(b'unexpected response from remote server:'), l
414 414 )
415 415 if repo.ui.debugflag:
416 416 repo.ui.debug(
417 417 b'adding %s (%s)\n' % (name, util.bytecount(size))
418 418 )
419 419 # for backwards compat, name was partially encoded
420 420 path = store.decodedir(name)
421 421 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
422 422 for chunk in util.filechunkiter(fp, limit=size):
423 423 progress.increment(step=len(chunk))
424 424 ofp.write(chunk)
425 425
426 426 # force @filecache properties to be reloaded from
427 427 # streamclone-ed file at next access
428 428 repo.invalidate(clearfilecache=True)
429 429
430 430 elapsed = util.timer() - start
431 431 if elapsed <= 0:
432 432 elapsed = 0.001
433 433 progress.complete()
434 434 repo.ui.status(
435 435 _(b'transferred %s in %.1f seconds (%s/sec)\n')
436 436 % (
437 437 util.bytecount(bytecount),
438 438 elapsed,
439 439 util.bytecount(bytecount / elapsed),
440 440 )
441 441 )
442 442
443 443
444 444 def readbundle1header(fp):
445 445 compression = fp.read(2)
446 446 if compression != b'UN':
447 447 raise error.Abort(
448 448 _(
449 449 b'only uncompressed stream clone bundles are '
450 450 b'supported; got %s'
451 451 )
452 452 % compression
453 453 )
454 454
455 455 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
456 456 requireslen = struct.unpack(b'>H', fp.read(2))[0]
457 457 requires = fp.read(requireslen)
458 458
459 459 if not requires.endswith(b'\0'):
460 460 raise error.Abort(
461 461 _(
462 462 b'malformed stream clone bundle: '
463 463 b'requirements not properly encoded'
464 464 )
465 465 )
466 466
467 467 requirements = set(requires.rstrip(b'\0').split(b','))
468 468
469 469 return filecount, bytecount, requirements
470 470
471 471
472 472 def applybundlev1(repo, fp):
473 473 """Apply the content from a stream clone bundle version 1.
474 474
475 475 We assume the 4 byte header has been read and validated and the file handle
476 476 is at the 2 byte compression identifier.
477 477 """
478 478 if len(repo):
479 479 raise error.Abort(
480 480 _(b'cannot apply stream clone bundle on non-empty repo')
481 481 )
482 482
483 483 filecount, bytecount, requirements = readbundle1header(fp)
484 missingreqs = requirements - repo.supportedformats
484 missingreqs = requirements - repo.supported
485 485 if missingreqs:
486 486 raise error.Abort(
487 487 _(b'unable to apply stream clone: unsupported format: %s')
488 488 % b', '.join(sorted(missingreqs))
489 489 )
490 490
491 491 consumev1(repo, fp, filecount, bytecount)
492 492
493 493
494 494 class streamcloneapplier(object):
495 495 """Class to manage applying streaming clone bundles.
496 496
497 497 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
498 498 readers to perform bundle type-specific functionality.
499 499 """
500 500
501 501 def __init__(self, fh):
502 502 self._fh = fh
503 503
504 504 def apply(self, repo):
505 505 return applybundlev1(repo, self._fh)
506 506
507 507
508 508 # type of file to stream
509 509 _fileappend = 0 # append only file
510 510 _filefull = 1 # full snapshot file
511 511
512 512 # Source of the file
513 513 _srcstore = b's' # store (svfs)
514 514 _srccache = b'c' # cache (cache)
515 515
516 516 # This is its own function so extensions can override it.
517 517 def _walkstreamfullstorefiles(repo):
518 518 """list snapshot file from the store"""
519 519 fnames = []
520 520 if not repo.publishing():
521 521 fnames.append(b'phaseroots')
522 522 return fnames
523 523
524 524
525 525 def _filterfull(entry, copy, vfsmap):
526 526 """actually copy the snapshot files"""
527 527 src, name, ftype, data = entry
528 528 if ftype != _filefull:
529 529 return entry
530 530 return (src, name, ftype, copy(vfsmap[src].join(name)))
531 531
532 532
533 533 @contextlib.contextmanager
534 534 def maketempcopies():
535 535 """return a function to temporary copy file"""
536 536 files = []
537 537 try:
538 538
539 539 def copy(src):
540 540 fd, dst = pycompat.mkstemp()
541 541 os.close(fd)
542 542 files.append(dst)
543 543 util.copyfiles(src, dst, hardlink=True)
544 544 return dst
545 545
546 546 yield copy
547 547 finally:
548 548 for tmp in files:
549 549 util.tryunlink(tmp)
550 550
551 551
552 552 def _makemap(repo):
553 553 """make a (src -> vfs) map for the repo"""
554 554 vfsmap = {
555 555 _srcstore: repo.svfs,
556 556 _srccache: repo.cachevfs,
557 557 }
558 558 # we keep repo.vfs out of the map on purpose, there are too many dangers there
559 559 # (eg: .hg/hgrc)
560 560 assert repo.vfs not in vfsmap.values()
561 561
562 562 return vfsmap
563 563
564 564
565 565 def _emit2(repo, entries, totalfilesize):
566 566 """actually emit the stream bundle"""
567 567 vfsmap = _makemap(repo)
568 568 # we keep repo.vfs out of the map on purpose, there are too many dangers there
569 569 # (eg: .hg/hgrc),
570 570 #
571 571 # this assert is duplicated (from _makemap) as an author might think this is
572 572 # fine, while this is really not fine.
573 573 if repo.vfs in vfsmap.values():
574 574 raise error.ProgrammingError(
575 575 b'repo.vfs must not be added to vfsmap for security reasons'
576 576 )
577 577
578 578 progress = repo.ui.makeprogress(
579 579 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
580 580 )
581 581 progress.update(0)
582 582 with maketempcopies() as copy, progress:
583 583 # copy is delayed until we are in the try
584 584 entries = [_filterfull(e, copy, vfsmap) for e in entries]
585 585 yield None # this releases the lock on the repository
586 586 totalbytecount = 0
587 587
588 588 for src, name, ftype, data in entries:
589 589 vfs = vfsmap[src]
590 590 yield src
591 591 yield util.uvarintencode(len(name))
592 592 if ftype == _fileappend:
593 593 fp = vfs(name)
594 594 size = data
595 595 elif ftype == _filefull:
596 596 fp = open(data, b'rb')
597 597 size = util.fstat(fp).st_size
598 598 bytecount = 0
599 599 try:
600 600 yield util.uvarintencode(size)
601 601 yield name
602 602 if size <= 65536:
603 603 chunks = (fp.read(size),)
604 604 else:
605 605 chunks = util.filechunkiter(fp, limit=size)
606 606 for chunk in chunks:
607 607 bytecount += len(chunk)
608 608 totalbytecount += len(chunk)
609 609 progress.update(totalbytecount)
610 610 yield chunk
611 611 if bytecount != size:
612 612 # Would most likely be caused by a race due to `hg strip` or
613 613 # a revlog split
614 614 raise error.Abort(
615 615 _(
616 616 b'clone could only read %d bytes from %s, but '
617 617 b'expected %d bytes'
618 618 )
619 619 % (bytecount, name, size)
620 620 )
621 621 finally:
622 622 fp.close()
623 623
624 624
625 625 def _test_sync_point_walk_1(repo):
626 626 """a function for synchronisation during tests"""
627 627
628 628
629 629 def _test_sync_point_walk_2(repo):
630 630 """a function for synchronisation during tests"""
631 631
632 632
633 633 def _v2_walk(repo, includes, excludes, includeobsmarkers):
634 634 """emit a seris of files information useful to clone a repo
635 635
636 636 return (entries, totalfilesize)
637 637
638 638 entries is a list of tuple (vfs-key, file-path, file-type, size)
639 639
640 640 - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
641 641 - `name`: file path of the file to copy (to be fed to the vfs)
642 642 - `file-type`: does this file need to be copied with the source lock?
643 643 - `size`: the size of the file (or None)
644 644 """
645 645 assert repo._currentlock(repo._lockref) is not None
646 646 entries = []
647 647 totalfilesize = 0
648 648
649 649 matcher = None
650 650 if includes or excludes:
651 651 matcher = narrowspec.match(repo.root, includes, excludes)
652 652
653 653 for rl_type, name, size in _walkstreamfiles(repo, matcher):
654 654 if size:
655 655 ft = _fileappend
656 656 if rl_type & store.FILEFLAGS_VOLATILE:
657 657 ft = _filefull
658 658 entries.append((_srcstore, name, ft, size))
659 659 totalfilesize += size
660 660 for name in _walkstreamfullstorefiles(repo):
661 661 if repo.svfs.exists(name):
662 662 totalfilesize += repo.svfs.lstat(name).st_size
663 663 entries.append((_srcstore, name, _filefull, None))
664 664 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
665 665 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
666 666 entries.append((_srcstore, b'obsstore', _filefull, None))
667 667 for name in cacheutil.cachetocopy(repo):
668 668 if repo.cachevfs.exists(name):
669 669 totalfilesize += repo.cachevfs.lstat(name).st_size
670 670 entries.append((_srccache, name, _filefull, None))
671 671 return entries, totalfilesize
672 672
673 673
674 674 def generatev2(repo, includes, excludes, includeobsmarkers):
675 675 """Emit content for version 2 of a streaming clone.
676 676
677 677 the data stream consists of the following entries:
678 678 1) A char representing the file destination (eg: store or cache)
679 679 2) A varint containing the length of the filename
680 680 3) A varint containing the length of file data
681 681 4) N bytes containing the filename (the internal, store-agnostic form)
682 682 5) N bytes containing the file data
683 683
684 684 Returns a 3-tuple of (file count, file size, data iterator).
685 685 """
686 686
687 687 with repo.lock():
688 688
689 689 repo.ui.debug(b'scanning\n')
690 690
691 691 entries, totalfilesize = _v2_walk(
692 692 repo,
693 693 includes=includes,
694 694 excludes=excludes,
695 695 includeobsmarkers=includeobsmarkers,
696 696 )
697 697
698 698 chunks = _emit2(repo, entries, totalfilesize)
699 699 first = next(chunks)
700 700 assert first is None
701 701 _test_sync_point_walk_1(repo)
702 702 _test_sync_point_walk_2(repo)
703 703
704 704 return len(entries), totalfilesize, chunks
705 705
706 706
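A single entry of the version 2 stream described in the docstring above decodes as follows; ``read_uvarint`` stands in for ``util.uvarintdecodestream`` and the function is only a sketch, not part of the module:

def read_v2_entry(fp, read_uvarint):
    """Decode one (destination, name, data) entry of a v2 stream (sketch)."""
    src = fp.read(1)               # b's' for the store vfs, b'c' for the cache vfs
    namelen = read_uvarint(fp)     # varint: length of the file name
    datalen = read_uvarint(fp)     # varint: length of the file data
    name = fp.read(namelen)        # store-agnostic file name
    data = fp.read(datalen)        # raw file contents
    return src, name, data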
707 707 @contextlib.contextmanager
708 708 def nested(*ctxs):
709 709 this = ctxs[0]
710 710 rest = ctxs[1:]
711 711 with this:
712 712 if rest:
713 713 with nested(*rest):
714 714 yield
715 715 else:
716 716 yield
717 717
718 718
719 719 def consumev2(repo, fp, filecount, filesize):
720 720 """Apply the contents from a version 2 streaming clone.
721 721
722 722 Data is read from an object that only needs to provide a ``read(size)``
723 723 method.
724 724 """
725 725 with repo.lock():
726 726 repo.ui.status(
727 727 _(b'%d files to transfer, %s of data\n')
728 728 % (filecount, util.bytecount(filesize))
729 729 )
730 730
731 731 start = util.timer()
732 732 progress = repo.ui.makeprogress(
733 733 _(b'clone'), total=filesize, unit=_(b'bytes')
734 734 )
735 735 progress.update(0)
736 736
737 737 vfsmap = _makemap(repo)
738 738 # we keep repo.vfs out of the map on purpose, there are too many dangers
739 739 # there (eg: .hg/hgrc),
740 740 #
741 741 # this assert is duplicated (from _makemap) as an author might think this
742 742 # is fine, while this is really not fine.
743 743 if repo.vfs in vfsmap.values():
744 744 raise error.ProgrammingError(
745 745 b'repo.vfs must not be added to vfsmap for security reasons'
746 746 )
747 747
748 748 with repo.transaction(b'clone'):
749 749 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
750 750 with nested(*ctxs):
751 751 for i in range(filecount):
752 752 src = util.readexactly(fp, 1)
753 753 vfs = vfsmap[src]
754 754 namelen = util.uvarintdecodestream(fp)
755 755 datalen = util.uvarintdecodestream(fp)
756 756
757 757 name = util.readexactly(fp, namelen)
758 758
759 759 if repo.ui.debugflag:
760 760 repo.ui.debug(
761 761 b'adding [%s] %s (%s)\n'
762 762 % (src, name, util.bytecount(datalen))
763 763 )
764 764
765 765 with vfs(name, b'w') as ofp:
766 766 for chunk in util.filechunkiter(fp, limit=datalen):
767 767 progress.increment(step=len(chunk))
768 768 ofp.write(chunk)
769 769
770 770 # force @filecache properties to be reloaded from
771 771 # streamclone-ed file at next access
772 772 repo.invalidate(clearfilecache=True)
773 773
774 774 elapsed = util.timer() - start
775 775 if elapsed <= 0:
776 776 elapsed = 0.001
777 777 repo.ui.status(
778 778 _(b'transferred %s in %.1f seconds (%s/sec)\n')
779 779 % (
780 780 util.bytecount(progress.pos),
781 781 elapsed,
782 782 util.bytecount(progress.pos / elapsed),
783 783 )
784 784 )
785 785 progress.complete()
786 786
787 787
788 788 def applybundlev2(repo, fp, filecount, filesize, requirements):
789 789 from . import localrepo
790 790
791 791 missingreqs = [r for r in requirements if r not in repo.supported]
792 792 if missingreqs:
793 793 raise error.Abort(
794 794 _(b'unable to apply stream clone: unsupported format: %s')
795 795 % b', '.join(sorted(missingreqs))
796 796 )
797 797
798 798 consumev2(repo, fp, filecount, filesize)
799 799
800 800 # new requirements = old non-format requirements +
801 801 # new format-related remote requirements
802 802 # requirements from the streamed-in repository
803 803 repo.requirements = set(requirements) | (
804 804 repo.requirements - repo.supportedformats
805 805 )
806 806 repo.svfs.options = localrepo.resolvestorevfsoptions(
807 807 repo.ui, repo.requirements, repo.features
808 808 )
809 809 scmutil.writereporequirements(repo)
810 810
811 811
812 812 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
813 813 hardlink = [True]
814 814
815 815 def copy_used():
816 816 hardlink[0] = False
817 817 progress.topic = _(b'copying')
818 818
819 819 for k, path, size in entries:
820 820 src_vfs = src_vfs_map[k]
821 821 dst_vfs = dst_vfs_map[k]
822 822 src_path = src_vfs.join(path)
823 823 dst_path = dst_vfs.join(path)
824 824 # We cannot use dirname and makedirs of dst_vfs here because the store
825 825 # encoding confuses them. See issue 6581 for details.
826 826 dirname = os.path.dirname(dst_path)
827 827 if not os.path.exists(dirname):
828 828 util.makedirs(dirname)
829 829 dst_vfs.register_file(path)
830 830 # XXX we could use the #nb_bytes argument.
831 831 util.copyfile(
832 832 src_path,
833 833 dst_path,
834 834 hardlink=hardlink[0],
835 835 no_hardlink_cb=copy_used,
836 836 check_fs_hardlink=False,
837 837 )
838 838 progress.increment()
839 839 return hardlink[0]
840 840
841 841
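The one-element ``hardlink`` list above is a mutable flag shared with the callback: ``util.copyfile`` calls ``no_hardlink_cb`` when it cannot hardlink, the closure flips the flag and the progress topic, and later files are then copied rather than linked. A stripped-down sketch of the same pattern, using a hypothetical one-argument ``copyfile`` helper:

def copy_all(paths, copyfile):
    hardlink = [True]              # mutable flag the callback can flip

    def on_fallback():
        hardlink[0] = False        # remember that linking did not work

    for path in paths:
        # hypothetical helper mirroring util.copyfile's keyword arguments
        copyfile(path, hardlink=hardlink[0], no_hardlink_cb=on_fallback)
    return hardlink[0]             # True only if every file was hardlinked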
842 842 def local_copy(src_repo, dest_repo):
843 843 """copy all content from one local repository to another
844 844
845 845 This is useful for local clones"""
846 846 src_store_requirements = {
847 847 r
848 848 for r in src_repo.requirements
849 849 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
850 850 }
851 851 dest_store_requirements = {
852 852 r
853 853 for r in dest_repo.requirements
854 854 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
855 855 }
856 856 assert src_store_requirements == dest_store_requirements
857 857
858 858 with dest_repo.lock():
859 859 with src_repo.lock():
860 860
861 861 # bookmarks are not integrated into the streaming as they might use
862 862 # `repo.vfs`, and there is too much sensitive data accessible
863 863 # through `repo.vfs` to expose it to streaming clone.
864 864 src_book_vfs = bookmarks.bookmarksvfs(src_repo)
865 865 srcbookmarks = src_book_vfs.join(b'bookmarks')
866 866 bm_count = 0
867 867 if os.path.exists(srcbookmarks):
868 868 bm_count = 1
869 869
870 870 entries, totalfilesize = _v2_walk(
871 871 src_repo,
872 872 includes=None,
873 873 excludes=None,
874 874 includeobsmarkers=True,
875 875 )
876 876 src_vfs_map = _makemap(src_repo)
877 877 dest_vfs_map = _makemap(dest_repo)
878 878 progress = src_repo.ui.makeprogress(
879 879 topic=_(b'linking'),
880 880 total=len(entries) + bm_count,
881 881 unit=_(b'files'),
882 882 )
883 883 # copy files
884 884 #
885 885 # We could copy the full file while the source repository is locked
886 886 # and the other one without the lock. However, in the linking case,
887 887 # this would also require checks that nobody is appending any data
888 888 # to the files while we do the clone, so this is not done yet. We
889 889 # could do this blindly when copying files.
890 890 files = ((k, path, size) for k, path, ftype, size in entries)
891 891 hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
892 892
893 893 # copy bookmarks over
894 894 if bm_count:
895 895 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
896 896 dstbookmarks = dst_book_vfs.join(b'bookmarks')
897 897 util.copyfile(srcbookmarks, dstbookmarks)
898 898 progress.complete()
899 899 if hardlink:
900 900 msg = b'linked %d files\n'
901 901 else:
902 902 msg = b'copied %d files\n'
903 903 src_repo.ui.debug(msg % (len(entries) + bm_count))
904 904
905 905 with dest_repo.transaction(b"localclone") as tr:
906 906 dest_repo.store.write(tr)
907 907
908 908 # clean up transaction files as they do not make sense
909 909 undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
910 910 undo_files.extend(dest_repo.undofiles())
911 911 for undovfs, undofile in undo_files:
912 912 try:
913 913 undovfs.unlink(undofile)
914 914 except OSError as e:
915 915 if e.errno != errno.ENOENT:
916 916 msg = _(b'error removing %s: %s\n')
917 917 path = undovfs.join(undofile)
918 918 e_msg = stringutil.forcebytestr(e)
919 919 msg %= (path, e_msg)
920 920 dest_repo.ui.warn(msg)
@@ -1,413 +1,415 @@
1 1 #require serve
2 2
3 3 This test is a duplicate of 'test-http.t', feel free to factor out
4 4 parts that are not bundle1/bundle2 specific.
5 5
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [devel]
8 8 > # This test is dedicated to interaction through old bundle
9 9 > legacy.exchange = bundle1
10 10 > EOF
11 11
12 12 $ hg init test
13 13 $ cd test
14 14 $ echo foo>foo
15 15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
16 16 $ echo foo>foo.d/foo
17 17 $ echo bar>foo.d/bAr.hg.d/BaR
18 18 $ echo bar>foo.d/baR.d.hg/bAR
19 19 $ hg commit -A -m 1
20 20 adding foo
21 21 adding foo.d/bAr.hg.d/BaR
22 22 adding foo.d/baR.d.hg/bAR
23 23 adding foo.d/foo
24 24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
25 25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
26 26
27 27 Test server address cannot be reused
28 28
29 29 $ hg serve -p $HGPORT1 2>&1
30 30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
31 31 [255]
32 32
33 33 $ cd ..
34 34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
35 35
36 36 clone via stream
37 37
38 38 #if no-reposimplestore
39 39 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
40 40 streaming all changes
41 41 6 files to transfer, 606 bytes of data (no-zstd !)
42 42 6 files to transfer, 608 bytes of data (zstd !)
43 43 transferred * bytes in * seconds (*/sec) (glob)
44 44 searching for changes
45 45 no changes found
46 46 updating to branch default
47 47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 48 $ hg verify -R copy
49 49 checking changesets
50 50 checking manifests
51 51 crosschecking files in changesets and manifests
52 52 checking files
53 53 checked 1 changesets with 4 changes to 4 files
54 54 #endif
55 55
56 56 try to clone via stream, should use pull instead
57 57
58 58 $ hg clone --stream http://localhost:$HGPORT1/ copy2
59 59 warning: stream clone requested but server has them disabled
60 60 requesting all changes
61 61 adding changesets
62 62 adding manifests
63 63 adding file changes
64 64 added 1 changesets with 4 changes to 4 files
65 65 new changesets 8b6053c928fe
66 66 updating to branch default
67 67 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 68
69 69 try to clone via stream but missing requirements, so should use pull instead
70 70
71 71 $ cat > $TESTTMP/removesupportedformat.py << EOF
72 72 > from mercurial import localrepo
73 > def extsetup(ui):
74 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
73 > def reposetup(ui, repo):
74 > local = repo.local()
75 > if local is not None:
76 > local.supported.remove(b'generaldelta')
75 77 > EOF
76 78
77 79 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
78 80 warning: stream clone requested but client is missing requirements: generaldelta
79 81 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
80 82 requesting all changes
81 83 adding changesets
82 84 adding manifests
83 85 adding file changes
84 86 added 1 changesets with 4 changes to 4 files
85 87 new changesets 8b6053c928fe
86 88 updating to branch default
87 89 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 90
89 91 clone via pull
90 92
91 93 $ hg clone http://localhost:$HGPORT1/ copy-pull
92 94 requesting all changes
93 95 adding changesets
94 96 adding manifests
95 97 adding file changes
96 98 added 1 changesets with 4 changes to 4 files
97 99 new changesets 8b6053c928fe
98 100 updating to branch default
99 101 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 102 $ hg verify -R copy-pull
101 103 checking changesets
102 104 checking manifests
103 105 crosschecking files in changesets and manifests
104 106 checking files
105 107 checked 1 changesets with 4 changes to 4 files
106 108 $ cd test
107 109 $ echo bar > bar
108 110 $ hg commit -A -d '1 0' -m 2
109 111 adding bar
110 112 $ cd ..
111 113
112 114 clone over http with --update
113 115
114 116 $ hg clone http://localhost:$HGPORT1/ updated --update 0
115 117 requesting all changes
116 118 adding changesets
117 119 adding manifests
118 120 adding file changes
119 121 added 2 changesets with 5 changes to 5 files
120 122 new changesets 8b6053c928fe:5fed3813f7f5
121 123 updating to branch default
122 124 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 125 $ hg log -r . -R updated
124 126 changeset: 0:8b6053c928fe
125 127 user: test
126 128 date: Thu Jan 01 00:00:00 1970 +0000
127 129 summary: 1
128 130
129 131 $ rm -rf updated
130 132
131 133 incoming via HTTP
132 134
133 135 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
134 136 adding changesets
135 137 adding manifests
136 138 adding file changes
137 139 added 1 changesets with 4 changes to 4 files
138 140 new changesets 8b6053c928fe
139 141 updating to branch default
140 142 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 143 $ cd partial
142 144 $ touch LOCAL
143 145 $ hg ci -qAm LOCAL
144 146 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
145 147 comparing with http://localhost:$HGPORT1/
146 148 searching for changes
147 149 2
148 150 $ cd ..
149 151
150 152 pull
151 153
152 154 $ cd copy-pull
153 155 $ cat >> .hg/hgrc <<EOF
154 156 > [hooks]
155 157 > changegroup = sh -c "printenv.py --line changegroup"
156 158 > EOF
157 159 $ hg pull
158 160 pulling from http://localhost:$HGPORT1/
159 161 searching for changes
160 162 adding changesets
161 163 adding manifests
162 164 adding file changes
163 165 added 1 changesets with 1 changes to 1 files
164 166 new changesets 5fed3813f7f5
165 167 changegroup hook: HG_HOOKNAME=changegroup
166 168 HG_HOOKTYPE=changegroup
167 169 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
168 170 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
169 171 HG_SOURCE=pull
170 172 HG_TXNID=TXN:$ID$
171 173 HG_TXNNAME=pull
172 174 http://localhost:$HGPORT1/
173 175 HG_URL=http://localhost:$HGPORT1/
174 176
175 177 (run 'hg update' to get a working copy)
176 178 $ cd ..
177 179
178 180 clone from invalid URL
179 181
180 182 $ hg clone http://localhost:$HGPORT/bad
181 183 abort: HTTP Error 404: Not Found
182 184 [100]
183 185
184 186 test http authentication
185 187 + use the same server to test server side streaming preference
186 188
187 189 $ cd test
188 190
189 191 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
190 192 > --pid-file=pid --config server.preferuncompressed=True \
191 193 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
192 194 $ cat pid >> $DAEMON_PIDS
193 195
194 196 $ cat << EOF > get_pass.py
195 197 > from mercurial import util
196 198 > def newgetpass():
197 199 > return "pass"
198 200 > util.get_password = newgetpass
199 201 > EOF
200 202
201 203 $ hg id http://localhost:$HGPORT2/
202 204 abort: http authorization required for http://localhost:$HGPORT2/
203 205 [255]
204 206 $ hg id http://localhost:$HGPORT2/
205 207 abort: http authorization required for http://localhost:$HGPORT2/
206 208 [255]
207 209 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
208 210 http authorization required for http://localhost:$HGPORT2/
209 211 realm: mercurial
210 212 user: user
211 213 password: 5fed3813f7f5
212 214 $ hg id http://user:pass@localhost:$HGPORT2/
213 215 5fed3813f7f5
214 216 $ echo '[auth]' >> .hg/hgrc
215 217 $ echo 'l.schemes=http' >> .hg/hgrc
216 218 $ echo 'l.prefix=lo' >> .hg/hgrc
217 219 $ echo 'l.username=user' >> .hg/hgrc
218 220 $ echo 'l.password=pass' >> .hg/hgrc
219 221 $ hg id http://localhost:$HGPORT2/
220 222 5fed3813f7f5
221 223 $ hg id http://localhost:$HGPORT2/
222 224 5fed3813f7f5
223 225 $ hg id http://user@localhost:$HGPORT2/
224 226 5fed3813f7f5
225 227
226 228 #if no-reposimplestore
227 229 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
228 230 streaming all changes
229 231 7 files to transfer, 916 bytes of data (no-zstd !)
230 232 7 files to transfer, 919 bytes of data (zstd !)
231 233 transferred * bytes in * seconds (*/sec) (glob)
232 234 searching for changes
233 235 no changes found
234 236 updating to branch default
235 237 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
236 238 #endif
237 239
238 240 --pull should override server's preferuncompressed
239 241
240 242 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
241 243 requesting all changes
242 244 adding changesets
243 245 adding manifests
244 246 adding file changes
245 247 added 2 changesets with 5 changes to 5 files
246 248 new changesets 8b6053c928fe:5fed3813f7f5
247 249 updating to branch default
248 250 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 251
250 252 $ hg id http://user2@localhost:$HGPORT2/
251 253 abort: http authorization required for http://localhost:$HGPORT2/
252 254 [255]
253 255 $ hg id http://user:pass2@localhost:$HGPORT2/
254 256 abort: HTTP Error 403: no
255 257 [100]
256 258
257 259 $ hg -R dest-pull tag -r tip top
258 260 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
259 261 pushing to http://user:***@localhost:$HGPORT2/
260 262 searching for changes
261 263 remote: adding changesets
262 264 remote: adding manifests
263 265 remote: adding file changes
264 266 remote: added 1 changesets with 1 changes to 1 files
265 267 $ hg rollback -q
266 268
267 269 $ sed 's/.*] "/"/' < ../access.log
268 270 "GET /?cmd=capabilities HTTP/1.1" 401 -
269 271 "GET /?cmd=capabilities HTTP/1.1" 401 -
270 272 "GET /?cmd=capabilities HTTP/1.1" 401 -
271 273 "GET /?cmd=capabilities HTTP/1.1" 200 -
272 274 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
273 275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
274 276 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
275 277 "GET /?cmd=capabilities HTTP/1.1" 401 -
276 278 "GET /?cmd=capabilities HTTP/1.1" 200 -
277 279 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
278 280 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
279 281 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
280 282 "GET /?cmd=capabilities HTTP/1.1" 401 -
281 283 "GET /?cmd=capabilities HTTP/1.1" 200 -
282 284 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
283 285 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
284 286 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
285 287 "GET /?cmd=capabilities HTTP/1.1" 401 -
286 288 "GET /?cmd=capabilities HTTP/1.1" 200 -
287 289 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
288 290 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
289 291 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
290 292 "GET /?cmd=capabilities HTTP/1.1" 401 -
291 293 "GET /?cmd=capabilities HTTP/1.1" 200 -
292 294 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
293 295 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
294 296 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
295 297 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
296 298 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
297 299 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
298 300 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
299 301 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
300 302 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
301 303 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
302 304 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
303 305 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
304 306 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
305 307 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
306 308 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
307 309 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
308 310 "GET /?cmd=capabilities HTTP/1.1" 401 -
309 311 "GET /?cmd=capabilities HTTP/1.1" 401 -
310 312 "GET /?cmd=capabilities HTTP/1.1" 403 -
311 313 "GET /?cmd=capabilities HTTP/1.1" 401 -
312 314 "GET /?cmd=capabilities HTTP/1.1" 200 -
313 315 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 316 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
316 318 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
317 319 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
318 320 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
319 321 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
320 322
321 323 $ cd ..
322 324
323 325 clone of serve with repo in root and unserved subrepo (issue2970)
324 326
325 327 $ hg --cwd test init sub
326 328 $ echo empty > test/sub/empty
327 329 $ hg --cwd test/sub add empty
328 330 $ hg --cwd test/sub commit -qm 'add empty'
329 331 $ hg --cwd test/sub tag -r 0 something
330 332 $ echo sub = sub > test/.hgsub
331 333 $ hg --cwd test add .hgsub
332 334 $ hg --cwd test commit -qm 'add subrepo'
333 335 $ hg clone http://localhost:$HGPORT noslash-clone
334 336 requesting all changes
335 337 adding changesets
336 338 adding manifests
337 339 adding file changes
338 340 added 3 changesets with 7 changes to 7 files
339 341 new changesets 8b6053c928fe:56f9bc90cce6
340 342 updating to branch default
341 343 cloning subrepo sub from http://localhost:$HGPORT/sub
342 344 abort: HTTP Error 404: Not Found
343 345 [100]
344 346 $ hg clone http://localhost:$HGPORT/ slash-clone
345 347 requesting all changes
346 348 adding changesets
347 349 adding manifests
348 350 adding file changes
349 351 added 3 changesets with 7 changes to 7 files
350 352 new changesets 8b6053c928fe:56f9bc90cce6
351 353 updating to branch default
352 354 cloning subrepo sub from http://localhost:$HGPORT/sub
353 355 abort: HTTP Error 404: Not Found
354 356 [100]
355 357
356 358 check error log
357 359
358 360 $ cat error.log
359 361
360 362 Check error reporting while pulling/cloning
361 363
362 364 $ $RUNTESTDIR/killdaemons.py
363 365 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
364 366 $ cat hg3.pid >> $DAEMON_PIDS
365 367 $ hg clone http://localhost:$HGPORT/ abort-clone
366 368 requesting all changes
367 369 abort: remote error:
368 370 this is an exercise
369 371 [100]
370 372 $ cat error.log
371 373
372 374 disable pull-based clones
373 375
374 376 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
375 377 $ cat hg4.pid >> $DAEMON_PIDS
376 378 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
377 379 requesting all changes
378 380 abort: remote error:
379 381 server has pull-based clones disabled
380 382 [100]
381 383
382 384 #if no-reposimplestore
383 385 ... but keep stream clones working
384 386
385 387 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
386 388 streaming all changes
387 389 * files to transfer, * of data (glob)
388 390 transferred 1.36 KB in * seconds (* */sec) (glob) (no-zstd !)
389 391 transferred 1.38 KB in * seconds (* */sec) (glob) (zstd !)
390 392 searching for changes
391 393 no changes found
392 394 #endif
393 395
394 396 ... and also keep partial clones and pulls working
395 397 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
396 398 adding changesets
397 399 adding manifests
398 400 adding file changes
399 401 added 1 changesets with 4 changes to 4 files
400 402 new changesets 8b6053c928fe
401 403 updating to branch default
402 404 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 405 $ hg pull -R test-partial-clone
404 406 pulling from http://localhost:$HGPORT1/
405 407 searching for changes
406 408 adding changesets
407 409 adding manifests
408 410 adding file changes
409 411 added 2 changesets with 3 changes to 3 files
410 412 new changesets 5fed3813f7f5:56f9bc90cce6
411 413 (run 'hg update' to get a working copy)
412 414
413 415 $ cat error.log
@@ -1,617 +1,619 @@
1 1 #require serve
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo foo>foo
6 6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 7 $ echo foo>foo.d/foo
8 8 $ echo bar>foo.d/bAr.hg.d/BaR
9 9 $ echo bar>foo.d/baR.d.hg/bAR
10 10 $ hg commit -A -m 1
11 11 adding foo
12 12 adding foo.d/bAr.hg.d/BaR
13 13 adding foo.d/baR.d.hg/bAR
14 14 adding foo.d/foo
15 15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
17 17
18 18 Test server address cannot be reused
19 19
20 20 $ hg serve -p $HGPORT1 2>&1
21 21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
22 22 [255]
23 23
24 24 $ cd ..
25 25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
26 26
27 27 clone via stream
28 28
29 29 #if no-reposimplestore
30 30 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
31 31 streaming all changes
32 32 9 files to transfer, 715 bytes of data (no-zstd !)
33 33 9 files to transfer, 717 bytes of data (zstd !)
34 34 transferred * bytes in * seconds (*/sec) (glob)
35 35 updating to branch default
36 36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 37 $ hg verify -R copy
38 38 checking changesets
39 39 checking manifests
40 40 crosschecking files in changesets and manifests
41 41 checking files
42 42 checked 1 changesets with 4 changes to 4 files
43 43 #endif
44 44
45 45 try to clone via stream, should use pull instead
46 46
47 47 $ hg clone --stream http://localhost:$HGPORT1/ copy2
48 48 warning: stream clone requested but server has them disabled
49 49 requesting all changes
50 50 adding changesets
51 51 adding manifests
52 52 adding file changes
53 53 added 1 changesets with 4 changes to 4 files
54 54 new changesets 8b6053c928fe
55 55 updating to branch default
56 56 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 57
58 58 try to clone via stream but missing requirements, so should use pull instead
59 59
60 60 $ cat > $TESTTMP/removesupportedformat.py << EOF
61 61 > from mercurial import localrepo
62 > def extsetup(ui):
63 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
62 > def reposetup(ui, repo):
63 > local = repo.local()
64 > if local is not None:
65 > local.supported.remove(b'generaldelta')
64 66 > EOF
65 67
66 68 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
67 69 warning: stream clone requested but client is missing requirements: generaldelta
68 70 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
69 71 requesting all changes
70 72 adding changesets
71 73 adding manifests
72 74 adding file changes
73 75 added 1 changesets with 4 changes to 4 files
74 76 new changesets 8b6053c928fe
75 77 updating to branch default
76 78 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 79
78 80 clone via pull
79 81
80 82 $ hg clone http://localhost:$HGPORT1/ copy-pull
81 83 requesting all changes
82 84 adding changesets
83 85 adding manifests
84 86 adding file changes
85 87 added 1 changesets with 4 changes to 4 files
86 88 new changesets 8b6053c928fe
87 89 updating to branch default
88 90 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 91 $ hg verify -R copy-pull
90 92 checking changesets
91 93 checking manifests
92 94 crosschecking files in changesets and manifests
93 95 checking files
94 96 checked 1 changesets with 4 changes to 4 files
95 97 $ cd test
96 98 $ echo bar > bar
97 99 $ hg commit -A -d '1 0' -m 2
98 100 adding bar
99 101 $ cd ..
100 102
101 103 clone over http with --update
102 104
103 105 $ hg clone http://localhost:$HGPORT1/ updated --update 0
104 106 requesting all changes
105 107 adding changesets
106 108 adding manifests
107 109 adding file changes
108 110 added 2 changesets with 5 changes to 5 files
109 111 new changesets 8b6053c928fe:5fed3813f7f5
110 112 updating to branch default
111 113 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 114 $ hg log -r . -R updated
113 115 changeset: 0:8b6053c928fe
114 116 user: test
115 117 date: Thu Jan 01 00:00:00 1970 +0000
116 118 summary: 1
117 119
118 120 $ rm -rf updated
119 121
120 122 incoming via HTTP
121 123
122 124 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
123 125 adding changesets
124 126 adding manifests
125 127 adding file changes
126 128 added 1 changesets with 4 changes to 4 files
127 129 new changesets 8b6053c928fe
128 130 updating to branch default
129 131 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 132 $ cd partial
131 133 $ touch LOCAL
132 134 $ hg ci -qAm LOCAL
133 135 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
134 136 comparing with http://localhost:$HGPORT1/
135 137 searching for changes
136 138 2
137 139 $ cd ..
138 140
139 141 pull
140 142
141 143 $ cd copy-pull
142 144 $ cat >> .hg/hgrc <<EOF
143 145 > [hooks]
144 146 > changegroup = sh -c "printenv.py --line changegroup"
145 147 > EOF
146 148 $ hg pull
147 149 pulling from http://localhost:$HGPORT1/
148 150 searching for changes
149 151 adding changesets
150 152 adding manifests
151 153 adding file changes
152 154 added 1 changesets with 1 changes to 1 files
153 155 new changesets 5fed3813f7f5
154 156 changegroup hook: HG_HOOKNAME=changegroup
155 157 HG_HOOKTYPE=changegroup
156 158 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
157 159 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
158 160 HG_SOURCE=pull
159 161 HG_TXNID=TXN:$ID$
160 162 HG_TXNNAME=pull
161 163 http://localhost:$HGPORT1/
162 164 HG_URL=http://localhost:$HGPORT1/
163 165
164 166 (run 'hg update' to get a working copy)
165 167 $ cd ..
166 168
167 169 clone from invalid URL
168 170
169 171 $ hg clone http://localhost:$HGPORT/bad
170 172 abort: HTTP Error 404: Not Found
171 173 [100]
172 174
173 175 test http authentication
174 176 + use the same server to test server side streaming preference
175 177
176 178 $ cd test
177 179
178 180 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
179 181 > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \
180 182 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
181 183 $ cat pid >> $DAEMON_PIDS
182 184
183 185 $ cat << EOF > get_pass.py
184 186 > from mercurial import util
185 187 > def newgetpass():
186 188 > return "pass"
187 189 > util.get_password = newgetpass
188 190 > EOF
189 191
190 192 $ hg id http://localhost:$HGPORT2/
191 193 abort: http authorization required for http://localhost:$HGPORT2/
192 194 [255]
193 195 $ hg id http://localhost:$HGPORT2/
194 196 abort: http authorization required for http://localhost:$HGPORT2/
195 197 [255]
196 198 $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/
197 199 using http://localhost:$HGPORT2/
198 200 sending capabilities command
199 201 http authorization required for http://localhost:$HGPORT2/
200 202 realm: mercurial
201 203 user: abort: response expected
202 204 [255]
203 205 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
204 206 >
205 207 > EOF
206 208 using http://localhost:$HGPORT2/
207 209 sending capabilities command
208 210 http authorization required for http://localhost:$HGPORT2/
209 211 realm: mercurial
210 212 user:
211 213 password: abort: response expected
212 214 [255]
213 215 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
214 216 >
215 217 >
216 218 > EOF
217 219 using http://localhost:$HGPORT2/
218 220 sending capabilities command
219 221 http authorization required for http://localhost:$HGPORT2/
220 222 realm: mercurial
221 223 user:
222 224 password: abort: authorization failed
223 225 [255]
224 226 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
225 227 http authorization required for http://localhost:$HGPORT2/
226 228 realm: mercurial
227 229 user: user
228 230 password: 5fed3813f7f5
229 231 $ hg id http://user:pass@localhost:$HGPORT2/
230 232 5fed3813f7f5
231 233 $ echo '[auth]' >> .hg/hgrc
232 234 $ echo 'l.schemes=http' >> .hg/hgrc
233 235 $ echo 'l.prefix=lo' >> .hg/hgrc
234 236 $ echo 'l.username=user' >> .hg/hgrc
235 237 $ echo 'l.password=pass' >> .hg/hgrc
236 238 $ hg id http://localhost:$HGPORT2/
237 239 5fed3813f7f5
238 240 $ hg id http://localhost:$HGPORT2/
239 241 5fed3813f7f5
240 242 $ hg id http://user@localhost:$HGPORT2/
241 243 5fed3813f7f5
242 244
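The [auth] entries above are chosen by prefix matching: "l.prefix=lo" matches because the host portion of the URL (localhost:...) starts with "lo", the longest matching prefix wins when several entries match, and "l.schemes=http" is needed because an auth entry applies only to https unless other schemes are listed. The snippet below is a simplified, hedged illustration of that selection idea, not Mercurial's implementation; the function name, data layout and port number are invented for the example.

  def pick_auth_entry(entries, scheme, hostpath):
      # entries: {name: {'prefix': ..., 'schemes': [...], 'username': ..., 'password': ...}}
      best = None
      for cfg in entries.values():
          if scheme not in cfg.get('schemes', ['https']):  # https-only by default
              continue
          prefix = cfg['prefix']
          if prefix == '*' or hostpath.startswith(prefix):
              if best is None or len(prefix) > len(best['prefix']):
                  best = cfg  # keep the longest matching prefix
      return best

  entry = pick_auth_entry(
      {'l': {'prefix': 'lo', 'schemes': ['http'],
             'username': 'user', 'password': 'pass'}},
      'http', 'localhost:8000/')
  # -> the 'l' group, so user/pass would be offered for http://localhost:8000/
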
243 245 $ cat > use_digests.py << EOF
244 246 > from mercurial import (
245 247 > exthelper,
246 248 > url,
247 249 > )
248 250 >
249 251 > eh = exthelper.exthelper()
250 252 > uisetup = eh.finaluisetup
251 253 >
252 254 > @eh.wrapfunction(url, 'opener')
253 255 > def urlopener(orig, *args, **kwargs):
254 256 > opener = orig(*args, **kwargs)
255 257 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
256 258 > return opener
257 259 > EOF
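The exthelper-based wrapper above appends one extra header to every opener Mercurial builds, which is why the later access.log entries carry x-hgtest-authtype:Digest. For reference, a hedged sketch of the same idea written with extensions.wrapfunction directly (not the form this test uses; the wrapper name is invented):

  from mercurial import extensions, url

  def _add_header_opener(orig, *args, **kwargs):
      # Call the original opener factory, then tag every request with the header.
      opener = orig(*args, **kwargs)
      opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
      return opener

  def uisetup(ui):
      extensions.wrapfunction(url, 'opener', _add_header_opener)
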
258 260
259 261 $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py
260 262 5fed3813f7f5
261 263
262 264 #if no-reposimplestore
263 265 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
264 266 streaming all changes
265 267 10 files to transfer, 1.01 KB of data
266 268 transferred * KB in * seconds (*/sec) (glob)
267 269 updating to branch default
268 270 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 271 #endif
270 272
271 273 --pull should override server's preferuncompressed
272 274 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
273 275 requesting all changes
274 276 adding changesets
275 277 adding manifests
276 278 adding file changes
277 279 added 2 changesets with 5 changes to 5 files
278 280 new changesets 8b6053c928fe:5fed3813f7f5
279 281 updating to branch default
280 282 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
281 283
282 284 $ hg id http://user2@localhost:$HGPORT2/
283 285 abort: http authorization required for http://localhost:$HGPORT2/
284 286 [255]
285 287 $ hg id http://user:pass2@localhost:$HGPORT2/
286 288 abort: HTTP Error 403: no
287 289 [100]
288 290
289 291 $ hg -R dest-pull tag -r tip top
290 292 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
291 293 pushing to http://user:***@localhost:$HGPORT2/
292 294 searching for changes
293 295 remote: adding changesets
294 296 remote: adding manifests
295 297 remote: adding file changes
296 298 remote: added 1 changesets with 1 changes to 1 files
297 299 $ hg rollback -q
298 300 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
299 301 pushing to http://user:***@localhost:$HGPORT2/
300 302 using http://localhost:$HGPORT2/
301 303 http auth: user user, password ****
302 304 sending capabilities command
303 305 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
304 306 http auth: user user, password ****
305 307 devel-peer-request: finished in *.???? seconds (200) (glob)
306 308 query 1; heads
307 309 devel-peer-request: batched-content
308 310 devel-peer-request: - heads (0 arguments)
309 311 devel-peer-request: - known (1 arguments)
310 312 sending batch command
311 313 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
312 314 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
313 315 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 316 devel-peer-request: 68 bytes of commands arguments in headers
315 317 devel-peer-request: finished in *.???? seconds (200) (glob)
316 318 searching for changes
317 319 all remote heads known locally
318 320 preparing listkeys for "phases"
319 321 sending listkeys command
320 322 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
321 323 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
322 324 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
323 325 devel-peer-request: 16 bytes of commands arguments in headers
324 326 devel-peer-request: finished in *.???? seconds (200) (glob)
325 327 received listkey for "phases": 58 bytes
326 328 checking for updated bookmarks
327 329 preparing listkeys for "bookmarks"
328 330 sending listkeys command
329 331 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
330 332 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
331 333 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
332 334 devel-peer-request: 19 bytes of commands arguments in headers
333 335 devel-peer-request: finished in *.???? seconds (200) (glob)
334 336 received listkey for "bookmarks": 0 bytes
335 337 sending branchmap command
336 338 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
337 339 devel-peer-request: Vary X-HgProto-1
338 340 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
339 341 devel-peer-request: finished in *.???? seconds (200) (glob)
340 342 preparing listkeys for "bookmarks"
341 343 sending listkeys command
342 344 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
343 345 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
344 346 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
345 347 devel-peer-request: 19 bytes of commands arguments in headers
346 348 devel-peer-request: finished in *.???? seconds (200) (glob)
347 349 received listkey for "bookmarks": 0 bytes
348 350 1 changesets found
349 351 list of changesets:
350 352 7f4e523d01f2cc3765ac8934da3d14db775ff872
351 353 bundle2-output-bundle: "HG20", 5 parts total
352 354 bundle2-output-part: "replycaps" 207 bytes payload
353 355 bundle2-output-part: "check:phases" 24 bytes payload
354 356 bundle2-output-part: "check:updated-heads" streamed payload
355 357 bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
356 358 bundle2-output-part: "phase-heads" 24 bytes payload
357 359 sending unbundle command
358 360 sending 1023 bytes
359 361 devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
360 362 devel-peer-request: Content-length 1023
361 363 devel-peer-request: Content-type application/mercurial-0.1
362 364 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
363 365 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
364 366 devel-peer-request: 16 bytes of commands arguments in headers
365 367 devel-peer-request: 1023 bytes of data
366 368 devel-peer-request: finished in *.???? seconds (200) (glob)
367 369 bundle2-input-bundle: no-transaction
368 370 bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
369 371 bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
370 372 bundle2-input-part: total payload size 55
371 373 remote: adding changesets
372 374 remote: adding manifests
373 375 remote: adding file changes
374 376 bundle2-input-part: "output" (advisory) supported
375 377 bundle2-input-part: total payload size 45
376 378 remote: added 1 changesets with 1 changes to 1 files
377 379 bundle2-input-bundle: 3 parts total
378 380 preparing listkeys for "phases"
379 381 sending listkeys command
380 382 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
381 383 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
382 384 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
383 385 devel-peer-request: 16 bytes of commands arguments in headers
384 386 devel-peer-request: finished in *.???? seconds (200) (glob)
385 387 received listkey for "phases": 15 bytes
386 388 (sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
387 389 $ hg rollback -q
388 390
389 391 $ sed 's/.*] "/"/' < ../access.log
390 392 "GET /?cmd=capabilities HTTP/1.1" 401 -
391 393 "GET /?cmd=capabilities HTTP/1.1" 401 -
392 394 "GET /?cmd=capabilities HTTP/1.1" 401 -
393 395 "GET /?cmd=capabilities HTTP/1.1" 401 -
394 396 "GET /?cmd=capabilities HTTP/1.1" 401 -
395 397 "GET /?cmd=capabilities HTTP/1.1" 401 -
396 398 "GET /?cmd=capabilities HTTP/1.1" 200 -
397 399 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
398 400 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
399 401 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
400 402 "GET /?cmd=capabilities HTTP/1.1" 401 -
401 403 "GET /?cmd=capabilities HTTP/1.1" 200 -
402 404 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
403 405 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
404 406 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
405 407 "GET /?cmd=capabilities HTTP/1.1" 401 -
406 408 "GET /?cmd=capabilities HTTP/1.1" 200 -
407 409 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
408 410 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
409 411 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
410 412 "GET /?cmd=capabilities HTTP/1.1" 401 -
411 413 "GET /?cmd=capabilities HTTP/1.1" 200 -
412 414 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
413 415 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
414 416 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
415 417 "GET /?cmd=capabilities HTTP/1.1" 401 -
416 418 "GET /?cmd=capabilities HTTP/1.1" 200 -
417 419 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
418 420 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
419 421 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
420 422 "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest
421 423 "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest
422 424 "GET /?cmd=lookup HTTP/1.1" 401 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
423 425 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
424 426 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
425 427 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
426 428 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
427 429 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
428 430 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
429 431 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
430 432 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
431 433 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
432 434 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
433 435 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
434 436 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
435 437 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
436 438 "GET /?cmd=capabilities HTTP/1.1" 401 -
437 439 "GET /?cmd=capabilities HTTP/1.1" 401 -
438 440 "GET /?cmd=capabilities HTTP/1.1" 403 -
439 441 "GET /?cmd=capabilities HTTP/1.1" 401 -
440 442 "GET /?cmd=capabilities HTTP/1.1" 200 -
441 443 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
442 444 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
443 445 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
444 446 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
445 447 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
446 448 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
447 449 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
448 450 "GET /?cmd=capabilities HTTP/1.1" 401 -
449 451 "GET /?cmd=capabilities HTTP/1.1" 200 -
450 452 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
451 453 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
452 454 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
453 455 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
454 456 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
455 457 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
456 458 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
457 459
458 460 $ cd ..
459 461
460 462 clone of serve with repo in root and unserved subrepo (issue2970)
461 463
462 464 $ hg --cwd test init sub
463 465 $ echo empty > test/sub/empty
464 466 $ hg --cwd test/sub add empty
465 467 $ hg --cwd test/sub commit -qm 'add empty'
466 468 $ hg --cwd test/sub tag -r 0 something
467 469 $ echo sub = sub > test/.hgsub
468 470 $ hg --cwd test add .hgsub
469 471 $ hg --cwd test commit -qm 'add subrepo'
470 472 $ hg clone http://localhost:$HGPORT noslash-clone
471 473 requesting all changes
472 474 adding changesets
473 475 adding manifests
474 476 adding file changes
475 477 added 3 changesets with 7 changes to 7 files
476 478 new changesets 8b6053c928fe:56f9bc90cce6
477 479 updating to branch default
478 480 cloning subrepo sub from http://localhost:$HGPORT/sub
479 481 abort: HTTP Error 404: Not Found
480 482 [100]
481 483 $ hg clone http://localhost:$HGPORT/ slash-clone
482 484 requesting all changes
483 485 adding changesets
484 486 adding manifests
485 487 adding file changes
486 488 added 3 changesets with 7 changes to 7 files
487 489 new changesets 8b6053c928fe:56f9bc90cce6
488 490 updating to branch default
489 491 cloning subrepo sub from http://localhost:$HGPORT/sub
490 492 abort: HTTP Error 404: Not Found
491 493 [100]
492 494
493 495 check error log
494 496
495 497 $ cat error.log
496 498
497 499 $ cat errors2.log
498 500
499 501 check abort error reporting while pulling/cloning
500 502
501 503 $ $RUNTESTDIR/killdaemons.py
502 504 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
503 505 $ cat hg3.pid >> $DAEMON_PIDS
504 506 $ hg clone http://localhost:$HGPORT/ abort-clone
505 507 requesting all changes
506 508 remote: abort: this is an exercise
507 509 abort: pull failed on remote
508 510 [100]
509 511 $ cat error.log
510 512
511 513 disable pull-based clones
512 514
513 515 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
514 516 $ cat hg4.pid >> $DAEMON_PIDS
515 517 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
516 518 requesting all changes
517 519 remote: abort: server has pull-based clones disabled
518 520 abort: pull failed on remote
519 521 (remove --pull if specified or upgrade Mercurial)
520 522 [100]
521 523
522 524 #if no-reposimplestore
523 525 ... but keep stream clones working
524 526
525 527 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
526 528 streaming all changes
527 529 * files to transfer, * of data (glob)
528 530 transferred * in * seconds (*/sec) (glob)
529 531 $ cat error.log
530 532 #endif
531 533
532 534 ... and also keep partial clones and pulls working
533 535 $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
534 536 adding changesets
535 537 adding manifests
536 538 adding file changes
537 539 added 1 changesets with 4 changes to 4 files
538 540 new changesets 8b6053c928fe
539 541 updating to branch default
540 542 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
541 543 $ hg pull -R test/partial/clone
542 544 pulling from http://localhost:$HGPORT1/
543 545 searching for changes
544 546 adding changesets
545 547 adding manifests
546 548 adding file changes
547 549 added 2 changesets with 3 changes to 3 files
548 550 new changesets 5fed3813f7f5:56f9bc90cce6
549 551 (run 'hg update' to get a working copy)
550 552
551 553 $ hg clone -U -r 0 test/partial/clone test/another/clone
552 554 adding changesets
553 555 adding manifests
554 556 adding file changes
555 557 added 1 changesets with 4 changes to 4 files
556 558 new changesets 8b6053c928fe
557 559
558 560 corrupt cookies file should yield a warning
559 561
560 562 $ cat > $TESTTMP/cookies.txt << EOF
561 563 > bad format
562 564 > EOF
563 565
564 566 $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
565 567 (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
566 568 56f9bc90cce6
567 569
568 570 $ killdaemons.py
569 571
570 572 Create a dummy authentication handler that looks for cookies. It doesn't do anything
571 573 useful; it just raises an HTTP 500 with details about the Cookie request header.
572 574 We use HTTP 500 because its message is printed in the client's abort message.
573 575
574 576 $ cat > cookieauth.py << EOF
575 577 > from mercurial import util
576 578 > from mercurial.hgweb import common
577 579 > def perform_authentication(hgweb, req, op):
578 580 > cookie = req.headers.get(b'Cookie')
579 581 > if not cookie:
580 582 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'no-cookie')
581 583 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'Cookie: %s' % cookie)
582 584 > def extsetup(ui):
583 585 > common.permhooks.insert(0, perform_authentication)
584 586 > EOF
585 587
586 588 $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
587 589 $ cat pid > $DAEMON_PIDS
588 590
589 591 A request sent without a cookie should fail because the handler requires one
590 592
591 593 $ hg id http://localhost:$HGPORT
592 594 abort: HTTP Error 500: no-cookie
593 595 [100]
594 596
595 597 Populate a cookies file
596 598
597 599 $ cat > cookies.txt << EOF
598 600 > # HTTP Cookie File
599 601 > # Expiration is 2030-01-01 at midnight
600 602 > .example.com TRUE / FALSE 1893456000 hgkey examplevalue
601 603 > EOF
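The cookies file just written uses the Netscape format that the earlier "does not look like a Netscape format cookies file" warning refers to: tab-separated fields for domain, include-subdomains flag, path, secure flag, expiry (epoch seconds), name and value. A hedged sketch, not part of the test, showing that the Python standard library parses the same format:

  import http.cookiejar

  jar = http.cookiejar.MozillaCookieJar('cookies.txt')
  jar.load()   # the corrupt "bad format" file above would raise LoadError here
  for cookie in jar:
      # One line per cookie: where it is sent, for which path, until when, and its payload.
      print(cookie.domain, cookie.path, cookie.expires, cookie.name, cookie.value)
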
602 604
603 605 Should not send a cookie for another domain
604 606
605 607 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
606 608 abort: HTTP Error 500: no-cookie
607 609 [100]
608 610
609 611 Add a cookie entry for our test server and verify it is sent
610 612
611 613 $ cat >> cookies.txt << EOF
612 614 > localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue
613 615 > EOF
614 616
615 617 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
616 618 abort: HTTP Error 500: Cookie: hgkey=localhostvalue
617 619 [100]