##// END OF EJS Templates
stream-clone: filter possible missing requirements using all supported one...
marmoute -
r49831:d9017df7 default
parent child Browse files
Show More
@@ -1,941 +1,941 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import contextlib
11 11 import errno
12 12 import os
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .pycompat import open
17 17 from .interfaces import repository
18 18 from . import (
19 19 bookmarks,
20 20 cacheutil,
21 21 error,
22 22 narrowspec,
23 23 phases,
24 24 pycompat,
25 25 requirements as requirementsmod,
26 26 scmutil,
27 27 store,
28 28 util,
29 29 )
30 30 from .utils import (
31 31 stringutil,
32 32 )
33 33
34 34
35 35 def new_stream_clone_requirements(
36 36 supported_formats, default_requirements, streamed_requirements
37 37 ):
38 38 """determine the final set of requirements for a new stream clone
39 39
40 40 this method combines the "default" requirements that a new repository would
41 41 use with the constraints we get from the stream clone content. We keep local
42 42 configuration choices when possible.
43 43 """
44 44 requirements = set(default_requirements)
45 45 requirements -= supported_formats
46 46 requirements.update(streamed_requirements)
47 47 return requirements
48 48
49 49
50 50 def streamed_requirements(repo):
51 51 """the set of requirements the new clone will have to support
52 52
53 53 This is used for advertising the stream options and to generate the actual
54 54 stream content."""
55 55 requiredformats = repo.requirements & repo.supportedformats
56 56 return requiredformats
57 57
58 58
59 59 def canperformstreamclone(pullop, bundle2=False):
60 60 """Whether it is possible to perform a streaming clone as part of pull.
61 61
62 62 ``bundle2`` will cause the function to consider stream clone through
63 63 bundle2 and only through bundle2.
64 64
65 65 Returns a tuple of (supported, requirements). ``supported`` is True if
66 66 streaming clone is supported and False otherwise. ``requirements`` is
67 67 a set of repo requirements from the remote, or ``None`` if stream clone
68 68 isn't supported.
69 69 """
70 70 repo = pullop.repo
71 71 remote = pullop.remote
72 72
73 73 bundle2supported = False
74 74 if pullop.canusebundle2:
75 75 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
76 76 bundle2supported = True
77 77 # else
78 78 # Server doesn't support bundle2 stream clone or doesn't support
79 79 # the versions we support. Fall back and possibly allow legacy.
80 80
81 81 # Ensures legacy code path uses available bundle2.
82 82 if bundle2supported and not bundle2:
83 83 return False, None
84 84 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
85 85 elif bundle2 and not bundle2supported:
86 86 return False, None
87 87
88 88 # Streaming clone only works on empty repositories.
89 89 if len(repo):
90 90 return False, None
91 91
92 92 # Streaming clone only works if all data is being requested.
93 93 if pullop.heads:
94 94 return False, None
95 95
96 96 streamrequested = pullop.streamclonerequested
97 97
98 98 # If we don't have a preference, let the server decide for us. This
99 99 # likely only comes into play in LANs.
100 100 if streamrequested is None:
101 101 # The server can advertise whether to prefer streaming clone.
102 102 streamrequested = remote.capable(b'stream-preferred')
103 103
104 104 if not streamrequested:
105 105 return False, None
106 106
107 107 # In order for stream clone to work, the client has to support all the
108 108 # requirements advertised by the server.
109 109 #
110 110 # The server advertises its requirements via the "stream" and "streamreqs"
111 111 # capability. "stream" (a value-less capability) is advertised if and only
112 112 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
113 113 # is advertised and contains a comma-delimited list of requirements.
114 114 requirements = set()
115 115 if remote.capable(b'stream'):
116 116 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
117 117 else:
118 118 streamreqs = remote.capable(b'streamreqs')
119 119 # This is weird and shouldn't happen with modern servers.
120 120 if not streamreqs:
121 121 pullop.repo.ui.warn(
122 122 _(
123 123 b'warning: stream clone requested but server has them '
124 124 b'disabled\n'
125 125 )
126 126 )
127 127 return False, None
128 128
129 129 streamreqs = set(streamreqs.split(b','))
130 130 # Server requires something we don't support. Bail.
131 missingreqs = streamreqs - repo.supportedformats
131 missingreqs = streamreqs - repo.supported
132 132 if missingreqs:
133 133 pullop.repo.ui.warn(
134 134 _(
135 135 b'warning: stream clone requested but client is missing '
136 136 b'requirements: %s\n'
137 137 )
138 138 % b', '.join(sorted(missingreqs))
139 139 )
140 140 pullop.repo.ui.warn(
141 141 _(
142 142 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
143 143 b'for more information)\n'
144 144 )
145 145 )
146 146 return False, None
147 147 requirements = streamreqs
148 148
149 149 return True, requirements
150 150
151 151
152 152 def maybeperformlegacystreamclone(pullop):
153 153 """Possibly perform a legacy stream clone operation.
154 154
155 155 Legacy stream clones are performed as part of pull but before all other
156 156 operations.
157 157
158 158 A legacy stream clone will not be performed if a bundle2 stream clone is
159 159 supported.
160 160 """
161 161 from . import localrepo
162 162
163 163 supported, requirements = canperformstreamclone(pullop)
164 164
165 165 if not supported:
166 166 return
167 167
168 168 repo = pullop.repo
169 169 remote = pullop.remote
170 170
171 171 # Save remote branchmap. We will use it later to speed up branchcache
172 172 # creation.
173 173 rbranchmap = None
174 174 if remote.capable(b'branchmap'):
175 175 with remote.commandexecutor() as e:
176 176 rbranchmap = e.callcommand(b'branchmap', {}).result()
177 177
178 178 repo.ui.status(_(b'streaming all changes\n'))
179 179
180 180 with remote.commandexecutor() as e:
181 181 fp = e.callcommand(b'stream_out', {}).result()
182 182
183 183 # TODO strictly speaking, this code should all be inside the context
184 184 # manager because the context manager is supposed to ensure all wire state
185 185 # is flushed when exiting. But the legacy peers don't do this, so it
186 186 # doesn't matter.
187 187 l = fp.readline()
188 188 try:
189 189 resp = int(l)
190 190 except ValueError:
191 191 raise error.ResponseError(
192 192 _(b'unexpected response from remote server:'), l
193 193 )
194 194 if resp == 1:
195 195 raise error.Abort(_(b'operation forbidden by server'))
196 196 elif resp == 2:
197 197 raise error.Abort(_(b'locking the remote repository failed'))
198 198 elif resp != 0:
199 199 raise error.Abort(_(b'the server sent an unknown error code'))
200 200
201 201 l = fp.readline()
202 202 try:
203 203 filecount, bytecount = map(int, l.split(b' ', 1))
204 204 except (ValueError, TypeError):
205 205 raise error.ResponseError(
206 206 _(b'unexpected response from remote server:'), l
207 207 )
208 208
209 209 with repo.lock():
210 210 consumev1(repo, fp, filecount, bytecount)
211 211 repo.requirements = new_stream_clone_requirements(
212 212 repo.supportedformats,
213 213 repo.requirements,
214 214 requirements,
215 215 )
216 216 repo.svfs.options = localrepo.resolvestorevfsoptions(
217 217 repo.ui, repo.requirements, repo.features
218 218 )
219 219 scmutil.writereporequirements(repo)
220 220
221 221 if rbranchmap:
222 222 repo._branchcaches.replace(repo, rbranchmap)
223 223
224 224 repo.invalidate()
225 225
226 226
227 227 def allowservergeneration(repo):
228 228 """Whether streaming clones are allowed from the server."""
229 229 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
230 230 return False
231 231
232 232 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
233 233 return False
234 234
235 235 # The way stream clone works makes it impossible to hide secret changesets.
236 236 # So don't allow this by default.
237 237 secret = phases.hassecret(repo)
238 238 if secret:
239 239 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
240 240
241 241 return True
242 242
243 243
244 244 # This is its own function so extensions can override it.
245 245 def _walkstreamfiles(repo, matcher=None):
246 246 return repo.store.walk(matcher)
247 247
248 248
249 249 def generatev1(repo):
250 250 """Emit content for version 1 of a streaming clone.
251 251
252 252 This returns a 3-tuple of (file count, byte size, data iterator).
253 253
254 254 The data iterator consists of N entries for each file being transferred.
255 255 Each file entry starts as a line with the file name and integer size
256 256 delimited by a null byte.
257 257
258 258 The raw file data follows. Following the raw file data is the next file
259 259 entry, or EOF.
260 260
261 261 When used on the wire protocol, an additional line indicating protocol
262 262 success will be prepended to the stream. This function is not responsible
263 263 for adding it.
264 264
265 265 This function will obtain a repository lock to ensure a consistent view of
266 266 the store is captured. It therefore may raise LockError.
267 267 """
268 268 entries = []
269 269 total_bytes = 0
270 270 # Get consistent snapshot of repo, lock during scan.
271 271 with repo.lock():
272 272 repo.ui.debug(b'scanning\n')
273 273 for file_type, name, size in _walkstreamfiles(repo):
274 274 if size:
275 275 entries.append((name, size))
276 276 total_bytes += size
277 277 _test_sync_point_walk_1(repo)
278 278 _test_sync_point_walk_2(repo)
279 279
280 280 repo.ui.debug(
281 281 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
282 282 )
283 283
284 284 svfs = repo.svfs
285 285 debugflag = repo.ui.debugflag
286 286
287 287 def emitrevlogdata():
288 288 for name, size in entries:
289 289 if debugflag:
290 290 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
291 291 # partially encode name over the wire for backwards compat
292 292 yield b'%s\0%d\n' % (store.encodedir(name), size)
293 293 # auditing at this stage is both pointless (paths are already
294 294 # trusted by the local repo) and expensive
295 295 with svfs(name, b'rb', auditpath=False) as fp:
296 296 if size <= 65536:
297 297 yield fp.read(size)
298 298 else:
299 299 for chunk in util.filechunkiter(fp, limit=size):
300 300 yield chunk
301 301
302 302 return len(entries), total_bytes, emitrevlogdata()
303 303
304 304
305 305 def generatev1wireproto(repo):
306 306 """Emit content for version 1 of streaming clone suitable for the wire.
307 307
308 308 This is the data output from ``generatev1()`` with 2 header lines. The
309 309 first line indicates overall success. The 2nd contains the file count and
310 310 byte size of payload.
311 311
312 312 The success line contains "0" for success, "1" for stream generation not
313 313 allowed, and "2" for error locking the repository (possibly indicating
314 314 a permissions error for the server process).
315 315 """
316 316 if not allowservergeneration(repo):
317 317 yield b'1\n'
318 318 return
319 319
320 320 try:
321 321 filecount, bytecount, it = generatev1(repo)
322 322 except error.LockError:
323 323 yield b'2\n'
324 324 return
325 325
326 326 # Indicates successful response.
327 327 yield b'0\n'
328 328 yield b'%d %d\n' % (filecount, bytecount)
329 329 for chunk in it:
330 330 yield chunk
331 331
332 332
333 333 def generatebundlev1(repo, compression=b'UN'):
334 334 """Emit content for version 1 of a stream clone bundle.
335 335
336 336 The first 4 bytes of the output ("HGS1") denote this as stream clone
337 337 bundle version 1.
338 338
339 339 The next 2 bytes indicate the compression type. Only "UN" is currently
340 340 supported.
341 341
342 342 The next 16 bytes are two 64-bit big endian unsigned integers indicating
343 343 file count and byte count, respectively.
344 344
345 345 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
346 346 of the requirements string, including a trailing \0. The following N bytes
347 347 are the requirements string, which is ASCII containing a comma-delimited
348 348 list of repo requirements that are needed to support the data.
349 349
350 350 The remaining content is the output of ``generatev1()`` (which may be
351 351 compressed in the future).
352 352
353 353 Returns a tuple of (requirements, data generator).
354 354 """
355 355 if compression != b'UN':
356 356 raise ValueError(b'we do not support the compression argument yet')
357 357
358 358 requirements = streamed_requirements(repo)
359 359 requires = b','.join(sorted(requirements))
360 360
361 361 def gen():
362 362 yield b'HGS1'
363 363 yield compression
364 364
365 365 filecount, bytecount, it = generatev1(repo)
366 366 repo.ui.status(
367 367 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
368 368 )
369 369
370 370 yield struct.pack(b'>QQ', filecount, bytecount)
371 371 yield struct.pack(b'>H', len(requires) + 1)
372 372 yield requires + b'\0'
373 373
374 374 # This is where we'll add compression in the future.
375 375 assert compression == b'UN'
376 376
377 377 progress = repo.ui.makeprogress(
378 378 _(b'bundle'), total=bytecount, unit=_(b'bytes')
379 379 )
380 380 progress.update(0)
381 381
382 382 for chunk in it:
383 383 progress.increment(step=len(chunk))
384 384 yield chunk
385 385
386 386 progress.complete()
387 387
388 388 return requirements, gen()
389 389
390 390
391 391 def consumev1(repo, fp, filecount, bytecount):
392 392 """Apply the contents from version 1 of a streaming clone file handle.
393 393
394 394 This takes the output from "stream_out" and applies it to the specified
395 395 repository.
396 396
397 397 Like "stream_out," the status line added by the wire protocol is not
398 398 handled by this function.
399 399 """
400 400 with repo.lock():
401 401 repo.ui.status(
402 402 _(b'%d files to transfer, %s of data\n')
403 403 % (filecount, util.bytecount(bytecount))
404 404 )
405 405 progress = repo.ui.makeprogress(
406 406 _(b'clone'), total=bytecount, unit=_(b'bytes')
407 407 )
408 408 progress.update(0)
409 409 start = util.timer()
410 410
411 411 # TODO: get rid of (potential) inconsistency
412 412 #
413 413 # If transaction is started and any @filecache property is
414 414 # changed at this point, it causes inconsistency between
415 415 # in-memory cached property and streamclone-ed file on the
416 416 # disk. Nested transaction prevents transaction scope "clone"
417 417 # below from writing in-memory changes out at the end of it,
418 418 # even though in-memory changes are discarded at the end of it
419 419 # regardless of transaction nesting.
420 420 #
421 421 # But transaction nesting can't be simply prohibited, because
422 422 # nesting occurs also in ordinary case (e.g. enabling
423 423 # clonebundles).
424 424
425 425 with repo.transaction(b'clone'):
426 426 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
427 427 for i in pycompat.xrange(filecount):
428 428 # XXX doesn't support '\n' or '\r' in filenames
429 429 l = fp.readline()
430 430 try:
431 431 name, size = l.split(b'\0', 1)
432 432 size = int(size)
433 433 except (ValueError, TypeError):
434 434 raise error.ResponseError(
435 435 _(b'unexpected response from remote server:'), l
436 436 )
437 437 if repo.ui.debugflag:
438 438 repo.ui.debug(
439 439 b'adding %s (%s)\n' % (name, util.bytecount(size))
440 440 )
441 441 # for backwards compat, name was partially encoded
442 442 path = store.decodedir(name)
443 443 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
444 444 for chunk in util.filechunkiter(fp, limit=size):
445 445 progress.increment(step=len(chunk))
446 446 ofp.write(chunk)
447 447
448 448 # force @filecache properties to be reloaded from
449 449 # streamclone-ed file at next access
450 450 repo.invalidate(clearfilecache=True)
451 451
452 452 elapsed = util.timer() - start
453 453 if elapsed <= 0:
454 454 elapsed = 0.001
455 455 progress.complete()
456 456 repo.ui.status(
457 457 _(b'transferred %s in %.1f seconds (%s/sec)\n')
458 458 % (
459 459 util.bytecount(bytecount),
460 460 elapsed,
461 461 util.bytecount(bytecount / elapsed),
462 462 )
463 463 )
464 464
465 465
466 466 def readbundle1header(fp):
467 467 compression = fp.read(2)
468 468 if compression != b'UN':
469 469 raise error.Abort(
470 470 _(
471 471 b'only uncompressed stream clone bundles are '
472 472 b'supported; got %s'
473 473 )
474 474 % compression
475 475 )
476 476
477 477 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
478 478 requireslen = struct.unpack(b'>H', fp.read(2))[0]
479 479 requires = fp.read(requireslen)
480 480
481 481 if not requires.endswith(b'\0'):
482 482 raise error.Abort(
483 483 _(
484 484 b'malformed stream clone bundle: '
485 485 b'requirements not properly encoded'
486 486 )
487 487 )
488 488
489 489 requirements = set(requires.rstrip(b'\0').split(b','))
490 490
491 491 return filecount, bytecount, requirements
492 492
493 493
494 494 def applybundlev1(repo, fp):
495 495 """Apply the content from a stream clone bundle version 1.
496 496
497 497 We assume the 4 byte header has been read and validated and the file handle
498 498 is at the 2 byte compression identifier.
499 499 """
500 500 if len(repo):
501 501 raise error.Abort(
502 502 _(b'cannot apply stream clone bundle on non-empty repo')
503 503 )
504 504
505 505 filecount, bytecount, requirements = readbundle1header(fp)
506 missingreqs = requirements - repo.supportedformats
506 missingreqs = requirements - repo.supported
507 507 if missingreqs:
508 508 raise error.Abort(
509 509 _(b'unable to apply stream clone: unsupported format: %s')
510 510 % b', '.join(sorted(missingreqs))
511 511 )
512 512
513 513 consumev1(repo, fp, filecount, bytecount)
514 514
515 515
516 516 class streamcloneapplier(object):
517 517 """Class to manage applying streaming clone bundles.
518 518
519 519 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
520 520 readers to perform bundle type-specific functionality.
521 521 """
522 522
523 523 def __init__(self, fh):
524 524 self._fh = fh
525 525
526 526 def apply(self, repo):
527 527 return applybundlev1(repo, self._fh)
528 528
529 529
530 530 # type of file to stream
531 531 _fileappend = 0 # append only file
532 532 _filefull = 1 # full snapshot file
533 533
534 534 # Source of the file
535 535 _srcstore = b's' # store (svfs)
536 536 _srccache = b'c' # cache (cache)
537 537
538 538 # This is its own function so extensions can override it.
539 539 def _walkstreamfullstorefiles(repo):
540 540 """list snapshot file from the store"""
541 541 fnames = []
542 542 if not repo.publishing():
543 543 fnames.append(b'phaseroots')
544 544 return fnames
545 545
546 546
547 547 def _filterfull(entry, copy, vfsmap):
548 548 """actually copy the snapshot files"""
549 549 src, name, ftype, data = entry
550 550 if ftype != _filefull:
551 551 return entry
552 552 return (src, name, ftype, copy(vfsmap[src].join(name)))
553 553
554 554
555 555 @contextlib.contextmanager
556 556 def maketempcopies():
557 557 """return a function to temporary copy file"""
558 558 files = []
559 559 try:
560 560
561 561 def copy(src):
562 562 fd, dst = pycompat.mkstemp()
563 563 os.close(fd)
564 564 files.append(dst)
565 565 util.copyfiles(src, dst, hardlink=True)
566 566 return dst
567 567
568 568 yield copy
569 569 finally:
570 570 for tmp in files:
571 571 util.tryunlink(tmp)
572 572
573 573
574 574 def _makemap(repo):
575 575 """make a (src -> vfs) map for the repo"""
576 576 vfsmap = {
577 577 _srcstore: repo.svfs,
578 578 _srccache: repo.cachevfs,
579 579 }
580 580 # we keep repo.vfs out of it on purpose, there are too many dangers there
581 581 # (eg: .hg/hgrc)
582 582 assert repo.vfs not in vfsmap.values()
583 583
584 584 return vfsmap
585 585
586 586
587 587 def _emit2(repo, entries, totalfilesize):
588 588 """actually emit the stream bundle"""
589 589 vfsmap = _makemap(repo)
590 590 # we keep repo.vfs out of it on purpose, there are too many dangers there
591 591 # (eg: .hg/hgrc),
592 592 #
593 593 # this assert is duplicated (from _makemap) as the author might think this is
594 594 # fine, while this is really not fine.
595 595 if repo.vfs in vfsmap.values():
596 596 raise error.ProgrammingError(
597 597 b'repo.vfs must not be added to vfsmap for security reasons'
598 598 )
599 599
600 600 progress = repo.ui.makeprogress(
601 601 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
602 602 )
603 603 progress.update(0)
604 604 with maketempcopies() as copy, progress:
605 605 # copy is delayed until we are in the try
606 606 entries = [_filterfull(e, copy, vfsmap) for e in entries]
607 607 yield None # this release the lock on the repository
608 608 totalbytecount = 0
609 609
610 610 for src, name, ftype, data in entries:
611 611 vfs = vfsmap[src]
612 612 yield src
613 613 yield util.uvarintencode(len(name))
614 614 if ftype == _fileappend:
615 615 fp = vfs(name)
616 616 size = data
617 617 elif ftype == _filefull:
618 618 fp = open(data, b'rb')
619 619 size = util.fstat(fp).st_size
620 620 bytecount = 0
621 621 try:
622 622 yield util.uvarintencode(size)
623 623 yield name
624 624 if size <= 65536:
625 625 chunks = (fp.read(size),)
626 626 else:
627 627 chunks = util.filechunkiter(fp, limit=size)
628 628 for chunk in chunks:
629 629 bytecount += len(chunk)
630 630 totalbytecount += len(chunk)
631 631 progress.update(totalbytecount)
632 632 yield chunk
633 633 if bytecount != size:
634 634 # Would most likely be caused by a race due to `hg strip` or
635 635 # a revlog split
636 636 raise error.Abort(
637 637 _(
638 638 b'clone could only read %d bytes from %s, but '
639 639 b'expected %d bytes'
640 640 )
641 641 % (bytecount, name, size)
642 642 )
643 643 finally:
644 644 fp.close()
645 645
646 646
647 647 def _test_sync_point_walk_1(repo):
648 648 """a function for synchronisation during tests"""
649 649
650 650
651 651 def _test_sync_point_walk_2(repo):
652 652 """a function for synchronisation during tests"""
653 653
654 654
655 655 def _v2_walk(repo, includes, excludes, includeobsmarkers):
656 656 """emit a series of file information useful to clone a repo
657 657
658 658 return (entries, totalfilesize)
659 659
660 660 entries is a list of tuple (vfs-key, file-path, file-type, size)
661 661
662 662 - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
663 663 - `name`: file path of the file to copy (to be fed to the vfss)
664 664 - `file-type`: does this file need to be copied with the source lock?
665 665 - `size`: the size of the file (or None)
666 666 """
667 667 assert repo._currentlock(repo._lockref) is not None
668 668 entries = []
669 669 totalfilesize = 0
670 670
671 671 matcher = None
672 672 if includes or excludes:
673 673 matcher = narrowspec.match(repo.root, includes, excludes)
674 674
675 675 for rl_type, name, size in _walkstreamfiles(repo, matcher):
676 676 if size:
677 677 ft = _fileappend
678 678 if rl_type & store.FILEFLAGS_VOLATILE:
679 679 ft = _filefull
680 680 entries.append((_srcstore, name, ft, size))
681 681 totalfilesize += size
682 682 for name in _walkstreamfullstorefiles(repo):
683 683 if repo.svfs.exists(name):
684 684 totalfilesize += repo.svfs.lstat(name).st_size
685 685 entries.append((_srcstore, name, _filefull, None))
686 686 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
687 687 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
688 688 entries.append((_srcstore, b'obsstore', _filefull, None))
689 689 for name in cacheutil.cachetocopy(repo):
690 690 if repo.cachevfs.exists(name):
691 691 totalfilesize += repo.cachevfs.lstat(name).st_size
692 692 entries.append((_srccache, name, _filefull, None))
693 693 return entries, totalfilesize
694 694
695 695
696 696 def generatev2(repo, includes, excludes, includeobsmarkers):
697 697 """Emit content for version 2 of a streaming clone.
698 698
699 699 the data stream consists the following entries:
700 700 1) A char representing the file destination (eg: store or cache)
701 701 2) A varint containing the length of the filename
702 702 3) A varint containing the length of file data
703 703 4) N bytes containing the filename (the internal, store-agnostic form)
704 704 5) N bytes containing the file data
705 705
706 706 Returns a 3-tuple of (file count, file size, data iterator).
707 707 """
708 708
709 709 with repo.lock():
710 710
711 711 repo.ui.debug(b'scanning\n')
712 712
713 713 entries, totalfilesize = _v2_walk(
714 714 repo,
715 715 includes=includes,
716 716 excludes=excludes,
717 717 includeobsmarkers=includeobsmarkers,
718 718 )
719 719
720 720 chunks = _emit2(repo, entries, totalfilesize)
721 721 first = next(chunks)
722 722 assert first is None
723 723 _test_sync_point_walk_1(repo)
724 724 _test_sync_point_walk_2(repo)
725 725
726 726 return len(entries), totalfilesize, chunks
727 727
728 728
729 729 @contextlib.contextmanager
730 730 def nested(*ctxs):
731 731 this = ctxs[0]
732 732 rest = ctxs[1:]
733 733 with this:
734 734 if rest:
735 735 with nested(*rest):
736 736 yield
737 737 else:
738 738 yield
739 739
740 740
741 741 def consumev2(repo, fp, filecount, filesize):
742 742 """Apply the contents from a version 2 streaming clone.
743 743
744 744 Data is read from an object that only needs to provide a ``read(size)``
745 745 method.
746 746 """
747 747 with repo.lock():
748 748 repo.ui.status(
749 749 _(b'%d files to transfer, %s of data\n')
750 750 % (filecount, util.bytecount(filesize))
751 751 )
752 752
753 753 start = util.timer()
754 754 progress = repo.ui.makeprogress(
755 755 _(b'clone'), total=filesize, unit=_(b'bytes')
756 756 )
757 757 progress.update(0)
758 758
759 759 vfsmap = _makemap(repo)
760 760 # we keep repo.vfs out of it on purpose, there are too many dangers
761 761 # there (eg: .hg/hgrc),
762 762 #
763 763 # this assert is duplicated (from _makemap) as the author might think this
764 764 # is fine, while this is really not fine.
765 765 if repo.vfs in vfsmap.values():
766 766 raise error.ProgrammingError(
767 767 b'repo.vfs must not be added to vfsmap for security reasons'
768 768 )
769 769
770 770 with repo.transaction(b'clone'):
771 771 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
772 772 with nested(*ctxs):
773 773 for i in range(filecount):
774 774 src = util.readexactly(fp, 1)
775 775 vfs = vfsmap[src]
776 776 namelen = util.uvarintdecodestream(fp)
777 777 datalen = util.uvarintdecodestream(fp)
778 778
779 779 name = util.readexactly(fp, namelen)
780 780
781 781 if repo.ui.debugflag:
782 782 repo.ui.debug(
783 783 b'adding [%s] %s (%s)\n'
784 784 % (src, name, util.bytecount(datalen))
785 785 )
786 786
787 787 with vfs(name, b'w') as ofp:
788 788 for chunk in util.filechunkiter(fp, limit=datalen):
789 789 progress.increment(step=len(chunk))
790 790 ofp.write(chunk)
791 791
792 792 # force @filecache properties to be reloaded from
793 793 # streamclone-ed file at next access
794 794 repo.invalidate(clearfilecache=True)
795 795
796 796 elapsed = util.timer() - start
797 797 if elapsed <= 0:
798 798 elapsed = 0.001
799 799 repo.ui.status(
800 800 _(b'transferred %s in %.1f seconds (%s/sec)\n')
801 801 % (
802 802 util.bytecount(progress.pos),
803 803 elapsed,
804 804 util.bytecount(progress.pos / elapsed),
805 805 )
806 806 )
807 807 progress.complete()
808 808
809 809
810 810 def applybundlev2(repo, fp, filecount, filesize, requirements):
811 811 from . import localrepo
812 812
813 813 missingreqs = [r for r in requirements if r not in repo.supported]
814 814 if missingreqs:
815 815 raise error.Abort(
816 816 _(b'unable to apply stream clone: unsupported format: %s')
817 817 % b', '.join(sorted(missingreqs))
818 818 )
819 819
820 820 consumev2(repo, fp, filecount, filesize)
821 821
822 822 repo.requirements = new_stream_clone_requirements(
823 823 repo.supportedformats,
824 824 repo.requirements,
825 825 requirements,
826 826 )
827 827 repo.svfs.options = localrepo.resolvestorevfsoptions(
828 828 repo.ui, repo.requirements, repo.features
829 829 )
830 830 scmutil.writereporequirements(repo)
831 831
832 832
833 833 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
834 834 hardlink = [True]
835 835
836 836 def copy_used():
837 837 hardlink[0] = False
838 838 progress.topic = _(b'copying')
839 839
840 840 for k, path, size in entries:
841 841 src_vfs = src_vfs_map[k]
842 842 dst_vfs = dst_vfs_map[k]
843 843 src_path = src_vfs.join(path)
844 844 dst_path = dst_vfs.join(path)
845 845 # We cannot use dirname and makedirs of dst_vfs here because the store
846 846 # encoding confuses them. See issue 6581 for details.
847 847 dirname = os.path.dirname(dst_path)
848 848 if not os.path.exists(dirname):
849 849 util.makedirs(dirname)
850 850 dst_vfs.register_file(path)
851 851 # XXX we could use the #nb_bytes argument.
852 852 util.copyfile(
853 853 src_path,
854 854 dst_path,
855 855 hardlink=hardlink[0],
856 856 no_hardlink_cb=copy_used,
857 857 check_fs_hardlink=False,
858 858 )
859 859 progress.increment()
860 860 return hardlink[0]
861 861
862 862
863 863 def local_copy(src_repo, dest_repo):
864 864 """copy all content from one local repository to another
865 865
866 866 This is useful for local clone"""
867 867 src_store_requirements = {
868 868 r
869 869 for r in src_repo.requirements
870 870 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
871 871 }
872 872 dest_store_requirements = {
873 873 r
874 874 for r in dest_repo.requirements
875 875 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
876 876 }
877 877 assert src_store_requirements == dest_store_requirements
878 878
879 879 with dest_repo.lock():
880 880 with src_repo.lock():
881 881
882 882 # bookmarks are not integrated into the streaming as they might use the
883 883 # `repo.vfs` and there is too much sensitive data accessible
884 884 # through `repo.vfs` to expose it to streaming clone.
885 885 src_book_vfs = bookmarks.bookmarksvfs(src_repo)
886 886 srcbookmarks = src_book_vfs.join(b'bookmarks')
887 887 bm_count = 0
888 888 if os.path.exists(srcbookmarks):
889 889 bm_count = 1
890 890
891 891 entries, totalfilesize = _v2_walk(
892 892 src_repo,
893 893 includes=None,
894 894 excludes=None,
895 895 includeobsmarkers=True,
896 896 )
897 897 src_vfs_map = _makemap(src_repo)
898 898 dest_vfs_map = _makemap(dest_repo)
899 899 progress = src_repo.ui.makeprogress(
900 900 topic=_(b'linking'),
901 901 total=len(entries) + bm_count,
902 902 unit=_(b'files'),
903 903 )
904 904 # copy files
905 905 #
906 906 # We could copy the full file while the source repository is locked
907 907 # and the other one without the lock. However, in the linking case,
908 908 # this would also require checks that nobody is appending any data
909 909 # to the files while we do the clone, so this is not done yet. We
910 910 # could do this blindly when copying files.
911 911 files = ((k, path, size) for k, path, ftype, size in entries)
912 912 hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
913 913
914 914 # copy bookmarks over
915 915 if bm_count:
916 916 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
917 917 dstbookmarks = dst_book_vfs.join(b'bookmarks')
918 918 util.copyfile(srcbookmarks, dstbookmarks)
919 919 progress.complete()
920 920 if hardlink:
921 921 msg = b'linked %d files\n'
922 922 else:
923 923 msg = b'copied %d files\n'
924 924 src_repo.ui.debug(msg % (len(entries) + bm_count))
925 925
926 926 with dest_repo.transaction(b"localclone") as tr:
927 927 dest_repo.store.write(tr)
928 928
929 929 # clean up transaction files as they do not make sense
930 930 undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
931 931 undo_files.extend(dest_repo.undofiles())
932 932 for undovfs, undofile in undo_files:
933 933 try:
934 934 undovfs.unlink(undofile)
935 935 except OSError as e:
936 936 if e.errno != errno.ENOENT:
937 937 msg = _(b'error removing %s: %s\n')
938 938 path = undovfs.join(undofile)
939 939 e_msg = stringutil.forcebytestr(e)
940 940 msg %= (path, e_msg)
941 941 dest_repo.ui.warn(msg)
@@ -1,413 +1,415 b''
1 1 #require serve
2 2
3 3 This test is a duplicate of 'test-http.t', feel free to factor out
4 4 parts that are not bundle1/bundle2 specific.
5 5
6 6 $ cat << EOF >> $HGRCPATH
7 7 > [devel]
8 8 > # This test is dedicated to interaction through old bundle
9 9 > legacy.exchange = bundle1
10 10 > EOF
11 11
12 12 $ hg init test
13 13 $ cd test
14 14 $ echo foo>foo
15 15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
16 16 $ echo foo>foo.d/foo
17 17 $ echo bar>foo.d/bAr.hg.d/BaR
18 18 $ echo bar>foo.d/baR.d.hg/bAR
19 19 $ hg commit -A -m 1
20 20 adding foo
21 21 adding foo.d/bAr.hg.d/BaR
22 22 adding foo.d/baR.d.hg/bAR
23 23 adding foo.d/foo
24 24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
25 25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
26 26
27 27 Test server address cannot be reused
28 28
29 29 $ hg serve -p $HGPORT1 2>&1
30 30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
31 31 [255]
32 32
33 33 $ cd ..
34 34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
35 35
36 36 clone via stream
37 37
38 38 #if no-reposimplestore
39 39 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
40 40 streaming all changes
41 41 6 files to transfer, 606 bytes of data (no-zstd !)
42 42 6 files to transfer, 608 bytes of data (zstd !)
43 43 transferred * bytes in * seconds (*/sec) (glob)
44 44 searching for changes
45 45 no changes found
46 46 updating to branch default
47 47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 48 $ hg verify -R copy
49 49 checking changesets
50 50 checking manifests
51 51 crosschecking files in changesets and manifests
52 52 checking files
53 53 checked 1 changesets with 4 changes to 4 files
54 54 #endif
55 55
56 56 try to clone via stream, should use pull instead
57 57
58 58 $ hg clone --stream http://localhost:$HGPORT1/ copy2
59 59 warning: stream clone requested but server has them disabled
60 60 requesting all changes
61 61 adding changesets
62 62 adding manifests
63 63 adding file changes
64 64 added 1 changesets with 4 changes to 4 files
65 65 new changesets 8b6053c928fe
66 66 updating to branch default
67 67 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
68 68
69 69 try to clone via stream but missing requirements, so should use pull instead
70 70
71 71 $ cat > $TESTTMP/removesupportedformat.py << EOF
72 72 > from mercurial import localrepo
73 > def extsetup(ui):
74 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
73 > def reposetup(ui, repo):
74 > local = repo.local()
75 > if local is not None:
76 > local.supported.remove(b'generaldelta')
75 77 > EOF
76 78
77 79 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
78 80 warning: stream clone requested but client is missing requirements: generaldelta
79 81 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
80 82 requesting all changes
81 83 adding changesets
82 84 adding manifests
83 85 adding file changes
84 86 added 1 changesets with 4 changes to 4 files
85 87 new changesets 8b6053c928fe
86 88 updating to branch default
87 89 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
88 90
89 91 clone via pull
90 92
91 93 $ hg clone http://localhost:$HGPORT1/ copy-pull
92 94 requesting all changes
93 95 adding changesets
94 96 adding manifests
95 97 adding file changes
96 98 added 1 changesets with 4 changes to 4 files
97 99 new changesets 8b6053c928fe
98 100 updating to branch default
99 101 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 102 $ hg verify -R copy-pull
101 103 checking changesets
102 104 checking manifests
103 105 crosschecking files in changesets and manifests
104 106 checking files
105 107 checked 1 changesets with 4 changes to 4 files
106 108 $ cd test
107 109 $ echo bar > bar
108 110 $ hg commit -A -d '1 0' -m 2
109 111 adding bar
110 112 $ cd ..
111 113
112 114 clone over http with --update
113 115
114 116 $ hg clone http://localhost:$HGPORT1/ updated --update 0
115 117 requesting all changes
116 118 adding changesets
117 119 adding manifests
118 120 adding file changes
119 121 added 2 changesets with 5 changes to 5 files
120 122 new changesets 8b6053c928fe:5fed3813f7f5
121 123 updating to branch default
122 124 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 125 $ hg log -r . -R updated
124 126 changeset: 0:8b6053c928fe
125 127 user: test
126 128 date: Thu Jan 01 00:00:00 1970 +0000
127 129 summary: 1
128 130
129 131 $ rm -rf updated
130 132
131 133 incoming via HTTP
132 134
133 135 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
134 136 adding changesets
135 137 adding manifests
136 138 adding file changes
137 139 added 1 changesets with 4 changes to 4 files
138 140 new changesets 8b6053c928fe
139 141 updating to branch default
140 142 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 143 $ cd partial
142 144 $ touch LOCAL
143 145 $ hg ci -qAm LOCAL
144 146 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
145 147 comparing with http://localhost:$HGPORT1/
146 148 searching for changes
147 149 2
148 150 $ cd ..
149 151
150 152 pull
151 153
152 154 $ cd copy-pull
153 155 $ cat >> .hg/hgrc <<EOF
154 156 > [hooks]
155 157 > changegroup = sh -c "printenv.py --line changegroup"
156 158 > EOF
157 159 $ hg pull
158 160 pulling from http://localhost:$HGPORT1/
159 161 searching for changes
160 162 adding changesets
161 163 adding manifests
162 164 adding file changes
163 165 added 1 changesets with 1 changes to 1 files
164 166 new changesets 5fed3813f7f5
165 167 changegroup hook: HG_HOOKNAME=changegroup
166 168 HG_HOOKTYPE=changegroup
167 169 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
168 170 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
169 171 HG_SOURCE=pull
170 172 HG_TXNID=TXN:$ID$
171 173 HG_TXNNAME=pull
172 174 http://localhost:$HGPORT1/
173 175 HG_URL=http://localhost:$HGPORT1/
174 176
175 177 (run 'hg update' to get a working copy)
176 178 $ cd ..
177 179
178 180 clone from invalid URL
179 181
180 182 $ hg clone http://localhost:$HGPORT/bad
181 183 abort: HTTP Error 404: Not Found
182 184 [100]
183 185
184 186 test http authentication
185 187 + use the same server to test server side streaming preference
186 188
187 189 $ cd test
188 190
189 191 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
190 192 > --pid-file=pid --config server.preferuncompressed=True \
191 193 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
192 194 $ cat pid >> $DAEMON_PIDS
193 195
194 196 $ cat << EOF > get_pass.py
195 197 > from mercurial import util
196 198 > def newgetpass():
197 199 > return "pass"
198 200 > util.get_password = newgetpass
199 201 > EOF
200 202
201 203 $ hg id http://localhost:$HGPORT2/
202 204 abort: http authorization required for http://localhost:$HGPORT2/
203 205 [255]
204 206 $ hg id http://localhost:$HGPORT2/
205 207 abort: http authorization required for http://localhost:$HGPORT2/
206 208 [255]
207 209 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
208 210 http authorization required for http://localhost:$HGPORT2/
209 211 realm: mercurial
210 212 user: user
211 213 password: 5fed3813f7f5
212 214 $ hg id http://user:pass@localhost:$HGPORT2/
213 215 5fed3813f7f5
214 216 $ echo '[auth]' >> .hg/hgrc
215 217 $ echo 'l.schemes=http' >> .hg/hgrc
216 218 $ echo 'l.prefix=lo' >> .hg/hgrc
217 219 $ echo 'l.username=user' >> .hg/hgrc
218 220 $ echo 'l.password=pass' >> .hg/hgrc
219 221 $ hg id http://localhost:$HGPORT2/
220 222 5fed3813f7f5
221 223 $ hg id http://localhost:$HGPORT2/
222 224 5fed3813f7f5
223 225 $ hg id http://user@localhost:$HGPORT2/
224 226 5fed3813f7f5
225 227
226 228 #if no-reposimplestore
227 229 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
228 230 streaming all changes
229 231 7 files to transfer, 916 bytes of data (no-zstd !)
230 232 7 files to transfer, 919 bytes of data (zstd !)
231 233 transferred * bytes in * seconds (*/sec) (glob)
232 234 searching for changes
233 235 no changes found
234 236 updating to branch default
235 237 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
236 238 #endif
237 239
238 240 --pull should override server's preferuncompressed
239 241
240 242 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
241 243 requesting all changes
242 244 adding changesets
243 245 adding manifests
244 246 adding file changes
245 247 added 2 changesets with 5 changes to 5 files
246 248 new changesets 8b6053c928fe:5fed3813f7f5
247 249 updating to branch default
248 250 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
249 251
250 252 $ hg id http://user2@localhost:$HGPORT2/
251 253 abort: http authorization required for http://localhost:$HGPORT2/
252 254 [255]
253 255 $ hg id http://user:pass2@localhost:$HGPORT2/
254 256 abort: HTTP Error 403: no
255 257 [100]
256 258
257 259 $ hg -R dest-pull tag -r tip top
258 260 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
259 261 pushing to http://user:***@localhost:$HGPORT2/
260 262 searching for changes
261 263 remote: adding changesets
262 264 remote: adding manifests
263 265 remote: adding file changes
264 266 remote: added 1 changesets with 1 changes to 1 files
265 267 $ hg rollback -q
266 268
267 269 $ sed 's/.*] "/"/' < ../access.log
268 270 "GET /?cmd=capabilities HTTP/1.1" 401 -
269 271 "GET /?cmd=capabilities HTTP/1.1" 401 -
270 272 "GET /?cmd=capabilities HTTP/1.1" 401 -
271 273 "GET /?cmd=capabilities HTTP/1.1" 200 -
272 274 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
273 275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
274 276 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
275 277 "GET /?cmd=capabilities HTTP/1.1" 401 -
276 278 "GET /?cmd=capabilities HTTP/1.1" 200 -
277 279 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
278 280 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
279 281 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
280 282 "GET /?cmd=capabilities HTTP/1.1" 401 -
281 283 "GET /?cmd=capabilities HTTP/1.1" 200 -
282 284 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
283 285 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
284 286 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
285 287 "GET /?cmd=capabilities HTTP/1.1" 401 -
286 288 "GET /?cmd=capabilities HTTP/1.1" 200 -
287 289 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
288 290 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
289 291 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
290 292 "GET /?cmd=capabilities HTTP/1.1" 401 -
291 293 "GET /?cmd=capabilities HTTP/1.1" 200 -
292 294 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
293 295 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
294 296 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
295 297 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
296 298 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
297 299 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
298 300 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
299 301 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
300 302 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
301 303 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
302 304 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
303 305 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
304 306 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
305 307 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
306 308 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
307 309 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
308 310 "GET /?cmd=capabilities HTTP/1.1" 401 -
309 311 "GET /?cmd=capabilities HTTP/1.1" 401 -
310 312 "GET /?cmd=capabilities HTTP/1.1" 403 -
311 313 "GET /?cmd=capabilities HTTP/1.1" 401 -
312 314 "GET /?cmd=capabilities HTTP/1.1" 200 -
313 315 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 316 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
316 318 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
317 319 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
318 320 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
319 321 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
320 322
321 323 $ cd ..
322 324
323 325 clone of serve with repo in root and unserved subrepo (issue2970)
324 326
325 327 $ hg --cwd test init sub
326 328 $ echo empty > test/sub/empty
327 329 $ hg --cwd test/sub add empty
328 330 $ hg --cwd test/sub commit -qm 'add empty'
329 331 $ hg --cwd test/sub tag -r 0 something
330 332 $ echo sub = sub > test/.hgsub
331 333 $ hg --cwd test add .hgsub
332 334 $ hg --cwd test commit -qm 'add subrepo'
333 335 $ hg clone http://localhost:$HGPORT noslash-clone
334 336 requesting all changes
335 337 adding changesets
336 338 adding manifests
337 339 adding file changes
338 340 added 3 changesets with 7 changes to 7 files
339 341 new changesets 8b6053c928fe:56f9bc90cce6
340 342 updating to branch default
341 343 cloning subrepo sub from http://localhost:$HGPORT/sub
342 344 abort: HTTP Error 404: Not Found
343 345 [100]
344 346 $ hg clone http://localhost:$HGPORT/ slash-clone
345 347 requesting all changes
346 348 adding changesets
347 349 adding manifests
348 350 adding file changes
349 351 added 3 changesets with 7 changes to 7 files
350 352 new changesets 8b6053c928fe:56f9bc90cce6
351 353 updating to branch default
352 354 cloning subrepo sub from http://localhost:$HGPORT/sub
353 355 abort: HTTP Error 404: Not Found
354 356 [100]
355 357
356 358 check error log
357 359
358 360 $ cat error.log
359 361
360 362 Check error reporting while pulling/cloning
361 363
362 364 $ $RUNTESTDIR/killdaemons.py
363 365 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
364 366 $ cat hg3.pid >> $DAEMON_PIDS
365 367 $ hg clone http://localhost:$HGPORT/ abort-clone
366 368 requesting all changes
367 369 abort: remote error:
368 370 this is an exercise
369 371 [100]
370 372 $ cat error.log
371 373
372 374 disable pull-based clones
373 375
374 376 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
375 377 $ cat hg4.pid >> $DAEMON_PIDS
376 378 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
377 379 requesting all changes
378 380 abort: remote error:
379 381 server has pull-based clones disabled
380 382 [100]
381 383
382 384 #if no-reposimplestore
383 385 ... but keep stream clones working
384 386
385 387 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
386 388 streaming all changes
387 389 * files to transfer, * of data (glob)
388 390 transferred 1.36 KB in * seconds (* */sec) (glob) (no-zstd !)
389 391 transferred 1.38 KB in * seconds (* */sec) (glob) (zstd !)
390 392 searching for changes
391 393 no changes found
392 394 #endif
393 395
394 396 ... and also keep partial clones and pulls working
395 397 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
396 398 adding changesets
397 399 adding manifests
398 400 adding file changes
399 401 added 1 changesets with 4 changes to 4 files
400 402 new changesets 8b6053c928fe
401 403 updating to branch default
402 404 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 405 $ hg pull -R test-partial-clone
404 406 pulling from http://localhost:$HGPORT1/
405 407 searching for changes
406 408 adding changesets
407 409 adding manifests
408 410 adding file changes
409 411 added 2 changesets with 3 changes to 3 files
410 412 new changesets 5fed3813f7f5:56f9bc90cce6
411 413 (run 'hg update' to get a working copy)
412 414
413 415 $ cat error.log
@@ -1,617 +1,619 b''
1 1 #require serve
2 2
3 3 $ hg init test
4 4 $ cd test
5 5 $ echo foo>foo
6 6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 7 $ echo foo>foo.d/foo
8 8 $ echo bar>foo.d/bAr.hg.d/BaR
9 9 $ echo bar>foo.d/baR.d.hg/bAR
10 10 $ hg commit -A -m 1
11 11 adding foo
12 12 adding foo.d/bAr.hg.d/BaR
13 13 adding foo.d/baR.d.hg/bAR
14 14 adding foo.d/foo
15 15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
17 17
18 18 Test server address cannot be reused
19 19
20 20 $ hg serve -p $HGPORT1 2>&1
21 21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
22 22 [255]
23 23
24 24 $ cd ..
25 25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
26 26
27 27 clone via stream
28 28
29 29 #if no-reposimplestore
30 30 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
31 31 streaming all changes
32 32 9 files to transfer, 715 bytes of data (no-zstd !)
33 33 9 files to transfer, 717 bytes of data (zstd !)
34 34 transferred * bytes in * seconds (*/sec) (glob)
35 35 updating to branch default
36 36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 37 $ hg verify -R copy
38 38 checking changesets
39 39 checking manifests
40 40 crosschecking files in changesets and manifests
41 41 checking files
42 42 checked 1 changesets with 4 changes to 4 files
43 43 #endif
44 44
45 45 try to clone via stream, should use pull instead
46 46
47 47 $ hg clone --stream http://localhost:$HGPORT1/ copy2
48 48 warning: stream clone requested but server has them disabled
49 49 requesting all changes
50 50 adding changesets
51 51 adding manifests
52 52 adding file changes
53 53 added 1 changesets with 4 changes to 4 files
54 54 new changesets 8b6053c928fe
55 55 updating to branch default
56 56 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
57 57
58 58 try to clone via stream but missing requirements, so should use pull instead
59 59
60 60 $ cat > $TESTTMP/removesupportedformat.py << EOF
61 61 > from mercurial import localrepo
62 > def extsetup(ui):
63 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
62 > def reposetup(ui, repo):
63 > local = repo.local()
64 > if local is not None:
65 > local.supported.remove(b'generaldelta')
64 66 > EOF
65 67
66 68 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
67 69 warning: stream clone requested but client is missing requirements: generaldelta
68 70 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
69 71 requesting all changes
70 72 adding changesets
71 73 adding manifests
72 74 adding file changes
73 75 added 1 changesets with 4 changes to 4 files
74 76 new changesets 8b6053c928fe
75 77 updating to branch default
76 78 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
77 79
78 80 clone via pull
79 81
80 82 $ hg clone http://localhost:$HGPORT1/ copy-pull
81 83 requesting all changes
82 84 adding changesets
83 85 adding manifests
84 86 adding file changes
85 87 added 1 changesets with 4 changes to 4 files
86 88 new changesets 8b6053c928fe
87 89 updating to branch default
88 90 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 91 $ hg verify -R copy-pull
90 92 checking changesets
91 93 checking manifests
92 94 crosschecking files in changesets and manifests
93 95 checking files
94 96 checked 1 changesets with 4 changes to 4 files
95 97 $ cd test
96 98 $ echo bar > bar
97 99 $ hg commit -A -d '1 0' -m 2
98 100 adding bar
99 101 $ cd ..
100 102
101 103 clone over http with --update
102 104
103 105 $ hg clone http://localhost:$HGPORT1/ updated --update 0
104 106 requesting all changes
105 107 adding changesets
106 108 adding manifests
107 109 adding file changes
108 110 added 2 changesets with 5 changes to 5 files
109 111 new changesets 8b6053c928fe:5fed3813f7f5
110 112 updating to branch default
111 113 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 114 $ hg log -r . -R updated
113 115 changeset: 0:8b6053c928fe
114 116 user: test
115 117 date: Thu Jan 01 00:00:00 1970 +0000
116 118 summary: 1
117 119
118 120 $ rm -rf updated
119 121
120 122 incoming via HTTP
121 123
122 124 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
123 125 adding changesets
124 126 adding manifests
125 127 adding file changes
126 128 added 1 changesets with 4 changes to 4 files
127 129 new changesets 8b6053c928fe
128 130 updating to branch default
129 131 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 132 $ cd partial
131 133 $ touch LOCAL
132 134 $ hg ci -qAm LOCAL
133 135 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
134 136 comparing with http://localhost:$HGPORT1/
135 137 searching for changes
136 138 2
137 139 $ cd ..
138 140
139 141 pull
140 142
141 143 $ cd copy-pull
142 144 $ cat >> .hg/hgrc <<EOF
143 145 > [hooks]
144 146 > changegroup = sh -c "printenv.py --line changegroup"
145 147 > EOF
146 148 $ hg pull
147 149 pulling from http://localhost:$HGPORT1/
148 150 searching for changes
149 151 adding changesets
150 152 adding manifests
151 153 adding file changes
152 154 added 1 changesets with 1 changes to 1 files
153 155 new changesets 5fed3813f7f5
154 156 changegroup hook: HG_HOOKNAME=changegroup
155 157 HG_HOOKTYPE=changegroup
156 158 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
157 159 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
158 160 HG_SOURCE=pull
159 161 HG_TXNID=TXN:$ID$
160 162 HG_TXNNAME=pull
161 163 http://localhost:$HGPORT1/
162 164 HG_URL=http://localhost:$HGPORT1/
163 165
164 166 (run 'hg update' to get a working copy)
165 167 $ cd ..
166 168
167 169 clone from invalid URL
168 170
169 171 $ hg clone http://localhost:$HGPORT/bad
170 172 abort: HTTP Error 404: Not Found
171 173 [100]
172 174
173 175 test http authentication
174 176 + use the same server to test server side streaming preference
175 177
176 178 $ cd test
177 179
178 180 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
179 181 > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \
180 182 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
181 183 $ cat pid >> $DAEMON_PIDS
182 184
183 185 $ cat << EOF > get_pass.py
184 186 > from mercurial import util
185 187 > def newgetpass():
186 188 > return "pass"
187 189 > util.get_password = newgetpass
188 190 > EOF
189 191
190 192 $ hg id http://localhost:$HGPORT2/
191 193 abort: http authorization required for http://localhost:$HGPORT2/
192 194 [255]
193 195 $ hg id http://localhost:$HGPORT2/
194 196 abort: http authorization required for http://localhost:$HGPORT2/
195 197 [255]
196 198 $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/
197 199 using http://localhost:$HGPORT2/
198 200 sending capabilities command
199 201 http authorization required for http://localhost:$HGPORT2/
200 202 realm: mercurial
201 203 user: abort: response expected
202 204 [255]
203 205 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
204 206 >
205 207 > EOF
206 208 using http://localhost:$HGPORT2/
207 209 sending capabilities command
208 210 http authorization required for http://localhost:$HGPORT2/
209 211 realm: mercurial
210 212 user:
211 213 password: abort: response expected
212 214 [255]
213 215 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
214 216 >
215 217 >
216 218 > EOF
217 219 using http://localhost:$HGPORT2/
218 220 sending capabilities command
219 221 http authorization required for http://localhost:$HGPORT2/
220 222 realm: mercurial
221 223 user:
222 224 password: abort: authorization failed
223 225 [255]
224 226 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
225 227 http authorization required for http://localhost:$HGPORT2/
226 228 realm: mercurial
227 229 user: user
228 230 password: 5fed3813f7f5
229 231 $ hg id http://user:pass@localhost:$HGPORT2/
230 232 5fed3813f7f5
231 233 $ echo '[auth]' >> .hg/hgrc
232 234 $ echo 'l.schemes=http' >> .hg/hgrc
233 235 $ echo 'l.prefix=lo' >> .hg/hgrc
234 236 $ echo 'l.username=user' >> .hg/hgrc
235 237 $ echo 'l.password=pass' >> .hg/hgrc
236 238 $ hg id http://localhost:$HGPORT2/
237 239 5fed3813f7f5
238 240 $ hg id http://localhost:$HGPORT2/
239 241 5fed3813f7f5
240 242 $ hg id http://user@localhost:$HGPORT2/
241 243 5fed3813f7f5
242 244
243 245 $ cat > use_digests.py << EOF
244 246 > from mercurial import (
245 247 > exthelper,
246 248 > url,
247 249 > )
248 250 >
249 251 > eh = exthelper.exthelper()
250 252 > uisetup = eh.finaluisetup
251 253 >
252 254 > @eh.wrapfunction(url, 'opener')
253 255 > def urlopener(orig, *args, **kwargs):
254 256 > opener = orig(*args, **kwargs)
255 257 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
256 258 > return opener
257 259 > EOF
258 260
259 261 $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py
260 262 5fed3813f7f5
261 263
262 264 #if no-reposimplestore
263 265 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
264 266 streaming all changes
265 267 10 files to transfer, 1.01 KB of data
266 268 transferred * KB in * seconds (*/sec) (glob)
267 269 updating to branch default
268 270 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 271 #endif
270 272
271 273 --pull should override server's preferuncompressed
272 274 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
273 275 requesting all changes
274 276 adding changesets
275 277 adding manifests
276 278 adding file changes
277 279 added 2 changesets with 5 changes to 5 files
278 280 new changesets 8b6053c928fe:5fed3813f7f5
279 281 updating to branch default
280 282 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
281 283
282 284 $ hg id http://user2@localhost:$HGPORT2/
283 285 abort: http authorization required for http://localhost:$HGPORT2/
284 286 [255]
285 287 $ hg id http://user:pass2@localhost:$HGPORT2/
286 288 abort: HTTP Error 403: no
287 289 [100]
288 290
289 291 $ hg -R dest-pull tag -r tip top
290 292 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
291 293 pushing to http://user:***@localhost:$HGPORT2/
292 294 searching for changes
293 295 remote: adding changesets
294 296 remote: adding manifests
295 297 remote: adding file changes
296 298 remote: added 1 changesets with 1 changes to 1 files
297 299 $ hg rollback -q
298 300 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
299 301 pushing to http://user:***@localhost:$HGPORT2/
300 302 using http://localhost:$HGPORT2/
301 303 http auth: user user, password ****
302 304 sending capabilities command
303 305 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
304 306 http auth: user user, password ****
305 307 devel-peer-request: finished in *.???? seconds (200) (glob)
306 308 query 1; heads
307 309 devel-peer-request: batched-content
308 310 devel-peer-request: - heads (0 arguments)
309 311 devel-peer-request: - known (1 arguments)
310 312 sending batch command
311 313 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
312 314 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
313 315 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 316 devel-peer-request: 68 bytes of commands arguments in headers
315 317 devel-peer-request: finished in *.???? seconds (200) (glob)
316 318 searching for changes
317 319 all remote heads known locally
318 320 preparing listkeys for "phases"
319 321 sending listkeys command
320 322 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
321 323 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
322 324 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
323 325 devel-peer-request: 16 bytes of commands arguments in headers
324 326 devel-peer-request: finished in *.???? seconds (200) (glob)
325 327 received listkey for "phases": 58 bytes
326 328 checking for updated bookmarks
327 329 preparing listkeys for "bookmarks"
328 330 sending listkeys command
329 331 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
330 332 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
331 333 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
332 334 devel-peer-request: 19 bytes of commands arguments in headers
333 335 devel-peer-request: finished in *.???? seconds (200) (glob)
334 336 received listkey for "bookmarks": 0 bytes
335 337 sending branchmap command
336 338 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
337 339 devel-peer-request: Vary X-HgProto-1
338 340 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
339 341 devel-peer-request: finished in *.???? seconds (200) (glob)
340 342 preparing listkeys for "bookmarks"
341 343 sending listkeys command
342 344 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
343 345 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
344 346 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
345 347 devel-peer-request: 19 bytes of commands arguments in headers
346 348 devel-peer-request: finished in *.???? seconds (200) (glob)
347 349 received listkey for "bookmarks": 0 bytes
348 350 1 changesets found
349 351 list of changesets:
350 352 7f4e523d01f2cc3765ac8934da3d14db775ff872
351 353 bundle2-output-bundle: "HG20", 5 parts total
352 354 bundle2-output-part: "replycaps" 207 bytes payload
353 355 bundle2-output-part: "check:phases" 24 bytes payload
354 356 bundle2-output-part: "check:updated-heads" streamed payload
355 357 bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
356 358 bundle2-output-part: "phase-heads" 24 bytes payload
357 359 sending unbundle command
358 360 sending 1023 bytes
359 361 devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
360 362 devel-peer-request: Content-length 1023
361 363 devel-peer-request: Content-type application/mercurial-0.1
362 364 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
363 365 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
364 366 devel-peer-request: 16 bytes of commands arguments in headers
365 367 devel-peer-request: 1023 bytes of data
366 368 devel-peer-request: finished in *.???? seconds (200) (glob)
367 369 bundle2-input-bundle: no-transaction
368 370 bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
369 371 bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
370 372 bundle2-input-part: total payload size 55
371 373 remote: adding changesets
372 374 remote: adding manifests
373 375 remote: adding file changes
374 376 bundle2-input-part: "output" (advisory) supported
375 377 bundle2-input-part: total payload size 45
376 378 remote: added 1 changesets with 1 changes to 1 files
377 379 bundle2-input-bundle: 3 parts total
378 380 preparing listkeys for "phases"
379 381 sending listkeys command
380 382 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
381 383 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
382 384 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
383 385 devel-peer-request: 16 bytes of commands arguments in headers
384 386 devel-peer-request: finished in *.???? seconds (200) (glob)
385 387 received listkey for "phases": 15 bytes
386 388 (sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
387 389 $ hg rollback -q
388 390
389 391 $ sed 's/.*] "/"/' < ../access.log
390 392 "GET /?cmd=capabilities HTTP/1.1" 401 -
391 393 "GET /?cmd=capabilities HTTP/1.1" 401 -
392 394 "GET /?cmd=capabilities HTTP/1.1" 401 -
393 395 "GET /?cmd=capabilities HTTP/1.1" 401 -
394 396 "GET /?cmd=capabilities HTTP/1.1" 401 -
395 397 "GET /?cmd=capabilities HTTP/1.1" 401 -
396 398 "GET /?cmd=capabilities HTTP/1.1" 200 -
397 399 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
398 400 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
399 401 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
400 402 "GET /?cmd=capabilities HTTP/1.1" 401 -
401 403 "GET /?cmd=capabilities HTTP/1.1" 200 -
402 404 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
403 405 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
404 406 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
405 407 "GET /?cmd=capabilities HTTP/1.1" 401 -
406 408 "GET /?cmd=capabilities HTTP/1.1" 200 -
407 409 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
408 410 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
409 411 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
410 412 "GET /?cmd=capabilities HTTP/1.1" 401 -
411 413 "GET /?cmd=capabilities HTTP/1.1" 200 -
412 414 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
413 415 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
414 416 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
415 417 "GET /?cmd=capabilities HTTP/1.1" 401 -
416 418 "GET /?cmd=capabilities HTTP/1.1" 200 -
417 419 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
418 420 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
419 421 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
420 422 "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest
421 423 "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest
422 424 "GET /?cmd=lookup HTTP/1.1" 401 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
423 425 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
424 426 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
425 427 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
426 428 "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
427 429 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
428 430 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
429 431 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
430 432 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
431 433 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
432 434 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
433 435 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
434 436 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
435 437 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
436 438 "GET /?cmd=capabilities HTTP/1.1" 401 -
437 439 "GET /?cmd=capabilities HTTP/1.1" 401 -
438 440 "GET /?cmd=capabilities HTTP/1.1" 403 -
439 441 "GET /?cmd=capabilities HTTP/1.1" 401 -
440 442 "GET /?cmd=capabilities HTTP/1.1" 200 -
441 443 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
442 444 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
443 445 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
444 446 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
445 447 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
446 448 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
447 449 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
448 450 "GET /?cmd=capabilities HTTP/1.1" 401 -
449 451 "GET /?cmd=capabilities HTTP/1.1" 200 -
450 452 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
451 453 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
452 454 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
453 455 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
454 456 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
455 457 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
456 458 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
457 459
458 460 $ cd ..
459 461
460 462 clone of serve with repo in root and unserved subrepo (issue2970)
461 463
462 464 $ hg --cwd test init sub
463 465 $ echo empty > test/sub/empty
464 466 $ hg --cwd test/sub add empty
465 467 $ hg --cwd test/sub commit -qm 'add empty'
466 468 $ hg --cwd test/sub tag -r 0 something
467 469 $ echo sub = sub > test/.hgsub
468 470 $ hg --cwd test add .hgsub
469 471 $ hg --cwd test commit -qm 'add subrepo'
470 472 $ hg clone http://localhost:$HGPORT noslash-clone
471 473 requesting all changes
472 474 adding changesets
473 475 adding manifests
474 476 adding file changes
475 477 added 3 changesets with 7 changes to 7 files
476 478 new changesets 8b6053c928fe:56f9bc90cce6
477 479 updating to branch default
478 480 cloning subrepo sub from http://localhost:$HGPORT/sub
479 481 abort: HTTP Error 404: Not Found
480 482 [100]
481 483 $ hg clone http://localhost:$HGPORT/ slash-clone
482 484 requesting all changes
483 485 adding changesets
484 486 adding manifests
485 487 adding file changes
486 488 added 3 changesets with 7 changes to 7 files
487 489 new changesets 8b6053c928fe:56f9bc90cce6
488 490 updating to branch default
489 491 cloning subrepo sub from http://localhost:$HGPORT/sub
490 492 abort: HTTP Error 404: Not Found
491 493 [100]
492 494
493 495 check error log
494 496
495 497 $ cat error.log
496 498
497 499 $ cat errors2.log
498 500
499 501 check abort error reporting while pulling/cloning
500 502
501 503 $ $RUNTESTDIR/killdaemons.py
502 504 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
503 505 $ cat hg3.pid >> $DAEMON_PIDS
504 506 $ hg clone http://localhost:$HGPORT/ abort-clone
505 507 requesting all changes
506 508 remote: abort: this is an exercise
507 509 abort: pull failed on remote
508 510 [100]
509 511 $ cat error.log
510 512
511 513 disable pull-based clones
512 514
513 515 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
514 516 $ cat hg4.pid >> $DAEMON_PIDS
515 517 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
516 518 requesting all changes
517 519 remote: abort: server has pull-based clones disabled
518 520 abort: pull failed on remote
519 521 (remove --pull if specified or upgrade Mercurial)
520 522 [100]
521 523
522 524 #if no-reposimplestore
523 525 ... but keep stream clones working
524 526
525 527 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
526 528 streaming all changes
527 529 * files to transfer, * of data (glob)
528 530 transferred * in * seconds (*/sec) (glob)
529 531 $ cat error.log
530 532 #endif
531 533
532 534 ... and also keep partial clones and pulls working
533 535 $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
534 536 adding changesets
535 537 adding manifests
536 538 adding file changes
537 539 added 1 changesets with 4 changes to 4 files
538 540 new changesets 8b6053c928fe
539 541 updating to branch default
540 542 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
541 543 $ hg pull -R test/partial/clone
542 544 pulling from http://localhost:$HGPORT1/
543 545 searching for changes
544 546 adding changesets
545 547 adding manifests
546 548 adding file changes
547 549 added 2 changesets with 3 changes to 3 files
548 550 new changesets 5fed3813f7f5:56f9bc90cce6
549 551 (run 'hg update' to get a working copy)
550 552
551 553 $ hg clone -U -r 0 test/partial/clone test/another/clone
552 554 adding changesets
553 555 adding manifests
554 556 adding file changes
555 557 added 1 changesets with 4 changes to 4 files
556 558 new changesets 8b6053c928fe
557 559
558 560 corrupt cookies file should yield a warning
559 561
560 562 $ cat > $TESTTMP/cookies.txt << EOF
561 563 > bad format
562 564 > EOF
563 565
564 566 $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
565 567 (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
566 568 56f9bc90cce6
567 569
568 570 $ killdaemons.py
569 571
570 572 Create dummy authentication handler that looks for cookies. It doesn't do anything
571 573 useful. It just raises an HTTP 500 with details about the Cookie request header.
572 574 We raise HTTP 500 because its message is printed in the abort message.
573 575
574 576 $ cat > cookieauth.py << EOF
575 577 > from mercurial import util
576 578 > from mercurial.hgweb import common
577 579 > def perform_authentication(hgweb, req, op):
578 580 > cookie = req.headers.get(b'Cookie')
579 581 > if not cookie:
580 582 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'no-cookie')
581 583 > raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'Cookie: %s' % cookie)
582 584 > def extsetup(ui):
583 585 > common.permhooks.insert(0, perform_authentication)
584 586 > EOF
585 587
586 588 $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
587 589 $ cat pid > $DAEMON_PIDS
588 590
589 591 Request without cookie sent should fail due to lack of cookie
590 592
591 593 $ hg id http://localhost:$HGPORT
592 594 abort: HTTP Error 500: no-cookie
593 595 [100]
594 596
595 597 Populate a cookies file
596 598
597 599 $ cat > cookies.txt << EOF
598 600 > # HTTP Cookie File
599 601 > # Expiration is 2030-01-01 at midnight
600 602 > .example.com TRUE / FALSE 1893456000 hgkey examplevalue
601 603 > EOF
602 604
603 605 Should not send a cookie for another domain
604 606
605 607 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
606 608 abort: HTTP Error 500: no-cookie
607 609 [100]
608 610
609 611 Add a cookie entry for our test server and verify it is sent
610 612
611 613 $ cat >> cookies.txt << EOF
612 614 > localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue
613 615 > EOF
614 616
615 617 $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
616 618 abort: HTTP Error 500: Cookie: hgkey=localhostvalue
617 619 [100]
General Comments 0
You need to be logged in to leave comments. Login now