stream-clone: filter possible missing requirements using all supported one...
marmoute
r49831:d9017df7 default
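The two lines changed below (in canperformstreamclone(), source line 131, and applybundlev1(), source line 506) both replace repo.supportedformats with repo.supported when filtering the requirements advertised for a stream clone: the check now runs against every requirement this client understands rather than only the store-format subset, so requirements the client does support are no longer reported as missing. A minimal sketch of the effect, using hypothetical requirement sets rather than values read from a real repository:

    # hypothetical requirement sets, for illustration only
    supportedformats = {b'revlogv1', b'generaldelta', b'sparserevlog'}
    supported = supportedformats | {b'store', b'fncache', b'dotencode'}

    # requirements advertised by the server for the stream clone
    streamreqs = {b'revlogv1', b'store', b'fncache'}

    # old check: only the store-format subset -> spurious "missing" entries
    print(sorted(streamreqs - supportedformats))  # [b'fncache', b'store']
    # new check: everything the client supports -> nothing missing
    print(sorted(streamreqs - supported))         # []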
@@ -1,941 +1,941 @@
1 # streamclone.py - producing and consuming streaming repository data
1 # streamclone.py - producing and consuming streaming repository data
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import open
16 from .pycompat import open
17 from .interfaces import repository
17 from .interfaces import repository
18 from . import (
18 from . import (
19 bookmarks,
19 bookmarks,
20 cacheutil,
20 cacheutil,
21 error,
21 error,
22 narrowspec,
22 narrowspec,
23 phases,
23 phases,
24 pycompat,
24 pycompat,
25 requirements as requirementsmod,
25 requirements as requirementsmod,
26 scmutil,
26 scmutil,
27 store,
27 store,
28 util,
28 util,
29 )
29 )
30 from .utils import (
30 from .utils import (
31 stringutil,
31 stringutil,
32 )
32 )
33
33
34
34
35 def new_stream_clone_requirements(
35 def new_stream_clone_requirements(
36 supported_formats, default_requirements, streamed_requirements
36 supported_formats, default_requirements, streamed_requirements
37 ):
37 ):
38 """determine the final set of requirement for a new stream clone
38 """determine the final set of requirement for a new stream clone
39
39
40 this method combines the "default" requirements that a new repository would
40 this method combines the "default" requirements that a new repository would
41 use with the constraints we get from the stream clone content. We keep local
41 use with the constraints we get from the stream clone content. We keep local
42 configuration choices when possible.
42 configuration choices when possible.
43 """
43 """
44 requirements = set(default_requirements)
44 requirements = set(default_requirements)
45 requirements -= supported_formats
45 requirements -= supported_formats
46 requirements.update(streamed_requirements)
46 requirements.update(streamed_requirements)
47 return requirements
47 return requirements
48
48
49
49
50 def streamed_requirements(repo):
50 def streamed_requirements(repo):
51 """the set of requirement the new clone will have to support
51 """the set of requirement the new clone will have to support
52
52
53 This is used for advertising the stream options and to generate the actual
53 This is used for advertising the stream options and to generate the actual
54 stream content."""
54 stream content."""
55 requiredformats = repo.requirements & repo.supportedformats
55 requiredformats = repo.requirements & repo.supportedformats
56 return requiredformats
56 return requiredformats
57
57
58
58
59 def canperformstreamclone(pullop, bundle2=False):
59 def canperformstreamclone(pullop, bundle2=False):
60 """Whether it is possible to perform a streaming clone as part of pull.
60 """Whether it is possible to perform a streaming clone as part of pull.
61
61
62 ``bundle2`` will cause the function to consider stream clone through
62 ``bundle2`` will cause the function to consider stream clone through
63 bundle2 and only through bundle2.
63 bundle2 and only through bundle2.
64
64
65 Returns a tuple of (supported, requirements). ``supported`` is True if
65 Returns a tuple of (supported, requirements). ``supported`` is True if
66 streaming clone is supported and False otherwise. ``requirements`` is
66 streaming clone is supported and False otherwise. ``requirements`` is
67 a set of repo requirements from the remote, or ``None`` if stream clone
67 a set of repo requirements from the remote, or ``None`` if stream clone
68 isn't supported.
68 isn't supported.
69 """
69 """
70 repo = pullop.repo
70 repo = pullop.repo
71 remote = pullop.remote
71 remote = pullop.remote
72
72
73 bundle2supported = False
73 bundle2supported = False
74 if pullop.canusebundle2:
74 if pullop.canusebundle2:
75 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
75 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
76 bundle2supported = True
76 bundle2supported = True
77 # else
77 # else
78 # Server doesn't support bundle2 stream clone or doesn't support
78 # Server doesn't support bundle2 stream clone or doesn't support
79 # the versions we support. Fall back and possibly allow legacy.
79 # the versions we support. Fall back and possibly allow legacy.
80
80
81 # Ensures legacy code path uses available bundle2.
81 # Ensures legacy code path uses available bundle2.
82 if bundle2supported and not bundle2:
82 if bundle2supported and not bundle2:
83 return False, None
83 return False, None
84 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
84 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
85 elif bundle2 and not bundle2supported:
85 elif bundle2 and not bundle2supported:
86 return False, None
86 return False, None
87
87
88 # Streaming clone only works on empty repositories.
88 # Streaming clone only works on empty repositories.
89 if len(repo):
89 if len(repo):
90 return False, None
90 return False, None
91
91
92 # Streaming clone only works if all data is being requested.
92 # Streaming clone only works if all data is being requested.
93 if pullop.heads:
93 if pullop.heads:
94 return False, None
94 return False, None
95
95
96 streamrequested = pullop.streamclonerequested
96 streamrequested = pullop.streamclonerequested
97
97
98 # If we don't have a preference, let the server decide for us. This
98 # If we don't have a preference, let the server decide for us. This
99 # likely only comes into play in LANs.
99 # likely only comes into play in LANs.
100 if streamrequested is None:
100 if streamrequested is None:
101 # The server can advertise whether to prefer streaming clone.
101 # The server can advertise whether to prefer streaming clone.
102 streamrequested = remote.capable(b'stream-preferred')
102 streamrequested = remote.capable(b'stream-preferred')
103
103
104 if not streamrequested:
104 if not streamrequested:
105 return False, None
105 return False, None
106
106
107 # In order for stream clone to work, the client has to support all the
107 # In order for stream clone to work, the client has to support all the
108 # requirements advertised by the server.
108 # requirements advertised by the server.
109 #
109 #
110 # The server advertises its requirements via the "stream" and "streamreqs"
110 # The server advertises its requirements via the "stream" and "streamreqs"
111 # capability. "stream" (a value-less capability) is advertised if and only
111 # capability. "stream" (a value-less capability) is advertised if and only
112 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
112 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
113 # is advertised and contains a comma-delimited list of requirements.
113 # is advertised and contains a comma-delimited list of requirements.
114 requirements = set()
114 requirements = set()
115 if remote.capable(b'stream'):
115 if remote.capable(b'stream'):
116 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
116 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
117 else:
117 else:
118 streamreqs = remote.capable(b'streamreqs')
118 streamreqs = remote.capable(b'streamreqs')
119 # This is weird and shouldn't happen with modern servers.
119 # This is weird and shouldn't happen with modern servers.
120 if not streamreqs:
120 if not streamreqs:
121 pullop.repo.ui.warn(
121 pullop.repo.ui.warn(
122 _(
122 _(
123 b'warning: stream clone requested but server has them '
123 b'warning: stream clone requested but server has them '
124 b'disabled\n'
124 b'disabled\n'
125 )
125 )
126 )
126 )
127 return False, None
127 return False, None
128
128
129 streamreqs = set(streamreqs.split(b','))
129 streamreqs = set(streamreqs.split(b','))
130 # Server requires something we don't support. Bail.
130 # Server requires something we don't support. Bail.
131 - missingreqs = streamreqs - repo.supportedformats
131 + missingreqs = streamreqs - repo.supported
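# note: repo.supported lists every requirement this client understands
# (store-format requirements plus the others, e.g. fncache or dotencode),
# while repo.supportedformats covers only the store-format subset, so the
# broader set avoids flagging requirements the client actually supports.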
132 if missingreqs:
132 if missingreqs:
133 pullop.repo.ui.warn(
133 pullop.repo.ui.warn(
134 _(
134 _(
135 b'warning: stream clone requested but client is missing '
135 b'warning: stream clone requested but client is missing '
136 b'requirements: %s\n'
136 b'requirements: %s\n'
137 )
137 )
138 % b', '.join(sorted(missingreqs))
138 % b', '.join(sorted(missingreqs))
139 )
139 )
140 pullop.repo.ui.warn(
140 pullop.repo.ui.warn(
141 _(
141 _(
142 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
142 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
143 b'for more information)\n'
143 b'for more information)\n'
144 )
144 )
145 )
145 )
146 return False, None
146 return False, None
147 requirements = streamreqs
147 requirements = streamreqs
148
148
149 return True, requirements
149 return True, requirements
150
150
151
151
152 def maybeperformlegacystreamclone(pullop):
152 def maybeperformlegacystreamclone(pullop):
153 """Possibly perform a legacy stream clone operation.
153 """Possibly perform a legacy stream clone operation.
154
154
155 Legacy stream clones are performed as part of pull but before all other
155 Legacy stream clones are performed as part of pull but before all other
156 operations.
156 operations.
157
157
158 A legacy stream clone will not be performed if a bundle2 stream clone is
158 A legacy stream clone will not be performed if a bundle2 stream clone is
159 supported.
159 supported.
160 """
160 """
161 from . import localrepo
161 from . import localrepo
162
162
163 supported, requirements = canperformstreamclone(pullop)
163 supported, requirements = canperformstreamclone(pullop)
164
164
165 if not supported:
165 if not supported:
166 return
166 return
167
167
168 repo = pullop.repo
168 repo = pullop.repo
169 remote = pullop.remote
169 remote = pullop.remote
170
170
171 # Save remote branchmap. We will use it later to speed up branchcache
171 # Save remote branchmap. We will use it later to speed up branchcache
172 # creation.
172 # creation.
173 rbranchmap = None
173 rbranchmap = None
174 if remote.capable(b'branchmap'):
174 if remote.capable(b'branchmap'):
175 with remote.commandexecutor() as e:
175 with remote.commandexecutor() as e:
176 rbranchmap = e.callcommand(b'branchmap', {}).result()
176 rbranchmap = e.callcommand(b'branchmap', {}).result()
177
177
178 repo.ui.status(_(b'streaming all changes\n'))
178 repo.ui.status(_(b'streaming all changes\n'))
179
179
180 with remote.commandexecutor() as e:
180 with remote.commandexecutor() as e:
181 fp = e.callcommand(b'stream_out', {}).result()
181 fp = e.callcommand(b'stream_out', {}).result()
182
182
183 # TODO strictly speaking, this code should all be inside the context
183 # TODO strictly speaking, this code should all be inside the context
184 # manager because the context manager is supposed to ensure all wire state
184 # manager because the context manager is supposed to ensure all wire state
185 # is flushed when exiting. But the legacy peers don't do this, so it
185 # is flushed when exiting. But the legacy peers don't do this, so it
186 # doesn't matter.
186 # doesn't matter.
187 l = fp.readline()
187 l = fp.readline()
188 try:
188 try:
189 resp = int(l)
189 resp = int(l)
190 except ValueError:
190 except ValueError:
191 raise error.ResponseError(
191 raise error.ResponseError(
192 _(b'unexpected response from remote server:'), l
192 _(b'unexpected response from remote server:'), l
193 )
193 )
194 if resp == 1:
194 if resp == 1:
195 raise error.Abort(_(b'operation forbidden by server'))
195 raise error.Abort(_(b'operation forbidden by server'))
196 elif resp == 2:
196 elif resp == 2:
197 raise error.Abort(_(b'locking the remote repository failed'))
197 raise error.Abort(_(b'locking the remote repository failed'))
198 elif resp != 0:
198 elif resp != 0:
199 raise error.Abort(_(b'the server sent an unknown error code'))
199 raise error.Abort(_(b'the server sent an unknown error code'))
200
200
201 l = fp.readline()
201 l = fp.readline()
202 try:
202 try:
203 filecount, bytecount = map(int, l.split(b' ', 1))
203 filecount, bytecount = map(int, l.split(b' ', 1))
204 except (ValueError, TypeError):
204 except (ValueError, TypeError):
205 raise error.ResponseError(
205 raise error.ResponseError(
206 _(b'unexpected response from remote server:'), l
206 _(b'unexpected response from remote server:'), l
207 )
207 )
208
208
209 with repo.lock():
209 with repo.lock():
210 consumev1(repo, fp, filecount, bytecount)
210 consumev1(repo, fp, filecount, bytecount)
211 repo.requirements = new_stream_clone_requirements(
211 repo.requirements = new_stream_clone_requirements(
212 repo.supportedformats,
212 repo.supportedformats,
213 repo.requirements,
213 repo.requirements,
214 requirements,
214 requirements,
215 )
215 )
216 repo.svfs.options = localrepo.resolvestorevfsoptions(
216 repo.svfs.options = localrepo.resolvestorevfsoptions(
217 repo.ui, repo.requirements, repo.features
217 repo.ui, repo.requirements, repo.features
218 )
218 )
219 scmutil.writereporequirements(repo)
219 scmutil.writereporequirements(repo)
220
220
221 if rbranchmap:
221 if rbranchmap:
222 repo._branchcaches.replace(repo, rbranchmap)
222 repo._branchcaches.replace(repo, rbranchmap)
223
223
224 repo.invalidate()
224 repo.invalidate()
225
225
226
226
227 def allowservergeneration(repo):
227 def allowservergeneration(repo):
228 """Whether streaming clones are allowed from the server."""
228 """Whether streaming clones are allowed from the server."""
229 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
229 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
230 return False
230 return False
231
231
232 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
232 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
233 return False
233 return False
234
234
235 # The way stream clone works makes it impossible to hide secret changesets.
235 # The way stream clone works makes it impossible to hide secret changesets.
236 # So don't allow this by default.
236 # So don't allow this by default.
237 secret = phases.hassecret(repo)
237 secret = phases.hassecret(repo)
238 if secret:
238 if secret:
239 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
239 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
240
240
241 return True
241 return True
242
242
243
243
244 # This is its own function so extensions can override it.
244 # This is its own function so extensions can override it.
245 def _walkstreamfiles(repo, matcher=None):
245 def _walkstreamfiles(repo, matcher=None):
246 return repo.store.walk(matcher)
246 return repo.store.walk(matcher)
247
247
248
248
249 def generatev1(repo):
249 def generatev1(repo):
250 """Emit content for version 1 of a streaming clone.
250 """Emit content for version 1 of a streaming clone.
251
251
252 This returns a 3-tuple of (file count, byte size, data iterator).
252 This returns a 3-tuple of (file count, byte size, data iterator).
253
253
254 The data iterator consists of N entries for each file being transferred.
254 The data iterator consists of N entries for each file being transferred.
255 Each file entry starts as a line with the file name and integer size
255 Each file entry starts as a line with the file name and integer size
256 delimited by a null byte.
256 delimited by a null byte.
257
257
258 The raw file data follows. Following the raw file data is the next file
258 The raw file data follows. Following the raw file data is the next file
259 entry, or EOF.
259 entry, or EOF.
260
260
261 When used on the wire protocol, an additional line indicating protocol
261 When used on the wire protocol, an additional line indicating protocol
262 success will be prepended to the stream. This function is not responsible
262 success will be prepended to the stream. This function is not responsible
263 for adding it.
263 for adding it.
264
264
265 This function will obtain a repository lock to ensure a consistent view of
265 This function will obtain a repository lock to ensure a consistent view of
266 the store is captured. It therefore may raise LockError.
266 the store is captured. It therefore may raise LockError.
267 """
267 """
268 entries = []
268 entries = []
269 total_bytes = 0
269 total_bytes = 0
270 # Get consistent snapshot of repo, lock during scan.
270 # Get consistent snapshot of repo, lock during scan.
271 with repo.lock():
271 with repo.lock():
272 repo.ui.debug(b'scanning\n')
272 repo.ui.debug(b'scanning\n')
273 for file_type, name, size in _walkstreamfiles(repo):
273 for file_type, name, size in _walkstreamfiles(repo):
274 if size:
274 if size:
275 entries.append((name, size))
275 entries.append((name, size))
276 total_bytes += size
276 total_bytes += size
277 _test_sync_point_walk_1(repo)
277 _test_sync_point_walk_1(repo)
278 _test_sync_point_walk_2(repo)
278 _test_sync_point_walk_2(repo)
279
279
280 repo.ui.debug(
280 repo.ui.debug(
281 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
281 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
282 )
282 )
283
283
284 svfs = repo.svfs
284 svfs = repo.svfs
285 debugflag = repo.ui.debugflag
285 debugflag = repo.ui.debugflag
286
286
287 def emitrevlogdata():
287 def emitrevlogdata():
288 for name, size in entries:
288 for name, size in entries:
289 if debugflag:
289 if debugflag:
290 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
290 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
291 # partially encode name over the wire for backwards compat
291 # partially encode name over the wire for backwards compat
292 yield b'%s\0%d\n' % (store.encodedir(name), size)
292 yield b'%s\0%d\n' % (store.encodedir(name), size)
293 # auditing at this stage is both pointless (paths are already
293 # auditing at this stage is both pointless (paths are already
294 # trusted by the local repo) and expensive
294 # trusted by the local repo) and expensive
295 with svfs(name, b'rb', auditpath=False) as fp:
295 with svfs(name, b'rb', auditpath=False) as fp:
296 if size <= 65536:
296 if size <= 65536:
297 yield fp.read(size)
297 yield fp.read(size)
298 else:
298 else:
299 for chunk in util.filechunkiter(fp, limit=size):
299 for chunk in util.filechunkiter(fp, limit=size):
300 yield chunk
300 yield chunk
301
301
302 return len(entries), total_bytes, emitrevlogdata()
302 return len(entries), total_bytes, emitrevlogdata()
303
303
304
304
305 def generatev1wireproto(repo):
305 def generatev1wireproto(repo):
306 """Emit content for version 1 of streaming clone suitable for the wire.
306 """Emit content for version 1 of streaming clone suitable for the wire.
307
307
308 This is the data output from ``generatev1()`` with 2 header lines. The
308 This is the data output from ``generatev1()`` with 2 header lines. The
309 first line indicates overall success. The 2nd contains the file count and
309 first line indicates overall success. The 2nd contains the file count and
310 byte size of payload.
310 byte size of payload.
311
311
312 The success line contains "0" for success, "1" for stream generation not
312 The success line contains "0" for success, "1" for stream generation not
313 allowed, and "2" for error locking the repository (possibly indicating
313 allowed, and "2" for error locking the repository (possibly indicating
314 a permissions error for the server process).
314 a permissions error for the server process).
315 """
315 """
316 if not allowservergeneration(repo):
316 if not allowservergeneration(repo):
317 yield b'1\n'
317 yield b'1\n'
318 return
318 return
319
319
320 try:
320 try:
321 filecount, bytecount, it = generatev1(repo)
321 filecount, bytecount, it = generatev1(repo)
322 except error.LockError:
322 except error.LockError:
323 yield b'2\n'
323 yield b'2\n'
324 return
324 return
325
325
326 # Indicates successful response.
326 # Indicates successful response.
327 yield b'0\n'
327 yield b'0\n'
328 yield b'%d %d\n' % (filecount, bytecount)
328 yield b'%d %d\n' % (filecount, bytecount)
329 for chunk in it:
329 for chunk in it:
330 yield chunk
330 yield chunk
331
331
332
332
333 def generatebundlev1(repo, compression=b'UN'):
333 def generatebundlev1(repo, compression=b'UN'):
334 """Emit content for version 1 of a stream clone bundle.
334 """Emit content for version 1 of a stream clone bundle.
335
335
336 The first 4 bytes of the output ("HGS1") denote this as stream clone
336 The first 4 bytes of the output ("HGS1") denote this as stream clone
337 bundle version 1.
337 bundle version 1.
338
338
339 The next 2 bytes indicate the compression type. Only "UN" is currently
339 The next 2 bytes indicate the compression type. Only "UN" is currently
340 supported.
340 supported.
341
341
342 The next 16 bytes are two 64-bit big endian unsigned integers indicating
342 The next 16 bytes are two 64-bit big endian unsigned integers indicating
343 file count and byte count, respectively.
343 file count and byte count, respectively.
344
344
345 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
345 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
346 of the requirements string, including a trailing \0. The following N bytes
346 of the requirements string, including a trailing \0. The following N bytes
347 are the requirements string, which is ASCII containing a comma-delimited
347 are the requirements string, which is ASCII containing a comma-delimited
348 list of repo requirements that are needed to support the data.
348 list of repo requirements that are needed to support the data.
349
349
350 The remaining content is the output of ``generatev1()`` (which may be
350 The remaining content is the output of ``generatev1()`` (which may be
351 compressed in the future).
351 compressed in the future).
352
352
353 Returns a tuple of (requirements, data generator).
353 Returns a tuple of (requirements, data generator).
354 """
354 """
355 if compression != b'UN':
355 if compression != b'UN':
356 raise ValueError(b'we do not support the compression argument yet')
356 raise ValueError(b'we do not support the compression argument yet')
357
357
358 requirements = streamed_requirements(repo)
358 requirements = streamed_requirements(repo)
359 requires = b','.join(sorted(requirements))
359 requires = b','.join(sorted(requirements))
360
360
361 def gen():
361 def gen():
362 yield b'HGS1'
362 yield b'HGS1'
363 yield compression
363 yield compression
364
364
365 filecount, bytecount, it = generatev1(repo)
365 filecount, bytecount, it = generatev1(repo)
366 repo.ui.status(
366 repo.ui.status(
367 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
367 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
368 )
368 )
369
369
370 yield struct.pack(b'>QQ', filecount, bytecount)
370 yield struct.pack(b'>QQ', filecount, bytecount)
371 yield struct.pack(b'>H', len(requires) + 1)
371 yield struct.pack(b'>H', len(requires) + 1)
372 yield requires + b'\0'
372 yield requires + b'\0'
373
373
374 # This is where we'll add compression in the future.
374 # This is where we'll add compression in the future.
375 assert compression == b'UN'
375 assert compression == b'UN'
376
376
377 progress = repo.ui.makeprogress(
377 progress = repo.ui.makeprogress(
378 _(b'bundle'), total=bytecount, unit=_(b'bytes')
378 _(b'bundle'), total=bytecount, unit=_(b'bytes')
379 )
379 )
380 progress.update(0)
380 progress.update(0)
381
381
382 for chunk in it:
382 for chunk in it:
383 progress.increment(step=len(chunk))
383 progress.increment(step=len(chunk))
384 yield chunk
384 yield chunk
385
385
386 progress.complete()
386 progress.complete()
387
387
388 return requirements, gen()
388 return requirements, gen()
389
389
390
390
391 def consumev1(repo, fp, filecount, bytecount):
391 def consumev1(repo, fp, filecount, bytecount):
392 """Apply the contents from version 1 of a streaming clone file handle.
392 """Apply the contents from version 1 of a streaming clone file handle.
393
393
394 This takes the output from "stream_out" and applies it to the specified
394 This takes the output from "stream_out" and applies it to the specified
395 repository.
395 repository.
396
396
397 Like "stream_out," the status line added by the wire protocol is not
397 Like "stream_out," the status line added by the wire protocol is not
398 handled by this function.
398 handled by this function.
399 """
399 """
400 with repo.lock():
400 with repo.lock():
401 repo.ui.status(
401 repo.ui.status(
402 _(b'%d files to transfer, %s of data\n')
402 _(b'%d files to transfer, %s of data\n')
403 % (filecount, util.bytecount(bytecount))
403 % (filecount, util.bytecount(bytecount))
404 )
404 )
405 progress = repo.ui.makeprogress(
405 progress = repo.ui.makeprogress(
406 _(b'clone'), total=bytecount, unit=_(b'bytes')
406 _(b'clone'), total=bytecount, unit=_(b'bytes')
407 )
407 )
408 progress.update(0)
408 progress.update(0)
409 start = util.timer()
409 start = util.timer()
410
410
411 # TODO: get rid of (potential) inconsistency
411 # TODO: get rid of (potential) inconsistency
412 #
412 #
413 # If transaction is started and any @filecache property is
413 # If transaction is started and any @filecache property is
414 # changed at this point, it causes inconsistency between
414 # changed at this point, it causes inconsistency between
415 # in-memory cached property and streamclone-ed file on the
415 # in-memory cached property and streamclone-ed file on the
416 # disk. Nested transaction prevents transaction scope "clone"
416 # disk. Nested transaction prevents transaction scope "clone"
417 # below from writing in-memory changes out at the end of it,
417 # below from writing in-memory changes out at the end of it,
418 # even though in-memory changes are discarded at the end of it
418 # even though in-memory changes are discarded at the end of it
419 # regardless of transaction nesting.
419 # regardless of transaction nesting.
420 #
420 #
421 # But transaction nesting can't be simply prohibited, because
421 # But transaction nesting can't be simply prohibited, because
422 # nesting occurs also in ordinary case (e.g. enabling
422 # nesting occurs also in ordinary case (e.g. enabling
423 # clonebundles).
423 # clonebundles).
424
424
425 with repo.transaction(b'clone'):
425 with repo.transaction(b'clone'):
426 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
426 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
427 for i in pycompat.xrange(filecount):
427 for i in pycompat.xrange(filecount):
428 # XXX doesn't support '\n' or '\r' in filenames
428 # XXX doesn't support '\n' or '\r' in filenames
429 l = fp.readline()
429 l = fp.readline()
430 try:
430 try:
431 name, size = l.split(b'\0', 1)
431 name, size = l.split(b'\0', 1)
432 size = int(size)
432 size = int(size)
433 except (ValueError, TypeError):
433 except (ValueError, TypeError):
434 raise error.ResponseError(
434 raise error.ResponseError(
435 _(b'unexpected response from remote server:'), l
435 _(b'unexpected response from remote server:'), l
436 )
436 )
437 if repo.ui.debugflag:
437 if repo.ui.debugflag:
438 repo.ui.debug(
438 repo.ui.debug(
439 b'adding %s (%s)\n' % (name, util.bytecount(size))
439 b'adding %s (%s)\n' % (name, util.bytecount(size))
440 )
440 )
441 # for backwards compat, name was partially encoded
441 # for backwards compat, name was partially encoded
442 path = store.decodedir(name)
442 path = store.decodedir(name)
443 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
443 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
444 for chunk in util.filechunkiter(fp, limit=size):
444 for chunk in util.filechunkiter(fp, limit=size):
445 progress.increment(step=len(chunk))
445 progress.increment(step=len(chunk))
446 ofp.write(chunk)
446 ofp.write(chunk)
447
447
448 # force @filecache properties to be reloaded from
448 # force @filecache properties to be reloaded from
449 # streamclone-ed file at next access
449 # streamclone-ed file at next access
450 repo.invalidate(clearfilecache=True)
450 repo.invalidate(clearfilecache=True)
451
451
452 elapsed = util.timer() - start
452 elapsed = util.timer() - start
453 if elapsed <= 0:
453 if elapsed <= 0:
454 elapsed = 0.001
454 elapsed = 0.001
455 progress.complete()
455 progress.complete()
456 repo.ui.status(
456 repo.ui.status(
457 _(b'transferred %s in %.1f seconds (%s/sec)\n')
457 _(b'transferred %s in %.1f seconds (%s/sec)\n')
458 % (
458 % (
459 util.bytecount(bytecount),
459 util.bytecount(bytecount),
460 elapsed,
460 elapsed,
461 util.bytecount(bytecount / elapsed),
461 util.bytecount(bytecount / elapsed),
462 )
462 )
463 )
463 )
464
464
465
465
466 def readbundle1header(fp):
466 def readbundle1header(fp):
467 compression = fp.read(2)
467 compression = fp.read(2)
468 if compression != b'UN':
468 if compression != b'UN':
469 raise error.Abort(
469 raise error.Abort(
470 _(
470 _(
471 b'only uncompressed stream clone bundles are '
471 b'only uncompressed stream clone bundles are '
472 b'supported; got %s'
472 b'supported; got %s'
473 )
473 )
474 % compression
474 % compression
475 )
475 )
476
476
477 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
477 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
478 requireslen = struct.unpack(b'>H', fp.read(2))[0]
478 requireslen = struct.unpack(b'>H', fp.read(2))[0]
479 requires = fp.read(requireslen)
479 requires = fp.read(requireslen)
480
480
481 if not requires.endswith(b'\0'):
481 if not requires.endswith(b'\0'):
482 raise error.Abort(
482 raise error.Abort(
483 _(
483 _(
484 b'malformed stream clone bundle: '
484 b'malformed stream clone bundle: '
485 b'requirements not properly encoded'
485 b'requirements not properly encoded'
486 )
486 )
487 )
487 )
488
488
489 requirements = set(requires.rstrip(b'\0').split(b','))
489 requirements = set(requires.rstrip(b'\0').split(b','))
490
490
491 return filecount, bytecount, requirements
491 return filecount, bytecount, requirements
492
492
493
493
494 def applybundlev1(repo, fp):
494 def applybundlev1(repo, fp):
495 """Apply the content from a stream clone bundle version 1.
495 """Apply the content from a stream clone bundle version 1.
496
496
497 We assume the 4 byte header has been read and validated and the file handle
497 We assume the 4 byte header has been read and validated and the file handle
498 is at the 2 byte compression identifier.
498 is at the 2 byte compression identifier.
499 """
499 """
500 if len(repo):
500 if len(repo):
501 raise error.Abort(
501 raise error.Abort(
502 _(b'cannot apply stream clone bundle on non-empty repo')
502 _(b'cannot apply stream clone bundle on non-empty repo')
503 )
503 )
504
504
505 filecount, bytecount, requirements = readbundle1header(fp)
505 filecount, bytecount, requirements = readbundle1header(fp)
506 - missingreqs = requirements - repo.supportedformats
506 + missingreqs = requirements - repo.supported
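# as in canperformstreamclone() above: compare the bundle's requirements
# against everything this client supports, not just the store-format subset.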
507 if missingreqs:
507 if missingreqs:
508 raise error.Abort(
508 raise error.Abort(
509 _(b'unable to apply stream clone: unsupported format: %s')
509 _(b'unable to apply stream clone: unsupported format: %s')
510 % b', '.join(sorted(missingreqs))
510 % b', '.join(sorted(missingreqs))
511 )
511 )
512
512
513 consumev1(repo, fp, filecount, bytecount)
513 consumev1(repo, fp, filecount, bytecount)
514
514
515
515
516 class streamcloneapplier(object):
516 class streamcloneapplier(object):
517 """Class to manage applying streaming clone bundles.
517 """Class to manage applying streaming clone bundles.
518
518
519 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
519 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
520 readers to perform bundle type-specific functionality.
520 readers to perform bundle type-specific functionality.
521 """
521 """
522
522
523 def __init__(self, fh):
523 def __init__(self, fh):
524 self._fh = fh
524 self._fh = fh
525
525
526 def apply(self, repo):
526 def apply(self, repo):
527 return applybundlev1(repo, self._fh)
527 return applybundlev1(repo, self._fh)
528
528
529
529
530 # type of file to stream
530 # type of file to stream
531 _fileappend = 0 # append only file
531 _fileappend = 0 # append only file
532 _filefull = 1 # full snapshot file
532 _filefull = 1 # full snapshot file
533
533
534 # Source of the file
534 # Source of the file
535 _srcstore = b's' # store (svfs)
535 _srcstore = b's' # store (svfs)
536 _srccache = b'c' # cache (cache)
536 _srccache = b'c' # cache (cache)
537
537
538 # This is its own function so extensions can override it.
538 # This is its own function so extensions can override it.
539 def _walkstreamfullstorefiles(repo):
539 def _walkstreamfullstorefiles(repo):
540 """list snapshot file from the store"""
540 """list snapshot file from the store"""
541 fnames = []
541 fnames = []
542 if not repo.publishing():
542 if not repo.publishing():
543 fnames.append(b'phaseroots')
543 fnames.append(b'phaseroots')
544 return fnames
544 return fnames
545
545
546
546
547 def _filterfull(entry, copy, vfsmap):
547 def _filterfull(entry, copy, vfsmap):
548 """actually copy the snapshot files"""
548 """actually copy the snapshot files"""
549 src, name, ftype, data = entry
549 src, name, ftype, data = entry
550 if ftype != _filefull:
550 if ftype != _filefull:
551 return entry
551 return entry
552 return (src, name, ftype, copy(vfsmap[src].join(name)))
552 return (src, name, ftype, copy(vfsmap[src].join(name)))
553
553
554
554
555 @contextlib.contextmanager
555 @contextlib.contextmanager
556 def maketempcopies():
556 def maketempcopies():
557 """return a function to temporary copy file"""
557 """return a function to temporary copy file"""
558 files = []
558 files = []
559 try:
559 try:
560
560
561 def copy(src):
561 def copy(src):
562 fd, dst = pycompat.mkstemp()
562 fd, dst = pycompat.mkstemp()
563 os.close(fd)
563 os.close(fd)
564 files.append(dst)
564 files.append(dst)
565 util.copyfiles(src, dst, hardlink=True)
565 util.copyfiles(src, dst, hardlink=True)
566 return dst
566 return dst
567
567
568 yield copy
568 yield copy
569 finally:
569 finally:
570 for tmp in files:
570 for tmp in files:
571 util.tryunlink(tmp)
571 util.tryunlink(tmp)
572
572
573
573
574 def _makemap(repo):
574 def _makemap(repo):
575 """make a (src -> vfs) map for the repo"""
575 """make a (src -> vfs) map for the repo"""
576 vfsmap = {
576 vfsmap = {
577 _srcstore: repo.svfs,
577 _srcstore: repo.svfs,
578 _srccache: repo.cachevfs,
578 _srccache: repo.cachevfs,
579 }
579 }
580 # we keep repo.vfs out of the map on purpose, there are too many dangers there
580 # we keep repo.vfs out of the map on purpose, there are too many dangers there
581 # (eg: .hg/hgrc)
581 # (eg: .hg/hgrc)
582 assert repo.vfs not in vfsmap.values()
582 assert repo.vfs not in vfsmap.values()
583
583
584 return vfsmap
584 return vfsmap
585
585
586
586
587 def _emit2(repo, entries, totalfilesize):
587 def _emit2(repo, entries, totalfilesize):
588 """actually emit the stream bundle"""
588 """actually emit the stream bundle"""
589 vfsmap = _makemap(repo)
589 vfsmap = _makemap(repo)
590 # we keep repo.vfs out of the map on purpose, there are too many dangers there
590 # we keep repo.vfs out of the map on purpose, there are too many dangers there
591 # (eg: .hg/hgrc),
591 # (eg: .hg/hgrc),
592 #
592 #
593 # this assert is duplicated (from _makemap) as the author might think this
593 # this assert is duplicated (from _makemap) as the author might think this
594 # is fine, while it is really not.
594 # is fine, while it is really not.
595 if repo.vfs in vfsmap.values():
595 if repo.vfs in vfsmap.values():
596 raise error.ProgrammingError(
596 raise error.ProgrammingError(
597 b'repo.vfs must not be added to vfsmap for security reasons'
597 b'repo.vfs must not be added to vfsmap for security reasons'
598 )
598 )
599
599
600 progress = repo.ui.makeprogress(
600 progress = repo.ui.makeprogress(
601 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
601 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
602 )
602 )
603 progress.update(0)
603 progress.update(0)
604 with maketempcopies() as copy, progress:
604 with maketempcopies() as copy, progress:
605 # copy is delayed until we are in the try
605 # copy is delayed until we are in the try
606 entries = [_filterfull(e, copy, vfsmap) for e in entries]
606 entries = [_filterfull(e, copy, vfsmap) for e in entries]
607 yield None # this releases the lock on the repository
607 yield None # this releases the lock on the repository
608 totalbytecount = 0
608 totalbytecount = 0
609
609
610 for src, name, ftype, data in entries:
610 for src, name, ftype, data in entries:
611 vfs = vfsmap[src]
611 vfs = vfsmap[src]
612 yield src
612 yield src
613 yield util.uvarintencode(len(name))
613 yield util.uvarintencode(len(name))
614 if ftype == _fileappend:
614 if ftype == _fileappend:
615 fp = vfs(name)
615 fp = vfs(name)
616 size = data
616 size = data
617 elif ftype == _filefull:
617 elif ftype == _filefull:
618 fp = open(data, b'rb')
618 fp = open(data, b'rb')
619 size = util.fstat(fp).st_size
619 size = util.fstat(fp).st_size
620 bytecount = 0
620 bytecount = 0
621 try:
621 try:
622 yield util.uvarintencode(size)
622 yield util.uvarintencode(size)
623 yield name
623 yield name
624 if size <= 65536:
624 if size <= 65536:
625 chunks = (fp.read(size),)
625 chunks = (fp.read(size),)
626 else:
626 else:
627 chunks = util.filechunkiter(fp, limit=size)
627 chunks = util.filechunkiter(fp, limit=size)
628 for chunk in chunks:
628 for chunk in chunks:
629 bytecount += len(chunk)
629 bytecount += len(chunk)
630 totalbytecount += len(chunk)
630 totalbytecount += len(chunk)
631 progress.update(totalbytecount)
631 progress.update(totalbytecount)
632 yield chunk
632 yield chunk
633 if bytecount != size:
633 if bytecount != size:
634 # Would most likely be caused by a race due to `hg strip` or
634 # Would most likely be caused by a race due to `hg strip` or
635 # a revlog split
635 # a revlog split
636 raise error.Abort(
636 raise error.Abort(
637 _(
637 _(
638 b'clone could only read %d bytes from %s, but '
638 b'clone could only read %d bytes from %s, but '
639 b'expected %d bytes'
639 b'expected %d bytes'
640 )
640 )
641 % (bytecount, name, size)
641 % (bytecount, name, size)
642 )
642 )
643 finally:
643 finally:
644 fp.close()
644 fp.close()
645
645
646
646
647 def _test_sync_point_walk_1(repo):
647 def _test_sync_point_walk_1(repo):
648 """a function for synchronisation during tests"""
648 """a function for synchronisation during tests"""
649
649
650
650
651 def _test_sync_point_walk_2(repo):
651 def _test_sync_point_walk_2(repo):
652 """a function for synchronisation during tests"""
652 """a function for synchronisation during tests"""
653
653
654
654
655 def _v2_walk(repo, includes, excludes, includeobsmarkers):
655 def _v2_walk(repo, includes, excludes, includeobsmarkers):
656 """emit a seris of files information useful to clone a repo
656 """emit a seris of files information useful to clone a repo
657
657
658 return (entries, totalfilesize)
658 return (entries, totalfilesize)
659
659
660 entries is a list of tuples (vfs-key, file-path, file-type, size)
660 entries is a list of tuples (vfs-key, file-path, file-type, size)
661
661
662 - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
662 - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
663 - `name`: file path of the file to copy (to be fed to the vfs)
663 - `name`: file path of the file to copy (to be fed to the vfs)
664 - `file-type`: does this file need to be copied with the source lock?
664 - `file-type`: does this file need to be copied with the source lock?
665 - `size`: the size of the file (or None)
665 - `size`: the size of the file (or None)
666 """
666 """
667 assert repo._currentlock(repo._lockref) is not None
667 assert repo._currentlock(repo._lockref) is not None
668 entries = []
668 entries = []
669 totalfilesize = 0
669 totalfilesize = 0
670
670
671 matcher = None
671 matcher = None
672 if includes or excludes:
672 if includes or excludes:
673 matcher = narrowspec.match(repo.root, includes, excludes)
673 matcher = narrowspec.match(repo.root, includes, excludes)
674
674
675 for rl_type, name, size in _walkstreamfiles(repo, matcher):
675 for rl_type, name, size in _walkstreamfiles(repo, matcher):
676 if size:
676 if size:
677 ft = _fileappend
677 ft = _fileappend
678 if rl_type & store.FILEFLAGS_VOLATILE:
678 if rl_type & store.FILEFLAGS_VOLATILE:
679 ft = _filefull
679 ft = _filefull
680 entries.append((_srcstore, name, ft, size))
680 entries.append((_srcstore, name, ft, size))
681 totalfilesize += size
681 totalfilesize += size
682 for name in _walkstreamfullstorefiles(repo):
682 for name in _walkstreamfullstorefiles(repo):
683 if repo.svfs.exists(name):
683 if repo.svfs.exists(name):
684 totalfilesize += repo.svfs.lstat(name).st_size
684 totalfilesize += repo.svfs.lstat(name).st_size
685 entries.append((_srcstore, name, _filefull, None))
685 entries.append((_srcstore, name, _filefull, None))
686 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
686 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
687 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
687 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
688 entries.append((_srcstore, b'obsstore', _filefull, None))
688 entries.append((_srcstore, b'obsstore', _filefull, None))
689 for name in cacheutil.cachetocopy(repo):
689 for name in cacheutil.cachetocopy(repo):
690 if repo.cachevfs.exists(name):
690 if repo.cachevfs.exists(name):
691 totalfilesize += repo.cachevfs.lstat(name).st_size
691 totalfilesize += repo.cachevfs.lstat(name).st_size
692 entries.append((_srccache, name, _filefull, None))
692 entries.append((_srccache, name, _filefull, None))
693 return entries, totalfilesize
693 return entries, totalfilesize
694
694
695
695
696 def generatev2(repo, includes, excludes, includeobsmarkers):
696 def generatev2(repo, includes, excludes, includeobsmarkers):
697 """Emit content for version 2 of a streaming clone.
697 """Emit content for version 2 of a streaming clone.
698
698
699 the data stream consists of the following entries:
699 the data stream consists of the following entries:
700 1) A char representing the file destination (eg: store or cache)
700 1) A char representing the file destination (eg: store or cache)
701 2) A varint containing the length of the filename
701 2) A varint containing the length of the filename
702 3) A varint containing the length of file data
702 3) A varint containing the length of file data
703 4) N bytes containing the filename (the internal, store-agnostic form)
703 4) N bytes containing the filename (the internal, store-agnostic form)
704 5) N bytes containing the file data
704 5) N bytes containing the file data
705
705
706 Returns a 3-tuple of (file count, file size, data iterator).
706 Returns a 3-tuple of (file count, file size, data iterator).
707 """
707 """
708
708
709 with repo.lock():
709 with repo.lock():
710
710
711 repo.ui.debug(b'scanning\n')
711 repo.ui.debug(b'scanning\n')
712
712
713 entries, totalfilesize = _v2_walk(
713 entries, totalfilesize = _v2_walk(
714 repo,
714 repo,
715 includes=includes,
715 includes=includes,
716 excludes=excludes,
716 excludes=excludes,
717 includeobsmarkers=includeobsmarkers,
717 includeobsmarkers=includeobsmarkers,
718 )
718 )
719
719
720 chunks = _emit2(repo, entries, totalfilesize)
720 chunks = _emit2(repo, entries, totalfilesize)
721 first = next(chunks)
721 first = next(chunks)
722 assert first is None
722 assert first is None
723 _test_sync_point_walk_1(repo)
723 _test_sync_point_walk_1(repo)
724 _test_sync_point_walk_2(repo)
724 _test_sync_point_walk_2(repo)
725
725
726 return len(entries), totalfilesize, chunks
726 return len(entries), totalfilesize, chunks
727
727
728
728
729 @contextlib.contextmanager
729 @contextlib.contextmanager
730 def nested(*ctxs):
730 def nested(*ctxs):
731 this = ctxs[0]
731 this = ctxs[0]
732 rest = ctxs[1:]
732 rest = ctxs[1:]
733 with this:
733 with this:
734 if rest:
734 if rest:
735 with nested(*rest):
735 with nested(*rest):
736 yield
736 yield
737 else:
737 else:
738 yield
738 yield
739
739
740
740
741 def consumev2(repo, fp, filecount, filesize):
741 def consumev2(repo, fp, filecount, filesize):
742 """Apply the contents from a version 2 streaming clone.
742 """Apply the contents from a version 2 streaming clone.
743
743
744 Data is read from an object that only needs to provide a ``read(size)``
744 Data is read from an object that only needs to provide a ``read(size)``
745 method.
745 method.
746 """
746 """
747 with repo.lock():
747 with repo.lock():
748 repo.ui.status(
748 repo.ui.status(
749 _(b'%d files to transfer, %s of data\n')
749 _(b'%d files to transfer, %s of data\n')
750 % (filecount, util.bytecount(filesize))
750 % (filecount, util.bytecount(filesize))
751 )
751 )
752
752
753 start = util.timer()
753 start = util.timer()
754 progress = repo.ui.makeprogress(
754 progress = repo.ui.makeprogress(
755 _(b'clone'), total=filesize, unit=_(b'bytes')
755 _(b'clone'), total=filesize, unit=_(b'bytes')
756 )
756 )
757 progress.update(0)
757 progress.update(0)
758
758
759 vfsmap = _makemap(repo)
759 vfsmap = _makemap(repo)
760 # we keep repo.vfs out of the map on purpose, there are too many dangers
760 # we keep repo.vfs out of the map on purpose, there are too many dangers
761 # there (eg: .hg/hgrc),
761 # there (eg: .hg/hgrc),
762 #
762 #
763 # this assert is duplicated (from _makemap) as the author might think this
763 # this assert is duplicated (from _makemap) as the author might think this
764 # is fine, while it is really not.
764 # is fine, while it is really not.
765 if repo.vfs in vfsmap.values():
765 if repo.vfs in vfsmap.values():
766 raise error.ProgrammingError(
766 raise error.ProgrammingError(
767 b'repo.vfs must not be added to vfsmap for security reasons'
767 b'repo.vfs must not be added to vfsmap for security reasons'
768 )
768 )
769
769
770 with repo.transaction(b'clone'):
770 with repo.transaction(b'clone'):
771 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
771 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
772 with nested(*ctxs):
772 with nested(*ctxs):
773 for i in range(filecount):
773 for i in range(filecount):
774 src = util.readexactly(fp, 1)
774 src = util.readexactly(fp, 1)
775 vfs = vfsmap[src]
775 vfs = vfsmap[src]
776 namelen = util.uvarintdecodestream(fp)
776 namelen = util.uvarintdecodestream(fp)
777 datalen = util.uvarintdecodestream(fp)
777 datalen = util.uvarintdecodestream(fp)
778
778
779 name = util.readexactly(fp, namelen)
779 name = util.readexactly(fp, namelen)
780
780
781 if repo.ui.debugflag:
781 if repo.ui.debugflag:
782 repo.ui.debug(
782 repo.ui.debug(
783 b'adding [%s] %s (%s)\n'
783 b'adding [%s] %s (%s)\n'
784 % (src, name, util.bytecount(datalen))
784 % (src, name, util.bytecount(datalen))
785 )
785 )
786
786
787 with vfs(name, b'w') as ofp:
787 with vfs(name, b'w') as ofp:
788 for chunk in util.filechunkiter(fp, limit=datalen):
788 for chunk in util.filechunkiter(fp, limit=datalen):
789 progress.increment(step=len(chunk))
789 progress.increment(step=len(chunk))
790 ofp.write(chunk)
790 ofp.write(chunk)
791
791
792 # force @filecache properties to be reloaded from
792 # force @filecache properties to be reloaded from
793 # streamclone-ed file at next access
793 # streamclone-ed file at next access
794 repo.invalidate(clearfilecache=True)
794 repo.invalidate(clearfilecache=True)
795
795
796 elapsed = util.timer() - start
796 elapsed = util.timer() - start
797 if elapsed <= 0:
797 if elapsed <= 0:
798 elapsed = 0.001
798 elapsed = 0.001
799 repo.ui.status(
799 repo.ui.status(
800 _(b'transferred %s in %.1f seconds (%s/sec)\n')
800 _(b'transferred %s in %.1f seconds (%s/sec)\n')
801 % (
801 % (
802 util.bytecount(progress.pos),
802 util.bytecount(progress.pos),
803 elapsed,
803 elapsed,
804 util.bytecount(progress.pos / elapsed),
804 util.bytecount(progress.pos / elapsed),
805 )
805 )
806 )
806 )
807 progress.complete()
807 progress.complete()
808
808
809
809
810 def applybundlev2(repo, fp, filecount, filesize, requirements):
810 def applybundlev2(repo, fp, filecount, filesize, requirements):
811 from . import localrepo
811 from . import localrepo
812
812
813 missingreqs = [r for r in requirements if r not in repo.supported]
813 missingreqs = [r for r in requirements if r not in repo.supported]
814 if missingreqs:
814 if missingreqs:
815 raise error.Abort(
815 raise error.Abort(
816 _(b'unable to apply stream clone: unsupported format: %s')
816 _(b'unable to apply stream clone: unsupported format: %s')
817 % b', '.join(sorted(missingreqs))
817 % b', '.join(sorted(missingreqs))
818 )
818 )
819
819
820 consumev2(repo, fp, filecount, filesize)
820 consumev2(repo, fp, filecount, filesize)
821
821
822 repo.requirements = new_stream_clone_requirements(
822 repo.requirements = new_stream_clone_requirements(
823 repo.supportedformats,
823 repo.supportedformats,
824 repo.requirements,
824 repo.requirements,
825 requirements,
825 requirements,
826 )
826 )
827 repo.svfs.options = localrepo.resolvestorevfsoptions(
827 repo.svfs.options = localrepo.resolvestorevfsoptions(
828 repo.ui, repo.requirements, repo.features
828 repo.ui, repo.requirements, repo.features
829 )
829 )
830 scmutil.writereporequirements(repo)
830 scmutil.writereporequirements(repo)
831
831
832
832
833 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
833 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
834 hardlink = [True]
834 hardlink = [True]
835
835
836 def copy_used():
836 def copy_used():
837 hardlink[0] = False
837 hardlink[0] = False
838 progress.topic = _(b'copying')
838 progress.topic = _(b'copying')
839
839
840 for k, path, size in entries:
840 for k, path, size in entries:
841 src_vfs = src_vfs_map[k]
841 src_vfs = src_vfs_map[k]
842 dst_vfs = dst_vfs_map[k]
842 dst_vfs = dst_vfs_map[k]
843 src_path = src_vfs.join(path)
843 src_path = src_vfs.join(path)
844 dst_path = dst_vfs.join(path)
844 dst_path = dst_vfs.join(path)
845 # We cannot use dirname and makedirs of dst_vfs here because the store
845 # We cannot use dirname and makedirs of dst_vfs here because the store
846 # encoding confuses them. See issue 6581 for details.
846 # encoding confuses them. See issue 6581 for details.
847 dirname = os.path.dirname(dst_path)
847 dirname = os.path.dirname(dst_path)
848 if not os.path.exists(dirname):
848 if not os.path.exists(dirname):
849 util.makedirs(dirname)
849 util.makedirs(dirname)
850 dst_vfs.register_file(path)
850 dst_vfs.register_file(path)
851 # XXX we could use the #nb_bytes argument.
851 # XXX we could use the #nb_bytes argument.
852 util.copyfile(
852 util.copyfile(
853 src_path,
853 src_path,
854 dst_path,
854 dst_path,
855 hardlink=hardlink[0],
855 hardlink=hardlink[0],
856 no_hardlink_cb=copy_used,
856 no_hardlink_cb=copy_used,
857 check_fs_hardlink=False,
857 check_fs_hardlink=False,
858 )
858 )
859 progress.increment()
859 progress.increment()
860 return hardlink[0]
860 return hardlink[0]
861
861
862
862
863 def local_copy(src_repo, dest_repo):
863 def local_copy(src_repo, dest_repo):
864 """copy all content from one local repository to another
864 """copy all content from one local repository to another
865
865
866 This is useful for local clone"""
866 This is useful for local clone"""
867 src_store_requirements = {
867 src_store_requirements = {
868 r
868 r
869 for r in src_repo.requirements
869 for r in src_repo.requirements
870 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
870 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
871 }
871 }
872 dest_store_requirements = {
872 dest_store_requirements = {
873 r
873 r
874 for r in dest_repo.requirements
874 for r in dest_repo.requirements
875 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
875 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
876 }
876 }
877 assert src_store_requirements == dest_store_requirements
877 assert src_store_requirements == dest_store_requirements
878
878
879 with dest_repo.lock():
879 with dest_repo.lock():
880 with src_repo.lock():
880 with src_repo.lock():
881
881
882 # bookmarks are not integrated into the streaming as they might use the
882 # bookmarks are not integrated into the streaming as they might use the
883 # `repo.vfs` and there is too much sensitive data accessible
883 # `repo.vfs` and there is too much sensitive data accessible
884 # through `repo.vfs` to expose it to a streaming clone.
884 # through `repo.vfs` to expose it to a streaming clone.
885 src_book_vfs = bookmarks.bookmarksvfs(src_repo)
885 src_book_vfs = bookmarks.bookmarksvfs(src_repo)
886 srcbookmarks = src_book_vfs.join(b'bookmarks')
886 srcbookmarks = src_book_vfs.join(b'bookmarks')
887 bm_count = 0
887 bm_count = 0
888 if os.path.exists(srcbookmarks):
888 if os.path.exists(srcbookmarks):
889 bm_count = 1
889 bm_count = 1
890
890
891 entries, totalfilesize = _v2_walk(
891 entries, totalfilesize = _v2_walk(
892 src_repo,
892 src_repo,
893 includes=None,
893 includes=None,
894 excludes=None,
894 excludes=None,
895 includeobsmarkers=True,
895 includeobsmarkers=True,
896 )
896 )
897 src_vfs_map = _makemap(src_repo)
897 src_vfs_map = _makemap(src_repo)
898 dest_vfs_map = _makemap(dest_repo)
898 dest_vfs_map = _makemap(dest_repo)
899 progress = src_repo.ui.makeprogress(
899 progress = src_repo.ui.makeprogress(
900 topic=_(b'linking'),
900 topic=_(b'linking'),
901 total=len(entries) + bm_count,
901 total=len(entries) + bm_count,
902 unit=_(b'files'),
902 unit=_(b'files'),
903 )
903 )
904 # copy files
904 # copy files
905 #
905 #
906 # We could copy the full file while the source repository is locked
906 # We could copy the full file while the source repository is locked
907 # and the other one without the lock. However, in the linking case,
907 # and the other one without the lock. However, in the linking case,
908 # this would also require checks that nobody is appending any data
908 # this would also require checks that nobody is appending any data
909 # to the files while we do the clone, so this is not done yet. We
909 # to the files while we do the clone, so this is not done yet. We
910 # could do this blindly when copying files.
910 # could do this blindly when copying files.
911 files = ((k, path, size) for k, path, ftype, size in entries)
911 files = ((k, path, size) for k, path, ftype, size in entries)
912 hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
912 hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
913
913
914 # copy bookmarks over
914 # copy bookmarks over
915 if bm_count:
915 if bm_count:
916 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
916 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
917 dstbookmarks = dst_book_vfs.join(b'bookmarks')
917 dstbookmarks = dst_book_vfs.join(b'bookmarks')
918 util.copyfile(srcbookmarks, dstbookmarks)
918 util.copyfile(srcbookmarks, dstbookmarks)
919 progress.complete()
919 progress.complete()
920 if hardlink:
920 if hardlink:
921 msg = b'linked %d files\n'
921 msg = b'linked %d files\n'
922 else:
922 else:
923 msg = b'copied %d files\n'
923 msg = b'copied %d files\n'
924 src_repo.ui.debug(msg % (len(entries) + bm_count))
924 src_repo.ui.debug(msg % (len(entries) + bm_count))
925
925
926 with dest_repo.transaction(b"localclone") as tr:
926 with dest_repo.transaction(b"localclone") as tr:
927 dest_repo.store.write(tr)
927 dest_repo.store.write(tr)
928
928
929 # clean up transaction files as they do not make sense
929 # clean up transaction files as they do not make sense
930 undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
930 undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
931 undo_files.extend(dest_repo.undofiles())
931 undo_files.extend(dest_repo.undofiles())
932 for undovfs, undofile in undo_files:
932 for undovfs, undofile in undo_files:
933 try:
933 try:
934 undovfs.unlink(undofile)
934 undovfs.unlink(undofile)
935 except OSError as e:
935 except OSError as e:
936 if e.errno != errno.ENOENT:
936 if e.errno != errno.ENOENT:
937 msg = _(b'error removing %s: %s\n')
937 msg = _(b'error removing %s: %s\n')
938 path = undovfs.join(undofile)
938 path = undovfs.join(undofile)
939 e_msg = stringutil.forcebytestr(e)
939 e_msg = stringutil.forcebytestr(e)
940 msg %= (path, e_msg)
940 msg %= (path, e_msg)
941 dest_repo.ui.warn(msg)
941 dest_repo.ui.warn(msg)
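The cleanup loop that closes the hunk above is a best-effort unlink: a file that is already gone (ENOENT) is silently ignored, while any other OS error is reported without aborting. A minimal standalone sketch of that pattern, using plain os calls and hypothetical file names rather than the repository vfs objects used in streamclone.py:

import errno
import os

def remove_if_present(paths):
    # Best-effort removal: tolerate files that are already missing,
    # warn about anything else instead of raising.
    for path in paths:
        try:
            os.unlink(path)
        except OSError as e:
            if e.errno != errno.ENOENT:
                print('error removing %s: %s' % (path, e))

# hypothetical leftover transaction files
remove_if_present(['.hg/store/undo', '.hg/store/undo.backupfiles'])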
@@ -1,413 +1,415 b''
1 #require serve
1 #require serve
2
2
3 This test is a duplicate of 'test-http.t', feel free to factor out
3 This test is a duplicate of 'test-http.t', feel free to factor out
4 parts that are not bundle1/bundle2 specific.
4 parts that are not bundle1/bundle2 specific.
5
5
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [devel]
7 > [devel]
8 > # This test is dedicated to interaction through old bundle
8 > # This test is dedicated to interaction through old bundle
9 > legacy.exchange = bundle1
9 > legacy.exchange = bundle1
10 > EOF
10 > EOF
11
11
12 $ hg init test
12 $ hg init test
13 $ cd test
13 $ cd test
14 $ echo foo>foo
14 $ echo foo>foo
15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
16 $ echo foo>foo.d/foo
16 $ echo foo>foo.d/foo
17 $ echo bar>foo.d/bAr.hg.d/BaR
17 $ echo bar>foo.d/bAr.hg.d/BaR
18 $ echo bar>foo.d/baR.d.hg/bAR
18 $ echo bar>foo.d/baR.d.hg/bAR
19 $ hg commit -A -m 1
19 $ hg commit -A -m 1
20 adding foo
20 adding foo
21 adding foo.d/bAr.hg.d/BaR
21 adding foo.d/bAr.hg.d/BaR
22 adding foo.d/baR.d.hg/bAR
22 adding foo.d/baR.d.hg/bAR
23 adding foo.d/foo
23 adding foo.d/foo
24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
26
26
27 Test server address cannot be reused
27 Test server address cannot be reused
28
28
29 $ hg serve -p $HGPORT1 2>&1
29 $ hg serve -p $HGPORT1 2>&1
30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
31 [255]
31 [255]
32
32
33 $ cd ..
33 $ cd ..
34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
35
35
36 clone via stream
36 clone via stream
37
37
38 #if no-reposimplestore
38 #if no-reposimplestore
39 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
39 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
40 streaming all changes
40 streaming all changes
41 6 files to transfer, 606 bytes of data (no-zstd !)
41 6 files to transfer, 606 bytes of data (no-zstd !)
42 6 files to transfer, 608 bytes of data (zstd !)
42 6 files to transfer, 608 bytes of data (zstd !)
43 transferred * bytes in * seconds (*/sec) (glob)
43 transferred * bytes in * seconds (*/sec) (glob)
44 searching for changes
44 searching for changes
45 no changes found
45 no changes found
46 updating to branch default
46 updating to branch default
47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 $ hg verify -R copy
48 $ hg verify -R copy
49 checking changesets
49 checking changesets
50 checking manifests
50 checking manifests
51 crosschecking files in changesets and manifests
51 crosschecking files in changesets and manifests
52 checking files
52 checking files
53 checked 1 changesets with 4 changes to 4 files
53 checked 1 changesets with 4 changes to 4 files
54 #endif
54 #endif
55
55
56 try to clone via stream, should use pull instead
56 try to clone via stream, should use pull instead
57
57
58 $ hg clone --stream http://localhost:$HGPORT1/ copy2
58 $ hg clone --stream http://localhost:$HGPORT1/ copy2
59 warning: stream clone requested but server has them disabled
59 warning: stream clone requested but server has them disabled
60 requesting all changes
60 requesting all changes
61 adding changesets
61 adding changesets
62 adding manifests
62 adding manifests
63 adding file changes
63 adding file changes
64 added 1 changesets with 4 changes to 4 files
64 added 1 changesets with 4 changes to 4 files
65 new changesets 8b6053c928fe
65 new changesets 8b6053c928fe
66 updating to branch default
66 updating to branch default
67 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
67 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
68
68
69 try to clone via stream but missing requirements, so should use pull instead
69 try to clone via stream but missing requirements, so should use pull instead
70
70
71 $ cat > $TESTTMP/removesupportedformat.py << EOF
71 $ cat > $TESTTMP/removesupportedformat.py << EOF
72 > from mercurial import localrepo
72 > from mercurial import localrepo
73 > def extsetup(ui):
73 > def reposetup(ui, repo):
74 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
74 > local = repo.local()
75 > if local is not None:
76 > local.supported.remove(b'generaldelta')
75 > EOF
77 > EOF
76
78
77 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
79 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
78 warning: stream clone requested but client is missing requirements: generaldelta
80 warning: stream clone requested but client is missing requirements: generaldelta
79 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
81 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
80 requesting all changes
82 requesting all changes
81 adding changesets
83 adding changesets
82 adding manifests
84 adding manifests
83 adding file changes
85 adding file changes
84 added 1 changesets with 4 changes to 4 files
86 added 1 changesets with 4 changes to 4 files
85 new changesets 8b6053c928fe
87 new changesets 8b6053c928fe
86 updating to branch default
88 updating to branch default
87 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
88
90
89 clone via pull
91 clone via pull
90
92
91 $ hg clone http://localhost:$HGPORT1/ copy-pull
93 $ hg clone http://localhost:$HGPORT1/ copy-pull
92 requesting all changes
94 requesting all changes
93 adding changesets
95 adding changesets
94 adding manifests
96 adding manifests
95 adding file changes
97 adding file changes
96 added 1 changesets with 4 changes to 4 files
98 added 1 changesets with 4 changes to 4 files
97 new changesets 8b6053c928fe
99 new changesets 8b6053c928fe
98 updating to branch default
100 updating to branch default
99 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
101 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 $ hg verify -R copy-pull
102 $ hg verify -R copy-pull
101 checking changesets
103 checking changesets
102 checking manifests
104 checking manifests
103 crosschecking files in changesets and manifests
105 crosschecking files in changesets and manifests
104 checking files
106 checking files
105 checked 1 changesets with 4 changes to 4 files
107 checked 1 changesets with 4 changes to 4 files
106 $ cd test
108 $ cd test
107 $ echo bar > bar
109 $ echo bar > bar
108 $ hg commit -A -d '1 0' -m 2
110 $ hg commit -A -d '1 0' -m 2
109 adding bar
111 adding bar
110 $ cd ..
112 $ cd ..
111
113
112 clone over http with --update
114 clone over http with --update
113
115
114 $ hg clone http://localhost:$HGPORT1/ updated --update 0
116 $ hg clone http://localhost:$HGPORT1/ updated --update 0
115 requesting all changes
117 requesting all changes
116 adding changesets
118 adding changesets
117 adding manifests
119 adding manifests
118 adding file changes
120 adding file changes
119 added 2 changesets with 5 changes to 5 files
121 added 2 changesets with 5 changes to 5 files
120 new changesets 8b6053c928fe:5fed3813f7f5
122 new changesets 8b6053c928fe:5fed3813f7f5
121 updating to branch default
123 updating to branch default
122 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
124 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 $ hg log -r . -R updated
125 $ hg log -r . -R updated
124 changeset: 0:8b6053c928fe
126 changeset: 0:8b6053c928fe
125 user: test
127 user: test
126 date: Thu Jan 01 00:00:00 1970 +0000
128 date: Thu Jan 01 00:00:00 1970 +0000
127 summary: 1
129 summary: 1
128
130
129 $ rm -rf updated
131 $ rm -rf updated
130
132
131 incoming via HTTP
133 incoming via HTTP
132
134
133 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
135 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
134 adding changesets
136 adding changesets
135 adding manifests
137 adding manifests
136 adding file changes
138 adding file changes
137 added 1 changesets with 4 changes to 4 files
139 added 1 changesets with 4 changes to 4 files
138 new changesets 8b6053c928fe
140 new changesets 8b6053c928fe
139 updating to branch default
141 updating to branch default
140 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 $ cd partial
143 $ cd partial
142 $ touch LOCAL
144 $ touch LOCAL
143 $ hg ci -qAm LOCAL
145 $ hg ci -qAm LOCAL
144 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
146 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
145 comparing with http://localhost:$HGPORT1/
147 comparing with http://localhost:$HGPORT1/
146 searching for changes
148 searching for changes
147 2
149 2
148 $ cd ..
150 $ cd ..
149
151
150 pull
152 pull
151
153
152 $ cd copy-pull
154 $ cd copy-pull
153 $ cat >> .hg/hgrc <<EOF
155 $ cat >> .hg/hgrc <<EOF
154 > [hooks]
156 > [hooks]
155 > changegroup = sh -c "printenv.py --line changegroup"
157 > changegroup = sh -c "printenv.py --line changegroup"
156 > EOF
158 > EOF
157 $ hg pull
159 $ hg pull
158 pulling from http://localhost:$HGPORT1/
160 pulling from http://localhost:$HGPORT1/
159 searching for changes
161 searching for changes
160 adding changesets
162 adding changesets
161 adding manifests
163 adding manifests
162 adding file changes
164 adding file changes
163 added 1 changesets with 1 changes to 1 files
165 added 1 changesets with 1 changes to 1 files
164 new changesets 5fed3813f7f5
166 new changesets 5fed3813f7f5
165 changegroup hook: HG_HOOKNAME=changegroup
167 changegroup hook: HG_HOOKNAME=changegroup
166 HG_HOOKTYPE=changegroup
168 HG_HOOKTYPE=changegroup
167 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
169 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
168 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
170 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
169 HG_SOURCE=pull
171 HG_SOURCE=pull
170 HG_TXNID=TXN:$ID$
172 HG_TXNID=TXN:$ID$
171 HG_TXNNAME=pull
173 HG_TXNNAME=pull
172 http://localhost:$HGPORT1/
174 http://localhost:$HGPORT1/
173 HG_URL=http://localhost:$HGPORT1/
175 HG_URL=http://localhost:$HGPORT1/
174
176
175 (run 'hg update' to get a working copy)
177 (run 'hg update' to get a working copy)
176 $ cd ..
178 $ cd ..
177
179
178 clone from invalid URL
180 clone from invalid URL
179
181
180 $ hg clone http://localhost:$HGPORT/bad
182 $ hg clone http://localhost:$HGPORT/bad
181 abort: HTTP Error 404: Not Found
183 abort: HTTP Error 404: Not Found
182 [100]
184 [100]
183
185
184 test http authentication
186 test http authentication
185 + use the same server to test server side streaming preference
187 + use the same server to test server side streaming preference
186
188
187 $ cd test
189 $ cd test
188
190
189 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
191 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
190 > --pid-file=pid --config server.preferuncompressed=True \
192 > --pid-file=pid --config server.preferuncompressed=True \
191 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
193 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
192 $ cat pid >> $DAEMON_PIDS
194 $ cat pid >> $DAEMON_PIDS
193
195
194 $ cat << EOF > get_pass.py
196 $ cat << EOF > get_pass.py
195 > from mercurial import util
197 > from mercurial import util
196 > def newgetpass():
198 > def newgetpass():
197 > return "pass"
199 > return "pass"
198 > util.get_password = newgetpass
200 > util.get_password = newgetpass
199 > EOF
201 > EOF
200
202
201 $ hg id http://localhost:$HGPORT2/
203 $ hg id http://localhost:$HGPORT2/
202 abort: http authorization required for http://localhost:$HGPORT2/
204 abort: http authorization required for http://localhost:$HGPORT2/
203 [255]
205 [255]
204 $ hg id http://localhost:$HGPORT2/
206 $ hg id http://localhost:$HGPORT2/
205 abort: http authorization required for http://localhost:$HGPORT2/
207 abort: http authorization required for http://localhost:$HGPORT2/
206 [255]
208 [255]
207 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
209 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
208 http authorization required for http://localhost:$HGPORT2/
210 http authorization required for http://localhost:$HGPORT2/
209 realm: mercurial
211 realm: mercurial
210 user: user
212 user: user
211 password: 5fed3813f7f5
213 password: 5fed3813f7f5
212 $ hg id http://user:pass@localhost:$HGPORT2/
214 $ hg id http://user:pass@localhost:$HGPORT2/
213 5fed3813f7f5
215 5fed3813f7f5
214 $ echo '[auth]' >> .hg/hgrc
216 $ echo '[auth]' >> .hg/hgrc
215 $ echo 'l.schemes=http' >> .hg/hgrc
217 $ echo 'l.schemes=http' >> .hg/hgrc
216 $ echo 'l.prefix=lo' >> .hg/hgrc
218 $ echo 'l.prefix=lo' >> .hg/hgrc
217 $ echo 'l.username=user' >> .hg/hgrc
219 $ echo 'l.username=user' >> .hg/hgrc
218 $ echo 'l.password=pass' >> .hg/hgrc
220 $ echo 'l.password=pass' >> .hg/hgrc
219 $ hg id http://localhost:$HGPORT2/
221 $ hg id http://localhost:$HGPORT2/
220 5fed3813f7f5
222 5fed3813f7f5
221 $ hg id http://localhost:$HGPORT2/
223 $ hg id http://localhost:$HGPORT2/
222 5fed3813f7f5
224 5fed3813f7f5
223 $ hg id http://user@localhost:$HGPORT2/
225 $ hg id http://user@localhost:$HGPORT2/
224 5fed3813f7f5
226 5fed3813f7f5
225
227
226 #if no-reposimplestore
228 #if no-reposimplestore
227 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
229 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
228 streaming all changes
230 streaming all changes
229 7 files to transfer, 916 bytes of data (no-zstd !)
231 7 files to transfer, 916 bytes of data (no-zstd !)
230 7 files to transfer, 919 bytes of data (zstd !)
232 7 files to transfer, 919 bytes of data (zstd !)
231 transferred * bytes in * seconds (*/sec) (glob)
233 transferred * bytes in * seconds (*/sec) (glob)
232 searching for changes
234 searching for changes
233 no changes found
235 no changes found
234 updating to branch default
236 updating to branch default
235 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
237 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
236 #endif
238 #endif
237
239
238 --pull should override server's preferuncompressed
240 --pull should override server's preferuncompressed
239
241
240 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
242 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
241 requesting all changes
243 requesting all changes
242 adding changesets
244 adding changesets
243 adding manifests
245 adding manifests
244 adding file changes
246 adding file changes
245 added 2 changesets with 5 changes to 5 files
247 added 2 changesets with 5 changes to 5 files
246 new changesets 8b6053c928fe:5fed3813f7f5
248 new changesets 8b6053c928fe:5fed3813f7f5
247 updating to branch default
249 updating to branch default
248 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
250 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
249
251
250 $ hg id http://user2@localhost:$HGPORT2/
252 $ hg id http://user2@localhost:$HGPORT2/
251 abort: http authorization required for http://localhost:$HGPORT2/
253 abort: http authorization required for http://localhost:$HGPORT2/
252 [255]
254 [255]
253 $ hg id http://user:pass2@localhost:$HGPORT2/
255 $ hg id http://user:pass2@localhost:$HGPORT2/
254 abort: HTTP Error 403: no
256 abort: HTTP Error 403: no
255 [100]
257 [100]
256
258
257 $ hg -R dest-pull tag -r tip top
259 $ hg -R dest-pull tag -r tip top
258 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
260 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
259 pushing to http://user:***@localhost:$HGPORT2/
261 pushing to http://user:***@localhost:$HGPORT2/
260 searching for changes
262 searching for changes
261 remote: adding changesets
263 remote: adding changesets
262 remote: adding manifests
264 remote: adding manifests
263 remote: adding file changes
265 remote: adding file changes
264 remote: added 1 changesets with 1 changes to 1 files
266 remote: added 1 changesets with 1 changes to 1 files
265 $ hg rollback -q
267 $ hg rollback -q
266
268
267 $ sed 's/.*] "/"/' < ../access.log
269 $ sed 's/.*] "/"/' < ../access.log
268 "GET /?cmd=capabilities HTTP/1.1" 401 -
270 "GET /?cmd=capabilities HTTP/1.1" 401 -
269 "GET /?cmd=capabilities HTTP/1.1" 401 -
271 "GET /?cmd=capabilities HTTP/1.1" 401 -
270 "GET /?cmd=capabilities HTTP/1.1" 401 -
272 "GET /?cmd=capabilities HTTP/1.1" 401 -
271 "GET /?cmd=capabilities HTTP/1.1" 200 -
273 "GET /?cmd=capabilities HTTP/1.1" 200 -
272 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
274 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
273 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
274 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
276 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
275 "GET /?cmd=capabilities HTTP/1.1" 401 -
277 "GET /?cmd=capabilities HTTP/1.1" 401 -
276 "GET /?cmd=capabilities HTTP/1.1" 200 -
278 "GET /?cmd=capabilities HTTP/1.1" 200 -
277 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
279 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
278 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
280 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
279 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
281 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
280 "GET /?cmd=capabilities HTTP/1.1" 401 -
282 "GET /?cmd=capabilities HTTP/1.1" 401 -
281 "GET /?cmd=capabilities HTTP/1.1" 200 -
283 "GET /?cmd=capabilities HTTP/1.1" 200 -
282 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
284 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
283 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
285 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
284 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
286 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
285 "GET /?cmd=capabilities HTTP/1.1" 401 -
287 "GET /?cmd=capabilities HTTP/1.1" 401 -
286 "GET /?cmd=capabilities HTTP/1.1" 200 -
288 "GET /?cmd=capabilities HTTP/1.1" 200 -
287 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
289 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
288 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
290 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
289 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
291 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
290 "GET /?cmd=capabilities HTTP/1.1" 401 -
292 "GET /?cmd=capabilities HTTP/1.1" 401 -
291 "GET /?cmd=capabilities HTTP/1.1" 200 -
293 "GET /?cmd=capabilities HTTP/1.1" 200 -
292 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
294 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
293 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
295 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
294 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
296 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
295 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
297 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
296 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
298 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
297 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
299 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
298 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
300 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
299 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
301 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
300 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
302 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
301 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
303 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
302 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
304 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
303 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
305 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
304 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
306 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
305 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
307 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
306 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
308 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
307 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
309 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
308 "GET /?cmd=capabilities HTTP/1.1" 401 -
310 "GET /?cmd=capabilities HTTP/1.1" 401 -
309 "GET /?cmd=capabilities HTTP/1.1" 401 -
311 "GET /?cmd=capabilities HTTP/1.1" 401 -
310 "GET /?cmd=capabilities HTTP/1.1" 403 -
312 "GET /?cmd=capabilities HTTP/1.1" 403 -
311 "GET /?cmd=capabilities HTTP/1.1" 401 -
313 "GET /?cmd=capabilities HTTP/1.1" 401 -
312 "GET /?cmd=capabilities HTTP/1.1" 200 -
314 "GET /?cmd=capabilities HTTP/1.1" 200 -
313 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
316 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
316 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
318 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
319 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
318 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
320 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
319 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
321 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
320
322
321 $ cd ..
323 $ cd ..
322
324
323 clone of serve with repo in root and unserved subrepo (issue2970)
325 clone of serve with repo in root and unserved subrepo (issue2970)
324
326
325 $ hg --cwd test init sub
327 $ hg --cwd test init sub
326 $ echo empty > test/sub/empty
328 $ echo empty > test/sub/empty
327 $ hg --cwd test/sub add empty
329 $ hg --cwd test/sub add empty
328 $ hg --cwd test/sub commit -qm 'add empty'
330 $ hg --cwd test/sub commit -qm 'add empty'
329 $ hg --cwd test/sub tag -r 0 something
331 $ hg --cwd test/sub tag -r 0 something
330 $ echo sub = sub > test/.hgsub
332 $ echo sub = sub > test/.hgsub
331 $ hg --cwd test add .hgsub
333 $ hg --cwd test add .hgsub
332 $ hg --cwd test commit -qm 'add subrepo'
334 $ hg --cwd test commit -qm 'add subrepo'
333 $ hg clone http://localhost:$HGPORT noslash-clone
335 $ hg clone http://localhost:$HGPORT noslash-clone
334 requesting all changes
336 requesting all changes
335 adding changesets
337 adding changesets
336 adding manifests
338 adding manifests
337 adding file changes
339 adding file changes
338 added 3 changesets with 7 changes to 7 files
340 added 3 changesets with 7 changes to 7 files
339 new changesets 8b6053c928fe:56f9bc90cce6
341 new changesets 8b6053c928fe:56f9bc90cce6
340 updating to branch default
342 updating to branch default
341 cloning subrepo sub from http://localhost:$HGPORT/sub
343 cloning subrepo sub from http://localhost:$HGPORT/sub
342 abort: HTTP Error 404: Not Found
344 abort: HTTP Error 404: Not Found
343 [100]
345 [100]
344 $ hg clone http://localhost:$HGPORT/ slash-clone
346 $ hg clone http://localhost:$HGPORT/ slash-clone
345 requesting all changes
347 requesting all changes
346 adding changesets
348 adding changesets
347 adding manifests
349 adding manifests
348 adding file changes
350 adding file changes
349 added 3 changesets with 7 changes to 7 files
351 added 3 changesets with 7 changes to 7 files
350 new changesets 8b6053c928fe:56f9bc90cce6
352 new changesets 8b6053c928fe:56f9bc90cce6
351 updating to branch default
353 updating to branch default
352 cloning subrepo sub from http://localhost:$HGPORT/sub
354 cloning subrepo sub from http://localhost:$HGPORT/sub
353 abort: HTTP Error 404: Not Found
355 abort: HTTP Error 404: Not Found
354 [100]
356 [100]
355
357
356 check error log
358 check error log
357
359
358 $ cat error.log
360 $ cat error.log
359
361
360 Check error reporting while pulling/cloning
362 Check error reporting while pulling/cloning
361
363
362 $ $RUNTESTDIR/killdaemons.py
364 $ $RUNTESTDIR/killdaemons.py
363 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
365 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
364 $ cat hg3.pid >> $DAEMON_PIDS
366 $ cat hg3.pid >> $DAEMON_PIDS
365 $ hg clone http://localhost:$HGPORT/ abort-clone
367 $ hg clone http://localhost:$HGPORT/ abort-clone
366 requesting all changes
368 requesting all changes
367 abort: remote error:
369 abort: remote error:
368 this is an exercise
370 this is an exercise
369 [100]
371 [100]
370 $ cat error.log
372 $ cat error.log
371
373
372 disable pull-based clones
374 disable pull-based clones
373
375
374 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
376 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
375 $ cat hg4.pid >> $DAEMON_PIDS
377 $ cat hg4.pid >> $DAEMON_PIDS
376 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
378 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
377 requesting all changes
379 requesting all changes
378 abort: remote error:
380 abort: remote error:
379 server has pull-based clones disabled
381 server has pull-based clones disabled
380 [100]
382 [100]
381
383
382 #if no-reposimplestore
384 #if no-reposimplestore
383 ... but keep stream clones working
385 ... but keep stream clones working
384
386
385 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
387 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
386 streaming all changes
388 streaming all changes
387 * files to transfer, * of data (glob)
389 * files to transfer, * of data (glob)
388 transferred 1.36 KB in * seconds (* */sec) (glob) (no-zstd !)
390 transferred 1.36 KB in * seconds (* */sec) (glob) (no-zstd !)
389 transferred 1.38 KB in * seconds (* */sec) (glob) (zstd !)
391 transferred 1.38 KB in * seconds (* */sec) (glob) (zstd !)
390 searching for changes
392 searching for changes
391 no changes found
393 no changes found
392 #endif
394 #endif
393
395
394 ... and also keep partial clones and pulls working
396 ... and also keep partial clones and pulls working
395 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
397 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
396 adding changesets
398 adding changesets
397 adding manifests
399 adding manifests
398 adding file changes
400 adding file changes
399 added 1 changesets with 4 changes to 4 files
401 added 1 changesets with 4 changes to 4 files
400 new changesets 8b6053c928fe
402 new changesets 8b6053c928fe
401 updating to branch default
403 updating to branch default
402 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
404 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 $ hg pull -R test-partial-clone
405 $ hg pull -R test-partial-clone
404 pulling from http://localhost:$HGPORT1/
406 pulling from http://localhost:$HGPORT1/
405 searching for changes
407 searching for changes
406 adding changesets
408 adding changesets
407 adding manifests
409 adding manifests
408 adding file changes
410 adding file changes
409 added 2 changesets with 3 changes to 3 files
411 added 2 changesets with 3 changes to 3 files
410 new changesets 5fed3813f7f5:56f9bc90cce6
412 new changesets 5fed3813f7f5:56f9bc90cce6
411 (run 'hg update' to get a working copy)
413 (run 'hg update' to get a working copy)
412
414
413 $ cat error.log
415 $ cat error.log
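The removesupportedformat.py hunk earlier in this test (and repeated in the second test file below) replaces the old extsetup() monkey-patch of localrepo.localrepository.supportedformats with a reposetup() hook that edits the opened repository's supported set. Read as one piece, and with editorial comments added, the new inline extension amounts to:

from mercurial import localrepo  # kept by the test, though no longer used here

def reposetup(ui, repo):
    # Pretend this client cannot handle 'generaldelta': removing it from
    # the repository's supported set causes the requested stream clone to
    # be refused as "missing requirements" and to fall back to a regular pull.
    local = repo.local()
    if local is not None:
        local.supported.remove(b'generaldelta')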
@@ -1,617 +1,619 b''
1 #require serve
1 #require serve
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo>foo
5 $ echo foo>foo
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 $ echo foo>foo.d/foo
7 $ echo foo>foo.d/foo
8 $ echo bar>foo.d/bAr.hg.d/BaR
8 $ echo bar>foo.d/bAr.hg.d/BaR
9 $ echo bar>foo.d/baR.d.hg/bAR
9 $ echo bar>foo.d/baR.d.hg/bAR
10 $ hg commit -A -m 1
10 $ hg commit -A -m 1
11 adding foo
11 adding foo
12 adding foo.d/bAr.hg.d/BaR
12 adding foo.d/bAr.hg.d/BaR
13 adding foo.d/baR.d.hg/bAR
13 adding foo.d/baR.d.hg/bAR
14 adding foo.d/foo
14 adding foo.d/foo
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
17
17
18 Test server address cannot be reused
18 Test server address cannot be reused
19
19
20 $ hg serve -p $HGPORT1 2>&1
20 $ hg serve -p $HGPORT1 2>&1
21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
22 [255]
22 [255]
23
23
24 $ cd ..
24 $ cd ..
25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
26
26
27 clone via stream
27 clone via stream
28
28
29 #if no-reposimplestore
29 #if no-reposimplestore
30 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
30 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
31 streaming all changes
31 streaming all changes
32 9 files to transfer, 715 bytes of data (no-zstd !)
32 9 files to transfer, 715 bytes of data (no-zstd !)
33 9 files to transfer, 717 bytes of data (zstd !)
33 9 files to transfer, 717 bytes of data (zstd !)
34 transferred * bytes in * seconds (*/sec) (glob)
34 transferred * bytes in * seconds (*/sec) (glob)
35 updating to branch default
35 updating to branch default
36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 $ hg verify -R copy
37 $ hg verify -R copy
38 checking changesets
38 checking changesets
39 checking manifests
39 checking manifests
40 crosschecking files in changesets and manifests
40 crosschecking files in changesets and manifests
41 checking files
41 checking files
42 checked 1 changesets with 4 changes to 4 files
42 checked 1 changesets with 4 changes to 4 files
43 #endif
43 #endif
44
44
45 try to clone via stream, should use pull instead
45 try to clone via stream, should use pull instead
46
46
47 $ hg clone --stream http://localhost:$HGPORT1/ copy2
47 $ hg clone --stream http://localhost:$HGPORT1/ copy2
48 warning: stream clone requested but server has them disabled
48 warning: stream clone requested but server has them disabled
49 requesting all changes
49 requesting all changes
50 adding changesets
50 adding changesets
51 adding manifests
51 adding manifests
52 adding file changes
52 adding file changes
53 added 1 changesets with 4 changes to 4 files
53 added 1 changesets with 4 changes to 4 files
54 new changesets 8b6053c928fe
54 new changesets 8b6053c928fe
55 updating to branch default
55 updating to branch default
56 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
57
57
58 try to clone via stream but missing requirements, so should use pull instead
58 try to clone via stream but missing requirements, so should use pull instead
59
59
60 $ cat > $TESTTMP/removesupportedformat.py << EOF
60 $ cat > $TESTTMP/removesupportedformat.py << EOF
61 > from mercurial import localrepo
61 > from mercurial import localrepo
62 > def extsetup(ui):
62 > def reposetup(ui, repo):
63 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
63 > local = repo.local()
64 > if local is not None:
65 > local.supported.remove(b'generaldelta')
64 > EOF
66 > EOF
65
67
66 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
68 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
67 warning: stream clone requested but client is missing requirements: generaldelta
69 warning: stream clone requested but client is missing requirements: generaldelta
68 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
70 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
69 requesting all changes
71 requesting all changes
70 adding changesets
72 adding changesets
71 adding manifests
73 adding manifests
72 adding file changes
74 adding file changes
73 added 1 changesets with 4 changes to 4 files
75 added 1 changesets with 4 changes to 4 files
74 new changesets 8b6053c928fe
76 new changesets 8b6053c928fe
75 updating to branch default
77 updating to branch default
76 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
77
79
78 clone via pull
80 clone via pull
79
81
80 $ hg clone http://localhost:$HGPORT1/ copy-pull
82 $ hg clone http://localhost:$HGPORT1/ copy-pull
81 requesting all changes
83 requesting all changes
82 adding changesets
84 adding changesets
83 adding manifests
85 adding manifests
84 adding file changes
86 adding file changes
85 added 1 changesets with 4 changes to 4 files
87 added 1 changesets with 4 changes to 4 files
86 new changesets 8b6053c928fe
88 new changesets 8b6053c928fe
87 updating to branch default
89 updating to branch default
88 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 $ hg verify -R copy-pull
91 $ hg verify -R copy-pull
90 checking changesets
92 checking changesets
91 checking manifests
93 checking manifests
92 crosschecking files in changesets and manifests
94 crosschecking files in changesets and manifests
93 checking files
95 checking files
94 checked 1 changesets with 4 changes to 4 files
96 checked 1 changesets with 4 changes to 4 files
95 $ cd test
97 $ cd test
96 $ echo bar > bar
98 $ echo bar > bar
97 $ hg commit -A -d '1 0' -m 2
99 $ hg commit -A -d '1 0' -m 2
98 adding bar
100 adding bar
99 $ cd ..
101 $ cd ..
100
102
101 clone over http with --update
103 clone over http with --update
102
104
103 $ hg clone http://localhost:$HGPORT1/ updated --update 0
105 $ hg clone http://localhost:$HGPORT1/ updated --update 0
104 requesting all changes
106 requesting all changes
105 adding changesets
107 adding changesets
106 adding manifests
108 adding manifests
107 adding file changes
109 adding file changes
108 added 2 changesets with 5 changes to 5 files
110 added 2 changesets with 5 changes to 5 files
109 new changesets 8b6053c928fe:5fed3813f7f5
111 new changesets 8b6053c928fe:5fed3813f7f5
110 updating to branch default
112 updating to branch default
111 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 $ hg log -r . -R updated
114 $ hg log -r . -R updated
113 changeset: 0:8b6053c928fe
115 changeset: 0:8b6053c928fe
114 user: test
116 user: test
115 date: Thu Jan 01 00:00:00 1970 +0000
117 date: Thu Jan 01 00:00:00 1970 +0000
116 summary: 1
118 summary: 1
117
119
118 $ rm -rf updated
120 $ rm -rf updated
119
121
120 incoming via HTTP
122 incoming via HTTP
121
123
122 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
124 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
123 adding changesets
125 adding changesets
124 adding manifests
126 adding manifests
125 adding file changes
127 adding file changes
126 added 1 changesets with 4 changes to 4 files
128 added 1 changesets with 4 changes to 4 files
127 new changesets 8b6053c928fe
129 new changesets 8b6053c928fe
128 updating to branch default
130 updating to branch default
129 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
131 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 $ cd partial
132 $ cd partial
131 $ touch LOCAL
133 $ touch LOCAL
132 $ hg ci -qAm LOCAL
134 $ hg ci -qAm LOCAL
133 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
135 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
134 comparing with http://localhost:$HGPORT1/
136 comparing with http://localhost:$HGPORT1/
135 searching for changes
137 searching for changes
136 2
138 2
137 $ cd ..
139 $ cd ..
138
140
139 pull
141 pull
140
142
141 $ cd copy-pull
143 $ cd copy-pull
142 $ cat >> .hg/hgrc <<EOF
144 $ cat >> .hg/hgrc <<EOF
143 > [hooks]
145 > [hooks]
144 > changegroup = sh -c "printenv.py --line changegroup"
146 > changegroup = sh -c "printenv.py --line changegroup"
145 > EOF
147 > EOF
146 $ hg pull
148 $ hg pull
147 pulling from http://localhost:$HGPORT1/
149 pulling from http://localhost:$HGPORT1/
148 searching for changes
150 searching for changes
149 adding changesets
151 adding changesets
150 adding manifests
152 adding manifests
151 adding file changes
153 adding file changes
152 added 1 changesets with 1 changes to 1 files
154 added 1 changesets with 1 changes to 1 files
153 new changesets 5fed3813f7f5
155 new changesets 5fed3813f7f5
154 changegroup hook: HG_HOOKNAME=changegroup
156 changegroup hook: HG_HOOKNAME=changegroup
155 HG_HOOKTYPE=changegroup
157 HG_HOOKTYPE=changegroup
156 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
158 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
157 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
159 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
158 HG_SOURCE=pull
160 HG_SOURCE=pull
159 HG_TXNID=TXN:$ID$
161 HG_TXNID=TXN:$ID$
160 HG_TXNNAME=pull
162 HG_TXNNAME=pull
161 http://localhost:$HGPORT1/
163 http://localhost:$HGPORT1/
162 HG_URL=http://localhost:$HGPORT1/
164 HG_URL=http://localhost:$HGPORT1/
163
165
164 (run 'hg update' to get a working copy)
166 (run 'hg update' to get a working copy)
165 $ cd ..
167 $ cd ..
166
168
167 clone from invalid URL
169 clone from invalid URL
168
170
169 $ hg clone http://localhost:$HGPORT/bad
171 $ hg clone http://localhost:$HGPORT/bad
170 abort: HTTP Error 404: Not Found
172 abort: HTTP Error 404: Not Found
171 [100]
173 [100]
172
174
173 test http authentication
175 test http authentication
174 + use the same server to test server side streaming preference
176 + use the same server to test server side streaming preference
175
177
176 $ cd test
178 $ cd test
177
179
178 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
180 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
179 > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \
181 > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \
180 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
182 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
181 $ cat pid >> $DAEMON_PIDS
183 $ cat pid >> $DAEMON_PIDS
182
184
183 $ cat << EOF > get_pass.py
185 $ cat << EOF > get_pass.py
184 > from mercurial import util
186 > from mercurial import util
185 > def newgetpass():
187 > def newgetpass():
186 > return "pass"
188 > return "pass"
187 > util.get_password = newgetpass
189 > util.get_password = newgetpass
188 > EOF
190 > EOF
189
191
190 $ hg id http://localhost:$HGPORT2/
192 $ hg id http://localhost:$HGPORT2/
191 abort: http authorization required for http://localhost:$HGPORT2/
193 abort: http authorization required for http://localhost:$HGPORT2/
192 [255]
194 [255]
193 $ hg id http://localhost:$HGPORT2/
195 $ hg id http://localhost:$HGPORT2/
194 abort: http authorization required for http://localhost:$HGPORT2/
196 abort: http authorization required for http://localhost:$HGPORT2/
195 [255]
197 [255]
196 $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/
198 $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/
197 using http://localhost:$HGPORT2/
199 using http://localhost:$HGPORT2/
198 sending capabilities command
200 sending capabilities command
199 http authorization required for http://localhost:$HGPORT2/
201 http authorization required for http://localhost:$HGPORT2/
200 realm: mercurial
202 realm: mercurial
201 user: abort: response expected
203 user: abort: response expected
202 [255]
204 [255]
203 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
205 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
204 >
206 >
205 > EOF
207 > EOF
206 using http://localhost:$HGPORT2/
208 using http://localhost:$HGPORT2/
207 sending capabilities command
209 sending capabilities command
208 http authorization required for http://localhost:$HGPORT2/
210 http authorization required for http://localhost:$HGPORT2/
209 realm: mercurial
211 realm: mercurial
210 user:
212 user:
211 password: abort: response expected
213 password: abort: response expected
212 [255]
214 [255]
213 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
215 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
214 >
216 >
215 >
217 >
216 > EOF
218 > EOF
217 using http://localhost:$HGPORT2/
219 using http://localhost:$HGPORT2/
218 sending capabilities command
220 sending capabilities command
219 http authorization required for http://localhost:$HGPORT2/
221 http authorization required for http://localhost:$HGPORT2/
220 realm: mercurial
222 realm: mercurial
221 user:
223 user:
222 password: abort: authorization failed
224 password: abort: authorization failed
223 [255]
225 [255]
224 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
226 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
225 http authorization required for http://localhost:$HGPORT2/
227 http authorization required for http://localhost:$HGPORT2/
226 realm: mercurial
228 realm: mercurial
227 user: user
229 user: user
228 password: 5fed3813f7f5
230 password: 5fed3813f7f5
229 $ hg id http://user:pass@localhost:$HGPORT2/
231 $ hg id http://user:pass@localhost:$HGPORT2/
230 5fed3813f7f5
232 5fed3813f7f5
231 $ echo '[auth]' >> .hg/hgrc
233 $ echo '[auth]' >> .hg/hgrc
232 $ echo 'l.schemes=http' >> .hg/hgrc
234 $ echo 'l.schemes=http' >> .hg/hgrc
233 $ echo 'l.prefix=lo' >> .hg/hgrc
235 $ echo 'l.prefix=lo' >> .hg/hgrc
234 $ echo 'l.username=user' >> .hg/hgrc
236 $ echo 'l.username=user' >> .hg/hgrc
235 $ echo 'l.password=pass' >> .hg/hgrc
237 $ echo 'l.password=pass' >> .hg/hgrc
236 $ hg id http://localhost:$HGPORT2/
238 $ hg id http://localhost:$HGPORT2/
237 5fed3813f7f5
239 5fed3813f7f5
238 $ hg id http://localhost:$HGPORT2/
240 $ hg id http://localhost:$HGPORT2/
239 5fed3813f7f5
241 5fed3813f7f5
240 $ hg id http://user@localhost:$HGPORT2/
242 $ hg id http://user@localhost:$HGPORT2/
241 5fed3813f7f5
243 5fed3813f7f5
242
244
243 $ cat > use_digests.py << EOF
245 $ cat > use_digests.py << EOF
244 > from mercurial import (
246 > from mercurial import (
245 > exthelper,
247 > exthelper,
246 > url,
248 > url,
247 > )
249 > )
248 >
250 >
249 > eh = exthelper.exthelper()
251 > eh = exthelper.exthelper()
250 > uisetup = eh.finaluisetup
252 > uisetup = eh.finaluisetup
251 >
253 >
252 > @eh.wrapfunction(url, 'opener')
254 > @eh.wrapfunction(url, 'opener')
253 > def urlopener(orig, *args, **kwargs):
255 > def urlopener(orig, *args, **kwargs):
254 > opener = orig(*args, **kwargs)
256 > opener = orig(*args, **kwargs)
255 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
257 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
256 > return opener
258 > return opener
257 > EOF
259 > EOF
258
260
259 $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py
261 $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py
260 5fed3813f7f5
262 5fed3813f7f5
261
263
262 #if no-reposimplestore
264 #if no-reposimplestore
263 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
265 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
264 streaming all changes
266 streaming all changes
265 10 files to transfer, 1.01 KB of data
267 10 files to transfer, 1.01 KB of data
266 transferred * KB in * seconds (*/sec) (glob)
268 transferred * KB in * seconds (*/sec) (glob)
267 updating to branch default
269 updating to branch default
268 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 #endif
271 #endif
270
272
271 --pull should override server's preferuncompressed
273 --pull should override server's preferuncompressed
272 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
274 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
273 requesting all changes
275 requesting all changes
274 adding changesets
276 adding changesets
275 adding manifests
277 adding manifests
276 adding file changes
278 adding file changes
277 added 2 changesets with 5 changes to 5 files
279 added 2 changesets with 5 changes to 5 files
278 new changesets 8b6053c928fe:5fed3813f7f5
280 new changesets 8b6053c928fe:5fed3813f7f5
279 updating to branch default
281 updating to branch default
280 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
281
283
282 $ hg id http://user2@localhost:$HGPORT2/
284 $ hg id http://user2@localhost:$HGPORT2/
283 abort: http authorization required for http://localhost:$HGPORT2/
285 abort: http authorization required for http://localhost:$HGPORT2/
284 [255]
286 [255]
285 $ hg id http://user:pass2@localhost:$HGPORT2/
287 $ hg id http://user:pass2@localhost:$HGPORT2/
286 abort: HTTP Error 403: no
288 abort: HTTP Error 403: no
287 [100]
289 [100]
288
290
289 $ hg -R dest-pull tag -r tip top
291 $ hg -R dest-pull tag -r tip top
290 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
292 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
291 pushing to http://user:***@localhost:$HGPORT2/
293 pushing to http://user:***@localhost:$HGPORT2/
292 searching for changes
294 searching for changes
293 remote: adding changesets
295 remote: adding changesets
294 remote: adding manifests
296 remote: adding manifests
295 remote: adding file changes
297 remote: adding file changes
296 remote: added 1 changesets with 1 changes to 1 files
298 remote: added 1 changesets with 1 changes to 1 files
297 $ hg rollback -q
299 $ hg rollback -q
298 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
300 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
299 pushing to http://user:***@localhost:$HGPORT2/
301 pushing to http://user:***@localhost:$HGPORT2/
300 using http://localhost:$HGPORT2/
302 using http://localhost:$HGPORT2/
301 http auth: user user, password ****
303 http auth: user user, password ****
302 sending capabilities command
304 sending capabilities command
303 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
305 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
304 http auth: user user, password ****
306 http auth: user user, password ****
305 devel-peer-request: finished in *.???? seconds (200) (glob)
307 devel-peer-request: finished in *.???? seconds (200) (glob)
306 query 1; heads
308 query 1; heads
  devel-peer-request: batched-content
  devel-peer-request: - heads (0 arguments)
  devel-peer-request: - known (1 arguments)
  sending batch command
  devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
  devel-peer-request: Vary X-HgArg-1,X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: 68 bytes of commands arguments in headers
  devel-peer-request: finished in *.???? seconds (200) (glob)
  searching for changes
  all remote heads known locally
  preparing listkeys for "phases"
  sending listkeys command
  devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
  devel-peer-request: Vary X-HgArg-1,X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: 16 bytes of commands arguments in headers
  devel-peer-request: finished in *.???? seconds (200) (glob)
  received listkey for "phases": 58 bytes
  checking for updated bookmarks
  preparing listkeys for "bookmarks"
  sending listkeys command
  devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
  devel-peer-request: Vary X-HgArg-1,X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: 19 bytes of commands arguments in headers
  devel-peer-request: finished in *.???? seconds (200) (glob)
  received listkey for "bookmarks": 0 bytes
  sending branchmap command
  devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
  devel-peer-request: Vary X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: finished in *.???? seconds (200) (glob)
  preparing listkeys for "bookmarks"
  sending listkeys command
  devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
  devel-peer-request: Vary X-HgArg-1,X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: 19 bytes of commands arguments in headers
  devel-peer-request: finished in *.???? seconds (200) (glob)
  received listkey for "bookmarks": 0 bytes
  1 changesets found
  list of changesets:
  7f4e523d01f2cc3765ac8934da3d14db775ff872
  bundle2-output-bundle: "HG20", 5 parts total
  bundle2-output-part: "replycaps" 207 bytes payload
  bundle2-output-part: "check:phases" 24 bytes payload
  bundle2-output-part: "check:updated-heads" streamed payload
  bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
  bundle2-output-part: "phase-heads" 24 bytes payload
  sending unbundle command
  sending 1023 bytes
  devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
  devel-peer-request: Content-length 1023
  devel-peer-request: Content-type application/mercurial-0.1
  devel-peer-request: Vary X-HgArg-1,X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: 16 bytes of commands arguments in headers
  devel-peer-request: 1023 bytes of data
  devel-peer-request: finished in *.???? seconds (200) (glob)
  bundle2-input-bundle: no-transaction
  bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
  bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
  bundle2-input-part: total payload size 55
  remote: adding changesets
  remote: adding manifests
  remote: adding file changes
  bundle2-input-part: "output" (advisory) supported
  bundle2-input-part: total payload size 45
  remote: added 1 changesets with 1 changes to 1 files
  bundle2-input-bundle: 3 parts total
  preparing listkeys for "phases"
  sending listkeys command
  devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
  devel-peer-request: Vary X-HgArg-1,X-HgProto-1
  devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  devel-peer-request: 16 bytes of commands arguments in headers
  devel-peer-request: finished in *.???? seconds (200) (glob)
  received listkey for "phases": 15 bytes
  (sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
  $ hg rollback -q

  $ sed 's/.*] "/"/' < ../access.log
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest
  "GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest
  "GET /?cmd=lookup HTTP/1.1" 401 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
  "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
  "GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
  "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
  "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
  "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
  "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 403 -
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=capabilities HTTP/1.1" 401 -
  "GET /?cmd=capabilities HTTP/1.1" 200 -
  "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
  "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull

  $ cd ..

clone of serve with repo in root and unserved subrepo (issue2970)

  $ hg --cwd test init sub
  $ echo empty > test/sub/empty
  $ hg --cwd test/sub add empty
  $ hg --cwd test/sub commit -qm 'add empty'
  $ hg --cwd test/sub tag -r 0 something
  $ echo sub = sub > test/.hgsub
  $ hg --cwd test add .hgsub
  $ hg --cwd test commit -qm 'add subrepo'
  $ hg clone http://localhost:$HGPORT noslash-clone
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 7 changes to 7 files
  new changesets 8b6053c928fe:56f9bc90cce6
  updating to branch default
  cloning subrepo sub from http://localhost:$HGPORT/sub
  abort: HTTP Error 404: Not Found
  [100]
  $ hg clone http://localhost:$HGPORT/ slash-clone
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 7 changes to 7 files
  new changesets 8b6053c928fe:56f9bc90cce6
  updating to branch default
  cloning subrepo sub from http://localhost:$HGPORT/sub
  abort: HTTP Error 404: Not Found
  [100]

check error log

  $ cat error.log

  $ cat errors2.log

check abort error reporting while pulling/cloning

  $ $RUNTESTDIR/killdaemons.py
  $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
  $ cat hg3.pid >> $DAEMON_PIDS
  $ hg clone http://localhost:$HGPORT/ abort-clone
  requesting all changes
  remote: abort: this is an exercise
  abort: pull failed on remote
  [100]
  $ cat error.log

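The crashing server above is driven by ${TESTDIR}/crashgetbundler.py, which is not reproduced in this file. As a rough, hypothetical sketch only (the real helper may differ), an extension that produces the "abort: this is an exercise" failure seen above could wrap changegroup bundler creation so that serving the pull raises an Abort:

  # hypothetical sketch, not the actual ${TESTDIR}/crashgetbundler.py
  from mercurial.i18n import _
  from mercurial import changegroup, error, extensions

  def _crash(orig, *args, **kwargs):
      # fail as soon as the server tries to build a changegroup for the pull
      raise error.Abort(_(b'this is an exercise'))

  def uisetup(ui):
      extensions.wrapfunction(changegroup, 'getbundler', _crash)
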
disable pull-based clones

  $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
  $ cat hg4.pid >> $DAEMON_PIDS
  $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
  requesting all changes
  remote: abort: server has pull-based clones disabled
  abort: pull failed on remote
  (remove --pull if specified or upgrade Mercurial)
  [100]

#if no-reposimplestore
... but keep stream clones working

  $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
  streaming all changes
  * files to transfer, * of data (glob)
  transferred * in * seconds (*/sec) (glob)
  $ cat error.log
#endif

... and also keep partial clones and pulls working
  $ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 4 changes to 4 files
  new changesets 8b6053c928fe
  updating to branch default
  4 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg pull -R test/partial/clone
  pulling from http://localhost:$HGPORT1/
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 3 changes to 3 files
  new changesets 5fed3813f7f5:56f9bc90cce6
  (run 'hg update' to get a working copy)

  $ hg clone -U -r 0 test/partial/clone test/another/clone
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 4 changes to 4 files
  new changesets 8b6053c928fe

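As a side note on the option exercised in the "disable pull-based clones" block above: outside of a test run, the same restriction would usually be configured persistently in the server repository's hgrc rather than passed with --config. A minimal sketch, assuming a plain hg serve / hgweb setup:

  [server]
  # refuse full pull-based clones; stream clones and incremental pulls
  # keep working, as the test above demonstrates
  disablefullbundle = true
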
corrupt cookies file should yield a warning

  $ cat > $TESTTMP/cookies.txt << EOF
  > bad format
  > EOF

  $ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
  (error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
  56f9bc90cce6

  $ killdaemons.py

Create dummy authentication handler that looks for cookies. It doesn't do anything
useful. It just raises an HTTP 500 with details about the Cookie request header.
We raise HTTP 500 because its message is printed in the abort message.

  $ cat > cookieauth.py << EOF
  > from mercurial import util
  > from mercurial.hgweb import common
  > def perform_authentication(hgweb, req, op):
  >     cookie = req.headers.get(b'Cookie')
  >     if not cookie:
  >         raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'no-cookie')
  >     raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'Cookie: %s' % cookie)
  > def extsetup(ui):
  >     common.permhooks.insert(0, perform_authentication)
  > EOF

  $ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
  $ cat pid > $DAEMON_PIDS

Request without cookie sent should fail due to lack of cookie

  $ hg id http://localhost:$HGPORT
  abort: HTTP Error 500: no-cookie
  [100]

Populate a cookies file

  $ cat > cookies.txt << EOF
  > # HTTP Cookie File
  > # Expiration is 2030-01-01 at midnight
  > .example.com TRUE / FALSE 1893456000 hgkey examplevalue
  > EOF

Should not send a cookie for another domain

  $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
  abort: HTTP Error 500: no-cookie
  [100]

Add a cookie entry for our test server and verify it is sent

  $ cat >> cookies.txt << EOF
  > localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue
  > EOF

  $ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
  abort: HTTP Error 500: Cookie: hgkey=localhostvalue
  [100]
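
The cookies.txt files above follow the Netscape cookie-file layout: domain, include-subdomains flag, path, secure flag, expiry timestamp, name, value. As an editorial sketch (not part of the test), the standard library's Netscape-format parser, which is also where the "does not look like a Netscape format cookies file" warning earlier in this test appears to originate, can load and inspect such a file:

  # sketch only: load a Netscape-format cookie file with the stdlib parser
  from http.cookiejar import LoadError, MozillaCookieJar

  jar = MozillaCookieJar('cookies.txt')
  try:
      jar.load()
  except LoadError as err:
      # a file without the expected Netscape header ends up here,
      # mirroring the warning checked earlier in this test
      print('bad cookie file: %s' % err)
  else:
      for cookie in jar:
          print(cookie.domain, cookie.name, cookie.value)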