stream-clone: filter possible missing requirements using all supported one...

Author: marmoute
Changeset: r49522:6d2ddea0 (stable branch)
@@ -1,920 +1,920 b''
1 # streamclone.py - producing and consuming streaming repository data
1 # streamclone.py - producing and consuming streaming repository data
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import errno
11 import errno
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import open
16 from .pycompat import open
17 from .interfaces import repository
17 from .interfaces import repository
18 from . import (
18 from . import (
19 bookmarks,
19 bookmarks,
20 cacheutil,
20 cacheutil,
21 error,
21 error,
22 narrowspec,
22 narrowspec,
23 phases,
23 phases,
24 pycompat,
24 pycompat,
25 requirements as requirementsmod,
25 requirements as requirementsmod,
26 scmutil,
26 scmutil,
27 store,
27 store,
28 util,
28 util,
29 )
29 )
30 from .utils import (
30 from .utils import (
31 stringutil,
31 stringutil,
32 )
32 )
33
33
34
34
35 def canperformstreamclone(pullop, bundle2=False):
35 def canperformstreamclone(pullop, bundle2=False):
36 """Whether it is possible to perform a streaming clone as part of pull.
36 """Whether it is possible to perform a streaming clone as part of pull.
37
37
38 ``bundle2`` will cause the function to consider stream clone through
38 ``bundle2`` will cause the function to consider stream clone through
39 bundle2 and only through bundle2.
39 bundle2 and only through bundle2.
40
40
41 Returns a tuple of (supported, requirements). ``supported`` is True if
41 Returns a tuple of (supported, requirements). ``supported`` is True if
42 streaming clone is supported and False otherwise. ``requirements`` is
42 streaming clone is supported and False otherwise. ``requirements`` is
43 a set of repo requirements from the remote, or ``None`` if stream clone
43 a set of repo requirements from the remote, or ``None`` if stream clone
44 isn't supported.
44 isn't supported.
45 """
45 """
46 repo = pullop.repo
46 repo = pullop.repo
47 remote = pullop.remote
47 remote = pullop.remote
48
48
49 bundle2supported = False
49 bundle2supported = False
50 if pullop.canusebundle2:
50 if pullop.canusebundle2:
51 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
51 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
52 bundle2supported = True
52 bundle2supported = True
53 # else
53 # else
54 # Server doesn't support bundle2 stream clone or doesn't support
54 # Server doesn't support bundle2 stream clone or doesn't support
55 # the versions we support. Fall back and possibly allow legacy.
55 # the versions we support. Fall back and possibly allow legacy.
56
56
57 # Ensures legacy code path uses available bundle2.
57 # Ensures legacy code path uses available bundle2.
58 if bundle2supported and not bundle2:
58 if bundle2supported and not bundle2:
59 return False, None
59 return False, None
60 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
60 # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
61 elif bundle2 and not bundle2supported:
61 elif bundle2 and not bundle2supported:
62 return False, None
62 return False, None
63
63
64 # Streaming clone only works on empty repositories.
64 # Streaming clone only works on empty repositories.
65 if len(repo):
65 if len(repo):
66 return False, None
66 return False, None
67
67
68 # Streaming clone only works if all data is being requested.
68 # Streaming clone only works if all data is being requested.
69 if pullop.heads:
69 if pullop.heads:
70 return False, None
70 return False, None
71
71
72 streamrequested = pullop.streamclonerequested
72 streamrequested = pullop.streamclonerequested
73
73
74 # If we don't have a preference, let the server decide for us. This
74 # If we don't have a preference, let the server decide for us. This
75 # likely only comes into play in LANs.
75 # likely only comes into play in LANs.
76 if streamrequested is None:
76 if streamrequested is None:
77 # The server can advertise whether to prefer streaming clone.
77 # The server can advertise whether to prefer streaming clone.
78 streamrequested = remote.capable(b'stream-preferred')
78 streamrequested = remote.capable(b'stream-preferred')
79
79
80 if not streamrequested:
80 if not streamrequested:
81 return False, None
81 return False, None
82
82
83 # In order for stream clone to work, the client has to support all the
83 # In order for stream clone to work, the client has to support all the
84 # requirements advertised by the server.
84 # requirements advertised by the server.
85 #
85 #
86 # The server advertises its requirements via the "stream" and "streamreqs"
86 # The server advertises its requirements via the "stream" and "streamreqs"
87 # capability. "stream" (a value-less capability) is advertised if and only
87 # capability. "stream" (a value-less capability) is advertised if and only
88 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
88 # if the only requirement is "revlogv1." Else, the "streamreqs" capability
89 # is advertised and contains a comma-delimited list of requirements.
89 # is advertised and contains a comma-delimited list of requirements.
90 requirements = set()
90 requirements = set()
91 if remote.capable(b'stream'):
91 if remote.capable(b'stream'):
92 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
92 requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
93 else:
93 else:
94 streamreqs = remote.capable(b'streamreqs')
94 streamreqs = remote.capable(b'streamreqs')
95 # This is weird and shouldn't happen with modern servers.
95 # This is weird and shouldn't happen with modern servers.
96 if not streamreqs:
96 if not streamreqs:
97 pullop.repo.ui.warn(
97 pullop.repo.ui.warn(
98 _(
98 _(
99 b'warning: stream clone requested but server has them '
99 b'warning: stream clone requested but server has them '
100 b'disabled\n'
100 b'disabled\n'
101 )
101 )
102 )
102 )
103 return False, None
103 return False, None
104
104
105 streamreqs = set(streamreqs.split(b','))
105 streamreqs = set(streamreqs.split(b','))
106 # Server requires something we don't support. Bail.
106 # Server requires something we don't support. Bail.
107 missingreqs = streamreqs - repo.supportedformats
107 missingreqs = streamreqs - repo.supported
108 if missingreqs:
108 if missingreqs:
109 pullop.repo.ui.warn(
109 pullop.repo.ui.warn(
110 _(
110 _(
111 b'warning: stream clone requested but client is missing '
111 b'warning: stream clone requested but client is missing '
112 b'requirements: %s\n'
112 b'requirements: %s\n'
113 )
113 )
114 % b', '.join(sorted(missingreqs))
114 % b', '.join(sorted(missingreqs))
115 )
115 )
116 pullop.repo.ui.warn(
116 pullop.repo.ui.warn(
117 _(
117 _(
118 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
118 b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
119 b'for more information)\n'
119 b'for more information)\n'
120 )
120 )
121 )
121 )
122 return False, None
122 return False, None
123 requirements = streamreqs
123 requirements = streamreqs
124
124
125 return True, requirements
125 return True, requirements
126
126
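# A minimal, standalone sketch of the capability negotiation above. It assumes a
# hypothetical `client_supported` set standing in for `repo.supported`; the real
# code additionally handles the bare b'stream' capability (revlogv1 only).
def can_stream(streamreqs_cap, client_supported):
    """Return (ok, requirements) for a b'streamreqs' capability value."""
    if not streamreqs_cap:
        # matches the "server has them disabled" warning path above
        return False, None
    streamreqs = set(streamreqs_cap.split(b','))
    if streamreqs - client_supported:
        # any unknown requirement forces the fallback to a regular pull
        return False, None
    return True, streamreqs

# a server requiring generaldelta is accepted as long as the client knows the
# requirement, even if the client would not create such a repository itself
ok, reqs = can_stream(b'generaldelta,revlogv1', {b'revlogv1', b'generaldelta', b'store'})
assert ok and reqs == {b'generaldelta', b'revlogv1'}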
127
127
128 def maybeperformlegacystreamclone(pullop):
128 def maybeperformlegacystreamclone(pullop):
129 """Possibly perform a legacy stream clone operation.
129 """Possibly perform a legacy stream clone operation.
130
130
131 Legacy stream clones are performed as part of pull but before all other
131 Legacy stream clones are performed as part of pull but before all other
132 operations.
132 operations.
133
133
134 A legacy stream clone will not be performed if a bundle2 stream clone is
134 A legacy stream clone will not be performed if a bundle2 stream clone is
135 supported.
135 supported.
136 """
136 """
137 from . import localrepo
137 from . import localrepo
138
138
139 supported, requirements = canperformstreamclone(pullop)
139 supported, requirements = canperformstreamclone(pullop)
140
140
141 if not supported:
141 if not supported:
142 return
142 return
143
143
144 repo = pullop.repo
144 repo = pullop.repo
145 remote = pullop.remote
145 remote = pullop.remote
146
146
147 # Save remote branchmap. We will use it later to speed up branchcache
147 # Save remote branchmap. We will use it later to speed up branchcache
148 # creation.
148 # creation.
149 rbranchmap = None
149 rbranchmap = None
150 if remote.capable(b'branchmap'):
150 if remote.capable(b'branchmap'):
151 with remote.commandexecutor() as e:
151 with remote.commandexecutor() as e:
152 rbranchmap = e.callcommand(b'branchmap', {}).result()
152 rbranchmap = e.callcommand(b'branchmap', {}).result()
153
153
154 repo.ui.status(_(b'streaming all changes\n'))
154 repo.ui.status(_(b'streaming all changes\n'))
155
155
156 with remote.commandexecutor() as e:
156 with remote.commandexecutor() as e:
157 fp = e.callcommand(b'stream_out', {}).result()
157 fp = e.callcommand(b'stream_out', {}).result()
158
158
159 # TODO strictly speaking, this code should all be inside the context
159 # TODO strictly speaking, this code should all be inside the context
160 # manager because the context manager is supposed to ensure all wire state
160 # manager because the context manager is supposed to ensure all wire state
161 # is flushed when exiting. But the legacy peers don't do this, so it
161 # is flushed when exiting. But the legacy peers don't do this, so it
162 # doesn't matter.
162 # doesn't matter.
163 l = fp.readline()
163 l = fp.readline()
164 try:
164 try:
165 resp = int(l)
165 resp = int(l)
166 except ValueError:
166 except ValueError:
167 raise error.ResponseError(
167 raise error.ResponseError(
168 _(b'unexpected response from remote server:'), l
168 _(b'unexpected response from remote server:'), l
169 )
169 )
170 if resp == 1:
170 if resp == 1:
171 raise error.Abort(_(b'operation forbidden by server'))
171 raise error.Abort(_(b'operation forbidden by server'))
172 elif resp == 2:
172 elif resp == 2:
173 raise error.Abort(_(b'locking the remote repository failed'))
173 raise error.Abort(_(b'locking the remote repository failed'))
174 elif resp != 0:
174 elif resp != 0:
175 raise error.Abort(_(b'the server sent an unknown error code'))
175 raise error.Abort(_(b'the server sent an unknown error code'))
176
176
177 l = fp.readline()
177 l = fp.readline()
178 try:
178 try:
179 filecount, bytecount = map(int, l.split(b' ', 1))
179 filecount, bytecount = map(int, l.split(b' ', 1))
180 except (ValueError, TypeError):
180 except (ValueError, TypeError):
181 raise error.ResponseError(
181 raise error.ResponseError(
182 _(b'unexpected response from remote server:'), l
182 _(b'unexpected response from remote server:'), l
183 )
183 )
184
184
185 with repo.lock():
185 with repo.lock():
186 consumev1(repo, fp, filecount, bytecount)
186 consumev1(repo, fp, filecount, bytecount)
187
187
188 # new requirements = old non-format requirements +
188 # new requirements = old non-format requirements +
189 # new format-related remote requirements
189 # new format-related remote requirements
190 # requirements from the streamed-in repository
190 # requirements from the streamed-in repository
191 repo.requirements = requirements | (
191 repo.requirements = requirements | (
192 repo.requirements - repo.supportedformats
192 repo.requirements - repo.supportedformats
193 )
193 )
194 repo.svfs.options = localrepo.resolvestorevfsoptions(
194 repo.svfs.options = localrepo.resolvestorevfsoptions(
195 repo.ui, repo.requirements, repo.features
195 repo.ui, repo.requirements, repo.features
196 )
196 )
197 scmutil.writereporequirements(repo)
197 scmutil.writereporequirements(repo)
198
198
199 if rbranchmap:
199 if rbranchmap:
200 repo._branchcaches.replace(repo, rbranchmap)
200 repo._branchcaches.replace(repo, rbranchmap)
201
201
202 repo.invalidate()
202 repo.invalidate()
203
203
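# A standalone illustration of the requirement recombination above: format
# requirements come from the streamed-in repository, while the local repo's
# non-format requirements are preserved. The sets below are illustrative only.
supportedformats = {b'revlogv1', b'generaldelta', b'sparserevlog'}
local_requirements = {b'revlogv1', b'store', b'fncache', b'dotencode'}
streamed_requirements = {b'revlogv1', b'generaldelta'}

new_requirements = streamed_requirements | (local_requirements - supportedformats)
assert new_requirements == {
    b'revlogv1', b'generaldelta', b'store', b'fncache', b'dotencode'
}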
204
204
205 def allowservergeneration(repo):
205 def allowservergeneration(repo):
206 """Whether streaming clones are allowed from the server."""
206 """Whether streaming clones are allowed from the server."""
207 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
207 if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
208 return False
208 return False
209
209
210 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
210 if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
211 return False
211 return False
212
212
213 # The way stream clone works makes it impossible to hide secret changesets.
213 # The way stream clone works makes it impossible to hide secret changesets.
214 # So don't allow this by default.
214 # So don't allow this by default.
215 secret = phases.hassecret(repo)
215 secret = phases.hassecret(repo)
216 if secret:
216 if secret:
217 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
217 return repo.ui.configbool(b'server', b'uncompressedallowsecret')
218
218
219 return True
219 return True
220
220
221
221
222 # This is its own function so extensions can override it.
222 # This is its own function so extensions can override it.
223 def _walkstreamfiles(repo, matcher=None):
223 def _walkstreamfiles(repo, matcher=None):
224 return repo.store.walk(matcher)
224 return repo.store.walk(matcher)
225
225
226
226
227 def generatev1(repo):
227 def generatev1(repo):
228 """Emit content for version 1 of a streaming clone.
228 """Emit content for version 1 of a streaming clone.
229
229
230 This returns a 3-tuple of (file count, byte size, data iterator).
230 This returns a 3-tuple of (file count, byte size, data iterator).
231
231
232 The data iterator consists of N entries for each file being transferred.
232 The data iterator consists of N entries for each file being transferred.
233 Each file entry starts as a line with the file name and integer size
233 Each file entry starts as a line with the file name and integer size
234 delimited by a null byte.
234 delimited by a null byte.
235
235
236 The raw file data follows. Following the raw file data is the next file
236 The raw file data follows. Following the raw file data is the next file
237 entry, or EOF.
237 entry, or EOF.
238
238
239 When used on the wire protocol, an additional line indicating protocol
239 When used on the wire protocol, an additional line indicating protocol
240 success will be prepended to the stream. This function is not responsible
240 success will be prepended to the stream. This function is not responsible
241 for adding it.
241 for adding it.
242
242
243 This function will obtain a repository lock to ensure a consistent view of
243 This function will obtain a repository lock to ensure a consistent view of
244 the store is captured. It therefore may raise LockError.
244 the store is captured. It therefore may raise LockError.
245 """
245 """
246 entries = []
246 entries = []
247 total_bytes = 0
247 total_bytes = 0
248 # Get consistent snapshot of repo, lock during scan.
248 # Get consistent snapshot of repo, lock during scan.
249 with repo.lock():
249 with repo.lock():
250 repo.ui.debug(b'scanning\n')
250 repo.ui.debug(b'scanning\n')
251 for file_type, name, size in _walkstreamfiles(repo):
251 for file_type, name, size in _walkstreamfiles(repo):
252 if size:
252 if size:
253 entries.append((name, size))
253 entries.append((name, size))
254 total_bytes += size
254 total_bytes += size
255 _test_sync_point_walk_1(repo)
255 _test_sync_point_walk_1(repo)
256 _test_sync_point_walk_2(repo)
256 _test_sync_point_walk_2(repo)
257
257
258 repo.ui.debug(
258 repo.ui.debug(
259 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
259 b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
260 )
260 )
261
261
262 svfs = repo.svfs
262 svfs = repo.svfs
263 debugflag = repo.ui.debugflag
263 debugflag = repo.ui.debugflag
264
264
265 def emitrevlogdata():
265 def emitrevlogdata():
266 for name, size in entries:
266 for name, size in entries:
267 if debugflag:
267 if debugflag:
268 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
268 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
269 # partially encode name over the wire for backwards compat
269 # partially encode name over the wire for backwards compat
270 yield b'%s\0%d\n' % (store.encodedir(name), size)
270 yield b'%s\0%d\n' % (store.encodedir(name), size)
271 # auditing at this stage is both pointless (paths are already
271 # auditing at this stage is both pointless (paths are already
272 # trusted by the local repo) and expensive
272 # trusted by the local repo) and expensive
273 with svfs(name, b'rb', auditpath=False) as fp:
273 with svfs(name, b'rb', auditpath=False) as fp:
274 if size <= 65536:
274 if size <= 65536:
275 yield fp.read(size)
275 yield fp.read(size)
276 else:
276 else:
277 for chunk in util.filechunkiter(fp, limit=size):
277 for chunk in util.filechunkiter(fp, limit=size):
278 yield chunk
278 yield chunk
279
279
280 return len(entries), total_bytes, emitrevlogdata()
280 return len(entries), total_bytes, emitrevlogdata()
281
281
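# A standalone sketch of the v1 entry framing described in the generatev1()
# docstring: each file is announced by a "name\0size\n" line followed by exactly
# `size` raw bytes. The wire-protocol status line is assumed handled elsewhere.
import io

def parse_v1_entries(fp):
    """Yield (name, data) pairs from a v1-style stream."""
    while True:
        header = fp.readline()
        if not header:
            break  # EOF ends the entry list
        name, size = header.rstrip(b'\n').split(b'\0', 1)
        yield name, fp.read(int(size))

stream = io.BytesIO(b'data/foo.i\x005\nhellodata/bar.i\x003\nxyz')
assert list(parse_v1_entries(stream)) == [
    (b'data/foo.i', b'hello'),
    (b'data/bar.i', b'xyz'),
]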
282
282
283 def generatev1wireproto(repo):
283 def generatev1wireproto(repo):
284 """Emit content for version 1 of streaming clone suitable for the wire.
284 """Emit content for version 1 of streaming clone suitable for the wire.
285
285
286 This is the data output from ``generatev1()`` with 2 header lines. The
286 This is the data output from ``generatev1()`` with 2 header lines. The
287 first line indicates overall success. The 2nd contains the file count and
287 first line indicates overall success. The 2nd contains the file count and
288 byte size of payload.
288 byte size of payload.
289
289
290 The success line contains "0" for success, "1" for stream generation not
290 The success line contains "0" for success, "1" for stream generation not
291 allowed, and "2" for error locking the repository (possibly indicating
291 allowed, and "2" for error locking the repository (possibly indicating
292 a permissions error for the server process).
292 a permissions error for the server process).
293 """
293 """
294 if not allowservergeneration(repo):
294 if not allowservergeneration(repo):
295 yield b'1\n'
295 yield b'1\n'
296 return
296 return
297
297
298 try:
298 try:
299 filecount, bytecount, it = generatev1(repo)
299 filecount, bytecount, it = generatev1(repo)
300 except error.LockError:
300 except error.LockError:
301 yield b'2\n'
301 yield b'2\n'
302 return
302 return
303
303
304 # Indicates successful response.
304 # Indicates successful response.
305 yield b'0\n'
305 yield b'0\n'
306 yield b'%d %d\n' % (filecount, bytecount)
306 yield b'%d %d\n' % (filecount, bytecount)
307 for chunk in it:
307 for chunk in it:
308 yield chunk
308 yield chunk
309
309
310
310
311 def generatebundlev1(repo, compression=b'UN'):
311 def generatebundlev1(repo, compression=b'UN'):
312 """Emit content for version 1 of a stream clone bundle.
312 """Emit content for version 1 of a stream clone bundle.
313
313
314 The first 4 bytes of the output ("HGS1") denote this as stream clone
314 The first 4 bytes of the output ("HGS1") denote this as stream clone
315 bundle version 1.
315 bundle version 1.
316
316
317 The next 2 bytes indicate the compression type. Only "UN" is currently
317 The next 2 bytes indicate the compression type. Only "UN" is currently
318 supported.
318 supported.
319
319
320 The next 16 bytes are two 64-bit big endian unsigned integers indicating
320 The next 16 bytes are two 64-bit big endian unsigned integers indicating
321 file count and byte count, respectively.
321 file count and byte count, respectively.
322
322
323 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
323 The next 2 bytes is a 16-bit big endian unsigned short declaring the length
324 of the requirements string, including a trailing \0. The following N bytes
324 of the requirements string, including a trailing \0. The following N bytes
325 are the requirements string, which is ASCII containing a comma-delimited
325 are the requirements string, which is ASCII containing a comma-delimited
326 list of repo requirements that are needed to support the data.
326 list of repo requirements that are needed to support the data.
327
327
328 The remaining content is the output of ``generatev1()`` (which may be
328 The remaining content is the output of ``generatev1()`` (which may be
329 compressed in the future).
329 compressed in the future).
330
330
331 Returns a tuple of (requirements, data generator).
331 Returns a tuple of (requirements, data generator).
332 """
332 """
333 if compression != b'UN':
333 if compression != b'UN':
334 raise ValueError(b'we do not support the compression argument yet')
334 raise ValueError(b'we do not support the compression argument yet')
335
335
336 requirements = repo.requirements & repo.supportedformats
336 requirements = repo.requirements & repo.supportedformats
337 requires = b','.join(sorted(requirements))
337 requires = b','.join(sorted(requirements))
338
338
339 def gen():
339 def gen():
340 yield b'HGS1'
340 yield b'HGS1'
341 yield compression
341 yield compression
342
342
343 filecount, bytecount, it = generatev1(repo)
343 filecount, bytecount, it = generatev1(repo)
344 repo.ui.status(
344 repo.ui.status(
345 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
345 _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
346 )
346 )
347
347
348 yield struct.pack(b'>QQ', filecount, bytecount)
348 yield struct.pack(b'>QQ', filecount, bytecount)
349 yield struct.pack(b'>H', len(requires) + 1)
349 yield struct.pack(b'>H', len(requires) + 1)
350 yield requires + b'\0'
350 yield requires + b'\0'
351
351
352 # This is where we'll add compression in the future.
352 # This is where we'll add compression in the future.
353 assert compression == b'UN'
353 assert compression == b'UN'
354
354
355 progress = repo.ui.makeprogress(
355 progress = repo.ui.makeprogress(
356 _(b'bundle'), total=bytecount, unit=_(b'bytes')
356 _(b'bundle'), total=bytecount, unit=_(b'bytes')
357 )
357 )
358 progress.update(0)
358 progress.update(0)
359
359
360 for chunk in it:
360 for chunk in it:
361 progress.increment(step=len(chunk))
361 progress.increment(step=len(chunk))
362 yield chunk
362 yield chunk
363
363
364 progress.complete()
364 progress.complete()
365
365
366 return requirements, gen()
366 return requirements, gen()
367
367
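# A standalone sketch of the HGS1 bundle header layout documented in
# generatebundlev1(): 4-byte magic, 2-byte compression marker, two big-endian
# 64-bit counts, a 16-bit length, then the NUL-terminated requirements string.
import io
import struct

def write_v1_bundle_header(fp, filecount, bytecount, requirements):
    requires = b','.join(sorted(requirements)) + b'\0'
    fp.write(b'HGS1')
    fp.write(b'UN')  # only uncompressed bundles are supported so far
    fp.write(struct.pack(b'>QQ', filecount, bytecount))
    fp.write(struct.pack(b'>H', len(requires)))  # length includes the trailing NUL
    fp.write(requires)

buf = io.BytesIO()
write_v1_bundle_header(buf, 6, 606, {b'revlogv1', b'store'})
buf.seek(4)  # readbundle1header() (further down) expects the magic already read
assert buf.read(2) == b'UN'
assert struct.unpack(b'>QQ', buf.read(16)) == (6, 606)
assert buf.read(struct.unpack(b'>H', buf.read(2))[0]) == b'revlogv1,store\0'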
368
368
369 def consumev1(repo, fp, filecount, bytecount):
369 def consumev1(repo, fp, filecount, bytecount):
370 """Apply the contents from version 1 of a streaming clone file handle.
370 """Apply the contents from version 1 of a streaming clone file handle.
371
371
372 This takes the output from "stream_out" and applies it to the specified
372 This takes the output from "stream_out" and applies it to the specified
373 repository.
373 repository.
374
374
375 Like "stream_out," the status line added by the wire protocol is not
375 Like "stream_out," the status line added by the wire protocol is not
376 handled by this function.
376 handled by this function.
377 """
377 """
378 with repo.lock():
378 with repo.lock():
379 repo.ui.status(
379 repo.ui.status(
380 _(b'%d files to transfer, %s of data\n')
380 _(b'%d files to transfer, %s of data\n')
381 % (filecount, util.bytecount(bytecount))
381 % (filecount, util.bytecount(bytecount))
382 )
382 )
383 progress = repo.ui.makeprogress(
383 progress = repo.ui.makeprogress(
384 _(b'clone'), total=bytecount, unit=_(b'bytes')
384 _(b'clone'), total=bytecount, unit=_(b'bytes')
385 )
385 )
386 progress.update(0)
386 progress.update(0)
387 start = util.timer()
387 start = util.timer()
388
388
389 # TODO: get rid of (potential) inconsistency
389 # TODO: get rid of (potential) inconsistency
390 #
390 #
391 # If transaction is started and any @filecache property is
391 # If transaction is started and any @filecache property is
392 # changed at this point, it causes inconsistency between
392 # changed at this point, it causes inconsistency between
393 # in-memory cached property and streamclone-ed file on the
393 # in-memory cached property and streamclone-ed file on the
394 # disk. Nested transaction prevents transaction scope "clone"
394 # disk. Nested transaction prevents transaction scope "clone"
395 # below from writing in-memory changes out at the end of it,
395 # below from writing in-memory changes out at the end of it,
396 # even though in-memory changes are discarded at the end of it
396 # even though in-memory changes are discarded at the end of it
397 # regardless of transaction nesting.
397 # regardless of transaction nesting.
398 #
398 #
399 # But transaction nesting can't be simply prohibited, because
399 # But transaction nesting can't be simply prohibited, because
400 # nesting occurs also in ordinary case (e.g. enabling
400 # nesting occurs also in ordinary case (e.g. enabling
401 # clonebundles).
401 # clonebundles).
402
402
403 with repo.transaction(b'clone'):
403 with repo.transaction(b'clone'):
404 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
404 with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
405 for i in pycompat.xrange(filecount):
405 for i in pycompat.xrange(filecount):
406 # XXX doesn't support '\n' or '\r' in filenames
406 # XXX doesn't support '\n' or '\r' in filenames
407 l = fp.readline()
407 l = fp.readline()
408 try:
408 try:
409 name, size = l.split(b'\0', 1)
409 name, size = l.split(b'\0', 1)
410 size = int(size)
410 size = int(size)
411 except (ValueError, TypeError):
411 except (ValueError, TypeError):
412 raise error.ResponseError(
412 raise error.ResponseError(
413 _(b'unexpected response from remote server:'), l
413 _(b'unexpected response from remote server:'), l
414 )
414 )
415 if repo.ui.debugflag:
415 if repo.ui.debugflag:
416 repo.ui.debug(
416 repo.ui.debug(
417 b'adding %s (%s)\n' % (name, util.bytecount(size))
417 b'adding %s (%s)\n' % (name, util.bytecount(size))
418 )
418 )
419 # for backwards compat, name was partially encoded
419 # for backwards compat, name was partially encoded
420 path = store.decodedir(name)
420 path = store.decodedir(name)
421 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
421 with repo.svfs(path, b'w', backgroundclose=True) as ofp:
422 for chunk in util.filechunkiter(fp, limit=size):
422 for chunk in util.filechunkiter(fp, limit=size):
423 progress.increment(step=len(chunk))
423 progress.increment(step=len(chunk))
424 ofp.write(chunk)
424 ofp.write(chunk)
425
425
426 # force @filecache properties to be reloaded from
426 # force @filecache properties to be reloaded from
427 # streamclone-ed file at next access
427 # streamclone-ed file at next access
428 repo.invalidate(clearfilecache=True)
428 repo.invalidate(clearfilecache=True)
429
429
430 elapsed = util.timer() - start
430 elapsed = util.timer() - start
431 if elapsed <= 0:
431 if elapsed <= 0:
432 elapsed = 0.001
432 elapsed = 0.001
433 progress.complete()
433 progress.complete()
434 repo.ui.status(
434 repo.ui.status(
435 _(b'transferred %s in %.1f seconds (%s/sec)\n')
435 _(b'transferred %s in %.1f seconds (%s/sec)\n')
436 % (
436 % (
437 util.bytecount(bytecount),
437 util.bytecount(bytecount),
438 elapsed,
438 elapsed,
439 util.bytecount(bytecount / elapsed),
439 util.bytecount(bytecount / elapsed),
440 )
440 )
441 )
441 )
442
442
443
443
444 def readbundle1header(fp):
444 def readbundle1header(fp):
445 compression = fp.read(2)
445 compression = fp.read(2)
446 if compression != b'UN':
446 if compression != b'UN':
447 raise error.Abort(
447 raise error.Abort(
448 _(
448 _(
449 b'only uncompressed stream clone bundles are '
449 b'only uncompressed stream clone bundles are '
450 b'supported; got %s'
450 b'supported; got %s'
451 )
451 )
452 % compression
452 % compression
453 )
453 )
454
454
455 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
455 filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
456 requireslen = struct.unpack(b'>H', fp.read(2))[0]
456 requireslen = struct.unpack(b'>H', fp.read(2))[0]
457 requires = fp.read(requireslen)
457 requires = fp.read(requireslen)
458
458
459 if not requires.endswith(b'\0'):
459 if not requires.endswith(b'\0'):
460 raise error.Abort(
460 raise error.Abort(
461 _(
461 _(
462 b'malformed stream clone bundle: '
462 b'malformed stream clone bundle: '
463 b'requirements not properly encoded'
463 b'requirements not properly encoded'
464 )
464 )
465 )
465 )
466
466
467 requirements = set(requires.rstrip(b'\0').split(b','))
467 requirements = set(requires.rstrip(b'\0').split(b','))
468
468
469 return filecount, bytecount, requirements
469 return filecount, bytecount, requirements
470
470
471
471
472 def applybundlev1(repo, fp):
472 def applybundlev1(repo, fp):
473 """Apply the content from a stream clone bundle version 1.
473 """Apply the content from a stream clone bundle version 1.
474
474
475 We assume the 4 byte header has been read and validated and the file handle
475 We assume the 4 byte header has been read and validated and the file handle
476 is at the 2 byte compression identifier.
476 is at the 2 byte compression identifier.
477 """
477 """
478 if len(repo):
478 if len(repo):
479 raise error.Abort(
479 raise error.Abort(
480 _(b'cannot apply stream clone bundle on non-empty repo')
480 _(b'cannot apply stream clone bundle on non-empty repo')
481 )
481 )
482
482
483 filecount, bytecount, requirements = readbundle1header(fp)
483 filecount, bytecount, requirements = readbundle1header(fp)
484 missingreqs = requirements - repo.supportedformats
484 missingreqs = requirements - repo.supported
485 if missingreqs:
485 if missingreqs:
486 raise error.Abort(
486 raise error.Abort(
487 _(b'unable to apply stream clone: unsupported format: %s')
487 _(b'unable to apply stream clone: unsupported format: %s')
488 % b', '.join(sorted(missingreqs))
488 % b', '.join(sorted(missingreqs))
489 )
489 )
490
490
491 consumev1(repo, fp, filecount, bytecount)
491 consumev1(repo, fp, filecount, bytecount)
492
492
493
493
494 class streamcloneapplier(object):
494 class streamcloneapplier(object):
495 """Class to manage applying streaming clone bundles.
495 """Class to manage applying streaming clone bundles.
496
496
497 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
497 We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
498 readers to perform bundle type-specific functionality.
498 readers to perform bundle type-specific functionality.
499 """
499 """
500
500
501 def __init__(self, fh):
501 def __init__(self, fh):
502 self._fh = fh
502 self._fh = fh
503
503
504 def apply(self, repo):
504 def apply(self, repo):
505 return applybundlev1(repo, self._fh)
505 return applybundlev1(repo, self._fh)
506
506
507
507
508 # type of file to stream
508 # type of file to stream
509 _fileappend = 0 # append only file
509 _fileappend = 0 # append only file
510 _filefull = 1 # full snapshot file
510 _filefull = 1 # full snapshot file
511
511
512 # Source of the file
512 # Source of the file
513 _srcstore = b's' # store (svfs)
513 _srcstore = b's' # store (svfs)
514 _srccache = b'c' # cache (cache)
514 _srccache = b'c' # cache (cache)
515
515
516 # This is its own function so extensions can override it.
516 # This is its own function so extensions can override it.
517 def _walkstreamfullstorefiles(repo):
517 def _walkstreamfullstorefiles(repo):
518 """list snapshot file from the store"""
518 """list snapshot file from the store"""
519 fnames = []
519 fnames = []
520 if not repo.publishing():
520 if not repo.publishing():
521 fnames.append(b'phaseroots')
521 fnames.append(b'phaseroots')
522 return fnames
522 return fnames
523
523
524
524
525 def _filterfull(entry, copy, vfsmap):
525 def _filterfull(entry, copy, vfsmap):
526 """actually copy the snapshot files"""
526 """actually copy the snapshot files"""
527 src, name, ftype, data = entry
527 src, name, ftype, data = entry
528 if ftype != _filefull:
528 if ftype != _filefull:
529 return entry
529 return entry
530 return (src, name, ftype, copy(vfsmap[src].join(name)))
530 return (src, name, ftype, copy(vfsmap[src].join(name)))
531
531
532
532
533 @contextlib.contextmanager
533 @contextlib.contextmanager
534 def maketempcopies():
534 def maketempcopies():
535 """return a function to temporary copy file"""
535 """return a function to temporary copy file"""
536 files = []
536 files = []
537 try:
537 try:
538
538
539 def copy(src):
539 def copy(src):
540 fd, dst = pycompat.mkstemp()
540 fd, dst = pycompat.mkstemp()
541 os.close(fd)
541 os.close(fd)
542 files.append(dst)
542 files.append(dst)
543 util.copyfiles(src, dst, hardlink=True)
543 util.copyfiles(src, dst, hardlink=True)
544 return dst
544 return dst
545
545
546 yield copy
546 yield copy
547 finally:
547 finally:
548 for tmp in files:
548 for tmp in files:
549 util.tryunlink(tmp)
549 util.tryunlink(tmp)
550
550
551
551
552 def _makemap(repo):
552 def _makemap(repo):
553 """make a (src -> vfs) map for the repo"""
553 """make a (src -> vfs) map for the repo"""
554 vfsmap = {
554 vfsmap = {
555 _srcstore: repo.svfs,
555 _srcstore: repo.svfs,
556 _srccache: repo.cachevfs,
556 _srccache: repo.cachevfs,
557 }
557 }
558 # we keep repo.vfs out of the map on purpose, there are too many dangers there
558 # we keep repo.vfs out of the map on purpose, there are too many dangers there
559 # (eg: .hg/hgrc)
559 # (eg: .hg/hgrc)
560 assert repo.vfs not in vfsmap.values()
560 assert repo.vfs not in vfsmap.values()
561
561
562 return vfsmap
562 return vfsmap
563
563
564
564
565 def _emit2(repo, entries, totalfilesize):
565 def _emit2(repo, entries, totalfilesize):
566 """actually emit the stream bundle"""
566 """actually emit the stream bundle"""
567 vfsmap = _makemap(repo)
567 vfsmap = _makemap(repo)
568 # we keep repo.vfs out of the map on purpose, there are too many dangers there
568 # we keep repo.vfs out of the map on purpose, there are too many dangers there
569 # (eg: .hg/hgrc),
569 # (eg: .hg/hgrc),
570 #
570 #
571 # this assert is duplicated (from _makemap) as an author might think this is
571 # this assert is duplicated (from _makemap) as an author might think this is
572 # fine, while it really is not.
572 # fine, while it really is not.
573 if repo.vfs in vfsmap.values():
573 if repo.vfs in vfsmap.values():
574 raise error.ProgrammingError(
574 raise error.ProgrammingError(
575 b'repo.vfs must not be added to vfsmap for security reasons'
575 b'repo.vfs must not be added to vfsmap for security reasons'
576 )
576 )
577
577
578 progress = repo.ui.makeprogress(
578 progress = repo.ui.makeprogress(
579 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
579 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
580 )
580 )
581 progress.update(0)
581 progress.update(0)
582 with maketempcopies() as copy, progress:
582 with maketempcopies() as copy, progress:
583 # copy is delayed until we are in the try
583 # copy is delayed until we are in the try
584 entries = [_filterfull(e, copy, vfsmap) for e in entries]
584 entries = [_filterfull(e, copy, vfsmap) for e in entries]
585 yield None # this releases the lock on the repository
585 yield None # this releases the lock on the repository
586 totalbytecount = 0
586 totalbytecount = 0
587
587
588 for src, name, ftype, data in entries:
588 for src, name, ftype, data in entries:
589 vfs = vfsmap[src]
589 vfs = vfsmap[src]
590 yield src
590 yield src
591 yield util.uvarintencode(len(name))
591 yield util.uvarintencode(len(name))
592 if ftype == _fileappend:
592 if ftype == _fileappend:
593 fp = vfs(name)
593 fp = vfs(name)
594 size = data
594 size = data
595 elif ftype == _filefull:
595 elif ftype == _filefull:
596 fp = open(data, b'rb')
596 fp = open(data, b'rb')
597 size = util.fstat(fp).st_size
597 size = util.fstat(fp).st_size
598 bytecount = 0
598 bytecount = 0
599 try:
599 try:
600 yield util.uvarintencode(size)
600 yield util.uvarintencode(size)
601 yield name
601 yield name
602 if size <= 65536:
602 if size <= 65536:
603 chunks = (fp.read(size),)
603 chunks = (fp.read(size),)
604 else:
604 else:
605 chunks = util.filechunkiter(fp, limit=size)
605 chunks = util.filechunkiter(fp, limit=size)
606 for chunk in chunks:
606 for chunk in chunks:
607 bytecount += len(chunk)
607 bytecount += len(chunk)
608 totalbytecount += len(chunk)
608 totalbytecount += len(chunk)
609 progress.update(totalbytecount)
609 progress.update(totalbytecount)
610 yield chunk
610 yield chunk
611 if bytecount != size:
611 if bytecount != size:
612 # Would most likely be caused by a race due to `hg strip` or
612 # Would most likely be caused by a race due to `hg strip` or
613 # a revlog split
613 # a revlog split
614 raise error.Abort(
614 raise error.Abort(
615 _(
615 _(
616 b'clone could only read %d bytes from %s, but '
616 b'clone could only read %d bytes from %s, but '
617 b'expected %d bytes'
617 b'expected %d bytes'
618 )
618 )
619 % (bytecount, name, size)
619 % (bytecount, name, size)
620 )
620 )
621 finally:
621 finally:
622 fp.close()
622 fp.close()
623
623
624
624
625 def _test_sync_point_walk_1(repo):
625 def _test_sync_point_walk_1(repo):
626 """a function for synchronisation during tests"""
626 """a function for synchronisation during tests"""
627
627
628
628
629 def _test_sync_point_walk_2(repo):
629 def _test_sync_point_walk_2(repo):
630 """a function for synchronisation during tests"""
630 """a function for synchronisation during tests"""
631
631
632
632
633 def _v2_walk(repo, includes, excludes, includeobsmarkers):
633 def _v2_walk(repo, includes, excludes, includeobsmarkers):
634 """emit a seris of files information useful to clone a repo
634 """emit a seris of files information useful to clone a repo
635
635
636 return (entries, totalfilesize)
636 return (entries, totalfilesize)
637
637
638 entries is a list of tuples (vfs-key, file-path, file-type, size)
638 entries is a list of tuples (vfs-key, file-path, file-type, size)
639
639
640 - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
640 - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
641 - `name`: file path of the file to copy (to be fed to the vfs)
641 - `name`: file path of the file to copy (to be fed to the vfs)
642 - `file-type`: does this file need to be copied with the source lock?
642 - `file-type`: does this file need to be copied with the source lock?
643 - `size`: the size of the file (or None)
643 - `size`: the size of the file (or None)
644 """
644 """
645 assert repo._currentlock(repo._lockref) is not None
645 assert repo._currentlock(repo._lockref) is not None
646 entries = []
646 entries = []
647 totalfilesize = 0
647 totalfilesize = 0
648
648
649 matcher = None
649 matcher = None
650 if includes or excludes:
650 if includes or excludes:
651 matcher = narrowspec.match(repo.root, includes, excludes)
651 matcher = narrowspec.match(repo.root, includes, excludes)
652
652
653 for rl_type, name, size in _walkstreamfiles(repo, matcher):
653 for rl_type, name, size in _walkstreamfiles(repo, matcher):
654 if size:
654 if size:
655 ft = _fileappend
655 ft = _fileappend
656 if rl_type & store.FILEFLAGS_VOLATILE:
656 if rl_type & store.FILEFLAGS_VOLATILE:
657 ft = _filefull
657 ft = _filefull
658 entries.append((_srcstore, name, ft, size))
658 entries.append((_srcstore, name, ft, size))
659 totalfilesize += size
659 totalfilesize += size
660 for name in _walkstreamfullstorefiles(repo):
660 for name in _walkstreamfullstorefiles(repo):
661 if repo.svfs.exists(name):
661 if repo.svfs.exists(name):
662 totalfilesize += repo.svfs.lstat(name).st_size
662 totalfilesize += repo.svfs.lstat(name).st_size
663 entries.append((_srcstore, name, _filefull, None))
663 entries.append((_srcstore, name, _filefull, None))
664 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
664 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
665 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
665 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
666 entries.append((_srcstore, b'obsstore', _filefull, None))
666 entries.append((_srcstore, b'obsstore', _filefull, None))
667 for name in cacheutil.cachetocopy(repo):
667 for name in cacheutil.cachetocopy(repo):
668 if repo.cachevfs.exists(name):
668 if repo.cachevfs.exists(name):
669 totalfilesize += repo.cachevfs.lstat(name).st_size
669 totalfilesize += repo.cachevfs.lstat(name).st_size
670 entries.append((_srccache, name, _filefull, None))
670 entries.append((_srccache, name, _filefull, None))
671 return entries, totalfilesize
671 return entries, totalfilesize
672
672
673
673
674 def generatev2(repo, includes, excludes, includeobsmarkers):
674 def generatev2(repo, includes, excludes, includeobsmarkers):
675 """Emit content for version 2 of a streaming clone.
675 """Emit content for version 2 of a streaming clone.
676
676
677 the data stream consists the following entries:
677 the data stream consists the following entries:
678 1) A char representing the file destination (eg: store or cache)
678 1) A char representing the file destination (eg: store or cache)
679 2) A varint containing the length of the filename
679 2) A varint containing the length of the filename
680 3) A varint containing the length of file data
680 3) A varint containing the length of file data
681 4) N bytes containing the filename (the internal, store-agnostic form)
681 4) N bytes containing the filename (the internal, store-agnostic form)
682 5) N bytes containing the file data
682 5) N bytes containing the file data
683
683
684 Returns a 3-tuple of (file count, file size, data iterator).
684 Returns a 3-tuple of (file count, file size, data iterator).
685 """
685 """
686
686
687 with repo.lock():
687 with repo.lock():
688
688
689 repo.ui.debug(b'scanning\n')
689 repo.ui.debug(b'scanning\n')
690
690
691 entries, totalfilesize = _v2_walk(
691 entries, totalfilesize = _v2_walk(
692 repo,
692 repo,
693 includes=includes,
693 includes=includes,
694 excludes=excludes,
694 excludes=excludes,
695 includeobsmarkers=includeobsmarkers,
695 includeobsmarkers=includeobsmarkers,
696 )
696 )
697
697
698 chunks = _emit2(repo, entries, totalfilesize)
698 chunks = _emit2(repo, entries, totalfilesize)
699 first = next(chunks)
699 first = next(chunks)
700 assert first is None
700 assert first is None
701 _test_sync_point_walk_1(repo)
701 _test_sync_point_walk_1(repo)
702 _test_sync_point_walk_2(repo)
702 _test_sync_point_walk_2(repo)
703
703
704 return len(entries), totalfilesize, chunks
704 return len(entries), totalfilesize, chunks
705
705
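# A standalone sketch of the v2 entry framing listed in the generatev2()
# docstring. `uvarint` is a local stand-in for util.uvarintencode (an unsigned,
# continuation-bit varint); only the byte layout is illustrated here.
def uvarint(value):
    out = bytearray()
    while True:
        byte = value & 0x7F
        value >>= 7
        if value:
            out.append(byte | 0x80)
        else:
            out.append(byte)
            return bytes(out)

def encode_v2_entry(src, name, data):
    """src is b's' (store) or b'c' (cache); name and data are bytes."""
    return src + uvarint(len(name)) + uvarint(len(data)) + name + data

# a zero-length payload still carries its name and both length varints
assert encode_v2_entry(b's', b'phaseroots', b'') == b's\x0a\x00phaseroots'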
706
706
707 @contextlib.contextmanager
707 @contextlib.contextmanager
708 def nested(*ctxs):
708 def nested(*ctxs):
709 this = ctxs[0]
709 this = ctxs[0]
710 rest = ctxs[1:]
710 rest = ctxs[1:]
711 with this:
711 with this:
712 if rest:
712 if rest:
713 with nested(*rest):
713 with nested(*rest):
714 yield
714 yield
715 else:
715 else:
716 yield
716 yield
717
717
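# A standalone usage sketch for nested() above: it enters an arbitrary number of
# context managers recursively (similar in spirit to contextlib.ExitStack), which
# is how consumev2() background-closes every vfs in the map with one `with`.
# `_nested` is a local copy of the helper so this snippet runs on its own.
import contextlib

@contextlib.contextmanager
def _nested(*ctxs):
    this, rest = ctxs[0], ctxs[1:]
    with this:
        if rest:
            with _nested(*rest):
                yield
        else:
            yield

@contextlib.contextmanager
def _announce(name, events):
    events.append('enter ' + name)
    try:
        yield
    finally:
        events.append('exit ' + name)

events = []
with _nested(_announce('store', events), _announce('cache', events)):
    events.append('body')
assert events == ['enter store', 'enter cache', 'body', 'exit cache', 'exit store']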
718
718
719 def consumev2(repo, fp, filecount, filesize):
719 def consumev2(repo, fp, filecount, filesize):
720 """Apply the contents from a version 2 streaming clone.
720 """Apply the contents from a version 2 streaming clone.
721
721
722 Data is read from an object that only needs to provide a ``read(size)``
722 Data is read from an object that only needs to provide a ``read(size)``
723 method.
723 method.
724 """
724 """
725 with repo.lock():
725 with repo.lock():
726 repo.ui.status(
726 repo.ui.status(
727 _(b'%d files to transfer, %s of data\n')
727 _(b'%d files to transfer, %s of data\n')
728 % (filecount, util.bytecount(filesize))
728 % (filecount, util.bytecount(filesize))
729 )
729 )
730
730
731 start = util.timer()
731 start = util.timer()
732 progress = repo.ui.makeprogress(
732 progress = repo.ui.makeprogress(
733 _(b'clone'), total=filesize, unit=_(b'bytes')
733 _(b'clone'), total=filesize, unit=_(b'bytes')
734 )
734 )
735 progress.update(0)
735 progress.update(0)
736
736
737 vfsmap = _makemap(repo)
737 vfsmap = _makemap(repo)
738 # we keep repo.vfs out of the map on purpose, there are too many dangers
738 # we keep repo.vfs out of the map on purpose, there are too many dangers
739 # there (eg: .hg/hgrc),
739 # there (eg: .hg/hgrc),
740 #
740 #
741 # this assert is duplicated (from _makemap) as an author might think this
741 # this assert is duplicated (from _makemap) as an author might think this
742 # is fine, while it really is not.
742 # is fine, while it really is not.
743 if repo.vfs in vfsmap.values():
743 if repo.vfs in vfsmap.values():
744 raise error.ProgrammingError(
744 raise error.ProgrammingError(
745 b'repo.vfs must not be added to vfsmap for security reasons'
745 b'repo.vfs must not be added to vfsmap for security reasons'
746 )
746 )
747
747
748 with repo.transaction(b'clone'):
748 with repo.transaction(b'clone'):
749 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
749 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
750 with nested(*ctxs):
750 with nested(*ctxs):
751 for i in range(filecount):
751 for i in range(filecount):
752 src = util.readexactly(fp, 1)
752 src = util.readexactly(fp, 1)
753 vfs = vfsmap[src]
753 vfs = vfsmap[src]
754 namelen = util.uvarintdecodestream(fp)
754 namelen = util.uvarintdecodestream(fp)
755 datalen = util.uvarintdecodestream(fp)
755 datalen = util.uvarintdecodestream(fp)
756
756
757 name = util.readexactly(fp, namelen)
757 name = util.readexactly(fp, namelen)
758
758
759 if repo.ui.debugflag:
759 if repo.ui.debugflag:
760 repo.ui.debug(
760 repo.ui.debug(
761 b'adding [%s] %s (%s)\n'
761 b'adding [%s] %s (%s)\n'
762 % (src, name, util.bytecount(datalen))
762 % (src, name, util.bytecount(datalen))
763 )
763 )
764
764
765 with vfs(name, b'w') as ofp:
765 with vfs(name, b'w') as ofp:
766 for chunk in util.filechunkiter(fp, limit=datalen):
766 for chunk in util.filechunkiter(fp, limit=datalen):
767 progress.increment(step=len(chunk))
767 progress.increment(step=len(chunk))
768 ofp.write(chunk)
768 ofp.write(chunk)
769
769
770 # force @filecache properties to be reloaded from
770 # force @filecache properties to be reloaded from
771 # streamclone-ed file at next access
771 # streamclone-ed file at next access
772 repo.invalidate(clearfilecache=True)
772 repo.invalidate(clearfilecache=True)
773
773
774 elapsed = util.timer() - start
774 elapsed = util.timer() - start
775 if elapsed <= 0:
775 if elapsed <= 0:
776 elapsed = 0.001
776 elapsed = 0.001
777 repo.ui.status(
777 repo.ui.status(
778 _(b'transferred %s in %.1f seconds (%s/sec)\n')
778 _(b'transferred %s in %.1f seconds (%s/sec)\n')
779 % (
779 % (
780 util.bytecount(progress.pos),
780 util.bytecount(progress.pos),
781 elapsed,
781 elapsed,
782 util.bytecount(progress.pos / elapsed),
782 util.bytecount(progress.pos / elapsed),
783 )
783 )
784 )
784 )
785 progress.complete()
785 progress.complete()
786
786
787
787
788 def applybundlev2(repo, fp, filecount, filesize, requirements):
788 def applybundlev2(repo, fp, filecount, filesize, requirements):
789 from . import localrepo
789 from . import localrepo
790
790
791 missingreqs = [r for r in requirements if r not in repo.supported]
791 missingreqs = [r for r in requirements if r not in repo.supported]
792 if missingreqs:
792 if missingreqs:
793 raise error.Abort(
793 raise error.Abort(
794 _(b'unable to apply stream clone: unsupported format: %s')
794 _(b'unable to apply stream clone: unsupported format: %s')
795 % b', '.join(sorted(missingreqs))
795 % b', '.join(sorted(missingreqs))
796 )
796 )
797
797
798 consumev2(repo, fp, filecount, filesize)
798 consumev2(repo, fp, filecount, filesize)
799
799
800 # new requirements = old non-format requirements +
800 # new requirements = old non-format requirements +
801 # new format-related remote requirements
801 # new format-related remote requirements
802 # requirements from the streamed-in repository
802 # requirements from the streamed-in repository
803 repo.requirements = set(requirements) | (
803 repo.requirements = set(requirements) | (
804 repo.requirements - repo.supportedformats
804 repo.requirements - repo.supportedformats
805 )
805 )
806 repo.svfs.options = localrepo.resolvestorevfsoptions(
806 repo.svfs.options = localrepo.resolvestorevfsoptions(
807 repo.ui, repo.requirements, repo.features
807 repo.ui, repo.requirements, repo.features
808 )
808 )
809 scmutil.writereporequirements(repo)
809 scmutil.writereporequirements(repo)
810
810
811
811
812 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
812 def _copy_files(src_vfs_map, dst_vfs_map, entries, progress):
813 hardlink = [True]
813 hardlink = [True]
814
814
815 def copy_used():
815 def copy_used():
816 hardlink[0] = False
816 hardlink[0] = False
817 progress.topic = _(b'copying')
817 progress.topic = _(b'copying')
818
818
819 for k, path, size in entries:
819 for k, path, size in entries:
820 src_vfs = src_vfs_map[k]
820 src_vfs = src_vfs_map[k]
821 dst_vfs = dst_vfs_map[k]
821 dst_vfs = dst_vfs_map[k]
822 src_path = src_vfs.join(path)
822 src_path = src_vfs.join(path)
823 dst_path = dst_vfs.join(path)
823 dst_path = dst_vfs.join(path)
824 # We cannot use dirname and makedirs of dst_vfs here because the store
824 # We cannot use dirname and makedirs of dst_vfs here because the store
825 # encoding confuses them. See issue 6581 for details.
825 # encoding confuses them. See issue 6581 for details.
826 dirname = os.path.dirname(dst_path)
826 dirname = os.path.dirname(dst_path)
827 if not os.path.exists(dirname):
827 if not os.path.exists(dirname):
828 util.makedirs(dirname)
828 util.makedirs(dirname)
829 dst_vfs.register_file(path)
829 dst_vfs.register_file(path)
830 # XXX we could use the #nb_bytes argument.
830 # XXX we could use the #nb_bytes argument.
831 util.copyfile(
831 util.copyfile(
832 src_path,
832 src_path,
833 dst_path,
833 dst_path,
834 hardlink=hardlink[0],
834 hardlink=hardlink[0],
835 no_hardlink_cb=copy_used,
835 no_hardlink_cb=copy_used,
836 check_fs_hardlink=False,
836 check_fs_hardlink=False,
837 )
837 )
838 progress.increment()
838 progress.increment()
839 return hardlink[0]
839 return hardlink[0]
840
840
841
841
842 def local_copy(src_repo, dest_repo):
842 def local_copy(src_repo, dest_repo):
843 """copy all content from one local repository to another
843 """copy all content from one local repository to another
844
844
845 This is useful for local clone"""
845 This is useful for local clone"""
846 src_store_requirements = {
846 src_store_requirements = {
847 r
847 r
848 for r in src_repo.requirements
848 for r in src_repo.requirements
849 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
849 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
850 }
850 }
851 dest_store_requirements = {
851 dest_store_requirements = {
852 r
852 r
853 for r in dest_repo.requirements
853 for r in dest_repo.requirements
854 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
854 if r not in requirementsmod.WORKING_DIR_REQUIREMENTS
855 }
855 }
856 assert src_store_requirements == dest_store_requirements
856 assert src_store_requirements == dest_store_requirements
857
857
858 with dest_repo.lock():
858 with dest_repo.lock():
859 with src_repo.lock():
859 with src_repo.lock():
860
860
861 # bookmarks are not integrated into the streaming as they might use the
861 # bookmarks are not integrated into the streaming as they might use the
862 # `repo.vfs` and there is too much sensitive data accessible
862 # `repo.vfs` and there is too much sensitive data accessible
863 # through `repo.vfs` to expose it to streaming clone.
863 # through `repo.vfs` to expose it to streaming clone.
864 src_book_vfs = bookmarks.bookmarksvfs(src_repo)
864 src_book_vfs = bookmarks.bookmarksvfs(src_repo)
865 srcbookmarks = src_book_vfs.join(b'bookmarks')
865 srcbookmarks = src_book_vfs.join(b'bookmarks')
866 bm_count = 0
866 bm_count = 0
867 if os.path.exists(srcbookmarks):
867 if os.path.exists(srcbookmarks):
868 bm_count = 1
868 bm_count = 1
869
869
870 entries, totalfilesize = _v2_walk(
870 entries, totalfilesize = _v2_walk(
871 src_repo,
871 src_repo,
872 includes=None,
872 includes=None,
873 excludes=None,
873 excludes=None,
874 includeobsmarkers=True,
874 includeobsmarkers=True,
875 )
875 )
876 src_vfs_map = _makemap(src_repo)
876 src_vfs_map = _makemap(src_repo)
877 dest_vfs_map = _makemap(dest_repo)
877 dest_vfs_map = _makemap(dest_repo)
878 progress = src_repo.ui.makeprogress(
878 progress = src_repo.ui.makeprogress(
879 topic=_(b'linking'),
879 topic=_(b'linking'),
880 total=len(entries) + bm_count,
880 total=len(entries) + bm_count,
881 unit=_(b'files'),
881 unit=_(b'files'),
882 )
882 )
883 # copy files
883 # copy files
884 #
884 #
885 # We could copy the full file while the source repository is locked
885 # We could copy the full file while the source repository is locked
886 # and the other one without the lock. However, in the linking case,
886 # and the other one without the lock. However, in the linking case,
887 # this would also requires checks that nobody is appending any data
887 # this would also requires checks that nobody is appending any data
888 # to the files while we do the clone, so this is not done yet. We
888 # to the files while we do the clone, so this is not done yet. We
889 # could do this blindly when copying files.
889 # could do this blindly when copying files.
890 files = ((k, path, size) for k, path, ftype, size in entries)
890 files = ((k, path, size) for k, path, ftype, size in entries)
891 hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
891 hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress)
892
892
893 # copy bookmarks over
893 # copy bookmarks over
894 if bm_count:
894 if bm_count:
895 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
895 dst_book_vfs = bookmarks.bookmarksvfs(dest_repo)
896 dstbookmarks = dst_book_vfs.join(b'bookmarks')
896 dstbookmarks = dst_book_vfs.join(b'bookmarks')
897 util.copyfile(srcbookmarks, dstbookmarks)
897 util.copyfile(srcbookmarks, dstbookmarks)
898 progress.complete()
898 progress.complete()
899 if hardlink:
899 if hardlink:
900 msg = b'linked %d files\n'
900 msg = b'linked %d files\n'
901 else:
901 else:
902 msg = b'copied %d files\n'
902 msg = b'copied %d files\n'
903 src_repo.ui.debug(msg % (len(entries) + bm_count))
903 src_repo.ui.debug(msg % (len(entries) + bm_count))
904
904
905 with dest_repo.transaction(b"localclone") as tr:
905 with dest_repo.transaction(b"localclone") as tr:
906 dest_repo.store.write(tr)
906 dest_repo.store.write(tr)
907
907
908 # clean up transaction files as they do not make sense
908 # clean up transaction files as they do not make sense
909 undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
909 undo_files = [(dest_repo.svfs, b'undo.backupfiles')]
910 undo_files.extend(dest_repo.undofiles())
910 undo_files.extend(dest_repo.undofiles())
911 for undovfs, undofile in undo_files:
911 for undovfs, undofile in undo_files:
912 try:
912 try:
913 undovfs.unlink(undofile)
913 undovfs.unlink(undofile)
914 except OSError as e:
914 except OSError as e:
915 if e.errno != errno.ENOENT:
915 if e.errno != errno.ENOENT:
916 msg = _(b'error removing %s: %s\n')
916 msg = _(b'error removing %s: %s\n')
917 path = undovfs.join(undofile)
917 path = undovfs.join(undofile)
918 e_msg = stringutil.forcebytestr(e)
918 e_msg = stringutil.forcebytestr(e)
919 msg %= (path, e_msg)
919 msg %= (path, e_msg)
920 dest_repo.ui.warn(msg)
920 dest_repo.ui.warn(msg)
@@ -1,413 +1,415 b''
1 #require serve
1 #require serve
2
2
3 This test is a duplicate of 'test-http.t', feel free to factor out
3 This test is a duplicate of 'test-http.t', feel free to factor out
4 parts that are not bundle1/bundle2 specific.
4 parts that are not bundle1/bundle2 specific.
5
5
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [devel]
7 > [devel]
8 > # This test is dedicated to interaction through old bundle
8 > # This test is dedicated to interaction through old bundle
9 > legacy.exchange = bundle1
9 > legacy.exchange = bundle1
10 > EOF
10 > EOF
11
11
12 $ hg init test
12 $ hg init test
13 $ cd test
13 $ cd test
14 $ echo foo>foo
14 $ echo foo>foo
15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
15 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
16 $ echo foo>foo.d/foo
16 $ echo foo>foo.d/foo
17 $ echo bar>foo.d/bAr.hg.d/BaR
17 $ echo bar>foo.d/bAr.hg.d/BaR
18 $ echo bar>foo.d/baR.d.hg/bAR
18 $ echo bar>foo.d/baR.d.hg/bAR
19 $ hg commit -A -m 1
19 $ hg commit -A -m 1
20 adding foo
20 adding foo
21 adding foo.d/bAr.hg.d/BaR
21 adding foo.d/bAr.hg.d/BaR
22 adding foo.d/baR.d.hg/bAR
22 adding foo.d/baR.d.hg/bAR
23 adding foo.d/foo
23 adding foo.d/foo
24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
24 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
25 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
26
26
27 Test server address cannot be reused
27 Test server address cannot be reused
28
28
29 $ hg serve -p $HGPORT1 2>&1
29 $ hg serve -p $HGPORT1 2>&1
30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
30 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
31 [255]
31 [255]
32
32
33 $ cd ..
33 $ cd ..
34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
34 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
35
35
36 clone via stream
36 clone via stream
37
37
38 #if no-reposimplestore
38 #if no-reposimplestore
39 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
39 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
40 streaming all changes
40 streaming all changes
41 6 files to transfer, 606 bytes of data (no-zstd !)
41 6 files to transfer, 606 bytes of data (no-zstd !)
42 6 files to transfer, 608 bytes of data (zstd !)
42 6 files to transfer, 608 bytes of data (zstd !)
43 transferred * bytes in * seconds (*/sec) (glob)
43 transferred * bytes in * seconds (*/sec) (glob)
44 searching for changes
44 searching for changes
45 no changes found
45 no changes found
46 updating to branch default
46 updating to branch default
47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
47 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
48 $ hg verify -R copy
48 $ hg verify -R copy
49 checking changesets
49 checking changesets
50 checking manifests
50 checking manifests
51 crosschecking files in changesets and manifests
51 crosschecking files in changesets and manifests
52 checking files
52 checking files
53 checked 1 changesets with 4 changes to 4 files
53 checked 1 changesets with 4 changes to 4 files
54 #endif
54 #endif
55
55
56 try to clone via stream, should use pull instead
56 try to clone via stream, should use pull instead
57
57
58 $ hg clone --stream http://localhost:$HGPORT1/ copy2
58 $ hg clone --stream http://localhost:$HGPORT1/ copy2
59 warning: stream clone requested but server has them disabled
59 warning: stream clone requested but server has them disabled
60 requesting all changes
60 requesting all changes
61 adding changesets
61 adding changesets
62 adding manifests
62 adding manifests
63 adding file changes
63 adding file changes
64 added 1 changesets with 4 changes to 4 files
64 added 1 changesets with 4 changes to 4 files
65 new changesets 8b6053c928fe
65 new changesets 8b6053c928fe
66 updating to branch default
66 updating to branch default
67 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
67 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
68
68
69 try to clone via stream but missing requirements, so should use pull instead
69 try to clone via stream but missing requirements, so should use pull instead
70
70
71 $ cat > $TESTTMP/removesupportedformat.py << EOF
71 $ cat > $TESTTMP/removesupportedformat.py << EOF
72 > from mercurial import localrepo
72 > from mercurial import localrepo
73 > def extsetup(ui):
73 > def reposetup(ui, repo):
74 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
74 > local = repo.local()
75 > if local is not None:
76 > local.supported.remove(b'generaldelta')
75 > EOF
77 > EOF
76
78
77 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
79 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
78 warning: stream clone requested but client is missing requirements: generaldelta
80 warning: stream clone requested but client is missing requirements: generaldelta
79 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
81 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
80 requesting all changes
82 requesting all changes
81 adding changesets
83 adding changesets
82 adding manifests
84 adding manifests
83 adding file changes
85 adding file changes
84 added 1 changesets with 4 changes to 4 files
86 added 1 changesets with 4 changes to 4 files
85 new changesets 8b6053c928fe
87 new changesets 8b6053c928fe
86 updating to branch default
88 updating to branch default
87 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
88
90
89 clone via pull
91 clone via pull
90
92
91 $ hg clone http://localhost:$HGPORT1/ copy-pull
93 $ hg clone http://localhost:$HGPORT1/ copy-pull
92 requesting all changes
94 requesting all changes
93 adding changesets
95 adding changesets
94 adding manifests
96 adding manifests
95 adding file changes
97 adding file changes
96 added 1 changesets with 4 changes to 4 files
98 added 1 changesets with 4 changes to 4 files
97 new changesets 8b6053c928fe
99 new changesets 8b6053c928fe
98 updating to branch default
100 updating to branch default
99 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
101 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
100 $ hg verify -R copy-pull
102 $ hg verify -R copy-pull
101 checking changesets
103 checking changesets
102 checking manifests
104 checking manifests
103 crosschecking files in changesets and manifests
105 crosschecking files in changesets and manifests
104 checking files
106 checking files
105 checked 1 changesets with 4 changes to 4 files
107 checked 1 changesets with 4 changes to 4 files
106 $ cd test
108 $ cd test
107 $ echo bar > bar
109 $ echo bar > bar
108 $ hg commit -A -d '1 0' -m 2
110 $ hg commit -A -d '1 0' -m 2
109 adding bar
111 adding bar
110 $ cd ..
112 $ cd ..
111
113
112 clone over http with --update
114 clone over http with --update
113
115
114 $ hg clone http://localhost:$HGPORT1/ updated --update 0
116 $ hg clone http://localhost:$HGPORT1/ updated --update 0
115 requesting all changes
117 requesting all changes
116 adding changesets
118 adding changesets
117 adding manifests
119 adding manifests
118 adding file changes
120 adding file changes
119 added 2 changesets with 5 changes to 5 files
121 added 2 changesets with 5 changes to 5 files
120 new changesets 8b6053c928fe:5fed3813f7f5
122 new changesets 8b6053c928fe:5fed3813f7f5
121 updating to branch default
123 updating to branch default
122 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
124 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
123 $ hg log -r . -R updated
125 $ hg log -r . -R updated
124 changeset: 0:8b6053c928fe
126 changeset: 0:8b6053c928fe
125 user: test
127 user: test
126 date: Thu Jan 01 00:00:00 1970 +0000
128 date: Thu Jan 01 00:00:00 1970 +0000
127 summary: 1
129 summary: 1
128
130
129 $ rm -rf updated
131 $ rm -rf updated
130
132
131 incoming via HTTP
133 incoming via HTTP
132
134
133 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
135 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
134 adding changesets
136 adding changesets
135 adding manifests
137 adding manifests
136 adding file changes
138 adding file changes
137 added 1 changesets with 4 changes to 4 files
139 added 1 changesets with 4 changes to 4 files
138 new changesets 8b6053c928fe
140 new changesets 8b6053c928fe
139 updating to branch default
141 updating to branch default
140 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
142 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
141 $ cd partial
143 $ cd partial
142 $ touch LOCAL
144 $ touch LOCAL
143 $ hg ci -qAm LOCAL
145 $ hg ci -qAm LOCAL
144 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
146 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
145 comparing with http://localhost:$HGPORT1/
147 comparing with http://localhost:$HGPORT1/
146 searching for changes
148 searching for changes
147 2
149 2
148 $ cd ..
150 $ cd ..
149
151
150 pull
152 pull
151
153
152 $ cd copy-pull
154 $ cd copy-pull
153 $ cat >> .hg/hgrc <<EOF
155 $ cat >> .hg/hgrc <<EOF
154 > [hooks]
156 > [hooks]
155 > changegroup = sh -c "printenv.py --line changegroup"
157 > changegroup = sh -c "printenv.py --line changegroup"
156 > EOF
158 > EOF
157 $ hg pull
159 $ hg pull
158 pulling from http://localhost:$HGPORT1/
160 pulling from http://localhost:$HGPORT1/
159 searching for changes
161 searching for changes
160 adding changesets
162 adding changesets
161 adding manifests
163 adding manifests
162 adding file changes
164 adding file changes
163 added 1 changesets with 1 changes to 1 files
165 added 1 changesets with 1 changes to 1 files
164 new changesets 5fed3813f7f5
166 new changesets 5fed3813f7f5
165 changegroup hook: HG_HOOKNAME=changegroup
167 changegroup hook: HG_HOOKNAME=changegroup
166 HG_HOOKTYPE=changegroup
168 HG_HOOKTYPE=changegroup
167 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
169 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
168 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
170 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
169 HG_SOURCE=pull
171 HG_SOURCE=pull
170 HG_TXNID=TXN:$ID$
172 HG_TXNID=TXN:$ID$
171 HG_TXNNAME=pull
173 HG_TXNNAME=pull
172 http://localhost:$HGPORT1/
174 http://localhost:$HGPORT1/
173 HG_URL=http://localhost:$HGPORT1/
175 HG_URL=http://localhost:$HGPORT1/
174
176
175 (run 'hg update' to get a working copy)
177 (run 'hg update' to get a working copy)
176 $ cd ..
178 $ cd ..
177
179
178 clone from invalid URL
180 clone from invalid URL
179
181
180 $ hg clone http://localhost:$HGPORT/bad
182 $ hg clone http://localhost:$HGPORT/bad
181 abort: HTTP Error 404: Not Found
183 abort: HTTP Error 404: Not Found
182 [100]
184 [100]
183
185
184 test http authentication
186 test http authentication
185 + use the same server to test server side streaming preference
187 + use the same server to test server side streaming preference
186
188
187 $ cd test
189 $ cd test
188
190
189 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
191 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
190 > --pid-file=pid --config server.preferuncompressed=True \
192 > --pid-file=pid --config server.preferuncompressed=True \
191 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
193 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
192 $ cat pid >> $DAEMON_PIDS
194 $ cat pid >> $DAEMON_PIDS
193
195
194 $ cat << EOF > get_pass.py
196 $ cat << EOF > get_pass.py
195 > from mercurial import util
197 > from mercurial import util
196 > def newgetpass():
198 > def newgetpass():
197 > return "pass"
199 > return "pass"
198 > util.get_password = newgetpass
200 > util.get_password = newgetpass
199 > EOF
201 > EOF
200
202
201 $ hg id http://localhost:$HGPORT2/
203 $ hg id http://localhost:$HGPORT2/
202 abort: http authorization required for http://localhost:$HGPORT2/
204 abort: http authorization required for http://localhost:$HGPORT2/
203 [255]
205 [255]
204 $ hg id http://localhost:$HGPORT2/
206 $ hg id http://localhost:$HGPORT2/
205 abort: http authorization required for http://localhost:$HGPORT2/
207 abort: http authorization required for http://localhost:$HGPORT2/
206 [255]
208 [255]
207 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
209 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
208 http authorization required for http://localhost:$HGPORT2/
210 http authorization required for http://localhost:$HGPORT2/
209 realm: mercurial
211 realm: mercurial
210 user: user
212 user: user
211 password: 5fed3813f7f5
213 password: 5fed3813f7f5
212 $ hg id http://user:pass@localhost:$HGPORT2/
214 $ hg id http://user:pass@localhost:$HGPORT2/
213 5fed3813f7f5
215 5fed3813f7f5
214 $ echo '[auth]' >> .hg/hgrc
216 $ echo '[auth]' >> .hg/hgrc
215 $ echo 'l.schemes=http' >> .hg/hgrc
217 $ echo 'l.schemes=http' >> .hg/hgrc
216 $ echo 'l.prefix=lo' >> .hg/hgrc
218 $ echo 'l.prefix=lo' >> .hg/hgrc
217 $ echo 'l.username=user' >> .hg/hgrc
219 $ echo 'l.username=user' >> .hg/hgrc
218 $ echo 'l.password=pass' >> .hg/hgrc
220 $ echo 'l.password=pass' >> .hg/hgrc
219 $ hg id http://localhost:$HGPORT2/
221 $ hg id http://localhost:$HGPORT2/
220 5fed3813f7f5
222 5fed3813f7f5
221 $ hg id http://localhost:$HGPORT2/
223 $ hg id http://localhost:$HGPORT2/
222 5fed3813f7f5
224 5fed3813f7f5
223 $ hg id http://user@localhost:$HGPORT2/
225 $ hg id http://user@localhost:$HGPORT2/
224 5fed3813f7f5
226 5fed3813f7f5
225
227
226 #if no-reposimplestore
228 #if no-reposimplestore
227 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
229 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
228 streaming all changes
230 streaming all changes
229 7 files to transfer, 916 bytes of data (no-zstd !)
231 7 files to transfer, 916 bytes of data (no-zstd !)
230 7 files to transfer, 919 bytes of data (zstd !)
232 7 files to transfer, 919 bytes of data (zstd !)
231 transferred * bytes in * seconds (*/sec) (glob)
233 transferred * bytes in * seconds (*/sec) (glob)
232 searching for changes
234 searching for changes
233 no changes found
235 no changes found
234 updating to branch default
236 updating to branch default
235 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
237 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
236 #endif
238 #endif
237
239
238 --pull should override server's preferuncompressed
240 --pull should override server's preferuncompressed
239
241
240 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
242 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
241 requesting all changes
243 requesting all changes
242 adding changesets
244 adding changesets
243 adding manifests
245 adding manifests
244 adding file changes
246 adding file changes
245 added 2 changesets with 5 changes to 5 files
247 added 2 changesets with 5 changes to 5 files
246 new changesets 8b6053c928fe:5fed3813f7f5
248 new changesets 8b6053c928fe:5fed3813f7f5
247 updating to branch default
249 updating to branch default
248 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
250 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
249
251
250 $ hg id http://user2@localhost:$HGPORT2/
252 $ hg id http://user2@localhost:$HGPORT2/
251 abort: http authorization required for http://localhost:$HGPORT2/
253 abort: http authorization required for http://localhost:$HGPORT2/
252 [255]
254 [255]
253 $ hg id http://user:pass2@localhost:$HGPORT2/
255 $ hg id http://user:pass2@localhost:$HGPORT2/
254 abort: HTTP Error 403: no
256 abort: HTTP Error 403: no
255 [100]
257 [100]
256
258
257 $ hg -R dest-pull tag -r tip top
259 $ hg -R dest-pull tag -r tip top
258 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
260 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
259 pushing to http://user:***@localhost:$HGPORT2/
261 pushing to http://user:***@localhost:$HGPORT2/
260 searching for changes
262 searching for changes
261 remote: adding changesets
263 remote: adding changesets
262 remote: adding manifests
264 remote: adding manifests
263 remote: adding file changes
265 remote: adding file changes
264 remote: added 1 changesets with 1 changes to 1 files
266 remote: added 1 changesets with 1 changes to 1 files
265 $ hg rollback -q
267 $ hg rollback -q
266
268
267 $ sed 's/.*] "/"/' < ../access.log
269 $ sed 's/.*] "/"/' < ../access.log
268 "GET /?cmd=capabilities HTTP/1.1" 401 -
270 "GET /?cmd=capabilities HTTP/1.1" 401 -
269 "GET /?cmd=capabilities HTTP/1.1" 401 -
271 "GET /?cmd=capabilities HTTP/1.1" 401 -
270 "GET /?cmd=capabilities HTTP/1.1" 401 -
272 "GET /?cmd=capabilities HTTP/1.1" 401 -
271 "GET /?cmd=capabilities HTTP/1.1" 200 -
273 "GET /?cmd=capabilities HTTP/1.1" 200 -
272 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
274 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
273 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
275 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
274 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
276 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
275 "GET /?cmd=capabilities HTTP/1.1" 401 -
277 "GET /?cmd=capabilities HTTP/1.1" 401 -
276 "GET /?cmd=capabilities HTTP/1.1" 200 -
278 "GET /?cmd=capabilities HTTP/1.1" 200 -
277 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
279 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
278 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
280 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
279 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
281 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
280 "GET /?cmd=capabilities HTTP/1.1" 401 -
282 "GET /?cmd=capabilities HTTP/1.1" 401 -
281 "GET /?cmd=capabilities HTTP/1.1" 200 -
283 "GET /?cmd=capabilities HTTP/1.1" 200 -
282 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
284 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
283 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
285 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
284 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
286 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
285 "GET /?cmd=capabilities HTTP/1.1" 401 -
287 "GET /?cmd=capabilities HTTP/1.1" 401 -
286 "GET /?cmd=capabilities HTTP/1.1" 200 -
288 "GET /?cmd=capabilities HTTP/1.1" 200 -
287 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
289 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
288 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
290 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
289 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
291 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
290 "GET /?cmd=capabilities HTTP/1.1" 401 -
292 "GET /?cmd=capabilities HTTP/1.1" 401 -
291 "GET /?cmd=capabilities HTTP/1.1" 200 -
293 "GET /?cmd=capabilities HTTP/1.1" 200 -
292 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
294 "GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
293 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
295 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
294 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
296 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
295 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
297 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
296 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
298 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
297 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
299 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
298 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
300 "GET /?cmd=stream_out HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
299 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
301 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
300 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
302 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
301 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
303 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
302 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
304 "GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
303 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
305 "GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
304 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
306 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
305 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
307 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
306 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
308 "GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
307 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
309 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
308 "GET /?cmd=capabilities HTTP/1.1" 401 -
310 "GET /?cmd=capabilities HTTP/1.1" 401 -
309 "GET /?cmd=capabilities HTTP/1.1" 401 -
311 "GET /?cmd=capabilities HTTP/1.1" 401 -
310 "GET /?cmd=capabilities HTTP/1.1" 403 -
312 "GET /?cmd=capabilities HTTP/1.1" 403 -
311 "GET /?cmd=capabilities HTTP/1.1" 401 -
313 "GET /?cmd=capabilities HTTP/1.1" 401 -
312 "GET /?cmd=capabilities HTTP/1.1" 200 -
314 "GET /?cmd=capabilities HTTP/1.1" 200 -
313 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 "GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
316 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
316 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
318 "GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
317 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
319 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
318 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
320 "POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=686173686564+5eb5abfefeea63c80dd7553bcc3783f37e0c5524* (glob)
319 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
321 "GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
320
322
321 $ cd ..
323 $ cd ..
322
324
323 clone of serve with repo in root and unserved subrepo (issue2970)
325 clone of serve with repo in root and unserved subrepo (issue2970)
324
326
325 $ hg --cwd test init sub
327 $ hg --cwd test init sub
326 $ echo empty > test/sub/empty
328 $ echo empty > test/sub/empty
327 $ hg --cwd test/sub add empty
329 $ hg --cwd test/sub add empty
328 $ hg --cwd test/sub commit -qm 'add empty'
330 $ hg --cwd test/sub commit -qm 'add empty'
329 $ hg --cwd test/sub tag -r 0 something
331 $ hg --cwd test/sub tag -r 0 something
330 $ echo sub = sub > test/.hgsub
332 $ echo sub = sub > test/.hgsub
331 $ hg --cwd test add .hgsub
333 $ hg --cwd test add .hgsub
332 $ hg --cwd test commit -qm 'add subrepo'
334 $ hg --cwd test commit -qm 'add subrepo'
333 $ hg clone http://localhost:$HGPORT noslash-clone
335 $ hg clone http://localhost:$HGPORT noslash-clone
334 requesting all changes
336 requesting all changes
335 adding changesets
337 adding changesets
336 adding manifests
338 adding manifests
337 adding file changes
339 adding file changes
338 added 3 changesets with 7 changes to 7 files
340 added 3 changesets with 7 changes to 7 files
339 new changesets 8b6053c928fe:56f9bc90cce6
341 new changesets 8b6053c928fe:56f9bc90cce6
340 updating to branch default
342 updating to branch default
341 cloning subrepo sub from http://localhost:$HGPORT/sub
343 cloning subrepo sub from http://localhost:$HGPORT/sub
342 abort: HTTP Error 404: Not Found
344 abort: HTTP Error 404: Not Found
343 [100]
345 [100]
344 $ hg clone http://localhost:$HGPORT/ slash-clone
346 $ hg clone http://localhost:$HGPORT/ slash-clone
345 requesting all changes
347 requesting all changes
346 adding changesets
348 adding changesets
347 adding manifests
349 adding manifests
348 adding file changes
350 adding file changes
349 added 3 changesets with 7 changes to 7 files
351 added 3 changesets with 7 changes to 7 files
350 new changesets 8b6053c928fe:56f9bc90cce6
352 new changesets 8b6053c928fe:56f9bc90cce6
351 updating to branch default
353 updating to branch default
352 cloning subrepo sub from http://localhost:$HGPORT/sub
354 cloning subrepo sub from http://localhost:$HGPORT/sub
353 abort: HTTP Error 404: Not Found
355 abort: HTTP Error 404: Not Found
354 [100]
356 [100]
355
357
356 check error log
358 check error log
357
359
358 $ cat error.log
360 $ cat error.log
359
361
360 Check error reporting while pulling/cloning
362 Check error reporting while pulling/cloning
361
363
362 $ $RUNTESTDIR/killdaemons.py
364 $ $RUNTESTDIR/killdaemons.py
363 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
365 $ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
364 $ cat hg3.pid >> $DAEMON_PIDS
366 $ cat hg3.pid >> $DAEMON_PIDS
365 $ hg clone http://localhost:$HGPORT/ abort-clone
367 $ hg clone http://localhost:$HGPORT/ abort-clone
366 requesting all changes
368 requesting all changes
367 abort: remote error:
369 abort: remote error:
368 this is an exercise
370 this is an exercise
369 [100]
371 [100]
370 $ cat error.log
372 $ cat error.log
371
373
372 disable pull-based clones
374 disable pull-based clones
373
375
374 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
376 $ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
375 $ cat hg4.pid >> $DAEMON_PIDS
377 $ cat hg4.pid >> $DAEMON_PIDS
376 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
378 $ hg clone http://localhost:$HGPORT1/ disable-pull-clone
377 requesting all changes
379 requesting all changes
378 abort: remote error:
380 abort: remote error:
379 server has pull-based clones disabled
381 server has pull-based clones disabled
380 [100]
382 [100]
381
383
382 #if no-reposimplestore
384 #if no-reposimplestore
383 ... but keep stream clones working
385 ... but keep stream clones working
384
386
385 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
387 $ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
386 streaming all changes
388 streaming all changes
387 * files to transfer, * of data (glob)
389 * files to transfer, * of data (glob)
388 transferred 1.36 KB in * seconds (* */sec) (glob) (no-zstd !)
390 transferred 1.36 KB in * seconds (* */sec) (glob) (no-zstd !)
389 transferred 1.38 KB in * seconds (* */sec) (glob) (zstd !)
391 transferred 1.38 KB in * seconds (* */sec) (glob) (zstd !)
390 searching for changes
392 searching for changes
391 no changes found
393 no changes found
392 #endif
394 #endif
393
395
394 ... and also keep partial clones and pulls working
396 ... and also keep partial clones and pulls working
395 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
397 $ hg clone http://localhost:$HGPORT1 --rev 0 test-partial-clone
396 adding changesets
398 adding changesets
397 adding manifests
399 adding manifests
398 adding file changes
400 adding file changes
399 added 1 changesets with 4 changes to 4 files
401 added 1 changesets with 4 changes to 4 files
400 new changesets 8b6053c928fe
402 new changesets 8b6053c928fe
401 updating to branch default
403 updating to branch default
402 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
404 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
403 $ hg pull -R test-partial-clone
405 $ hg pull -R test-partial-clone
404 pulling from http://localhost:$HGPORT1/
406 pulling from http://localhost:$HGPORT1/
405 searching for changes
407 searching for changes
406 adding changesets
408 adding changesets
407 adding manifests
409 adding manifests
408 adding file changes
410 adding file changes
409 added 2 changesets with 3 changes to 3 files
411 added 2 changesets with 3 changes to 3 files
410 new changesets 5fed3813f7f5:56f9bc90cce6
412 new changesets 5fed3813f7f5:56f9bc90cce6
411 (run 'hg update' to get a working copy)
413 (run 'hg update' to get a working copy)
412
414
413 $ cat error.log
415 $ cat error.log
@@ -1,617 +1,619 b''
1 #require serve
1 #require serve
2
2
3 $ hg init test
3 $ hg init test
4 $ cd test
4 $ cd test
5 $ echo foo>foo
5 $ echo foo>foo
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
6 $ mkdir foo.d foo.d/bAr.hg.d foo.d/baR.d.hg
7 $ echo foo>foo.d/foo
7 $ echo foo>foo.d/foo
8 $ echo bar>foo.d/bAr.hg.d/BaR
8 $ echo bar>foo.d/bAr.hg.d/BaR
9 $ echo bar>foo.d/baR.d.hg/bAR
9 $ echo bar>foo.d/baR.d.hg/bAR
10 $ hg commit -A -m 1
10 $ hg commit -A -m 1
11 adding foo
11 adding foo
12 adding foo.d/bAr.hg.d/BaR
12 adding foo.d/bAr.hg.d/BaR
13 adding foo.d/baR.d.hg/bAR
13 adding foo.d/baR.d.hg/bAR
14 adding foo.d/foo
14 adding foo.d/foo
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
15 $ hg serve -p $HGPORT -d --pid-file=../hg1.pid -E ../error.log
16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
16 $ hg serve --config server.uncompressed=False -p $HGPORT1 -d --pid-file=../hg2.pid
17
17
18 Test server address cannot be reused
18 Test server address cannot be reused
19
19
20 $ hg serve -p $HGPORT1 2>&1
20 $ hg serve -p $HGPORT1 2>&1
21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
21 abort: cannot start server at 'localhost:$HGPORT1': $EADDRINUSE$
22 [255]
22 [255]
23
23
24 $ cd ..
24 $ cd ..
25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
25 $ cat hg1.pid hg2.pid >> $DAEMON_PIDS
26
26
27 clone via stream
27 clone via stream
28
28
29 #if no-reposimplestore
29 #if no-reposimplestore
30 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
30 $ hg clone --stream http://localhost:$HGPORT/ copy 2>&1
31 streaming all changes
31 streaming all changes
32 9 files to transfer, 715 bytes of data (no-zstd !)
32 9 files to transfer, 715 bytes of data (no-zstd !)
33 9 files to transfer, 717 bytes of data (zstd !)
33 9 files to transfer, 717 bytes of data (zstd !)
34 transferred * bytes in * seconds (*/sec) (glob)
34 transferred * bytes in * seconds (*/sec) (glob)
35 updating to branch default
35 updating to branch default
36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
36 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
37 $ hg verify -R copy
37 $ hg verify -R copy
38 checking changesets
38 checking changesets
39 checking manifests
39 checking manifests
40 crosschecking files in changesets and manifests
40 crosschecking files in changesets and manifests
41 checking files
41 checking files
42 checked 1 changesets with 4 changes to 4 files
42 checked 1 changesets with 4 changes to 4 files
43 #endif
43 #endif
44
44
45 try to clone via stream, should use pull instead
45 try to clone via stream, should use pull instead
46
46
47 $ hg clone --stream http://localhost:$HGPORT1/ copy2
47 $ hg clone --stream http://localhost:$HGPORT1/ copy2
48 warning: stream clone requested but server has them disabled
48 warning: stream clone requested but server has them disabled
49 requesting all changes
49 requesting all changes
50 adding changesets
50 adding changesets
51 adding manifests
51 adding manifests
52 adding file changes
52 adding file changes
53 added 1 changesets with 4 changes to 4 files
53 added 1 changesets with 4 changes to 4 files
54 new changesets 8b6053c928fe
54 new changesets 8b6053c928fe
55 updating to branch default
55 updating to branch default
56 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
56 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
57
57
58 try to clone via stream but missing requirements, so should use pull instead
58 try to clone via stream but missing requirements, so should use pull instead
59
59
60 $ cat > $TESTTMP/removesupportedformat.py << EOF
60 $ cat > $TESTTMP/removesupportedformat.py << EOF
61 > from mercurial import localrepo
61 > from mercurial import localrepo
62 > def extsetup(ui):
62 > def reposetup(ui, repo):
63 > localrepo.localrepository.supportedformats.remove(b'generaldelta')
63 > local = repo.local()
64 > if local is not None:
65 > local.supported.remove(b'generaldelta')
64 > EOF
66 > EOF
65
67
66 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
68 $ hg clone --config extensions.rsf=$TESTTMP/removesupportedformat.py --stream http://localhost:$HGPORT/ copy3
67 warning: stream clone requested but client is missing requirements: generaldelta
69 warning: stream clone requested but client is missing requirements: generaldelta
68 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
70 (see https://www.mercurial-scm.org/wiki/MissingRequirement for more information)
69 requesting all changes
71 requesting all changes
70 adding changesets
72 adding changesets
71 adding manifests
73 adding manifests
72 adding file changes
74 adding file changes
73 added 1 changesets with 4 changes to 4 files
75 added 1 changesets with 4 changes to 4 files
74 new changesets 8b6053c928fe
76 new changesets 8b6053c928fe
75 updating to branch default
77 updating to branch default
76 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
78 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
77
79
78 clone via pull
80 clone via pull
79
81
80 $ hg clone http://localhost:$HGPORT1/ copy-pull
82 $ hg clone http://localhost:$HGPORT1/ copy-pull
81 requesting all changes
83 requesting all changes
82 adding changesets
84 adding changesets
83 adding manifests
85 adding manifests
84 adding file changes
86 adding file changes
85 added 1 changesets with 4 changes to 4 files
87 added 1 changesets with 4 changes to 4 files
86 new changesets 8b6053c928fe
88 new changesets 8b6053c928fe
87 updating to branch default
89 updating to branch default
88 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
90 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
89 $ hg verify -R copy-pull
91 $ hg verify -R copy-pull
90 checking changesets
92 checking changesets
91 checking manifests
93 checking manifests
92 crosschecking files in changesets and manifests
94 crosschecking files in changesets and manifests
93 checking files
95 checking files
94 checked 1 changesets with 4 changes to 4 files
96 checked 1 changesets with 4 changes to 4 files
95 $ cd test
97 $ cd test
96 $ echo bar > bar
98 $ echo bar > bar
97 $ hg commit -A -d '1 0' -m 2
99 $ hg commit -A -d '1 0' -m 2
98 adding bar
100 adding bar
99 $ cd ..
101 $ cd ..
100
102
101 clone over http with --update
103 clone over http with --update
102
104
103 $ hg clone http://localhost:$HGPORT1/ updated --update 0
105 $ hg clone http://localhost:$HGPORT1/ updated --update 0
104 requesting all changes
106 requesting all changes
105 adding changesets
107 adding changesets
106 adding manifests
108 adding manifests
107 adding file changes
109 adding file changes
108 added 2 changesets with 5 changes to 5 files
110 added 2 changesets with 5 changes to 5 files
109 new changesets 8b6053c928fe:5fed3813f7f5
111 new changesets 8b6053c928fe:5fed3813f7f5
110 updating to branch default
112 updating to branch default
111 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
113 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
112 $ hg log -r . -R updated
114 $ hg log -r . -R updated
113 changeset: 0:8b6053c928fe
115 changeset: 0:8b6053c928fe
114 user: test
116 user: test
115 date: Thu Jan 01 00:00:00 1970 +0000
117 date: Thu Jan 01 00:00:00 1970 +0000
116 summary: 1
118 summary: 1
117
119
118 $ rm -rf updated
120 $ rm -rf updated
119
121
120 incoming via HTTP
122 incoming via HTTP
121
123
122 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
124 $ hg clone http://localhost:$HGPORT1/ --rev 0 partial
123 adding changesets
125 adding changesets
124 adding manifests
126 adding manifests
125 adding file changes
127 adding file changes
126 added 1 changesets with 4 changes to 4 files
128 added 1 changesets with 4 changes to 4 files
127 new changesets 8b6053c928fe
129 new changesets 8b6053c928fe
128 updating to branch default
130 updating to branch default
129 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
131 4 files updated, 0 files merged, 0 files removed, 0 files unresolved
130 $ cd partial
132 $ cd partial
131 $ touch LOCAL
133 $ touch LOCAL
132 $ hg ci -qAm LOCAL
134 $ hg ci -qAm LOCAL
133 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
135 $ hg incoming http://localhost:$HGPORT1/ --template '{desc}\n'
134 comparing with http://localhost:$HGPORT1/
136 comparing with http://localhost:$HGPORT1/
135 searching for changes
137 searching for changes
136 2
138 2
137 $ cd ..
139 $ cd ..
138
140
139 pull
141 pull
140
142
141 $ cd copy-pull
143 $ cd copy-pull
142 $ cat >> .hg/hgrc <<EOF
144 $ cat >> .hg/hgrc <<EOF
143 > [hooks]
145 > [hooks]
144 > changegroup = sh -c "printenv.py --line changegroup"
146 > changegroup = sh -c "printenv.py --line changegroup"
145 > EOF
147 > EOF
146 $ hg pull
148 $ hg pull
147 pulling from http://localhost:$HGPORT1/
149 pulling from http://localhost:$HGPORT1/
148 searching for changes
150 searching for changes
149 adding changesets
151 adding changesets
150 adding manifests
152 adding manifests
151 adding file changes
153 adding file changes
152 added 1 changesets with 1 changes to 1 files
154 added 1 changesets with 1 changes to 1 files
153 new changesets 5fed3813f7f5
155 new changesets 5fed3813f7f5
154 changegroup hook: HG_HOOKNAME=changegroup
156 changegroup hook: HG_HOOKNAME=changegroup
155 HG_HOOKTYPE=changegroup
157 HG_HOOKTYPE=changegroup
156 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
158 HG_NODE=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
157 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
159 HG_NODE_LAST=5fed3813f7f5e1824344fdc9cf8f63bb662c292d
158 HG_SOURCE=pull
160 HG_SOURCE=pull
159 HG_TXNID=TXN:$ID$
161 HG_TXNID=TXN:$ID$
160 HG_TXNNAME=pull
162 HG_TXNNAME=pull
161 http://localhost:$HGPORT1/
163 http://localhost:$HGPORT1/
162 HG_URL=http://localhost:$HGPORT1/
164 HG_URL=http://localhost:$HGPORT1/
163
165
164 (run 'hg update' to get a working copy)
166 (run 'hg update' to get a working copy)
165 $ cd ..
167 $ cd ..
166
168
167 clone from invalid URL
169 clone from invalid URL
168
170
169 $ hg clone http://localhost:$HGPORT/bad
171 $ hg clone http://localhost:$HGPORT/bad
170 abort: HTTP Error 404: Not Found
172 abort: HTTP Error 404: Not Found
171 [100]
173 [100]
172
174
173 test http authentication
175 test http authentication
174 + use the same server to test server side streaming preference
176 + use the same server to test server side streaming preference
175
177
176 $ cd test
178 $ cd test
177
179
178 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
180 $ hg serve --config extensions.x=$TESTDIR/httpserverauth.py -p $HGPORT2 -d \
179 > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \
181 > --pid-file=pid --config server.preferuncompressed=True -E ../errors2.log \
180 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
182 > --config web.push_ssl=False --config web.allow_push=* -A ../access.log
181 $ cat pid >> $DAEMON_PIDS
183 $ cat pid >> $DAEMON_PIDS
182
184
183 $ cat << EOF > get_pass.py
185 $ cat << EOF > get_pass.py
184 > from mercurial import util
186 > from mercurial import util
185 > def newgetpass():
187 > def newgetpass():
186 > return "pass"
188 > return "pass"
187 > util.get_password = newgetpass
189 > util.get_password = newgetpass
188 > EOF
190 > EOF
189
191
190 $ hg id http://localhost:$HGPORT2/
192 $ hg id http://localhost:$HGPORT2/
191 abort: http authorization required for http://localhost:$HGPORT2/
193 abort: http authorization required for http://localhost:$HGPORT2/
192 [255]
194 [255]
193 $ hg id http://localhost:$HGPORT2/
195 $ hg id http://localhost:$HGPORT2/
194 abort: http authorization required for http://localhost:$HGPORT2/
196 abort: http authorization required for http://localhost:$HGPORT2/
195 [255]
197 [255]
196 $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/
198 $ hg id --config ui.interactive=true --debug http://localhost:$HGPORT2/
197 using http://localhost:$HGPORT2/
199 using http://localhost:$HGPORT2/
198 sending capabilities command
200 sending capabilities command
199 http authorization required for http://localhost:$HGPORT2/
201 http authorization required for http://localhost:$HGPORT2/
200 realm: mercurial
202 realm: mercurial
201 user: abort: response expected
203 user: abort: response expected
202 [255]
204 [255]
203 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
205 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
204 >
206 >
205 > EOF
207 > EOF
206 using http://localhost:$HGPORT2/
208 using http://localhost:$HGPORT2/
207 sending capabilities command
209 sending capabilities command
208 http authorization required for http://localhost:$HGPORT2/
210 http authorization required for http://localhost:$HGPORT2/
209 realm: mercurial
211 realm: mercurial
210 user:
212 user:
211 password: abort: response expected
213 password: abort: response expected
212 [255]
214 [255]
213 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
215 $ cat <<'EOF' | hg id --config ui.interactive=true --config ui.nontty=true --debug http://localhost:$HGPORT2/
214 >
216 >
215 >
217 >
216 > EOF
218 > EOF
217 using http://localhost:$HGPORT2/
219 using http://localhost:$HGPORT2/
218 sending capabilities command
220 sending capabilities command
219 http authorization required for http://localhost:$HGPORT2/
221 http authorization required for http://localhost:$HGPORT2/
220 realm: mercurial
222 realm: mercurial
221 user:
223 user:
222 password: abort: authorization failed
224 password: abort: authorization failed
223 [255]
225 [255]
224 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
226 $ hg id --config ui.interactive=true --config extensions.getpass=get_pass.py http://user@localhost:$HGPORT2/
225 http authorization required for http://localhost:$HGPORT2/
227 http authorization required for http://localhost:$HGPORT2/
226 realm: mercurial
228 realm: mercurial
227 user: user
229 user: user
228 password: 5fed3813f7f5
230 password: 5fed3813f7f5
229 $ hg id http://user:pass@localhost:$HGPORT2/
231 $ hg id http://user:pass@localhost:$HGPORT2/
230 5fed3813f7f5
232 5fed3813f7f5
231 $ echo '[auth]' >> .hg/hgrc
233 $ echo '[auth]' >> .hg/hgrc
232 $ echo 'l.schemes=http' >> .hg/hgrc
234 $ echo 'l.schemes=http' >> .hg/hgrc
233 $ echo 'l.prefix=lo' >> .hg/hgrc
235 $ echo 'l.prefix=lo' >> .hg/hgrc
234 $ echo 'l.username=user' >> .hg/hgrc
236 $ echo 'l.username=user' >> .hg/hgrc
235 $ echo 'l.password=pass' >> .hg/hgrc
237 $ echo 'l.password=pass' >> .hg/hgrc
236 $ hg id http://localhost:$HGPORT2/
238 $ hg id http://localhost:$HGPORT2/
237 5fed3813f7f5
239 5fed3813f7f5
238 $ hg id http://localhost:$HGPORT2/
240 $ hg id http://localhost:$HGPORT2/
239 5fed3813f7f5
241 5fed3813f7f5
240 $ hg id http://user@localhost:$HGPORT2/
242 $ hg id http://user@localhost:$HGPORT2/
241 5fed3813f7f5
243 5fed3813f7f5
242
244
243 $ cat > use_digests.py << EOF
245 $ cat > use_digests.py << EOF
244 > from mercurial import (
246 > from mercurial import (
245 > exthelper,
247 > exthelper,
246 > url,
248 > url,
247 > )
249 > )
248 >
250 >
249 > eh = exthelper.exthelper()
251 > eh = exthelper.exthelper()
250 > uisetup = eh.finaluisetup
252 > uisetup = eh.finaluisetup
251 >
253 >
252 > @eh.wrapfunction(url, 'opener')
254 > @eh.wrapfunction(url, 'opener')
253 > def urlopener(orig, *args, **kwargs):
255 > def urlopener(orig, *args, **kwargs):
254 > opener = orig(*args, **kwargs)
256 > opener = orig(*args, **kwargs)
255 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
257 > opener.addheaders.append((r'X-HgTest-AuthType', r'Digest'))
256 > return opener
258 > return opener
257 > EOF
259 > EOF
258
260
259 $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py
261 $ hg id http://localhost:$HGPORT2/ --config extensions.x=use_digests.py
260 5fed3813f7f5
262 5fed3813f7f5
261
263
262 #if no-reposimplestore
264 #if no-reposimplestore
263 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
265 $ hg clone http://user:pass@localhost:$HGPORT2/ dest 2>&1
264 streaming all changes
266 streaming all changes
265 10 files to transfer, 1.01 KB of data
267 10 files to transfer, 1.01 KB of data
266 transferred * KB in * seconds (*/sec) (glob)
268 transferred * KB in * seconds (*/sec) (glob)
267 updating to branch default
269 updating to branch default
268 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
270 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
269 #endif
271 #endif
270
272
271 --pull should override server's preferuncompressed
273 --pull should override server's preferuncompressed
272 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
274 $ hg clone --pull http://user:pass@localhost:$HGPORT2/ dest-pull 2>&1
273 requesting all changes
275 requesting all changes
274 adding changesets
276 adding changesets
275 adding manifests
277 adding manifests
276 adding file changes
278 adding file changes
277 added 2 changesets with 5 changes to 5 files
279 added 2 changesets with 5 changes to 5 files
278 new changesets 8b6053c928fe:5fed3813f7f5
280 new changesets 8b6053c928fe:5fed3813f7f5
279 updating to branch default
281 updating to branch default
280 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
282 5 files updated, 0 files merged, 0 files removed, 0 files unresolved
281
283
282 $ hg id http://user2@localhost:$HGPORT2/
284 $ hg id http://user2@localhost:$HGPORT2/
283 abort: http authorization required for http://localhost:$HGPORT2/
285 abort: http authorization required for http://localhost:$HGPORT2/
284 [255]
286 [255]
285 $ hg id http://user:pass2@localhost:$HGPORT2/
287 $ hg id http://user:pass2@localhost:$HGPORT2/
286 abort: HTTP Error 403: no
288 abort: HTTP Error 403: no
287 [100]
289 [100]
288
290
289 $ hg -R dest-pull tag -r tip top
291 $ hg -R dest-pull tag -r tip top
290 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
292 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/
291 pushing to http://user:***@localhost:$HGPORT2/
293 pushing to http://user:***@localhost:$HGPORT2/
292 searching for changes
294 searching for changes
293 remote: adding changesets
295 remote: adding changesets
294 remote: adding manifests
296 remote: adding manifests
295 remote: adding file changes
297 remote: adding file changes
296 remote: added 1 changesets with 1 changes to 1 files
298 remote: added 1 changesets with 1 changes to 1 files
297 $ hg rollback -q
299 $ hg rollback -q
298 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
300 $ hg -R dest-pull push http://user:pass@localhost:$HGPORT2/ --debug --config devel.debug.peer-request=yes
299 pushing to http://user:***@localhost:$HGPORT2/
301 pushing to http://user:***@localhost:$HGPORT2/
300 using http://localhost:$HGPORT2/
302 using http://localhost:$HGPORT2/
301 http auth: user user, password ****
303 http auth: user user, password ****
302 sending capabilities command
304 sending capabilities command
303 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
305 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=capabilities
304 http auth: user user, password ****
306 http auth: user user, password ****
305 devel-peer-request: finished in *.???? seconds (200) (glob)
307 devel-peer-request: finished in *.???? seconds (200) (glob)
306 query 1; heads
308 query 1; heads
307 devel-peer-request: batched-content
309 devel-peer-request: batched-content
308 devel-peer-request: - heads (0 arguments)
310 devel-peer-request: - heads (0 arguments)
309 devel-peer-request: - known (1 arguments)
311 devel-peer-request: - known (1 arguments)
310 sending batch command
312 sending batch command
311 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
313 devel-peer-request: GET http://localhost:$HGPORT2/?cmd=batch
312 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
314 devel-peer-request: Vary X-HgArg-1,X-HgProto-1
313 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
315 devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
314 devel-peer-request: 68 bytes of commands arguments in headers
316 devel-peer-request: 68 bytes of commands arguments in headers
315 devel-peer-request: finished in *.???? seconds (200) (glob)
317 devel-peer-request: finished in *.???? seconds (200) (glob)
316 searching for changes
318 searching for changes
317 all remote heads known locally
319 all remote heads known locally
318 preparing listkeys for "phases"
320 preparing listkeys for "phases"
319 sending listkeys command
321 sending listkeys command
devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: 16 bytes of commands arguments in headers
devel-peer-request: finished in *.???? seconds (200) (glob)
received listkey for "phases": 58 bytes
checking for updated bookmarks
preparing listkeys for "bookmarks"
sending listkeys command
devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: 19 bytes of commands arguments in headers
devel-peer-request: finished in *.???? seconds (200) (glob)
received listkey for "bookmarks": 0 bytes
sending branchmap command
devel-peer-request: GET http://localhost:$HGPORT2/?cmd=branchmap
devel-peer-request: Vary X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: finished in *.???? seconds (200) (glob)
preparing listkeys for "bookmarks"
sending listkeys command
devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: 19 bytes of commands arguments in headers
devel-peer-request: finished in *.???? seconds (200) (glob)
received listkey for "bookmarks": 0 bytes
1 changesets found
list of changesets:
7f4e523d01f2cc3765ac8934da3d14db775ff872
bundle2-output-bundle: "HG20", 5 parts total
bundle2-output-part: "replycaps" 207 bytes payload
bundle2-output-part: "check:phases" 24 bytes payload
bundle2-output-part: "check:updated-heads" streamed payload
bundle2-output-part: "changegroup" (params: 1 mandatory) streamed payload
bundle2-output-part: "phase-heads" 24 bytes payload
sending unbundle command
sending 1023 bytes
devel-peer-request: POST http://localhost:$HGPORT2/?cmd=unbundle
devel-peer-request: Content-length 1023
devel-peer-request: Content-type application/mercurial-0.1
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: 16 bytes of commands arguments in headers
devel-peer-request: 1023 bytes of data
devel-peer-request: finished in *.???? seconds (200) (glob)
bundle2-input-bundle: no-transaction
bundle2-input-part: "reply:changegroup" (advisory) (params: 0 advisory) supported
bundle2-input-part: "output" (advisory) (params: 0 advisory) supported
bundle2-input-part: total payload size 55
remote: adding changesets
remote: adding manifests
remote: adding file changes
bundle2-input-part: "output" (advisory) supported
bundle2-input-part: total payload size 45
remote: added 1 changesets with 1 changes to 1 files
bundle2-input-bundle: 3 parts total
preparing listkeys for "phases"
sending listkeys command
devel-peer-request: GET http://localhost:$HGPORT2/?cmd=listkeys
devel-peer-request: Vary X-HgArg-1,X-HgProto-1
devel-peer-request: X-hgproto-1 0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
devel-peer-request: 16 bytes of commands arguments in headers
devel-peer-request: finished in *.???? seconds (200) (glob)
received listkey for "phases": 15 bytes
(sent 9 HTTP requests and * bytes; received * bytes in responses) (glob) (?)
$ hg rollback -q

$ sed 's/.*] "/"/' < ../access.log
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 - x-hgtest-authtype:Digest
"GET /?cmd=capabilities HTTP/1.1" 200 - x-hgtest-authtype:Digest
"GET /?cmd=lookup HTTP/1.1" 401 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
"GET /?cmd=lookup HTTP/1.1" 200 - x-hgarg-1:key=tip x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
"GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=namespaces x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
"GET /?cmd=listkeys HTTP/1.1" 401 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull x-hgtest-authtype:Digest
"GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
"GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
"GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
"GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=0&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&stream=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull (no-reposimplestore !)
"GET /?cmd=capabilities HTTP/1.1" 401 - (no-reposimplestore !)
"GET /?cmd=capabilities HTTP/1.1" 200 - (no-reposimplestore !)
"GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=getbundle HTTP/1.1" 200 - x-hgarg-1:bookmarks=1&$USUAL_BUNDLE_CAPS$&cg=1&common=0000000000000000000000000000000000000000&heads=5fed3813f7f5e1824344fdc9cf8f63bb662c292d&listkeys=bookmarks&phases=1 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 403 -
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365* (glob)
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=capabilities HTTP/1.1" 401 -
"GET /?cmd=capabilities HTTP/1.1" 200 -
"GET /?cmd=batch HTTP/1.1" 200 - x-hgarg-1:cmds=heads+%3Bknown+nodes%3D7f4e523d01f2cc3765ac8934da3d14db775ff872 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=branchmap HTTP/1.1" 200 - x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=bookmarks x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"POST /?cmd=unbundle HTTP/1.1" 200 - x-hgarg-1:heads=666f726365 x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull
"GET /?cmd=listkeys HTTP/1.1" 200 - x-hgarg-1:namespace=phases x-hgproto-1:0.1 0.2 comp=$USUAL_COMPRESSIONS$ partial-pull

$ cd ..

clone of serve with repo in root and unserved subrepo (issue2970)

$ hg --cwd test init sub
$ echo empty > test/sub/empty
$ hg --cwd test/sub add empty
$ hg --cwd test/sub commit -qm 'add empty'
$ hg --cwd test/sub tag -r 0 something
$ echo sub = sub > test/.hgsub
$ hg --cwd test add .hgsub
$ hg --cwd test commit -qm 'add subrepo'
$ hg clone http://localhost:$HGPORT noslash-clone
requesting all changes
adding changesets
adding manifests
adding file changes
added 3 changesets with 7 changes to 7 files
new changesets 8b6053c928fe:56f9bc90cce6
updating to branch default
cloning subrepo sub from http://localhost:$HGPORT/sub
abort: HTTP Error 404: Not Found
[100]
$ hg clone http://localhost:$HGPORT/ slash-clone
requesting all changes
adding changesets
adding manifests
adding file changes
added 3 changesets with 7 changes to 7 files
new changesets 8b6053c928fe:56f9bc90cce6
updating to branch default
cloning subrepo sub from http://localhost:$HGPORT/sub
abort: HTTP Error 404: Not Found
[100]

check error log

$ cat error.log

$ cat errors2.log

check abort error reporting while pulling/cloning

$ $RUNTESTDIR/killdaemons.py
$ hg serve -R test -p $HGPORT -d --pid-file=hg3.pid -E error.log --config extensions.crash=${TESTDIR}/crashgetbundler.py
$ cat hg3.pid >> $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT/ abort-clone
requesting all changes
remote: abort: this is an exercise
abort: pull failed on remote
[100]
$ cat error.log

disable pull-based clones

$ hg serve -R test -p $HGPORT1 -d --pid-file=hg4.pid -E error.log --config server.disablefullbundle=True
$ cat hg4.pid >> $DAEMON_PIDS
$ hg clone http://localhost:$HGPORT1/ disable-pull-clone
requesting all changes
remote: abort: server has pull-based clones disabled
abort: pull failed on remote
(remove --pull if specified or upgrade Mercurial)
[100]

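The --config override above is one-shot. A server that should always refuse full pull-based clones would carry the same setting in its hgrc; a minimal sketch, using only the key exercised by the command above (treat the value casing and placement as an example):

[server]
# refuse to build a full changegroup for cloning clients
disablefullbundle = true

Stream clones and partial clones remain available, as the tests below show.
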
#if no-reposimplestore
... but keep stream clones working

$ hg clone --stream --noupdate http://localhost:$HGPORT1/ test-stream-clone
streaming all changes
* files to transfer, * of data (glob)
transferred * in * seconds (*/sec) (glob)
$ cat error.log
#endif

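The stream clone above still succeeds because --stream copies the server's store files more or less verbatim instead of asking the server to compute a changegroup, so server.disablefullbundle does not apply to it. If an operator also wanted to refuse stream clones, the relevant knob is believed to be server.uncompressed; this is an assumption about the configuration option, not something this test exercises:

[server]
disablefullbundle = true
# assumed option for refusing uncompressed streaming clones as well
uncompressed = false
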
... and also keep partial clones and pulls working
$ hg clone http://localhost:$HGPORT1 --rev 0 test/partial/clone
adding changesets
adding manifests
adding file changes
added 1 changesets with 4 changes to 4 files
new changesets 8b6053c928fe
updating to branch default
4 files updated, 0 files merged, 0 files removed, 0 files unresolved
$ hg pull -R test/partial/clone
pulling from http://localhost:$HGPORT1/
searching for changes
adding changesets
adding manifests
adding file changes
added 2 changesets with 3 changes to 3 files
new changesets 5fed3813f7f5:56f9bc90cce6
(run 'hg update' to get a working copy)

$ hg clone -U -r 0 test/partial/clone test/another/clone
adding changesets
adding manifests
adding file changes
added 1 changesets with 4 changes to 4 files
new changesets 8b6053c928fe

a corrupt cookies file should yield a warning

$ cat > $TESTTMP/cookies.txt << EOF
> bad format
> EOF

$ hg --config auth.cookiefile=$TESTTMP/cookies.txt id http://localhost:$HGPORT/
(error loading cookie file $TESTTMP/cookies.txt: '*/cookies.txt' does not look like a Netscape format cookies file; continuing without cookies) (glob)
56f9bc90cce6

$ killdaemons.py

Create a dummy authentication handler that looks for cookies. It doesn't do
anything useful; it just raises an HTTP 500 with details about the Cookie
request header. We raise HTTP 500 because its message is printed in the abort
message.

$ cat > cookieauth.py << EOF
> from mercurial import util
> from mercurial.hgweb import common
> def perform_authentication(hgweb, req, op):
>     cookie = req.headers.get(b'Cookie')
>     if not cookie:
>         raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'no-cookie')
>     raise common.ErrorResponse(common.HTTP_SERVER_ERROR, b'Cookie: %s' % cookie)
> def extsetup(ui):
>     common.permhooks.insert(0, perform_authentication)
> EOF

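The handler above is deliberately unhelpful: it always raises, so the test can read the Cookie header back out of the abort message. For contrast, a permission hook that actually enforced a cookie would follow the same permhooks pattern. The sketch below is illustrative only and is not part of this test: the expected cookie value is made up, and HTTP_FORBIDDEN is assumed to be defined in mercurial.hgweb.common alongside the HTTP_SERVER_ERROR used above.

from mercurial.hgweb import common

EXPECTED = b'hgkey=localhostvalue'  # made-up value, for illustration only

def require_cookie(hgweb, req, op):
    # Raising ErrorResponse denies the request; returning None lets the
    # remaining permission hooks (and the command itself) run.
    cookie = req.headers.get(b'Cookie')
    if cookie != EXPECTED:  # simplistic match; a real hook would parse the header
        raise common.ErrorResponse(common.HTTP_FORBIDDEN, b'missing or wrong cookie')

def extsetup(ui):
    # Run before the other permission hooks, as cookieauth.py does above.
    common.permhooks.insert(0, require_cookie)
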
$ hg serve --config extensions.cookieauth=cookieauth.py -R test -p $HGPORT -d --pid-file=pid
$ cat pid > $DAEMON_PIDS

a request sent without a cookie should fail due to the lack of a cookie

$ hg id http://localhost:$HGPORT
abort: HTTP Error 500: no-cookie
[100]

Populate a cookies file

$ cat > cookies.txt << EOF
> # HTTP Cookie File
> # Expiration is 2030-01-01 at midnight
> .example.com TRUE / FALSE 1893456000 hgkey examplevalue
> EOF

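For reference, each non-comment line in a Netscape-format cookie file carries seven whitespace-separated fields (tab-delimited in the standard format): domain, a TRUE/FALSE flag for whether subdomains match, path, a TRUE/FALSE secure flag, the expiry as a Unix timestamp (1893456000 is 2030-01-01 00:00:00 UTC, matching the comment above), the cookie name, and the cookie value. Annotated, the entry above reads:

# domain        subdomains  path  secure  expiry      name   value
.example.com    TRUE        /     FALSE   1893456000  hgkey  examplevalue

so it defines a cookie hgkey=examplevalue for .example.com and its subdomains, which is why it must not be sent to localhost.
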
Should not send a cookie for another domain

$ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
abort: HTTP Error 500: no-cookie
[100]

Add a cookie entry for our test server and verify it is sent

$ cat >> cookies.txt << EOF
> localhost.local FALSE / FALSE 1893456000 hgkey localhostvalue
> EOF

$ hg --config auth.cookiefile=cookies.txt id http://localhost:$HGPORT/
abort: HTTP Error 500: Cookie: hgkey=localhostvalue
[100]