##// END OF EJS Templates
streamclone: remove sleep based "synchronisation" in tests...
marmoute -
r47748:faa43f09 default
parent child Browse files
Show More
@@ -0,0 +1,31 b''
1 from __future__ import absolute_import
2
3 from mercurial import (
4 encoding,
5 extensions,
6 streamclone,
7 testing,
8 )
9
10
11 WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
12 WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
13
14
def _test_sync_point_walk_1(orig, repo):
    """Signal that stream generation reached the first sync point.

    Wraps ``streamclone._test_sync_point_walk_1``. Instead of relying on
    timing, create the file the test harness waits on.
    """
    testing.write_file(WALKED_FILE_1)
18
def _test_sync_point_walk_2(orig, repo):
    """Block at the second sync point until the test harness releases us.

    The repository lock must already have been released when this hook
    runs, so assert that before waiting.
    """
    assert repo._currentlock(repo._lockref) is None
    testing.wait_file(WALKED_FILE_2)
23
def uisetup(ui):
    """Install the file-based synchronisation hooks into ``streamclone``."""
    extensions.wrapfunction(
        streamclone, '_test_sync_point_walk_1', _test_sync_point_walk_1
    )

    extensions.wrapfunction(
        streamclone, '_test_sync_point_walk_2', _test_sync_point_walk_2
    )
@@ -1,735 +1,747 b''
1 # streamclone.py - producing and consuming streaming repository data
1 # streamclone.py - producing and consuming streaming repository data
2 #
2 #
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import contextlib
10 import contextlib
11 import os
11 import os
12 import struct
12 import struct
13
13
14 from .i18n import _
14 from .i18n import _
15 from .pycompat import open
15 from .pycompat import open
16 from .interfaces import repository
16 from .interfaces import repository
17 from . import (
17 from . import (
18 cacheutil,
18 cacheutil,
19 error,
19 error,
20 narrowspec,
20 narrowspec,
21 phases,
21 phases,
22 pycompat,
22 pycompat,
23 requirements as requirementsmod,
23 requirements as requirementsmod,
24 scmutil,
24 scmutil,
25 store,
25 store,
26 util,
26 util,
27 )
27 )
28
28
29
29
def canperformstreamclone(pullop, bundle2=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bundle2`` will cause the function to consider stream clone through
    bundle2 and only through bundle2.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
            bundle2supported = True
        # else
        # Server doesn't support bundle2 stream clone or doesn't support
        # the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bundle2supported and not bundle2:
        return False, None
    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    elif bundle2 and not bundle2supported:
        return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        streamrequested = remote.capable(b'stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
    requirements = set()
    if remote.capable(b'stream'):
        requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
    else:
        streamreqs = remote.capable(b'streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(
                _(
                    b'warning: stream clone requested but server has them '
                    b'disabled\n'
                )
            )
            return False, None

        streamreqs = set(streamreqs.split(b','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(
                _(
                    b'warning: stream clone requested but client is missing '
                    b'requirements: %s\n'
                )
                % b', '.join(sorted(missingreqs))
            )
            pullop.repo.ui.warn(
                _(
                    b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                    b'for more information)\n'
                )
            )
            return False, None
        requirements = streamreqs

    return True, requirements
121
121
122
122
def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    from . import localrepo

    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable(b'branchmap'):
        with remote.commandexecutor() as e:
            rbranchmap = e.callcommand(b'branchmap', {}).result()

    repo.ui.status(_(b'streaming all changes\n'))

    with remote.commandexecutor() as e:
        fp = e.callcommand(b'stream_out', {}).result()

    # TODO strictly speaking, this code should all be inside the context
    # manager because the context manager is supposed to ensure all wire state
    # is flushed when exiting. But the legacy peers don't do this, so it
    # doesn't matter.
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _(b'unexpected response from remote server:'), l
        )
    if resp == 1:
        raise error.Abort(_(b'operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_(b'locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_(b'the server sent an unknown error code'))

    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(b' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _(b'unexpected response from remote server:'), l
        )

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
            repo.requirements - repo.supportedformats
        )
        repo.svfs.options = localrepo.resolvestorevfsoptions(
            repo.ui, repo.requirements, repo.features
        )
        scmutil.writereporequirements(repo)

        if rbranchmap:
            repo._branchcaches.replace(repo, rbranchmap)

        repo.invalidate()
198
198
199
199
def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
        return False

    if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret changesets.
    # So don't allow this by default.
    secret = phases.hassecret(repo)
    if secret:
        return repo.ui.configbool(b'server', b'uncompressedallowsecret')

    return True
215
215
216
216
217 # This is it's own function so extensions can override it.
217 # This is it's own function so extensions can override it.
218 def _walkstreamfiles(repo, matcher=None):
218 def _walkstreamfiles(repo, matcher=None):
219 return repo.store.walk(matcher)
219 return repo.store.walk(matcher)
220
220
221
221
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug(b'scanning\n')
        for file_type, name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size
        # test hook: still holding the lock here
        _test_sync_point_walk_1(repo)
    # test hook: lock has been released
    _test_sync_point_walk_2(repo)

    repo.ui.debug(
        b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
    )

    svfs = repo.svfs
    debugflag = repo.ui.debugflag

    def emitrevlogdata():
        for name, size in entries:
            if debugflag:
                repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
            # partially encode name over the wire for backwards compat
            yield b'%s\0%d\n' % (store.encodedir(name), size)
            # auditing at this stage is both pointless (paths are already
            # trusted by the local repo) and expensive
            with svfs(name, b'rb', auditpath=False) as fp:
                if size <= 65536:
                    yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(fp, limit=size):
                        yield chunk

    return len(entries), total_bytes, emitrevlogdata()
274
276
275
277
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with 2 header lines. The
    first line indicates overall success. The 2nd contains the file count and
    byte size of payload.

    The success line contains "0" for success, "1" for stream generation not
    allowed, and "2" for error locking the repository (possibly indicating
    a permissions error for the server process).
    """
    if not allowservergeneration(repo):
        yield b'1\n'
        return

    try:
        filecount, bytecount, it = generatev1(repo)
    except error.LockError:
        yield b'2\n'
        return

    # Indicates successful response.
    yield b'0\n'
    yield b'%d %d\n' % (filecount, bytecount)
    for chunk in it:
        yield chunk
302
304
303
305
def generatebundlev1(repo, compression=b'UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes is a 16-bit big endian unsigned short declaring the length
    of the requirements string, including a trailing \0. The following N bytes
    are the requirements string, which is ASCII containing a comma-delimited
    list of repo requirements that are needed to support the data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != b'UN':
        raise ValueError(b'we do not support the compression argument yet')

    requirements = repo.requirements & repo.supportedformats
    requires = b','.join(sorted(requirements))

    def gen():
        yield b'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(
            _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
        )

        yield struct.pack(b'>QQ', filecount, bytecount)
        yield struct.pack(b'>H', len(requires) + 1)
        yield requires + b'\0'

        # This is where we'll add compression in the future.
        assert compression == b'UN'

        progress = repo.ui.makeprogress(
            _(b'bundle'), total=bytecount, unit=_(b'bytes')
        )
        progress.update(0)

        for chunk in it:
            progress.increment(step=len(chunk))
            yield chunk

        progress.complete()

    return requirements, gen()
360
362
361
363
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(
            _(b'%d files to transfer, %s of data\n')
            % (filecount, util.bytecount(bytecount))
        )
        progress = repo.ui.makeprogress(
            _(b'clone'), total=bytecount, unit=_(b'bytes')
        )
        progress.update(0)
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction(b'clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                for i in pycompat.xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split(b'\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _(b'unexpected response from remote server:'), l
                        )
                    if repo.ui.debugflag:
                        repo.ui.debug(
                            b'adding %s (%s)\n' % (name, util.bytecount(size))
                        )
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, b'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            progress.increment(step=len(chunk))
                            ofp.write(chunk)

            # force @filecache properties to be reloaded from
            # streamclone-ed file at next access
            repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        if elapsed <= 0:
            elapsed = 0.001
        progress.complete()
        repo.ui.status(
            _(b'transferred %s in %.1f seconds (%s/sec)\n')
            % (
                util.bytecount(bytecount),
                elapsed,
                util.bytecount(bytecount / elapsed),
            )
        )
435
437
436
438
def readbundle1header(fp):
    """Parse the header of a version 1 stream clone bundle.

    ``fp`` must be positioned at the 2 byte compression identifier
    (i.e. the 4 byte "HGS1" magic has already been consumed).

    Returns a tuple of (filecount, bytecount, requirements).
    """
    compression = fp.read(2)
    if compression != b'UN':
        raise error.Abort(
            _(
                b'only uncompressed stream clone bundles are '
                b'supported; got %s'
            )
            % compression
        )

    filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
    requireslen = struct.unpack(b'>H', fp.read(2))[0]
    requires = fp.read(requireslen)

    if not requires.endswith(b'\0'):
        raise error.Abort(
            _(
                b'malformed stream clone bundle: '
                b'requirements not properly encoded'
            )
        )

    requirements = set(requires.rstrip(b'\0').split(b','))

    return filecount, bytecount, requirements
463
465
464
466
def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file handle
    is at the 2 byte compression identifier.
    """
    if len(repo):
        raise error.Abort(
            _(b'cannot apply stream clone bundle on non-empty repo')
        )

    filecount, bytecount, requirements = readbundle1header(fp)
    missingreqs = requirements - repo.supportedformats
    if missingreqs:
        raise error.Abort(
            _(b'unable to apply stream clone: unsupported format: %s')
            % b', '.join(sorted(missingreqs))
        )

    consumev1(repo, fp, filecount, bytecount)
485
487
486
488
class streamcloneapplier(object):
    """Class to manage applying streaming clone bundles.

    We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
    readers to perform bundle type-specific functionality.
    """

    def __init__(self, fh):
        # file handle positioned at the 2 byte compression identifier
        self._fh = fh

    def apply(self, repo):
        """Apply the bundle data from the wrapped file handle to ``repo``."""
        return applybundlev1(repo, self._fh)
499
501
500
502
501 # type of file to stream
503 # type of file to stream
502 _fileappend = 0 # append only file
504 _fileappend = 0 # append only file
503 _filefull = 1 # full snapshot file
505 _filefull = 1 # full snapshot file
504
506
505 # Source of the file
507 # Source of the file
506 _srcstore = b's' # store (svfs)
508 _srcstore = b's' # store (svfs)
507 _srccache = b'c' # cache (cache)
509 _srccache = b'c' # cache (cache)
508
510
509 # This is it's own function so extensions can override it.
511 # This is it's own function so extensions can override it.
510 def _walkstreamfullstorefiles(repo):
512 def _walkstreamfullstorefiles(repo):
511 """list snapshot file from the store"""
513 """list snapshot file from the store"""
512 fnames = []
514 fnames = []
513 if not repo.publishing():
515 if not repo.publishing():
514 fnames.append(b'phaseroots')
516 fnames.append(b'phaseroots')
515 return fnames
517 return fnames
516
518
517
519
518 def _filterfull(entry, copy, vfsmap):
520 def _filterfull(entry, copy, vfsmap):
519 """actually copy the snapshot files"""
521 """actually copy the snapshot files"""
520 src, name, ftype, data = entry
522 src, name, ftype, data = entry
521 if ftype != _filefull:
523 if ftype != _filefull:
522 return entry
524 return entry
523 return (src, name, ftype, copy(vfsmap[src].join(name)))
525 return (src, name, ftype, copy(vfsmap[src].join(name)))
524
526
525
527
526 @contextlib.contextmanager
528 @contextlib.contextmanager
527 def maketempcopies():
529 def maketempcopies():
528 """return a function to temporary copy file"""
530 """return a function to temporary copy file"""
529 files = []
531 files = []
530 try:
532 try:
531
533
532 def copy(src):
534 def copy(src):
533 fd, dst = pycompat.mkstemp()
535 fd, dst = pycompat.mkstemp()
534 os.close(fd)
536 os.close(fd)
535 files.append(dst)
537 files.append(dst)
536 util.copyfiles(src, dst, hardlink=True)
538 util.copyfiles(src, dst, hardlink=True)
537 return dst
539 return dst
538
540
539 yield copy
541 yield copy
540 finally:
542 finally:
541 for tmp in files:
543 for tmp in files:
542 util.tryunlink(tmp)
544 util.tryunlink(tmp)
543
545
544
546
545 def _makemap(repo):
547 def _makemap(repo):
546 """make a (src -> vfs) map for the repo"""
548 """make a (src -> vfs) map for the repo"""
547 vfsmap = {
549 vfsmap = {
548 _srcstore: repo.svfs,
550 _srcstore: repo.svfs,
549 _srccache: repo.cachevfs,
551 _srccache: repo.cachevfs,
550 }
552 }
551 # we keep repo.vfs out of the on purpose, ther are too many danger there
553 # we keep repo.vfs out of the on purpose, ther are too many danger there
552 # (eg: .hg/hgrc)
554 # (eg: .hg/hgrc)
553 assert repo.vfs not in vfsmap.values()
555 assert repo.vfs not in vfsmap.values()
554
556
555 return vfsmap
557 return vfsmap
556
558
557
559
558 def _emit2(repo, entries, totalfilesize):
560 def _emit2(repo, entries, totalfilesize):
559 """actually emit the stream bundle"""
561 """actually emit the stream bundle"""
560 vfsmap = _makemap(repo)
562 vfsmap = _makemap(repo)
561 progress = repo.ui.makeprogress(
563 progress = repo.ui.makeprogress(
562 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
564 _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
563 )
565 )
564 progress.update(0)
566 progress.update(0)
565 with maketempcopies() as copy, progress:
567 with maketempcopies() as copy, progress:
566 # copy is delayed until we are in the try
568 # copy is delayed until we are in the try
567 entries = [_filterfull(e, copy, vfsmap) for e in entries]
569 entries = [_filterfull(e, copy, vfsmap) for e in entries]
568 yield None # this release the lock on the repository
570 yield None # this release the lock on the repository
569 seen = 0
571 seen = 0
570
572
571 for src, name, ftype, data in entries:
573 for src, name, ftype, data in entries:
572 vfs = vfsmap[src]
574 vfs = vfsmap[src]
573 yield src
575 yield src
574 yield util.uvarintencode(len(name))
576 yield util.uvarintencode(len(name))
575 if ftype == _fileappend:
577 if ftype == _fileappend:
576 fp = vfs(name)
578 fp = vfs(name)
577 size = data
579 size = data
578 elif ftype == _filefull:
580 elif ftype == _filefull:
579 fp = open(data, b'rb')
581 fp = open(data, b'rb')
580 size = util.fstat(fp).st_size
582 size = util.fstat(fp).st_size
581 try:
583 try:
582 yield util.uvarintencode(size)
584 yield util.uvarintencode(size)
583 yield name
585 yield name
584 if size <= 65536:
586 if size <= 65536:
585 chunks = (fp.read(size),)
587 chunks = (fp.read(size),)
586 else:
588 else:
587 chunks = util.filechunkiter(fp, limit=size)
589 chunks = util.filechunkiter(fp, limit=size)
588 for chunk in chunks:
590 for chunk in chunks:
589 seen += len(chunk)
591 seen += len(chunk)
590 progress.update(seen)
592 progress.update(seen)
591 yield chunk
593 yield chunk
592 finally:
594 finally:
593 fp.close()
595 fp.close()
594
596
595
597
598 def _test_sync_point_walk_1(repo):
599 """a function for synchronisation during tests"""
600
601
602 def _test_sync_point_walk_2(repo):
603 """a function for synchronisation during tests"""
604
605
596 def generatev2(repo, includes, excludes, includeobsmarkers):
606 def generatev2(repo, includes, excludes, includeobsmarkers):
597 """Emit content for version 2 of a streaming clone.
607 """Emit content for version 2 of a streaming clone.
598
608
599 the data stream consists the following entries:
609 the data stream consists the following entries:
600 1) A char representing the file destination (eg: store or cache)
610 1) A char representing the file destination (eg: store or cache)
601 2) A varint containing the length of the filename
611 2) A varint containing the length of the filename
602 3) A varint containing the length of file data
612 3) A varint containing the length of file data
603 4) N bytes containing the filename (the internal, store-agnostic form)
613 4) N bytes containing the filename (the internal, store-agnostic form)
604 5) N bytes containing the file data
614 5) N bytes containing the file data
605
615
606 Returns a 3-tuple of (file count, file size, data iterator).
616 Returns a 3-tuple of (file count, file size, data iterator).
607 """
617 """
608
618
609 with repo.lock():
619 with repo.lock():
610
620
611 entries = []
621 entries = []
612 totalfilesize = 0
622 totalfilesize = 0
613
623
614 matcher = None
624 matcher = None
615 if includes or excludes:
625 if includes or excludes:
616 matcher = narrowspec.match(repo.root, includes, excludes)
626 matcher = narrowspec.match(repo.root, includes, excludes)
617
627
618 repo.ui.debug(b'scanning\n')
628 repo.ui.debug(b'scanning\n')
619 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
629 for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
620 if size:
630 if size:
621 entries.append((_srcstore, name, _fileappend, size))
631 entries.append((_srcstore, name, _fileappend, size))
622 totalfilesize += size
632 totalfilesize += size
623 for name in _walkstreamfullstorefiles(repo):
633 for name in _walkstreamfullstorefiles(repo):
624 if repo.svfs.exists(name):
634 if repo.svfs.exists(name):
625 totalfilesize += repo.svfs.lstat(name).st_size
635 totalfilesize += repo.svfs.lstat(name).st_size
626 entries.append((_srcstore, name, _filefull, None))
636 entries.append((_srcstore, name, _filefull, None))
627 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
637 if includeobsmarkers and repo.svfs.exists(b'obsstore'):
628 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
638 totalfilesize += repo.svfs.lstat(b'obsstore').st_size
629 entries.append((_srcstore, b'obsstore', _filefull, None))
639 entries.append((_srcstore, b'obsstore', _filefull, None))
630 for name in cacheutil.cachetocopy(repo):
640 for name in cacheutil.cachetocopy(repo):
631 if repo.cachevfs.exists(name):
641 if repo.cachevfs.exists(name):
632 totalfilesize += repo.cachevfs.lstat(name).st_size
642 totalfilesize += repo.cachevfs.lstat(name).st_size
633 entries.append((_srccache, name, _filefull, None))
643 entries.append((_srccache, name, _filefull, None))
634
644
635 chunks = _emit2(repo, entries, totalfilesize)
645 chunks = _emit2(repo, entries, totalfilesize)
636 first = next(chunks)
646 first = next(chunks)
637 assert first is None
647 assert first is None
648 _test_sync_point_walk_1(repo)
649 _test_sync_point_walk_2(repo)
638
650
639 return len(entries), totalfilesize, chunks
651 return len(entries), totalfilesize, chunks
640
652
641
653
642 @contextlib.contextmanager
654 @contextlib.contextmanager
643 def nested(*ctxs):
655 def nested(*ctxs):
644 this = ctxs[0]
656 this = ctxs[0]
645 rest = ctxs[1:]
657 rest = ctxs[1:]
646 with this:
658 with this:
647 if rest:
659 if rest:
648 with nested(*rest):
660 with nested(*rest):
649 yield
661 yield
650 else:
662 else:
651 yield
663 yield
652
664
653
665
654 def consumev2(repo, fp, filecount, filesize):
666 def consumev2(repo, fp, filecount, filesize):
655 """Apply the contents from a version 2 streaming clone.
667 """Apply the contents from a version 2 streaming clone.
656
668
657 Data is read from an object that only needs to provide a ``read(size)``
669 Data is read from an object that only needs to provide a ``read(size)``
658 method.
670 method.
659 """
671 """
660 with repo.lock():
672 with repo.lock():
661 repo.ui.status(
673 repo.ui.status(
662 _(b'%d files to transfer, %s of data\n')
674 _(b'%d files to transfer, %s of data\n')
663 % (filecount, util.bytecount(filesize))
675 % (filecount, util.bytecount(filesize))
664 )
676 )
665
677
666 start = util.timer()
678 start = util.timer()
667 progress = repo.ui.makeprogress(
679 progress = repo.ui.makeprogress(
668 _(b'clone'), total=filesize, unit=_(b'bytes')
680 _(b'clone'), total=filesize, unit=_(b'bytes')
669 )
681 )
670 progress.update(0)
682 progress.update(0)
671
683
672 vfsmap = _makemap(repo)
684 vfsmap = _makemap(repo)
673
685
674 with repo.transaction(b'clone'):
686 with repo.transaction(b'clone'):
675 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
687 ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values())
676 with nested(*ctxs):
688 with nested(*ctxs):
677 for i in range(filecount):
689 for i in range(filecount):
678 src = util.readexactly(fp, 1)
690 src = util.readexactly(fp, 1)
679 vfs = vfsmap[src]
691 vfs = vfsmap[src]
680 namelen = util.uvarintdecodestream(fp)
692 namelen = util.uvarintdecodestream(fp)
681 datalen = util.uvarintdecodestream(fp)
693 datalen = util.uvarintdecodestream(fp)
682
694
683 name = util.readexactly(fp, namelen)
695 name = util.readexactly(fp, namelen)
684
696
685 if repo.ui.debugflag:
697 if repo.ui.debugflag:
686 repo.ui.debug(
698 repo.ui.debug(
687 b'adding [%s] %s (%s)\n'
699 b'adding [%s] %s (%s)\n'
688 % (src, name, util.bytecount(datalen))
700 % (src, name, util.bytecount(datalen))
689 )
701 )
690
702
691 with vfs(name, b'w') as ofp:
703 with vfs(name, b'w') as ofp:
692 for chunk in util.filechunkiter(fp, limit=datalen):
704 for chunk in util.filechunkiter(fp, limit=datalen):
693 progress.increment(step=len(chunk))
705 progress.increment(step=len(chunk))
694 ofp.write(chunk)
706 ofp.write(chunk)
695
707
696 # force @filecache properties to be reloaded from
708 # force @filecache properties to be reloaded from
697 # streamclone-ed file at next access
709 # streamclone-ed file at next access
698 repo.invalidate(clearfilecache=True)
710 repo.invalidate(clearfilecache=True)
699
711
700 elapsed = util.timer() - start
712 elapsed = util.timer() - start
701 if elapsed <= 0:
713 if elapsed <= 0:
702 elapsed = 0.001
714 elapsed = 0.001
703 repo.ui.status(
715 repo.ui.status(
704 _(b'transferred %s in %.1f seconds (%s/sec)\n')
716 _(b'transferred %s in %.1f seconds (%s/sec)\n')
705 % (
717 % (
706 util.bytecount(progress.pos),
718 util.bytecount(progress.pos),
707 elapsed,
719 elapsed,
708 util.bytecount(progress.pos / elapsed),
720 util.bytecount(progress.pos / elapsed),
709 )
721 )
710 )
722 )
711 progress.complete()
723 progress.complete()
712
724
713
725
714 def applybundlev2(repo, fp, filecount, filesize, requirements):
726 def applybundlev2(repo, fp, filecount, filesize, requirements):
715 from . import localrepo
727 from . import localrepo
716
728
717 missingreqs = [r for r in requirements if r not in repo.supported]
729 missingreqs = [r for r in requirements if r not in repo.supported]
718 if missingreqs:
730 if missingreqs:
719 raise error.Abort(
731 raise error.Abort(
720 _(b'unable to apply stream clone: unsupported format: %s')
732 _(b'unable to apply stream clone: unsupported format: %s')
721 % b', '.join(sorted(missingreqs))
733 % b', '.join(sorted(missingreqs))
722 )
734 )
723
735
724 consumev2(repo, fp, filecount, filesize)
736 consumev2(repo, fp, filecount, filesize)
725
737
726 # new requirements = old non-format requirements +
738 # new requirements = old non-format requirements +
727 # new format-related remote requirements
739 # new format-related remote requirements
728 # requirements from the streamed-in repository
740 # requirements from the streamed-in repository
729 repo.requirements = set(requirements) | (
741 repo.requirements = set(requirements) | (
730 repo.requirements - repo.supportedformats
742 repo.requirements - repo.supportedformats
731 )
743 )
732 repo.svfs.options = localrepo.resolvestorevfsoptions(
744 repo.svfs.options = localrepo.resolvestorevfsoptions(
733 repo.ui, repo.requirements, repo.features
745 repo.ui, repo.requirements, repo.features
734 )
746 )
735 scmutil.writereporequirements(repo)
747 scmutil.writereporequirements(repo)
@@ -1,638 +1,671 b''
1 #require serve no-reposimplestore no-chg
1 #require serve no-reposimplestore no-chg
2
2
3 #testcases stream-legacy stream-bundle2
3 #testcases stream-legacy stream-bundle2
4
4
5 #if stream-legacy
5 #if stream-legacy
6 $ cat << EOF >> $HGRCPATH
6 $ cat << EOF >> $HGRCPATH
7 > [server]
7 > [server]
8 > bundle2.stream = no
8 > bundle2.stream = no
9 > EOF
9 > EOF
10 #endif
10 #endif
11
11
12 Initialize repository
12 Initialize repository
13 the status call is to check for issue5130
13 the status call is to check for issue5130
14
14
15 $ hg init server
15 $ hg init server
16 $ cd server
16 $ cd server
17 $ touch foo
17 $ touch foo
18 $ hg -q commit -A -m initial
18 $ hg -q commit -A -m initial
19 >>> for i in range(1024):
19 >>> for i in range(1024):
20 ... with open(str(i), 'wb') as fh:
20 ... with open(str(i), 'wb') as fh:
21 ... fh.write(b"%d" % i) and None
21 ... fh.write(b"%d" % i) and None
22 $ hg -q commit -A -m 'add a lot of files'
22 $ hg -q commit -A -m 'add a lot of files'
23 $ hg st
23 $ hg st
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
24 $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
25 $ cat hg.pid > $DAEMON_PIDS
25 $ cat hg.pid > $DAEMON_PIDS
26 $ cd ..
26 $ cd ..
27
27
28 Cannot stream clone when server.uncompressed is set
28 Cannot stream clone when server.uncompressed is set
29
29
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
30 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
31 200 Script output follows
31 200 Script output follows
32
32
33 1
33 1
34
34
35 #if stream-legacy
35 #if stream-legacy
36 $ hg debugcapabilities http://localhost:$HGPORT
36 $ hg debugcapabilities http://localhost:$HGPORT
37 Main capabilities:
37 Main capabilities:
38 batch
38 batch
39 branchmap
39 branchmap
40 $USUAL_BUNDLE2_CAPS_SERVER$
40 $USUAL_BUNDLE2_CAPS_SERVER$
41 changegroupsubset
41 changegroupsubset
42 compression=$BUNDLE2_COMPRESSIONS$
42 compression=$BUNDLE2_COMPRESSIONS$
43 getbundle
43 getbundle
44 httpheader=1024
44 httpheader=1024
45 httpmediatype=0.1rx,0.1tx,0.2tx
45 httpmediatype=0.1rx,0.1tx,0.2tx
46 known
46 known
47 lookup
47 lookup
48 pushkey
48 pushkey
49 unbundle=HG10GZ,HG10BZ,HG10UN
49 unbundle=HG10GZ,HG10BZ,HG10UN
50 unbundlehash
50 unbundlehash
51 Bundle2 capabilities:
51 Bundle2 capabilities:
52 HG20
52 HG20
53 bookmarks
53 bookmarks
54 changegroup
54 changegroup
55 01
55 01
56 02
56 02
57 checkheads
57 checkheads
58 related
58 related
59 digests
59 digests
60 md5
60 md5
61 sha1
61 sha1
62 sha512
62 sha512
63 error
63 error
64 abort
64 abort
65 unsupportedcontent
65 unsupportedcontent
66 pushraced
66 pushraced
67 pushkey
67 pushkey
68 hgtagsfnodes
68 hgtagsfnodes
69 listkeys
69 listkeys
70 phases
70 phases
71 heads
71 heads
72 pushkey
72 pushkey
73 remote-changegroup
73 remote-changegroup
74 http
74 http
75 https
75 https
76
76
77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
77 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
78 warning: stream clone requested but server has them disabled
78 warning: stream clone requested but server has them disabled
79 requesting all changes
79 requesting all changes
80 adding changesets
80 adding changesets
81 adding manifests
81 adding manifests
82 adding file changes
82 adding file changes
83 added 2 changesets with 1025 changes to 1025 files
83 added 2 changesets with 1025 changes to 1025 files
84 new changesets 96ee1d7354c4:c17445101a72
84 new changesets 96ee1d7354c4:c17445101a72
85
85
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
86 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
87 200 Script output follows
87 200 Script output follows
88 content-type: application/mercurial-0.2
88 content-type: application/mercurial-0.2
89
89
90
90
91 $ f --size body --hexdump --bytes 100
91 $ f --size body --hexdump --bytes 100
92 body: size=232
92 body: size=232
93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
93 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
94 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
95 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
96 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
97 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
98 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
99 0060: 69 73 20 66 |is f|
99 0060: 69 73 20 66 |is f|
100
100
101 #endif
101 #endif
102 #if stream-bundle2
102 #if stream-bundle2
103 $ hg debugcapabilities http://localhost:$HGPORT
103 $ hg debugcapabilities http://localhost:$HGPORT
104 Main capabilities:
104 Main capabilities:
105 batch
105 batch
106 branchmap
106 branchmap
107 $USUAL_BUNDLE2_CAPS_SERVER$
107 $USUAL_BUNDLE2_CAPS_SERVER$
108 changegroupsubset
108 changegroupsubset
109 compression=$BUNDLE2_COMPRESSIONS$
109 compression=$BUNDLE2_COMPRESSIONS$
110 getbundle
110 getbundle
111 httpheader=1024
111 httpheader=1024
112 httpmediatype=0.1rx,0.1tx,0.2tx
112 httpmediatype=0.1rx,0.1tx,0.2tx
113 known
113 known
114 lookup
114 lookup
115 pushkey
115 pushkey
116 unbundle=HG10GZ,HG10BZ,HG10UN
116 unbundle=HG10GZ,HG10BZ,HG10UN
117 unbundlehash
117 unbundlehash
118 Bundle2 capabilities:
118 Bundle2 capabilities:
119 HG20
119 HG20
120 bookmarks
120 bookmarks
121 changegroup
121 changegroup
122 01
122 01
123 02
123 02
124 checkheads
124 checkheads
125 related
125 related
126 digests
126 digests
127 md5
127 md5
128 sha1
128 sha1
129 sha512
129 sha512
130 error
130 error
131 abort
131 abort
132 unsupportedcontent
132 unsupportedcontent
133 pushraced
133 pushraced
134 pushkey
134 pushkey
135 hgtagsfnodes
135 hgtagsfnodes
136 listkeys
136 listkeys
137 phases
137 phases
138 heads
138 heads
139 pushkey
139 pushkey
140 remote-changegroup
140 remote-changegroup
141 http
141 http
142 https
142 https
143
143
144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
144 $ hg clone --stream -U http://localhost:$HGPORT server-disabled
145 warning: stream clone requested but server has them disabled
145 warning: stream clone requested but server has them disabled
146 requesting all changes
146 requesting all changes
147 adding changesets
147 adding changesets
148 adding manifests
148 adding manifests
149 adding file changes
149 adding file changes
150 added 2 changesets with 1025 changes to 1025 files
150 added 2 changesets with 1025 changes to 1025 files
151 new changesets 96ee1d7354c4:c17445101a72
151 new changesets 96ee1d7354c4:c17445101a72
152
152
153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
153 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
154 200 Script output follows
154 200 Script output follows
155 content-type: application/mercurial-0.2
155 content-type: application/mercurial-0.2
156
156
157
157
158 $ f --size body --hexdump --bytes 100
158 $ f --size body --hexdump --bytes 100
159 body: size=232
159 body: size=232
160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
160 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
161 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...|
162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
162 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest|
163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
163 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
164 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
165 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
166 0060: 69 73 20 66 |is f|
166 0060: 69 73 20 66 |is f|
167
167
168 #endif
168 #endif
169
169
170 $ killdaemons.py
170 $ killdaemons.py
171 $ cd server
171 $ cd server
172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
172 $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
173 $ cat hg.pid > $DAEMON_PIDS
173 $ cat hg.pid > $DAEMON_PIDS
174 $ cd ..
174 $ cd ..
175
175
176 Basic clone
176 Basic clone
177
177
178 #if stream-legacy
178 #if stream-legacy
179 $ hg clone --stream -U http://localhost:$HGPORT clone1
179 $ hg clone --stream -U http://localhost:$HGPORT clone1
180 streaming all changes
180 streaming all changes
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
181 1027 files to transfer, 96.3 KB of data (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
182 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
183 1027 files to transfer, 93.5 KB of data (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
184 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
185 searching for changes
185 searching for changes
186 no changes found
186 no changes found
187 $ cat server/errors.txt
187 $ cat server/errors.txt
188 #endif
188 #endif
189 #if stream-bundle2
189 #if stream-bundle2
190 $ hg clone --stream -U http://localhost:$HGPORT clone1
190 $ hg clone --stream -U http://localhost:$HGPORT clone1
191 streaming all changes
191 streaming all changes
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
192 1030 files to transfer, 96.5 KB of data (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
193 transferred 96.5 KB in * seconds (*/sec) (glob) (no-zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
194 1030 files to transfer, 93.6 KB of data (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
195 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
196
196
197 $ ls -1 clone1/.hg/cache
197 $ ls -1 clone1/.hg/cache
198 branch2-base
198 branch2-base
199 branch2-immutable
199 branch2-immutable
200 branch2-served
200 branch2-served
201 branch2-served.hidden
201 branch2-served.hidden
202 branch2-visible
202 branch2-visible
203 branch2-visible-hidden
203 branch2-visible-hidden
204 hgtagsfnodes1
204 hgtagsfnodes1
205 rbc-names-v1
205 rbc-names-v1
206 rbc-revs-v1
206 rbc-revs-v1
207 tags2
207 tags2
208 tags2-served
208 tags2-served
209 $ cat server/errors.txt
209 $ cat server/errors.txt
210 #endif
210 #endif
211
211
212 getbundle requests with stream=1 are uncompressed
212 getbundle requests with stream=1 are uncompressed
213
213
214 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
214 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
215 200 Script output follows
215 200 Script output follows
216 content-type: application/mercurial-0.2
216 content-type: application/mercurial-0.2
217
217
218
218
219 $ f --size --hex --bytes 256 body
219 $ f --size --hex --bytes 256 body
220 body: size=112262 (no-zstd !)
220 body: size=112262 (no-zstd !)
221 body: size=109410 (zstd no-rust !)
221 body: size=109410 (zstd no-rust !)
222 body: size=109431 (rust !)
222 body: size=109431 (rust !)
223 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
223 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
224 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
224 0010: 7f 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (no-zstd !)
225 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
225 0020: 05 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 39 38 |....Dbytecount98| (no-zstd !)
226 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
226 0030: 37 37 35 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |775filecount1030| (no-zstd !)
227 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
227 0010: 99 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (zstd no-rust !)
228 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
228 0010: ae 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| (rust !)
229 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
229 0020: 05 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 39 35 |....^bytecount95| (zstd no-rust !)
230 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
230 0020: 05 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 39 35 |....sbytecount95| (rust !)
231 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
231 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| (zstd !)
232 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
232 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
233 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
233 0050: 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 25 |ncode%2Cfncache%|
234 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
234 0060: 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 32 |2Cgeneraldelta%2|
235 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
235 0070: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| (no-zstd !)
236 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
236 0080: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| (no-zstd !)
237 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
237 0090: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| (no-zstd !)
238 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
238 00a0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| (no-zstd !)
239 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
239 00b0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| (no-zstd !)
240 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
240 00c0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| (no-zstd !)
241 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
241 00d0: d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 75 |..9............u| (no-zstd !)
242 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
242 00e0: 30 73 08 42 64 61 74 61 2f 31 2e 69 00 03 00 01 |0s.Bdata/1.i....| (no-zstd !)
243 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
243 00f0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................| (no-zstd !)
244 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
244 0070: 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 73 |Crevlog-compress| (zstd no-rust !)
245 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
245 0070: 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 65 |Cpersistent-node| (rust !)
246 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
246 0080: 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c 6f |ion-zstd%2Crevlo| (zstd no-rust !)
247 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
247 0080: 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f 6d |map%2Crevlog-com| (rust !)
248 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
248 0090: 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 6c |gv1%2Csparserevl| (zstd no-rust !)
249 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
249 0090: 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 43 |pression-zstd%2C| (rust !)
250 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
250 00a0: 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 08 |og%2Cstore....s.| (zstd no-rust !)
251 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
251 00a0: 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 73 |revlogv1%2Cspars| (rust !)
252 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
252 00b0: 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 00 |Bdata/0.i.......| (zstd no-rust !)
253 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
253 00b0: 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 00 |erevlog%2Cstore.| (rust !)
254 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
254 00c0: 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 00 |................| (zstd no-rust !)
255 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
255 00c0: 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 |...s.Bdata/0.i..| (rust !)
256 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
256 00d0: 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 23 |..........)c.I.#| (zstd no-rust !)
257 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
257 00d0: 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 00 |................| (rust !)
258 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
258 00e0: 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 00 |....Vg.g,i..9...| (zstd no-rust !)
259 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
259 00e0: 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 |...............)| (rust !)
260 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
260 00f0: 00 00 00 00 00 00 00 00 00 75 30 73 08 42 64 61 |.........u0s.Bda| (zstd no-rust !)
261 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
261 00f0: 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 |c.I.#....Vg.g,i.| (rust !)
262
262
263 --uncompressed is an alias to --stream
263 --uncompressed is an alias to --stream
264
264
265 #if stream-legacy
265 #if stream-legacy
266 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
266 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
267 streaming all changes
267 streaming all changes
268 1027 files to transfer, 96.3 KB of data (no-zstd !)
268 1027 files to transfer, 96.3 KB of data (no-zstd !)
269 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
269 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
270 1027 files to transfer, 93.5 KB of data (zstd !)
270 1027 files to transfer, 93.5 KB of data (zstd !)
271 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
271 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
272 searching for changes
272 searching for changes
273 no changes found
273 no changes found
274 #endif
274 #endif
275 #if stream-bundle2
275 #if stream-bundle2
276 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
276 $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
277 streaming all changes
277 streaming all changes
278 1030 files to transfer, 96.5 KB of data (no-zstd !)
278 1030 files to transfer, 96.5 KB of data (no-zstd !)
279 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
279 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
280 1030 files to transfer, 93.6 KB of data (zstd !)
280 1030 files to transfer, 93.6 KB of data (zstd !)
281 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
281 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
282 #endif
282 #endif
283
283
284 Clone with background file closing enabled
284 Clone with background file closing enabled
285
285
286 #if stream-legacy
286 #if stream-legacy
287 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
287 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
288 using http://localhost:$HGPORT/
288 using http://localhost:$HGPORT/
289 sending capabilities command
289 sending capabilities command
290 sending branchmap command
290 sending branchmap command
291 streaming all changes
291 streaming all changes
292 sending stream_out command
292 sending stream_out command
293 1027 files to transfer, 96.3 KB of data (no-zstd !)
293 1027 files to transfer, 96.3 KB of data (no-zstd !)
294 1027 files to transfer, 93.5 KB of data (zstd !)
294 1027 files to transfer, 93.5 KB of data (zstd !)
295 starting 4 threads for background file closing
295 starting 4 threads for background file closing
296 updating the branch cache
296 updating the branch cache
297 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
297 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
298 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
298 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
299 query 1; heads
299 query 1; heads
300 sending batch command
300 sending batch command
301 searching for changes
301 searching for changes
302 all remote heads known locally
302 all remote heads known locally
303 no changes found
303 no changes found
304 sending getbundle command
304 sending getbundle command
305 bundle2-input-bundle: with-transaction
305 bundle2-input-bundle: with-transaction
306 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
306 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
307 bundle2-input-part: "phase-heads" supported
307 bundle2-input-part: "phase-heads" supported
308 bundle2-input-part: total payload size 24
308 bundle2-input-part: total payload size 24
309 bundle2-input-bundle: 2 parts total
309 bundle2-input-bundle: 2 parts total
310 checking for updated bookmarks
310 checking for updated bookmarks
311 updating the branch cache
311 updating the branch cache
312 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
312 (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
313 #endif
313 #endif
314 #if stream-bundle2
314 #if stream-bundle2
315 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
315 $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
316 using http://localhost:$HGPORT/
316 using http://localhost:$HGPORT/
317 sending capabilities command
317 sending capabilities command
318 query 1; heads
318 query 1; heads
319 sending batch command
319 sending batch command
320 streaming all changes
320 streaming all changes
321 sending getbundle command
321 sending getbundle command
322 bundle2-input-bundle: with-transaction
322 bundle2-input-bundle: with-transaction
323 bundle2-input-part: "stream2" (params: 3 mandatory) supported
323 bundle2-input-part: "stream2" (params: 3 mandatory) supported
324 applying stream bundle
324 applying stream bundle
325 1030 files to transfer, 96.5 KB of data (no-zstd !)
325 1030 files to transfer, 96.5 KB of data (no-zstd !)
326 1030 files to transfer, 93.6 KB of data (zstd !)
326 1030 files to transfer, 93.6 KB of data (zstd !)
327 starting 4 threads for background file closing
327 starting 4 threads for background file closing
328 starting 4 threads for background file closing
328 starting 4 threads for background file closing
329 updating the branch cache
329 updating the branch cache
330 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
330 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
331 bundle2-input-part: total payload size 112094 (no-zstd !)
331 bundle2-input-part: total payload size 112094 (no-zstd !)
332 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
332 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
333 bundle2-input-part: total payload size 109216 (zstd !)
333 bundle2-input-part: total payload size 109216 (zstd !)
334 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
334 bundle2-input-part: "listkeys" (params: 1 mandatory) supported
335 bundle2-input-bundle: 2 parts total
335 bundle2-input-bundle: 2 parts total
336 checking for updated bookmarks
336 checking for updated bookmarks
337 updating the branch cache
337 updating the branch cache
338 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
338 (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
339 #endif
339 #endif
340
340
341 Cannot stream clone when there are secret changesets
341 Cannot stream clone when there are secret changesets
342
342
343 $ hg -R server phase --force --secret -r tip
343 $ hg -R server phase --force --secret -r tip
344 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
344 $ hg clone --stream -U http://localhost:$HGPORT secret-denied
345 warning: stream clone requested but server has them disabled
345 warning: stream clone requested but server has them disabled
346 requesting all changes
346 requesting all changes
347 adding changesets
347 adding changesets
348 adding manifests
348 adding manifests
349 adding file changes
349 adding file changes
350 added 1 changesets with 1 changes to 1 files
350 added 1 changesets with 1 changes to 1 files
351 new changesets 96ee1d7354c4
351 new changesets 96ee1d7354c4
352
352
353 $ killdaemons.py
353 $ killdaemons.py
354
354
355 Streaming of secrets can be overridden by server config
355 Streaming of secrets can be overridden by server config
356
356
357 $ cd server
357 $ cd server
358 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
358 $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
359 $ cat hg.pid > $DAEMON_PIDS
359 $ cat hg.pid > $DAEMON_PIDS
360 $ cd ..
360 $ cd ..
361
361
362 #if stream-legacy
362 #if stream-legacy
363 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
363 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
364 streaming all changes
364 streaming all changes
365 1027 files to transfer, 96.3 KB of data (no-zstd !)
365 1027 files to transfer, 96.3 KB of data (no-zstd !)
366 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
366 transferred 96.3 KB in * seconds (*/sec) (glob) (no-zstd !)
367 1027 files to transfer, 93.5 KB of data (zstd !)
367 1027 files to transfer, 93.5 KB of data (zstd !)
368 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
368 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
369 searching for changes
369 searching for changes
370 no changes found
370 no changes found
371 #endif
371 #endif
372 #if stream-bundle2
372 #if stream-bundle2
373 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
373 $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
374 streaming all changes
374 streaming all changes
375 1030 files to transfer, 96.5 KB of data (no-zstd !)
375 1030 files to transfer, 96.5 KB of data (no-zstd !)
376 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
376 transferred 96.5 KB in * seconds (* */sec) (glob) (no-zstd !)
377 1030 files to transfer, 93.6 KB of data (zstd !)
377 1030 files to transfer, 93.6 KB of data (zstd !)
378 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
378 transferred 93.6 KB in * seconds (* */sec) (glob) (zstd !)
379 #endif
379 #endif
380
380
381 $ killdaemons.py
381 $ killdaemons.py
382
382
383 Verify interaction between preferuncompressed and secret presence
383 Verify interaction between preferuncompressed and secret presence
384
384
385 $ cd server
385 $ cd server
386 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
386 $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
387 $ cat hg.pid > $DAEMON_PIDS
387 $ cat hg.pid > $DAEMON_PIDS
388 $ cd ..
388 $ cd ..
389
389
390 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
390 $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
391 requesting all changes
391 requesting all changes
392 adding changesets
392 adding changesets
393 adding manifests
393 adding manifests
394 adding file changes
394 adding file changes
395 added 1 changesets with 1 changes to 1 files
395 added 1 changesets with 1 changes to 1 files
396 new changesets 96ee1d7354c4
396 new changesets 96ee1d7354c4
397
397
398 $ killdaemons.py
398 $ killdaemons.py
399
399
400 Clone not allowed when full bundles disabled and can't serve secrets
400 Clone not allowed when full bundles disabled and can't serve secrets
401
401
402 $ cd server
402 $ cd server
403 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
403 $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
404 $ cat hg.pid > $DAEMON_PIDS
404 $ cat hg.pid > $DAEMON_PIDS
405 $ cd ..
405 $ cd ..
406
406
407 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
407 $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
408 warning: stream clone requested but server has them disabled
408 warning: stream clone requested but server has them disabled
409 requesting all changes
409 requesting all changes
410 remote: abort: server has pull-based clones disabled
410 remote: abort: server has pull-based clones disabled
411 abort: pull failed on remote
411 abort: pull failed on remote
412 (remove --pull if specified or upgrade Mercurial)
412 (remove --pull if specified or upgrade Mercurial)
413 [100]
413 [100]
414
414
415 Local stream clone with secrets involved
415 Local stream clone with secrets involved
416 (This is just a test over behavior: if you have access to the repo's files,
416 (This is just a test over behavior: if you have access to the repo's files,
417 there is no security so it isn't important to prevent a clone here.)
417 there is no security so it isn't important to prevent a clone here.)
418
418
419 $ hg clone -U --stream server local-secret
419 $ hg clone -U --stream server local-secret
420 warning: stream clone requested but server has them disabled
420 warning: stream clone requested but server has them disabled
421 requesting all changes
421 requesting all changes
422 adding changesets
422 adding changesets
423 adding manifests
423 adding manifests
424 adding file changes
424 adding file changes
425 added 1 changesets with 1 changes to 1 files
425 added 1 changesets with 1 changes to 1 files
426 new changesets 96ee1d7354c4
426 new changesets 96ee1d7354c4
427
427
428 Stream clone while repo is changing:
428 Stream clone while repo is changing:
429
429
430 $ mkdir changing
430 $ mkdir changing
431 $ cd changing
431 $ cd changing
432
432
433 extension for delaying the server process so we reliably can modify the repo
433 extension for delaying the server process so we reliably can modify the repo
434 while cloning
434 while cloning
435
435
436 $ cat > delayer.py <<EOF
436 $ cat > stream_steps.py <<EOF
437 > import time
437 > import os
438 > from mercurial import extensions, vfs
438 > import sys
439 > def __call__(orig, self, path, *args, **kwargs):
439 > from mercurial import (
440 > if path == 'data/f1.i':
440 > encoding,
441 > time.sleep(2)
441 > extensions,
442 > return orig(self, path, *args, **kwargs)
442 > streamclone,
443 > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
443 > testing,
444 > )
445 > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
446 > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
447 >
448 > def _test_sync_point_walk_1(orig, repo):
449 > testing.write_file(WALKED_FILE_1)
450 >
451 > def _test_sync_point_walk_2(orig, repo):
452 > assert repo._currentlock(repo._lockref) is None
453 > testing.wait_file(WALKED_FILE_2)
454 >
455 > extensions.wrapfunction(
456 > streamclone,
457 > '_test_sync_point_walk_1',
458 > _test_sync_point_walk_1
459 > )
460 > extensions.wrapfunction(
461 > streamclone,
462 > '_test_sync_point_walk_2',
463 > _test_sync_point_walk_2
464 > )
444 > EOF
465 > EOF
445
466
446 prepare repo with small and big file to cover both code paths in emitrevlogdata
467 prepare repo with small and big file to cover both code paths in emitrevlogdata
447
468
448 $ hg init repo
469 $ hg init repo
449 $ touch repo/f1
470 $ touch repo/f1
450 $ $TESTDIR/seq.py 50000 > repo/f2
471 $ $TESTDIR/seq.py 50000 > repo/f2
451 $ hg -R repo ci -Aqm "0"
472 $ hg -R repo ci -Aqm "0"
452 $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
473 $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
474 $ export HG_TEST_STREAM_WALKED_FILE_1
475 $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
476 $ export HG_TEST_STREAM_WALKED_FILE_2
477 $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
478 $ export HG_TEST_STREAM_WALKED_FILE_3
479 # $ cat << EOF >> $HGRCPATH
480 # > [hooks]
481 # > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
482 # > EOF
483 $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
453 $ cat hg.pid >> $DAEMON_PIDS
484 $ cat hg.pid >> $DAEMON_PIDS
454
485
455 clone while modifying the repo between stating file with write lock and
486 clone while modifying the repo between stating file with write lock and
456 actually serving file content
487 actually serving file content
457
488
458 $ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
489 $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
459 $ sleep 1
490 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
460 $ echo >> repo/f1
491 $ echo >> repo/f1
461 $ echo >> repo/f2
492 $ echo >> repo/f2
462 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
493 $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
463 $ wait
494 $ touch $HG_TEST_STREAM_WALKED_FILE_2
495 $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
464 $ hg -R clone id
496 $ hg -R clone id
465 000000000000
497 000000000000
498 $ cat errors.log
466 $ cd ..
499 $ cd ..
467
500
468 Stream repository with bookmarks
501 Stream repository with bookmarks
469 --------------------------------
502 --------------------------------
470
503
471 (revert introduction of secret changeset)
504 (revert introduction of secret changeset)
472
505
473 $ hg -R server phase --draft 'secret()'
506 $ hg -R server phase --draft 'secret()'
474
507
475 add a bookmark
508 add a bookmark
476
509
477 $ hg -R server bookmark -r tip some-bookmark
510 $ hg -R server bookmark -r tip some-bookmark
478
511
479 clone it
512 clone it
480
513
481 #if stream-legacy
514 #if stream-legacy
482 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
515 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
483 streaming all changes
516 streaming all changes
484 1027 files to transfer, 96.3 KB of data (no-zstd !)
517 1027 files to transfer, 96.3 KB of data (no-zstd !)
485 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
518 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
486 1027 files to transfer, 93.5 KB of data (zstd !)
519 1027 files to transfer, 93.5 KB of data (zstd !)
487 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
520 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
488 searching for changes
521 searching for changes
489 no changes found
522 no changes found
490 updating to branch default
523 updating to branch default
491 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
524 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
492 #endif
525 #endif
493 #if stream-bundle2
526 #if stream-bundle2
494 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
527 $ hg clone --stream http://localhost:$HGPORT with-bookmarks
495 streaming all changes
528 streaming all changes
496 1033 files to transfer, 96.6 KB of data (no-zstd !)
529 1033 files to transfer, 96.6 KB of data (no-zstd !)
497 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
530 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
498 1033 files to transfer, 93.8 KB of data (zstd !)
531 1033 files to transfer, 93.8 KB of data (zstd !)
499 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
532 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
500 updating to branch default
533 updating to branch default
501 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
534 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
502 #endif
535 #endif
503 $ hg -R with-bookmarks bookmarks
536 $ hg -R with-bookmarks bookmarks
504 some-bookmark 1:c17445101a72
537 some-bookmark 1:c17445101a72
505
538
506 Stream repository with phases
539 Stream repository with phases
507 -----------------------------
540 -----------------------------
508
541
509 Clone as publishing
542 Clone as publishing
510
543
511 $ hg -R server phase -r 'all()'
544 $ hg -R server phase -r 'all()'
512 0: draft
545 0: draft
513 1: draft
546 1: draft
514
547
515 #if stream-legacy
548 #if stream-legacy
516 $ hg clone --stream http://localhost:$HGPORT phase-publish
549 $ hg clone --stream http://localhost:$HGPORT phase-publish
517 streaming all changes
550 streaming all changes
518 1027 files to transfer, 96.3 KB of data (no-zstd !)
551 1027 files to transfer, 96.3 KB of data (no-zstd !)
519 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
552 transferred 96.3 KB in * seconds (*) (glob) (no-zstd !)
520 1027 files to transfer, 93.5 KB of data (zstd !)
553 1027 files to transfer, 93.5 KB of data (zstd !)
521 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
554 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
522 searching for changes
555 searching for changes
523 no changes found
556 no changes found
524 updating to branch default
557 updating to branch default
525 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
558 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
526 #endif
559 #endif
527 #if stream-bundle2
560 #if stream-bundle2
528 $ hg clone --stream http://localhost:$HGPORT phase-publish
561 $ hg clone --stream http://localhost:$HGPORT phase-publish
529 streaming all changes
562 streaming all changes
530 1033 files to transfer, 96.6 KB of data (no-zstd !)
563 1033 files to transfer, 96.6 KB of data (no-zstd !)
531 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
564 transferred 96.6 KB in * seconds (* */sec) (glob) (no-zstd !)
532 1033 files to transfer, 93.8 KB of data (zstd !)
565 1033 files to transfer, 93.8 KB of data (zstd !)
533 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
566 transferred 93.8 KB in * seconds (* */sec) (glob) (zstd !)
534 updating to branch default
567 updating to branch default
535 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
568 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
536 #endif
569 #endif
537 $ hg -R phase-publish phase -r 'all()'
570 $ hg -R phase-publish phase -r 'all()'
538 0: public
571 0: public
539 1: public
572 1: public
540
573
541 Clone as non publishing
574 Clone as non publishing
542
575
543 $ cat << EOF >> server/.hg/hgrc
576 $ cat << EOF >> server/.hg/hgrc
544 > [phases]
577 > [phases]
545 > publish = False
578 > publish = False
546 > EOF
579 > EOF
547 $ killdaemons.py
580 $ killdaemons.py
548 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
581 $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
549 $ cat hg.pid > $DAEMON_PIDS
582 $ cat hg.pid > $DAEMON_PIDS
550
583
551 #if stream-legacy
584 #if stream-legacy
552
585
553 With v1 of the stream protocol, changeset are always cloned as public. It make
586 With v1 of the stream protocol, changeset are always cloned as public. It make
554 stream v1 unsuitable for non-publishing repository.
587 stream v1 unsuitable for non-publishing repository.
555
588
556 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
589 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
557 streaming all changes
590 streaming all changes
558 1027 files to transfer, 96.3 KB of data (no-zstd !)
591 1027 files to transfer, 96.3 KB of data (no-zstd !)
559 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
592 transferred 96.3 KB in * seconds (* */sec) (glob) (no-zstd !)
560 1027 files to transfer, 93.5 KB of data (zstd !)
593 1027 files to transfer, 93.5 KB of data (zstd !)
561 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
594 transferred 93.5 KB in * seconds (* */sec) (glob) (zstd !)
562 searching for changes
595 searching for changes
563 no changes found
596 no changes found
564 updating to branch default
597 updating to branch default
565 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
598 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
566 $ hg -R phase-no-publish phase -r 'all()'
599 $ hg -R phase-no-publish phase -r 'all()'
567 0: public
600 0: public
568 1: public
601 1: public
569 #endif
602 #endif
570 #if stream-bundle2
603 #if stream-bundle2
571 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
604 $ hg clone --stream http://localhost:$HGPORT phase-no-publish
572 streaming all changes
605 streaming all changes
573 1034 files to transfer, 96.7 KB of data (no-zstd !)
606 1034 files to transfer, 96.7 KB of data (no-zstd !)
574 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
607 transferred 96.7 KB in * seconds (* */sec) (glob) (no-zstd !)
575 1034 files to transfer, 93.9 KB of data (zstd !)
608 1034 files to transfer, 93.9 KB of data (zstd !)
576 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
609 transferred 93.9 KB in * seconds (* */sec) (glob) (zstd !)
577 updating to branch default
610 updating to branch default
578 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
611 1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
579 $ hg -R phase-no-publish phase -r 'all()'
612 $ hg -R phase-no-publish phase -r 'all()'
580 0: draft
613 0: draft
581 1: draft
614 1: draft
582 #endif
615 #endif
583
616
584 $ killdaemons.py
617 $ killdaemons.py
585
618
586 #if stream-legacy
619 #if stream-legacy
587
620
588 With v1 of the stream protocol, changeset are always cloned as public. There's
621 With v1 of the stream protocol, changeset are always cloned as public. There's
589 no obsolescence markers exchange in stream v1.
622 no obsolescence markers exchange in stream v1.
590
623
591 #endif
624 #endif
592 #if stream-bundle2
625 #if stream-bundle2
593
626
594 Stream repository with obsolescence
627 Stream repository with obsolescence
595 -----------------------------------
628 -----------------------------------
596
629
597 Clone non-publishing with obsolescence
630 Clone non-publishing with obsolescence
598
631
599 $ cat >> $HGRCPATH << EOF
632 $ cat >> $HGRCPATH << EOF
600 > [experimental]
633 > [experimental]
601 > evolution=all
634 > evolution=all
602 > EOF
635 > EOF
603
636
604 $ cd server
637 $ cd server
605 $ echo foo > foo
638 $ echo foo > foo
606 $ hg -q commit -m 'about to be pruned'
639 $ hg -q commit -m 'about to be pruned'
607 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
640 $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
608 1 new obsolescence markers
641 1 new obsolescence markers
609 obsoleted 1 changesets
642 obsoleted 1 changesets
610 $ hg up null -q
643 $ hg up null -q
611 $ hg log -T '{rev}: {phase}\n'
644 $ hg log -T '{rev}: {phase}\n'
612 1: draft
645 1: draft
613 0: draft
646 0: draft
614 $ hg serve -p $HGPORT -d --pid-file=hg.pid
647 $ hg serve -p $HGPORT -d --pid-file=hg.pid
615 $ cat hg.pid > $DAEMON_PIDS
648 $ cat hg.pid > $DAEMON_PIDS
616 $ cd ..
649 $ cd ..
617
650
618 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
651 $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
619 streaming all changes
652 streaming all changes
620 1035 files to transfer, 97.1 KB of data (no-zstd !)
653 1035 files to transfer, 97.1 KB of data (no-zstd !)
621 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
654 transferred 97.1 KB in * seconds (* */sec) (glob) (no-zstd !)
622 1035 files to transfer, 94.3 KB of data (zstd !)
655 1035 files to transfer, 94.3 KB of data (zstd !)
623 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
656 transferred 94.3 KB in * seconds (* */sec) (glob) (zstd !)
624 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
657 $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
625 1: draft
658 1: draft
626 0: draft
659 0: draft
627 $ hg debugobsolete -R with-obsolescence
660 $ hg debugobsolete -R with-obsolescence
628 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
661 50382b884f66690b7045cac93a540cba4d4c906f 0 {c17445101a72edac06facd130d14808dfbd5c7c2} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
629
662
630 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
663 $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
631 streaming all changes
664 streaming all changes
632 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
665 remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
633 abort: pull failed on remote
666 abort: pull failed on remote
634 [100]
667 [100]
635
668
636 $ killdaemons.py
669 $ killdaemons.py
637
670
638 #endif
671 #endif
General Comments 0
You need to be logged in to leave comments. Login now