@@ -1,918 +1,920 @@
1 | # streamclone.py - producing and consuming streaming repository data
2 | #
3 | # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 | #
5 | # This software may be used and distributed according to the terms of the
6 | # GNU General Public License version 2 or any later version.
7 |
8 | from __future__ import absolute_import
9 |
10 | import contextlib
11 | import errno
12 | import os
13 | import struct
14 |
15 | from .i18n import _
16 | from .pycompat import open
17 | from .interfaces import repository
18 | from . import (
19 |     bookmarks,
20 |     cacheutil,
21 |     error,
22 |     narrowspec,
23 |     phases,
24 |     pycompat,
25 |     requirements as requirementsmod,
26 |     scmutil,
27 |     store,
28 |     util,
29 | )
30 | from .utils import (
31 |     stringutil,
32 | )
33 |
34 |
35 | def canperformstreamclone(pullop, bundle2=False):
36 |     """Whether it is possible to perform a streaming clone as part of pull.
37 |
38 |     ``bundle2`` will cause the function to consider stream clone through
39 |     bundle2 and only through bundle2.
40 |
41 |     Returns a tuple of (supported, requirements). ``supported`` is True if
42 |     streaming clone is supported and False otherwise. ``requirements`` is
43 |     a set of repo requirements from the remote, or ``None`` if stream clone
44 |     isn't supported.
45 |     """
46 |     repo = pullop.repo
47 |     remote = pullop.remote
48 |
49 |     bundle2supported = False
50 |     if pullop.canusebundle2:
51 |         if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
52 |             bundle2supported = True
53 |         # else
54 |         # Server doesn't support bundle2 stream clone or doesn't support
55 |         # the versions we support. Fall back and possibly allow legacy.
56 |
57 |     # Ensures legacy code path uses available bundle2.
58 |     if bundle2supported and not bundle2:
59 |         return False, None
60 |     # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
61 |     elif bundle2 and not bundle2supported:
62 |         return False, None
63 |
64 |     # Streaming clone only works on empty repositories.
65 |     if len(repo):
66 |         return False, None
67 |
68 |     # Streaming clone only works if all data is being requested.
69 |     if pullop.heads:
70 |         return False, None
71 |
72 |     streamrequested = pullop.streamclonerequested
73 |
74 |     # If we don't have a preference, let the server decide for us. This
75 |     # likely only comes into play in LANs.
76 |     if streamrequested is None:
77 |         # The server can advertise whether to prefer streaming clone.
78 |         streamrequested = remote.capable(b'stream-preferred')
79 |
80 |     if not streamrequested:
81 |         return False, None
82 |
83 |     # In order for stream clone to work, the client has to support all the
84 |     # requirements advertised by the server.
85 |     #
86 |     # The server advertises its requirements via the "stream" and "streamreqs"
87 |     # capability. "stream" (a value-less capability) is advertised if and only
88 |     # if the only requirement is "revlogv1." Else, the "streamreqs" capability
89 |     # is advertised and contains a comma-delimited list of requirements.
90 |     requirements = set()
91 |     if remote.capable(b'stream'):
92 |         requirements.add(requirementsmod.REVLOGV1_REQUIREMENT)
93 |     else:
94 |         streamreqs = remote.capable(b'streamreqs')
95 |         # This is weird and shouldn't happen with modern servers.
96 |         if not streamreqs:
97 |             pullop.repo.ui.warn(
98 |                 _(
99 |                     b'warning: stream clone requested but server has them '
100 |                     b'disabled\n'
101 |                 )
102 |             )
103 |             return False, None
104 |
105 |         streamreqs = set(streamreqs.split(b','))
106 |         # Server requires something we don't support. Bail.
107 |         missingreqs = streamreqs - repo.supportedformats
108 |         if missingreqs:
109 |             pullop.repo.ui.warn(
110 |                 _(
111 |                     b'warning: stream clone requested but client is missing '
112 |                     b'requirements: %s\n'
113 |                 )
114 |                 % b', '.join(sorted(missingreqs))
115 |             )
116 |             pullop.repo.ui.warn(
117 |                 _(
118 |                     b'(see https://www.mercurial-scm.org/wiki/MissingRequirement '
119 |                     b'for more information)\n'
120 |                 )
121 |             )
122 |             return False, None
123 |         requirements = streamreqs
124 |
125 |     return True, requirements
126 |
127 |
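The comment block above describes the "stream"/"streamreqs" capability handshake in prose. As a minimal illustration of the "streamreqs" check (the capability value and the client's supported set below are made up for the example, not taken from a real server):

    # Hypothetical value of the server's "streamreqs" capability.
    streamreqs_value = b'revlogv1,fncache,dotencode'
    client_supported = {b'revlogv1', b'fncache', b'dotencode', b'generaldelta'}

    streamreqs = set(streamreqs_value.split(b','))
    missingreqs = streamreqs - client_supported
    # An empty difference means the client can consume the stream.
    assert not missingreqs
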
128 | def maybeperformlegacystreamclone(pullop):
129 |     """Possibly perform a legacy stream clone operation.
130 |
131 |     Legacy stream clones are performed as part of pull but before all other
132 |     operations.
133 |
134 |     A legacy stream clone will not be performed if a bundle2 stream clone is
135 |     supported.
136 |     """
137 |     from . import localrepo
138 |
139 |     supported, requirements = canperformstreamclone(pullop)
140 |
141 |     if not supported:
142 |         return
143 |
144 |     repo = pullop.repo
145 |     remote = pullop.remote
146 |
147 |     # Save remote branchmap. We will use it later to speed up branchcache
148 |     # creation.
149 |     rbranchmap = None
150 |     if remote.capable(b'branchmap'):
151 |         with remote.commandexecutor() as e:
152 |             rbranchmap = e.callcommand(b'branchmap', {}).result()
153 |
154 |     repo.ui.status(_(b'streaming all changes\n'))
155 |
156 |     with remote.commandexecutor() as e:
157 |         fp = e.callcommand(b'stream_out', {}).result()
158 |
159 |     # TODO strictly speaking, this code should all be inside the context
160 |     # manager because the context manager is supposed to ensure all wire state
161 |     # is flushed when exiting. But the legacy peers don't do this, so it
162 |     # doesn't matter.
163 |     l = fp.readline()
164 |     try:
165 |         resp = int(l)
166 |     except ValueError:
167 |         raise error.ResponseError(
168 |             _(b'unexpected response from remote server:'), l
169 |         )
170 |     if resp == 1:
171 |         raise error.Abort(_(b'operation forbidden by server'))
172 |     elif resp == 2:
173 |         raise error.Abort(_(b'locking the remote repository failed'))
174 |     elif resp != 0:
175 |         raise error.Abort(_(b'the server sent an unknown error code'))
176 |
177 |     l = fp.readline()
178 |     try:
179 |         filecount, bytecount = map(int, l.split(b' ', 1))
180 |     except (ValueError, TypeError):
181 |         raise error.ResponseError(
182 |             _(b'unexpected response from remote server:'), l
183 |         )
184 |
185 |     with repo.lock():
186 |         consumev1(repo, fp, filecount, bytecount)
187 |
188 |         # new requirements = old non-format requirements +
189 |         #                    new format-related remote requirements
190 |         # requirements from the streamed-in repository
191 |         repo.requirements = requirements | (
192 |             repo.requirements - repo.supportedformats
193 |         )
194 |         repo.svfs.options = localrepo.resolvestorevfsoptions(
195 |             repo.ui, repo.requirements, repo.features
196 |         )
197 |         scmutil.writereporequirements(repo)
198 |
199 |         if rbranchmap:
200 |             repo._branchcaches.replace(repo, rbranchmap)
201 |
202 |         repo.invalidate()
203 |
204 |
205 | def allowservergeneration(repo):
206 |     """Whether streaming clones are allowed from the server."""
207 |     if repository.REPO_FEATURE_STREAM_CLONE not in repo.features:
208 |         return False
209 |
210 |     if not repo.ui.configbool(b'server', b'uncompressed', untrusted=True):
211 |         return False
212 |
213 |     # The way stream clone works makes it impossible to hide secret changesets.
214 |     # So don't allow this by default.
215 |     secret = phases.hassecret(repo)
216 |     if secret:
217 |         return repo.ui.configbool(b'server', b'uncompressedallowsecret')
218 |
219 |     return True
220 |
221 |
222 | # This is its own function so extensions can override it.
223 | def _walkstreamfiles(repo, matcher=None):
224 |     return repo.store.walk(matcher)
225 |
226 |
227 | def generatev1(repo):
228 |     """Emit content for version 1 of a streaming clone.
229 |
230 |     This returns a 3-tuple of (file count, byte size, data iterator).
231 |
232 |     The data iterator consists of N entries for each file being transferred.
233 |     Each file entry starts as a line with the file name and integer size
234 |     delimited by a null byte.
235 |
236 |     The raw file data follows. Following the raw file data is the next file
237 |     entry, or EOF.
238 |
239 |     When used on the wire protocol, an additional line indicating protocol
240 |     success will be prepended to the stream. This function is not responsible
241 |     for adding it.
242 |
243 |     This function will obtain a repository lock to ensure a consistent view of
244 |     the store is captured. It therefore may raise LockError.
245 |     """
246 |     entries = []
247 |     total_bytes = 0
248 |     # Get consistent snapshot of repo, lock during scan.
249 |     with repo.lock():
250 |         repo.ui.debug(b'scanning\n')
251 |         for file_type, name, ename, size in _walkstreamfiles(repo):
252 |             if size:
253 |                 entries.append((name, size))
254 |                 total_bytes += size
255 |         _test_sync_point_walk_1(repo)
256 |     _test_sync_point_walk_2(repo)
257 |
258 |     repo.ui.debug(
259 |         b'%d files, %d bytes to transfer\n' % (len(entries), total_bytes)
260 |     )
261 |
262 |     svfs = repo.svfs
263 |     debugflag = repo.ui.debugflag
264 |
265 |     def emitrevlogdata():
266 |         for name, size in entries:
267 |             if debugflag:
268 |                 repo.ui.debug(b'sending %s (%d bytes)\n' % (name, size))
269 |             # partially encode name over the wire for backwards compat
270 |             yield b'%s\0%d\n' % (store.encodedir(name), size)
271 |             # auditing at this stage is both pointless (paths are already
272 |             # trusted by the local repo) and expensive
273 |             with svfs(name, b'rb', auditpath=False) as fp:
274 |                 if size <= 65536:
275 |                     yield fp.read(size)
276 |                 else:
277 |                     for chunk in util.filechunkiter(fp, limit=size):
278 |                         yield chunk
279 |
280 |     return len(entries), total_bytes, emitrevlogdata()
281 |
282 |
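To make the framing described in ``generatev1()``'s docstring concrete, here is a small self-contained sketch (illustrative only; the file name and payload are invented) that encodes one file entry and decodes it back:

    import io

    name, payload = b'data/foo.i', b'\x00' * 128
    # Each entry is "<name>\0<size>\n" followed by exactly <size> bytes of data.
    frame = b'%s\0%d\n' % (name, len(payload)) + payload

    fp = io.BytesIO(frame)
    line = fp.readline()
    decoded_name, size = line[:-1].split(b'\0', 1)
    data = fp.read(int(size))
    assert (decoded_name, data) == (name, payload)
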
283 | def generatev1wireproto(repo):
284 |     """Emit content for version 1 of streaming clone suitable for the wire.
285 |
286 |     This is the data output from ``generatev1()`` with 2 header lines. The
287 |     first line indicates overall success. The 2nd contains the file count and
288 |     byte size of payload.
289 |
290 |     The success line contains "0" for success, "1" for stream generation not
291 |     allowed, and "2" for error locking the repository (possibly indicating
292 |     a permissions error for the server process).
293 |     """
294 |     if not allowservergeneration(repo):
295 |         yield b'1\n'
296 |         return
297 |
298 |     try:
299 |         filecount, bytecount, it = generatev1(repo)
300 |     except error.LockError:
301 |         yield b'2\n'
302 |         return
303 |
304 |     # Indicates successful response.
305 |     yield b'0\n'
306 |     yield b'%d %d\n' % (filecount, bytecount)
307 |     for chunk in it:
308 |         yield chunk
309 |
310 |
311 | def generatebundlev1(repo, compression=b'UN'):
312 |     """Emit content for version 1 of a stream clone bundle.
313 |
314 |     The first 4 bytes of the output ("HGS1") denote this as stream clone
315 |     bundle version 1.
316 |
317 |     The next 2 bytes indicate the compression type. Only "UN" is currently
318 |     supported.
319 |
320 |     The next 16 bytes are two 64-bit big endian unsigned integers indicating
321 |     file count and byte count, respectively.
322 |
323 |     The next 2 bytes are a 16-bit big endian unsigned short declaring the length
324 |     of the requirements string, including a trailing \0. The following N bytes
325 |     are the requirements string, which is ASCII containing a comma-delimited
326 |     list of repo requirements that are needed to support the data.
327 |
328 |     The remaining content is the output of ``generatev1()`` (which may be
329 |     compressed in the future).
330 |
331 |     Returns a tuple of (requirements, data generator).
332 |     """
333 |     if compression != b'UN':
334 |         raise ValueError(b'we do not support the compression argument yet')
335 |
336 |     requirements = repo.requirements & repo.supportedformats
337 |     requires = b','.join(sorted(requirements))
338 |
339 |     def gen():
340 |         yield b'HGS1'
341 |         yield compression
342 |
343 |         filecount, bytecount, it = generatev1(repo)
344 |         repo.ui.status(
345 |             _(b'writing %d bytes for %d files\n') % (bytecount, filecount)
346 |         )
347 |
348 |         yield struct.pack(b'>QQ', filecount, bytecount)
349 |         yield struct.pack(b'>H', len(requires) + 1)
350 |         yield requires + b'\0'
351 |
352 |         # This is where we'll add compression in the future.
353 |         assert compression == b'UN'
354 |
355 |         progress = repo.ui.makeprogress(
356 |             _(b'bundle'), total=bytecount, unit=_(b'bytes')
357 |         )
358 |         progress.update(0)
359 |
360 |         for chunk in it:
361 |             progress.increment(step=len(chunk))
362 |             yield chunk
363 |
364 |         progress.complete()
365 |
366 |     return requirements, gen()
367 |
368 |
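A sketch of the bundle header layout documented above, built and re-parsed by hand with ``struct`` (the requirements string and the counts are made-up example values):

    import struct

    requires = b'revlogv1,fncache'
    header = b'HGS1'                                  # 4-byte magic
    header += b'UN'                                   # 2-byte compression type
    header += struct.pack(b'>QQ', 3, 12345)           # file count, byte count
    header += struct.pack(b'>H', len(requires) + 1)   # length incl. trailing \0
    header += requires + b'\0'

    # Re-parse it the way readbundle1header() below does after the magic.
    assert header[:4] == b'HGS1' and header[4:6] == b'UN'
    filecount, bytecount = struct.unpack(b'>QQ', header[6:22])
    (requireslen,) = struct.unpack(b'>H', header[22:24])
    requirements = set(header[24:24 + requireslen].rstrip(b'\0').split(b','))
    assert (filecount, bytecount) == (3, 12345)
    assert requirements == {b'revlogv1', b'fncache'}
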
369 | def consumev1(repo, fp, filecount, bytecount):
370 |     """Apply the contents from version 1 of a streaming clone file handle.
371 |
372 |     This takes the output from "stream_out" and applies it to the specified
373 |     repository.
374 |
375 |     Like "stream_out," the status line added by the wire protocol is not
376 |     handled by this function.
377 |     """
378 |     with repo.lock():
379 |         repo.ui.status(
380 |             _(b'%d files to transfer, %s of data\n')
381 |             % (filecount, util.bytecount(bytecount))
382 |         )
383 |         progress = repo.ui.makeprogress(
384 |             _(b'clone'), total=bytecount, unit=_(b'bytes')
385 |         )
386 |         progress.update(0)
387 |         start = util.timer()
388 |
389 |         # TODO: get rid of (potential) inconsistency
390 |         #
391 |         # If transaction is started and any @filecache property is
392 |         # changed at this point, it causes inconsistency between
393 |         # in-memory cached property and streamclone-ed file on the
394 |         # disk. Nested transaction prevents transaction scope "clone"
395 |         # below from writing in-memory changes out at the end of it,
396 |         # even though in-memory changes are discarded at the end of it
397 |         # regardless of transaction nesting.
398 |         #
399 |         # But transaction nesting can't be simply prohibited, because
400 |         # nesting occurs also in ordinary case (e.g. enabling
401 |         # clonebundles).
402 |
403 |         with repo.transaction(b'clone'):
404 |             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
405 |                 for i in pycompat.xrange(filecount):
406 |                     # XXX doesn't support '\n' or '\r' in filenames
407 |                     l = fp.readline()
408 |                     try:
409 |                         name, size = l.split(b'\0', 1)
410 |                         size = int(size)
411 |                     except (ValueError, TypeError):
412 |                         raise error.ResponseError(
413 |                             _(b'unexpected response from remote server:'), l
414 |                         )
415 |                     if repo.ui.debugflag:
416 |                         repo.ui.debug(
417 |                             b'adding %s (%s)\n' % (name, util.bytecount(size))
418 |                         )
419 |                     # for backwards compat, name was partially encoded
420 |                     path = store.decodedir(name)
421 |                     with repo.svfs(path, b'w', backgroundclose=True) as ofp:
422 |                         for chunk in util.filechunkiter(fp, limit=size):
423 |                             progress.increment(step=len(chunk))
424 |                             ofp.write(chunk)
425 |
426 |         # force @filecache properties to be reloaded from
427 |         # streamclone-ed file at next access
428 |         repo.invalidate(clearfilecache=True)
429 |
430 |         elapsed = util.timer() - start
431 |         if elapsed <= 0:
432 |             elapsed = 0.001
433 |         progress.complete()
434 |         repo.ui.status(
435 |             _(b'transferred %s in %.1f seconds (%s/sec)\n')
436 |             % (
437 |                 util.bytecount(bytecount),
438 |                 elapsed,
439 |                 util.bytecount(bytecount / elapsed),
440 |             )
441 |         )
442 |
443 |
444 | def readbundle1header(fp):
445 |     compression = fp.read(2)
446 |     if compression != b'UN':
447 |         raise error.Abort(
448 |             _(
449 |                 b'only uncompressed stream clone bundles are '
450 |                 b'supported; got %s'
451 |             )
452 |             % compression
453 |         )
454 |
455 |     filecount, bytecount = struct.unpack(b'>QQ', fp.read(16))
456 |     requireslen = struct.unpack(b'>H', fp.read(2))[0]
457 |     requires = fp.read(requireslen)
458 |
459 |     if not requires.endswith(b'\0'):
460 |         raise error.Abort(
461 |             _(
462 |                 b'malformed stream clone bundle: '
463 |                 b'requirements not properly encoded'
464 |             )
465 |         )
466 |
467 |     requirements = set(requires.rstrip(b'\0').split(b','))
468 |
469 |     return filecount, bytecount, requirements
470 |
471 |
472 | def applybundlev1(repo, fp):
473 |     """Apply the content from a stream clone bundle version 1.
474 |
475 |     We assume the 4 byte header has been read and validated and the file handle
476 |     is at the 2 byte compression identifier.
477 |     """
478 |     if len(repo):
479 |         raise error.Abort(
480 |             _(b'cannot apply stream clone bundle on non-empty repo')
481 |         )
482 |
483 |     filecount, bytecount, requirements = readbundle1header(fp)
484 |     missingreqs = requirements - repo.supportedformats
485 |     if missingreqs:
486 |         raise error.Abort(
487 |             _(b'unable to apply stream clone: unsupported format: %s')
488 |             % b', '.join(sorted(missingreqs))
489 |         )
490 |
491 |     consumev1(repo, fp, filecount, bytecount)
492 |
493 |
494 | class streamcloneapplier(object):
495 |     """Class to manage applying streaming clone bundles.
496 |
497 |     We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
498 |     readers to perform bundle type-specific functionality.
499 |     """
500 |
501 |     def __init__(self, fh):
502 |         self._fh = fh
503 |
504 |     def apply(self, repo):
505 |         return applybundlev1(repo, self._fh)
506 |
507 |
508 | # type of file to stream
509 | _fileappend = 0  # append only file
510 | _filefull = 1  # full snapshot file
511 |
512 | # Source of the file
513 | _srcstore = b's'  # store (svfs)
514 | _srccache = b'c'  # cache (cache)
515 |
516 | # This is its own function so extensions can override it.
517 | def _walkstreamfullstorefiles(repo):
518 |     """list snapshot files from the store"""
519 |     fnames = []
520 |     if not repo.publishing():
521 |         fnames.append(b'phaseroots')
522 |     return fnames
523 |
524 |
525 | def _filterfull(entry, copy, vfsmap):
526 |     """actually copy the snapshot files"""
527 |     src, name, ftype, data = entry
528 |     if ftype != _filefull:
529 |         return entry
530 |     return (src, name, ftype, copy(vfsmap[src].join(name)))
531 |
532 |
533 | @contextlib.contextmanager
534 | def maketempcopies():
535 |     """return a function to temporarily copy files"""
536 |     files = []
537 |     try:
538 |
539 |         def copy(src):
540 |             fd, dst = pycompat.mkstemp()
541 |             os.close(fd)
542 |             files.append(dst)
543 |             util.copyfiles(src, dst, hardlink=True)
544 |             return dst
545 |
546 |         yield copy
547 |     finally:
548 |         for tmp in files:
549 |             util.tryunlink(tmp)
550 |
551 |
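A brief usage sketch for ``maketempcopies()`` (assuming ``mercurial`` is importable; the source file is created just for the demonstration): the yielded ``copy`` function snapshots a file via hardlink, and every temporary copy is unlinked when the context exits.

    import os
    import tempfile

    from mercurial.streamclone import maketempcopies

    fd, src = tempfile.mkstemp()
    os.write(fd, b'some store data')
    os.close(fd)

    with maketempcopies() as copy:
        tmp = copy(src)
        assert os.path.exists(tmp)   # the snapshot lives inside the context
    assert not os.path.exists(tmp)   # and is unlinked on exit
    os.unlink(src)
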
552 | def _makemap(repo):
553 |     """make a (src -> vfs) map for the repo"""
554 |     vfsmap = {
555 |         _srcstore: repo.svfs,
556 |         _srccache: repo.cachevfs,
557 |     }
558 |     # we keep repo.vfs out of this map on purpose, there are too many dangers
559 |     # there (eg: .hg/hgrc)
560 |     assert repo.vfs not in vfsmap.values()
561 |
562 |     return vfsmap
563 |
564 |
565 | def _emit2(repo, entries, totalfilesize):
566 |     """actually emit the stream bundle"""
567 |     vfsmap = _makemap(repo)
568 |     # we keep repo.vfs out of this map on purpose, there are too many dangers
569 |     # there (eg: .hg/hgrc),
570 |     #
571 |     # this assert is duplicated (from _makemap) as the author might think this
572 |     # is fine, while this is really not fine.
573 |     if repo.vfs in vfsmap.values():
574 |         raise error.ProgrammingError(
575 |             b'repo.vfs must not be added to vfsmap for security reasons'
576 |         )
577 |
578 |     progress = repo.ui.makeprogress(
579 |         _(b'bundle'), total=totalfilesize, unit=_(b'bytes')
580 |     )
581 |     progress.update(0)
582 |     with maketempcopies() as copy, progress:
583 |         # copy is delayed until we are in the try
584 |         entries = [_filterfull(e, copy, vfsmap) for e in entries]
585 |         yield None  # this releases the lock on the repository
586 |         totalbytecount = 0
587 |
588 |         for src, name, ftype, data in entries:
589 |             vfs = vfsmap[src]
590 |             yield src
591 |             yield util.uvarintencode(len(name))
592 |             if ftype == _fileappend:
593 |                 fp = vfs(name)
594 |                 size = data
595 |             elif ftype == _filefull:
596 |                 fp = open(data, b'rb')
597 |                 size = util.fstat(fp).st_size
598 |             bytecount = 0
599 |             try:
600 |                 yield util.uvarintencode(size)
601 |                 yield name
602 |                 if size <= 65536:
603 |                     chunks = (fp.read(size),)
604 |                 else:
605 |                     chunks = util.filechunkiter(fp, limit=size)
606 |                 for chunk in chunks:
607 |                     bytecount += len(chunk)
608 |                     totalbytecount += len(chunk)
609 |                     progress.update(totalbytecount)
610 |                     yield chunk
611 |                 if bytecount != size:
612 |                     # Would most likely be caused by a race due to `hg strip` or
613 |                     # a revlog split
614 |                     raise error.Abort(
615 |                         _(
616 |                             b'clone could only read %d bytes from %s, but '
617 |                             b'expected %d bytes'
618 |                         )
619 |                         % (bytecount, name, size)
620 |                     )
621 |             finally:
622 |                 fp.close()
623 |
624 |
625 | def _test_sync_point_walk_1(repo):
626 |     """a function for synchronisation during tests"""
627 |
628 |
629 | def _test_sync_point_walk_2(repo):
630 |     """a function for synchronisation during tests"""
631 |
632 |
633 | def _v2_walk(repo, includes, excludes, includeobsmarkers):
634 |     """emit a series of file information entries useful to clone a repo
635 |
636 |     return (entries, totalfilesize)
637 |
638 |     entries is a list of tuples (vfs-key, file-path, file-type, size)
639 |
640 |     - `vfs-key`: is a key to the right vfs to write the file (see _makemap)
641 |     - `name`: file path of the file to copy (to be fed to the vfs)
642 |     - `file-type`: does this file need to be copied with the source lock?
643 |     - `size`: the size of the file (or None)
644 |     """
645 |     assert repo._currentlock(repo._lockref) is not None
646 |     entries = []
647 |     totalfilesize = 0
648 |
649 |     matcher = None
650 |     if includes or excludes:
651 |         matcher = narrowspec.match(repo.root, includes, excludes)
652 |
653 |     for rl_type, name, ename, size in _walkstreamfiles(repo, matcher):
654 |         if size:
655 |             ft = _fileappend
656 |             if rl_type & store.FILEFLAGS_VOLATILE:
657 |                 ft = _filefull
658 |             entries.append((_srcstore, name, ft, size))
659 |             totalfilesize += size
660 |     for name in _walkstreamfullstorefiles(repo):
661 |         if repo.svfs.exists(name):
662 |             totalfilesize += repo.svfs.lstat(name).st_size
663 |             entries.append((_srcstore, name, _filefull, None))
664 |     if includeobsmarkers and repo.svfs.exists(b'obsstore'):
665 |         totalfilesize += repo.svfs.lstat(b'obsstore').st_size
666 |         entries.append((_srcstore, b'obsstore', _filefull, None))
667 |     for name in cacheutil.cachetocopy(repo):
668 |         if repo.cachevfs.exists(name):
669 |             totalfilesize += repo.cachevfs.lstat(name).st_size
670 |             entries.append((_srccache, name, _filefull, None))
671 |     return entries, totalfilesize
672 |
673 |
674 | def generatev2(repo, includes, excludes, includeobsmarkers):
675 |     """Emit content for version 2 of a streaming clone.
676 |
677 |     the data stream consists of the following entries:
678 |     1) A char representing the file destination (eg: store or cache)
679 |     2) A varint containing the length of the filename
680 |     3) A varint containing the length of file data
681 |     4) N bytes containing the filename (the internal, store-agnostic form)
682 |     5) N bytes containing the file data
683 |
684 |     Returns a 3-tuple of (file count, file size, data iterator).
685 |     """
686 |
687 |     with repo.lock():
688 |
689 |         repo.ui.debug(b'scanning\n')
690 |
691 |         entries, totalfilesize = _v2_walk(
692 |             repo,
693 |             includes=includes,
694 |             excludes=excludes,
695 |             includeobsmarkers=includeobsmarkers,
696 |         )
697 |
698 |         chunks = _emit2(repo, entries, totalfilesize)
699 |         first = next(chunks)
700 |         assert first is None
701 |         _test_sync_point_walk_1(repo)
702 |     _test_sync_point_walk_2(repo)
703 |
704 |     return len(entries), totalfilesize, chunks
705 |
706 |
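The docstring above enumerates the five fields of each v2 stream entry. A self-contained sketch of that framing (the varint helper mirrors what ``util.uvarintencode`` produces; the store entry itself is invented):

    def uvarintencode(value):
        """Encode an unsigned int as a little-endian base-128 varint."""
        out = b''
        while value >= 0x80:
            out += bytes(((value & 0x7F) | 0x80,))
            value >>= 7
        return out + bytes((value,))

    src, name, data = b's', b'data/foo.i', b'\x00' * 300
    entry = (
        src                         # 1) destination char: b's' store, b'c' cache
        + uvarintencode(len(name))  # 2) filename length
        + uvarintencode(len(data))  # 3) data length
        + name                      # 4) filename
        + data                      # 5) file data
    )
    # 1 src byte + 1-byte varint (10) + 2-byte varint (300) + name + data
    assert len(entry) == 1 + 1 + 2 + len(name) + len(data)
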
707 | @contextlib.contextmanager
708 | def nested(*ctxs):
709 |     this = ctxs[0]
710 |     rest = ctxs[1:]
711 |     with this:
712 |         if rest:
713 |             with nested(*rest):
714 |                 yield
715 |         else:
716 |             yield
717 |
718 |
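``nested()`` enters a variable number of context managers by recursion and unwinds them in reverse order on exit (much like contextlib.ExitStack does iteratively). A stand-alone demonstration, assuming ``nested`` from this module is in scope:

    import contextlib

    @contextlib.contextmanager
    def tag(events, label):
        events.append(b'enter-' + label)
        yield
        events.append(b'exit-' + label)

    events = []
    with nested(tag(events, b'a'), tag(events, b'b')):
        events.append(b'body')
    assert events == [b'enter-a', b'enter-b', b'body', b'exit-b', b'exit-a']
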
719 | def consumev2(repo, fp, filecount, filesize): |
|
719 | def consumev2(repo, fp, filecount, filesize): | |
720 | """Apply the contents from a version 2 streaming clone. |
|
720 | """Apply the contents from a version 2 streaming clone. | |
721 |
|
721 | |||
722 | Data is read from an object that only needs to provide a ``read(size)`` |
|
722 | Data is read from an object that only needs to provide a ``read(size)`` | |
723 | method. |
|
723 | method. | |
724 | """ |
|
724 | """ | |
725 | with repo.lock(): |
|
725 | with repo.lock(): | |
726 | repo.ui.status( |
|
726 | repo.ui.status( | |
727 | _(b'%d files to transfer, %s of data\n') |
|
727 | _(b'%d files to transfer, %s of data\n') | |
728 | % (filecount, util.bytecount(filesize)) |
|
728 | % (filecount, util.bytecount(filesize)) | |
729 | ) |
|
729 | ) | |
730 |
|
730 | |||
731 | start = util.timer() |
|
731 | start = util.timer() | |
732 | progress = repo.ui.makeprogress( |
|
732 | progress = repo.ui.makeprogress( | |
733 | _(b'clone'), total=filesize, unit=_(b'bytes') |
|
733 | _(b'clone'), total=filesize, unit=_(b'bytes') | |
734 | ) |
|
734 | ) | |
735 | progress.update(0) |
|
735 | progress.update(0) | |
736 |
|
736 | |||
737 | vfsmap = _makemap(repo) |
|
737 | vfsmap = _makemap(repo) | |
738 | # we keep repo.vfs out of the on purpose, ther are too many danger |
|
738 | # we keep repo.vfs out of the on purpose, ther are too many danger | |
739 | # there (eg: .hg/hgrc), |
|
739 | # there (eg: .hg/hgrc), | |
740 | # |
|
740 | # | |
741 | # this assert is duplicated (from _makemap) as author might think this |
|
741 | # this assert is duplicated (from _makemap) as author might think this | |
742 | # is fine, while this is really not fine. |
|
742 | # is fine, while this is really not fine. | |
743 | if repo.vfs in vfsmap.values(): |
|
743 | if repo.vfs in vfsmap.values(): | |
744 | raise error.ProgrammingError( |
|
744 | raise error.ProgrammingError( | |
745 | b'repo.vfs must not be added to vfsmap for security reasons' |
|
745 | b'repo.vfs must not be added to vfsmap for security reasons' | |
746 | ) |
|
746 | ) | |
747 |
|
747 | |||
748 | with repo.transaction(b'clone'): |
|
748 | with repo.transaction(b'clone'): | |
749 | ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values()) |
|
749 | ctxs = (vfs.backgroundclosing(repo.ui) for vfs in vfsmap.values()) | |
750 | with nested(*ctxs): |
|
750 | with nested(*ctxs): | |
751 | for i in range(filecount): |
|
751 | for i in range(filecount): | |
752 | src = util.readexactly(fp, 1) |
|
752 | src = util.readexactly(fp, 1) | |
753 | vfs = vfsmap[src] |
|
753 | vfs = vfsmap[src] | |
754 | namelen = util.uvarintdecodestream(fp) |
|
754 | namelen = util.uvarintdecodestream(fp) | |
755 | datalen = util.uvarintdecodestream(fp) |
|
755 | datalen = util.uvarintdecodestream(fp) | |
756 |
|
756 | |||
757 | name = util.readexactly(fp, namelen) |
|
757 | name = util.readexactly(fp, namelen) | |
758 |
|
758 | |||
759 | if repo.ui.debugflag: |
|
759 | if repo.ui.debugflag: | |
760 | repo.ui.debug( |
|
760 | repo.ui.debug( | |
761 | b'adding [%s] %s (%s)\n' |
|
761 | b'adding [%s] %s (%s)\n' | |
762 | % (src, name, util.bytecount(datalen)) |
|
762 | % (src, name, util.bytecount(datalen)) | |
763 | ) |
|
763 | ) | |
764 |
|
764 | |||
765 | with vfs(name, b'w') as ofp: |
|
765 | with vfs(name, b'w') as ofp: | |
766 | for chunk in util.filechunkiter(fp, limit=datalen): |
|
766 | for chunk in util.filechunkiter(fp, limit=datalen): | |
767 | progress.increment(step=len(chunk)) |
|
767 | progress.increment(step=len(chunk)) | |
768 | ofp.write(chunk) |
|
768 | ofp.write(chunk) | |
769 |
|
769 | |||
770 | # force @filecache properties to be reloaded from |
|
770 | # force @filecache properties to be reloaded from | |
771 | # streamclone-ed file at next access |
|
771 | # streamclone-ed file at next access | |
772 | repo.invalidate(clearfilecache=True) |
|
772 | repo.invalidate(clearfilecache=True) | |
773 |
|
773 | |||
774 | elapsed = util.timer() - start |
|
774 | elapsed = util.timer() - start | |
775 | if elapsed <= 0: |
|
775 | if elapsed <= 0: | |
776 | elapsed = 0.001 |
|
776 | elapsed = 0.001 | |
777 | repo.ui.status( |
|
777 | repo.ui.status( | |
778 | _(b'transferred %s in %.1f seconds (%s/sec)\n') |
|
778 | _(b'transferred %s in %.1f seconds (%s/sec)\n') | |
779 | % ( |
|
779 | % ( | |
780 | util.bytecount(progress.pos), |
|
780 | util.bytecount(progress.pos), | |
781 | elapsed, |
|
781 | elapsed, | |
782 | util.bytecount(progress.pos / elapsed), |
|
782 | util.bytecount(progress.pos / elapsed), | |
783 | ) |
|
783 | ) | |
784 | ) |
|
784 | ) | |
785 | progress.complete() |
|
785 | progress.complete() | |
786 |
|
786 | |||
787 |
|
787 | |||
788 | def applybundlev2(repo, fp, filecount, filesize, requirements): |
|
788 | def applybundlev2(repo, fp, filecount, filesize, requirements): | |
789 | from . import localrepo |
|
789 | from . import localrepo | |
790 |
|
790 | |||
791 | missingreqs = [r for r in requirements if r not in repo.supported] |
|
791 | missingreqs = [r for r in requirements if r not in repo.supported] | |
792 | if missingreqs: |
|
792 | if missingreqs: | |
793 | raise error.Abort( |
|
793 | raise error.Abort( | |
794 | _(b'unable to apply stream clone: unsupported format: %s') |
|
794 | _(b'unable to apply stream clone: unsupported format: %s') | |
795 | % b', '.join(sorted(missingreqs)) |
|
795 | % b', '.join(sorted(missingreqs)) | |
796 | ) |
|
796 | ) | |
797 |
|
797 | |||
798 | consumev2(repo, fp, filecount, filesize) |
|
798 | consumev2(repo, fp, filecount, filesize) | |
799 |
|
799 | |||
800 | # new requirements = old non-format requirements + |
|
800 | # new requirements = old non-format requirements + | |
801 | # new format-related remote requirements |
|
801 | # new format-related remote requirements | |
802 | # requirements from the streamed-in repository |
|
802 | # requirements from the streamed-in repository | |
803 | repo.requirements = set(requirements) | ( |
|
803 | repo.requirements = set(requirements) | ( | |
804 | repo.requirements - repo.supportedformats |
|
804 | repo.requirements - repo.supportedformats | |
805 | ) |
|
805 | ) | |
806 | repo.svfs.options = localrepo.resolvestorevfsoptions( |
|
806 | repo.svfs.options = localrepo.resolvestorevfsoptions( | |
807 | repo.ui, repo.requirements, repo.features |
|
807 | repo.ui, repo.requirements, repo.features | |
808 | ) |
|
808 | ) | |
809 | scmutil.writereporequirements(repo) |
|
809 | scmutil.writereporequirements(repo) | |
810 |
|
810 | |||
811 |
|
811 | |||
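To make the requirements update above concrete, here is a small worked sketch with made-up requirement names: everything the stream carries is adopted, while local requirements survive only if they are not format requirements (i.e. not in `supportedformats`):

    # illustrative values only; the real sets come from the two repositories
    supportedformats = {b'revlogv1', b'generaldelta', b'sparserevlog'}
    local_reqs = {b'store', b'fncache', b'revlogv1'}           # repo.requirements
    streamed_reqs = {b'revlogv1', b'generaldelta', b'store'}   # from the bundle

    # keep local non-format requirements, adopt the streamed-in format ones
    merged = set(streamed_reqs) | (local_reqs - supportedformats)
    assert merged == {b'store', b'fncache', b'revlogv1', b'generaldelta'}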
812 | def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): |
|
812 | def _copy_files(src_vfs_map, dst_vfs_map, entries, progress): | |
813 | hardlink = [True] |
|
813 | hardlink = [True] | |
814 |
|
814 | |||
815 | def copy_used(): |
|
815 | def copy_used(): | |
816 | hardlink[0] = False |
|
816 | hardlink[0] = False | |
817 | progress.topic = _(b'copying') |
|
817 | progress.topic = _(b'copying') | |
818 |
|
818 | |||
819 | for k, path, size in entries: |
|
819 | for k, path, size in entries: | |
820 | src_vfs = src_vfs_map[k] |
|
820 | src_vfs = src_vfs_map[k] | |
821 | dst_vfs = dst_vfs_map[k] |
|
821 | dst_vfs = dst_vfs_map[k] | |
822 | src_path = src_vfs.join(path) |
|
822 | src_path = src_vfs.join(path) | |
823 | dst_path = dst_vfs.join(path) |
|
823 | dst_path = dst_vfs.join(path) | |
824 | dirname = dst_vfs.dirname(path) |
|
824 | # We cannot use dirname and makedirs of dst_vfs here because the store | |
825 | if not dst_vfs.exists(dirname): |
|
825 | # encoding confuses them. See issue 6581 for details. | |
826 | dst_vfs.makedirs(dirname) |
|
826 | dirname = os.path.dirname(dst_path) | |
|
827 | if not os.path.exists(dirname): | |||
|
828 | util.makedirs(dirname) | |||
827 | dst_vfs.register_file(path) |
|
829 | dst_vfs.register_file(path) | |
828 | # XXX we could use the #nb_bytes argument. |
|
830 | # XXX we could use the #nb_bytes argument. | |
829 | util.copyfile( |
|
831 | util.copyfile( | |
830 | src_path, |
|
832 | src_path, | |
831 | dst_path, |
|
833 | dst_path, | |
832 | hardlink=hardlink[0], |
|
834 | hardlink=hardlink[0], | |
833 | no_hardlink_cb=copy_used, |
|
835 | no_hardlink_cb=copy_used, | |
834 | check_fs_hardlink=False, |
|
836 | check_fs_hardlink=False, | |
835 | ) |
|
837 | ) | |
836 | progress.increment() |
|
838 | progress.increment() | |
837 | return hardlink[0] |
|
839 | return hardlink[0] | |
838 |
|
840 | |||
839 |
|
841 | |||
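`_copy_files` above starts out hardlinking and permanently downgrades to plain copying the first time a link cannot be made; the one-element `hardlink` list acts as a mutable cell so the `no_hardlink_cb` callback can flip the flag from inside `util.copyfile`. The same pattern in standalone form, using `os.link` with a `shutil` fallback (a sketch under those assumptions, not Mercurial's implementation):

    import os
    import shutil

    def copy_or_link(pairs):
        """Hardlink each (src, dst) pair; after the first failure (for
        example a cross-device link), fall back to plain copies for the
        rest of the run. Returns True if everything was hardlinked."""
        use_hardlink = True
        for src, dst in pairs:
            # note: directories are made on the joined destination path,
            # mirroring the issue 6581 fix above
            parent = os.path.dirname(dst)
            if parent:
                os.makedirs(parent, exist_ok=True)
            if use_hardlink:
                try:
                    os.link(src, dst)
                    continue
                except OSError:
                    use_hardlink = False  # downgrade for all remaining files
            shutil.copy2(src, dst)
        return use_hardlink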
840 | def local_copy(src_repo, dest_repo): |
|
842 | def local_copy(src_repo, dest_repo): | |
841 | """copy all content from one local repository to another |
|
843 | """copy all content from one local repository to another | |
842 |
|
844 | |||
843 | This is useful for local clones""" |

845 | This is useful for local clones""" | |
844 | src_store_requirements = { |
|
846 | src_store_requirements = { | |
845 | r |
|
847 | r | |
846 | for r in src_repo.requirements |
|
848 | for r in src_repo.requirements | |
847 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS |
|
849 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS | |
848 | } |
|
850 | } | |
849 | dest_store_requirements = { |
|
851 | dest_store_requirements = { | |
850 | r |
|
852 | r | |
851 | for r in dest_repo.requirements |
|
853 | for r in dest_repo.requirements | |
852 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS |
|
854 | if r not in requirementsmod.WORKING_DIR_REQUIREMENTS | |
853 | } |
|
855 | } | |
854 | assert src_store_requirements == dest_store_requirements |
|
856 | assert src_store_requirements == dest_store_requirements | |
855 |
|
857 | |||
856 | with dest_repo.lock(): |
|
858 | with dest_repo.lock(): | |
857 | with src_repo.lock(): |
|
859 | with src_repo.lock(): | |
858 |
|
860 | |||
859 | # bookmarks are not integrated into the streaming, as they might use |

861 | # bookmarks are not integrated into the streaming, as they might use | |
860 | # `repo.vfs`, and there is too much sensitive data accessible |

862 | # `repo.vfs`, and there is too much sensitive data accessible | |
861 | # through `repo.vfs` to expose it to a streaming clone. |

863 | # through `repo.vfs` to expose it to a streaming clone. | |
862 | src_book_vfs = bookmarks.bookmarksvfs(src_repo) |
|
864 | src_book_vfs = bookmarks.bookmarksvfs(src_repo) | |
863 | srcbookmarks = src_book_vfs.join(b'bookmarks') |
|
865 | srcbookmarks = src_book_vfs.join(b'bookmarks') | |
864 | bm_count = 0 |
|
866 | bm_count = 0 | |
865 | if os.path.exists(srcbookmarks): |
|
867 | if os.path.exists(srcbookmarks): | |
866 | bm_count = 1 |
|
868 | bm_count = 1 | |
867 |
|
869 | |||
868 | entries, totalfilesize = _v2_walk( |
|
870 | entries, totalfilesize = _v2_walk( | |
869 | src_repo, |
|
871 | src_repo, | |
870 | includes=None, |
|
872 | includes=None, | |
871 | excludes=None, |
|
873 | excludes=None, | |
872 | includeobsmarkers=True, |
|
874 | includeobsmarkers=True, | |
873 | ) |
|
875 | ) | |
874 | src_vfs_map = _makemap(src_repo) |
|
876 | src_vfs_map = _makemap(src_repo) | |
875 | dest_vfs_map = _makemap(dest_repo) |
|
877 | dest_vfs_map = _makemap(dest_repo) | |
876 | progress = src_repo.ui.makeprogress( |
|
878 | progress = src_repo.ui.makeprogress( | |
877 | topic=_(b'linking'), |
|
879 | topic=_(b'linking'), | |
878 | total=len(entries) + bm_count, |
|
880 | total=len(entries) + bm_count, | |
879 | unit=_(b'files'), |
|
881 | unit=_(b'files'), | |
880 | ) |
|
882 | ) | |
881 | # copy files |
|
883 | # copy files | |
882 | # |
|
884 | # | |
883 | # We could copy the full file while the source repository is locked |
|
885 | # We could copy the full file while the source repository is locked | |
884 | # and the other one without the lock. However, in the linking case, |
|
886 | # and the other one without the lock. However, in the linking case, | |
885 | # this would also require checks that nobody is appending any data |

887 | # this would also require checks that nobody is appending any data | |
886 | # to the files while we do the clone, so this is not done yet. We |
|
888 | # to the files while we do the clone, so this is not done yet. We | |
887 | # could do this blindly when copying files. |
|
889 | # could do this blindly when copying files. | |
888 | files = ((k, path, size) for k, path, ftype, size in entries) |
|
890 | files = ((k, path, size) for k, path, ftype, size in entries) | |
889 | hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress) |
|
891 | hardlink = _copy_files(src_vfs_map, dest_vfs_map, files, progress) | |
890 |
|
892 | |||
891 | # copy bookmarks over |
|
893 | # copy bookmarks over | |
892 | if bm_count: |
|
894 | if bm_count: | |
893 | dst_book_vfs = bookmarks.bookmarksvfs(dest_repo) |
|
895 | dst_book_vfs = bookmarks.bookmarksvfs(dest_repo) | |
894 | dstbookmarks = dst_book_vfs.join(b'bookmarks') |
|
896 | dstbookmarks = dst_book_vfs.join(b'bookmarks') | |
895 | util.copyfile(srcbookmarks, dstbookmarks) |
|
897 | util.copyfile(srcbookmarks, dstbookmarks) | |
896 | progress.complete() |
|
898 | progress.complete() | |
897 | if hardlink: |
|
899 | if hardlink: | |
898 | msg = b'linked %d files\n' |
|
900 | msg = b'linked %d files\n' | |
899 | else: |
|
901 | else: | |
900 | msg = b'copied %d files\n' |
|
902 | msg = b'copied %d files\n' | |
901 | src_repo.ui.debug(msg % (len(entries) + bm_count)) |
|
903 | src_repo.ui.debug(msg % (len(entries) + bm_count)) | |
902 |
|
904 | |||
903 | with dest_repo.transaction(b"localclone") as tr: |
|
905 | with dest_repo.transaction(b"localclone") as tr: | |
904 | dest_repo.store.write(tr) |
|
906 | dest_repo.store.write(tr) | |
905 |
|
907 | |||
906 | # clean up transaction files as they do not make sense |

908 | # clean up transaction files as they do not make sense | |
907 | undo_files = [(dest_repo.svfs, b'undo.backupfiles')] |
|
909 | undo_files = [(dest_repo.svfs, b'undo.backupfiles')] | |
908 | undo_files.extend(dest_repo.undofiles()) |
|
910 | undo_files.extend(dest_repo.undofiles()) | |
909 | for undovfs, undofile in undo_files: |
|
911 | for undovfs, undofile in undo_files: | |
910 | try: |
|
912 | try: | |
911 | undovfs.unlink(undofile) |
|
913 | undovfs.unlink(undofile) | |
912 | except OSError as e: |
|
914 | except OSError as e: | |
913 | if e.errno != errno.ENOENT: |
|
915 | if e.errno != errno.ENOENT: | |
914 | msg = _(b'error removing %s: %s\n') |
|
916 | msg = _(b'error removing %s: %s\n') | |
915 | path = undovfs.join(undofile) |
|
917 | path = undovfs.join(undofile) | |
916 | e_msg = stringutil.forcebytestr(e) |
|
918 | e_msg = stringutil.forcebytestr(e) | |
917 | msg %= (path, e_msg) |
|
919 | msg %= (path, e_msg) | |
918 | dest_repo.ui.warn(msg) |
|
920 | dest_repo.ui.warn(msg) |
@@ -1,908 +1,904 b'' | |||||
1 | #require serve no-reposimplestore no-chg |
|
1 | #require serve no-reposimplestore no-chg | |
2 |
|
2 | |||
3 | #testcases stream-legacy stream-bundle2 |
|
3 | #testcases stream-legacy stream-bundle2 | |
4 |
|
4 | |||
5 | #if stream-legacy |
|
5 | #if stream-legacy | |
6 | $ cat << EOF >> $HGRCPATH |
|
6 | $ cat << EOF >> $HGRCPATH | |
7 | > [server] |
|
7 | > [server] | |
8 | > bundle2.stream = no |
|
8 | > bundle2.stream = no | |
9 | > EOF |
|
9 | > EOF | |
10 | #endif |
|
10 | #endif | |
11 |
|
11 | |||
12 | Initialize repository |
|
12 | Initialize repository | |
13 | the status call is to check for issue5130 |
|
13 | the status call is to check for issue5130 | |
14 |
|
14 | |||
15 | $ hg init server |
|
15 | $ hg init server | |
16 | $ cd server |
|
16 | $ cd server | |
17 | $ touch foo |
|
17 | $ touch foo | |
18 | $ hg -q commit -A -m initial |
|
18 | $ hg -q commit -A -m initial | |
19 | >>> for i in range(1024): |
|
19 | >>> for i in range(1024): | |
20 | ... with open(str(i), 'wb') as fh: |
|
20 | ... with open(str(i), 'wb') as fh: | |
21 | ... fh.write(b"%d" % i) and None |
|
21 | ... fh.write(b"%d" % i) and None | |
22 | $ hg -q commit -A -m 'add a lot of files' |
|
22 | $ hg -q commit -A -m 'add a lot of files' | |
23 | $ hg st |
|
23 | $ hg st | |
24 |
|
24 | |||
25 | add files with "tricky" name: |
|
25 | add files with "tricky" name: | |
26 |
|
26 | |||
27 | $ echo foo > 00changelog.i |
|
27 | $ echo foo > 00changelog.i | |
28 | $ echo foo > 00changelog.d |
|
28 | $ echo foo > 00changelog.d | |
29 | $ echo foo > 00changelog.n |
|
29 | $ echo foo > 00changelog.n | |
30 | $ echo foo > 00changelog-ab349180a0405010.nd |
|
30 | $ echo foo > 00changelog-ab349180a0405010.nd | |
31 | $ echo foo > 00manifest.i |
|
31 | $ echo foo > 00manifest.i | |
32 | $ echo foo > 00manifest.d |
|
32 | $ echo foo > 00manifest.d | |
33 | $ echo foo > foo.i |
|
33 | $ echo foo > foo.i | |
34 | $ echo foo > foo.d |
|
34 | $ echo foo > foo.d | |
35 | $ echo foo > foo.n |
|
35 | $ echo foo > foo.n | |
36 | $ echo foo > undo.py |
|
36 | $ echo foo > undo.py | |
37 | $ echo foo > undo.i |
|
37 | $ echo foo > undo.i | |
38 | $ echo foo > undo.d |
|
38 | $ echo foo > undo.d | |
39 | $ echo foo > undo.n |
|
39 | $ echo foo > undo.n | |
40 | $ echo foo > undo.foo.i |
|
40 | $ echo foo > undo.foo.i | |
41 | $ echo foo > undo.foo.d |
|
41 | $ echo foo > undo.foo.d | |
42 | $ echo foo > undo.foo.n |
|
42 | $ echo foo > undo.foo.n | |
43 | $ echo foo > undo.babar |
|
43 | $ echo foo > undo.babar | |
44 | $ mkdir savanah |
|
44 | $ mkdir savanah | |
45 | $ echo foo > savanah/foo.i |
|
45 | $ echo foo > savanah/foo.i | |
46 | $ echo foo > savanah/foo.d |
|
46 | $ echo foo > savanah/foo.d | |
47 | $ echo foo > savanah/foo.n |
|
47 | $ echo foo > savanah/foo.n | |
48 | $ echo foo > savanah/undo.py |
|
48 | $ echo foo > savanah/undo.py | |
49 | $ echo foo > savanah/undo.i |
|
49 | $ echo foo > savanah/undo.i | |
50 | $ echo foo > savanah/undo.d |
|
50 | $ echo foo > savanah/undo.d | |
51 | $ echo foo > savanah/undo.n |
|
51 | $ echo foo > savanah/undo.n | |
52 | $ echo foo > savanah/undo.foo.i |
|
52 | $ echo foo > savanah/undo.foo.i | |
53 | $ echo foo > savanah/undo.foo.d |
|
53 | $ echo foo > savanah/undo.foo.d | |
54 | $ echo foo > savanah/undo.foo.n |
|
54 | $ echo foo > savanah/undo.foo.n | |
55 | $ echo foo > savanah/undo.babar |
|
55 | $ echo foo > savanah/undo.babar | |
56 | $ mkdir data |
|
56 | $ mkdir data | |
57 | $ echo foo > data/foo.i |
|
57 | $ echo foo > data/foo.i | |
58 | $ echo foo > data/foo.d |
|
58 | $ echo foo > data/foo.d | |
59 | $ echo foo > data/foo.n |
|
59 | $ echo foo > data/foo.n | |
60 | $ echo foo > data/undo.py |
|
60 | $ echo foo > data/undo.py | |
61 | $ echo foo > data/undo.i |
|
61 | $ echo foo > data/undo.i | |
62 | $ echo foo > data/undo.d |
|
62 | $ echo foo > data/undo.d | |
63 | $ echo foo > data/undo.n |
|
63 | $ echo foo > data/undo.n | |
64 | $ echo foo > data/undo.foo.i |
|
64 | $ echo foo > data/undo.foo.i | |
65 | $ echo foo > data/undo.foo.d |
|
65 | $ echo foo > data/undo.foo.d | |
66 | $ echo foo > data/undo.foo.n |
|
66 | $ echo foo > data/undo.foo.n | |
67 | $ echo foo > data/undo.babar |
|
67 | $ echo foo > data/undo.babar | |
68 | $ mkdir meta |
|
68 | $ mkdir meta | |
69 | $ echo foo > meta/foo.i |
|
69 | $ echo foo > meta/foo.i | |
70 | $ echo foo > meta/foo.d |
|
70 | $ echo foo > meta/foo.d | |
71 | $ echo foo > meta/foo.n |
|
71 | $ echo foo > meta/foo.n | |
72 | $ echo foo > meta/undo.py |
|
72 | $ echo foo > meta/undo.py | |
73 | $ echo foo > meta/undo.i |
|
73 | $ echo foo > meta/undo.i | |
74 | $ echo foo > meta/undo.d |
|
74 | $ echo foo > meta/undo.d | |
75 | $ echo foo > meta/undo.n |
|
75 | $ echo foo > meta/undo.n | |
76 | $ echo foo > meta/undo.foo.i |
|
76 | $ echo foo > meta/undo.foo.i | |
77 | $ echo foo > meta/undo.foo.d |
|
77 | $ echo foo > meta/undo.foo.d | |
78 | $ echo foo > meta/undo.foo.n |
|
78 | $ echo foo > meta/undo.foo.n | |
79 | $ echo foo > meta/undo.babar |
|
79 | $ echo foo > meta/undo.babar | |
80 | $ mkdir store |
|
80 | $ mkdir store | |
81 | $ echo foo > store/foo.i |
|
81 | $ echo foo > store/foo.i | |
82 | $ echo foo > store/foo.d |
|
82 | $ echo foo > store/foo.d | |
83 | $ echo foo > store/foo.n |
|
83 | $ echo foo > store/foo.n | |
84 | $ echo foo > store/undo.py |
|
84 | $ echo foo > store/undo.py | |
85 | $ echo foo > store/undo.i |
|
85 | $ echo foo > store/undo.i | |
86 | $ echo foo > store/undo.d |
|
86 | $ echo foo > store/undo.d | |
87 | $ echo foo > store/undo.n |
|
87 | $ echo foo > store/undo.n | |
88 | $ echo foo > store/undo.foo.i |
|
88 | $ echo foo > store/undo.foo.i | |
89 | $ echo foo > store/undo.foo.d |
|
89 | $ echo foo > store/undo.foo.d | |
90 | $ echo foo > store/undo.foo.n |
|
90 | $ echo foo > store/undo.foo.n | |
91 | $ echo foo > store/undo.babar |
|
91 | $ echo foo > store/undo.babar | |
92 |
|
92 | |||
93 | Name with special characters |
|
93 | Name with special characters | |
94 |
|
94 | |||
95 | $ echo foo > store/CélesteVille_is_a_Capital_City |

95 | $ echo foo > store/CélesteVille_is_a_Capital_City | |
96 |
|
96 | |||
97 | name causing issue6581 |
|
97 | name causing issue6581 | |
98 |
|
98 | |||
99 | $ mkdir --parents container/isam-build-centos7/ |
|
99 | $ mkdir --parents container/isam-build-centos7/ | |
100 | $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch |
|
100 | $ touch container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch | |
101 |
|
101 | |||
102 | Add all that |
|
102 | Add all that | |
103 |
|
103 | |||
104 | $ hg add . |
|
104 | $ hg add . | |
105 | adding 00changelog-ab349180a0405010.nd |
|
105 | adding 00changelog-ab349180a0405010.nd | |
106 | adding 00changelog.d |
|
106 | adding 00changelog.d | |
107 | adding 00changelog.i |
|
107 | adding 00changelog.i | |
108 | adding 00changelog.n |
|
108 | adding 00changelog.n | |
109 | adding 00manifest.d |
|
109 | adding 00manifest.d | |
110 | adding 00manifest.i |
|
110 | adding 00manifest.i | |
111 | adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch |
|
111 | adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch | |
112 | adding data/foo.d |
|
112 | adding data/foo.d | |
113 | adding data/foo.i |
|
113 | adding data/foo.i | |
114 | adding data/foo.n |
|
114 | adding data/foo.n | |
115 | adding data/undo.babar |
|
115 | adding data/undo.babar | |
116 | adding data/undo.d |
|
116 | adding data/undo.d | |
117 | adding data/undo.foo.d |
|
117 | adding data/undo.foo.d | |
118 | adding data/undo.foo.i |
|
118 | adding data/undo.foo.i | |
119 | adding data/undo.foo.n |
|
119 | adding data/undo.foo.n | |
120 | adding data/undo.i |
|
120 | adding data/undo.i | |
121 | adding data/undo.n |
|
121 | adding data/undo.n | |
122 | adding data/undo.py |
|
122 | adding data/undo.py | |
123 | adding foo.d |
|
123 | adding foo.d | |
124 | adding foo.i |
|
124 | adding foo.i | |
125 | adding foo.n |
|
125 | adding foo.n | |
126 | adding meta/foo.d |
|
126 | adding meta/foo.d | |
127 | adding meta/foo.i |
|
127 | adding meta/foo.i | |
128 | adding meta/foo.n |
|
128 | adding meta/foo.n | |
129 | adding meta/undo.babar |
|
129 | adding meta/undo.babar | |
130 | adding meta/undo.d |
|
130 | adding meta/undo.d | |
131 | adding meta/undo.foo.d |
|
131 | adding meta/undo.foo.d | |
132 | adding meta/undo.foo.i |
|
132 | adding meta/undo.foo.i | |
133 | adding meta/undo.foo.n |
|
133 | adding meta/undo.foo.n | |
134 | adding meta/undo.i |
|
134 | adding meta/undo.i | |
135 | adding meta/undo.n |
|
135 | adding meta/undo.n | |
136 | adding meta/undo.py |
|
136 | adding meta/undo.py | |
137 | adding savanah/foo.d |
|
137 | adding savanah/foo.d | |
138 | adding savanah/foo.i |
|
138 | adding savanah/foo.i | |
139 | adding savanah/foo.n |
|
139 | adding savanah/foo.n | |
140 | adding savanah/undo.babar |
|
140 | adding savanah/undo.babar | |
141 | adding savanah/undo.d |
|
141 | adding savanah/undo.d | |
142 | adding savanah/undo.foo.d |
|
142 | adding savanah/undo.foo.d | |
143 | adding savanah/undo.foo.i |
|
143 | adding savanah/undo.foo.i | |
144 | adding savanah/undo.foo.n |
|
144 | adding savanah/undo.foo.n | |
145 | adding savanah/undo.i |
|
145 | adding savanah/undo.i | |
146 | adding savanah/undo.n |
|
146 | adding savanah/undo.n | |
147 | adding savanah/undo.py |
|
147 | adding savanah/undo.py | |
148 | adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc) |
|
148 | adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc) | |
149 | adding store/foo.d |
|
149 | adding store/foo.d | |
150 | adding store/foo.i |
|
150 | adding store/foo.i | |
151 | adding store/foo.n |
|
151 | adding store/foo.n | |
152 | adding store/undo.babar |
|
152 | adding store/undo.babar | |
153 | adding store/undo.d |
|
153 | adding store/undo.d | |
154 | adding store/undo.foo.d |
|
154 | adding store/undo.foo.d | |
155 | adding store/undo.foo.i |
|
155 | adding store/undo.foo.i | |
156 | adding store/undo.foo.n |
|
156 | adding store/undo.foo.n | |
157 | adding store/undo.i |
|
157 | adding store/undo.i | |
158 | adding store/undo.n |
|
158 | adding store/undo.n | |
159 | adding store/undo.py |
|
159 | adding store/undo.py | |
160 | adding undo.babar |
|
160 | adding undo.babar | |
161 | adding undo.d |
|
161 | adding undo.d | |
162 | adding undo.foo.d |
|
162 | adding undo.foo.d | |
163 | adding undo.foo.i |
|
163 | adding undo.foo.i | |
164 | adding undo.foo.n |
|
164 | adding undo.foo.n | |
165 | adding undo.i |
|
165 | adding undo.i | |
166 | adding undo.n |
|
166 | adding undo.n | |
167 | adding undo.py |
|
167 | adding undo.py | |
168 | $ hg ci -m 'add files with "tricky" name' |
|
168 | $ hg ci -m 'add files with "tricky" name' | |
169 | $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid |
|
169 | $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid | |
170 | $ cat hg.pid > $DAEMON_PIDS |
|
170 | $ cat hg.pid > $DAEMON_PIDS | |
171 | $ cd .. |
|
171 | $ cd .. | |
172 |
|
172 | |||
173 | Check local clone |
|
173 | Check local clone | |
174 | ================== |
|
174 | ================== | |
175 |
|
175 | |||
176 | The logic is close enough to the uncompressed case. |

176 | The logic is close enough to the uncompressed case. | |
177 | This is present here to reuse the testing around files with "special" names. |

177 | This is present here to reuse the testing around files with "special" names. | |
178 |
|
178 | |||
179 | $ hg clone server local-clone |
|
179 | $ hg clone server local-clone | |
180 | updating to branch default |
|
180 | updating to branch default | |
181 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
181 | 1088 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
182 | abort: $ENOENT$: '$TESTTMP/local-clone/.hg/store/dh/containe/isam-bui/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4d94041277bcd011e1d54c247523c124b4a325686.i' (known-bad-output !) |
|
|||
183 | [255] |
|
|||
184 |
|
182 | |||
185 | Check that the clone went well |
|
183 | Check that the clone went well | |
186 |
|
184 | |||
187 | $ hg verify -R local-clone |
|
185 | $ hg verify -R local-clone | |
188 | checking changesets |
|
186 | checking changesets | |
189 | checking manifests (missing-correct-output !) |
|
187 | checking manifests | |
190 | crosschecking files in changesets and manifests |
|
188 | crosschecking files in changesets and manifests | |
191 | checking files (missing-correct-output !) |
|
189 | checking files | |
192 | checked 3 changesets with 1088 changes to 1088 files |
|
190 | checked 3 changesets with 1088 changes to 1088 files | |
193 | abort: repository local-clone not found (known-bad-output !) |
|
|||
194 | [255] |
|
|||
195 |
|
191 | |||
196 | Check uncompressed |
|
192 | Check uncompressed | |
197 | ================= |
|
193 | ================== | |
198 |
|
194 | |||
199 | Cannot stream clone when server.uncompressed is set |
|
195 | Cannot stream clone when server.uncompressed is set | |
200 |
|
196 | |||
201 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' |
|
197 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out' | |
202 | 200 Script output follows |
|
198 | 200 Script output follows | |
203 |
|
199 | |||
204 | 1 |
|
200 | 1 | |
205 |
|
201 | |||
206 | #if stream-legacy |
|
202 | #if stream-legacy | |
207 | $ hg debugcapabilities http://localhost:$HGPORT |
|
203 | $ hg debugcapabilities http://localhost:$HGPORT | |
208 | Main capabilities: |
|
204 | Main capabilities: | |
209 | batch |
|
205 | batch | |
210 | branchmap |
|
206 | branchmap | |
211 | $USUAL_BUNDLE2_CAPS_SERVER$ |
|
207 | $USUAL_BUNDLE2_CAPS_SERVER$ | |
212 | changegroupsubset |
|
208 | changegroupsubset | |
213 | compression=$BUNDLE2_COMPRESSIONS$ |
|
209 | compression=$BUNDLE2_COMPRESSIONS$ | |
214 | getbundle |
|
210 | getbundle | |
215 | httpheader=1024 |
|
211 | httpheader=1024 | |
216 | httpmediatype=0.1rx,0.1tx,0.2tx |
|
212 | httpmediatype=0.1rx,0.1tx,0.2tx | |
217 | known |
|
213 | known | |
218 | lookup |
|
214 | lookup | |
219 | pushkey |
|
215 | pushkey | |
220 | unbundle=HG10GZ,HG10BZ,HG10UN |
|
216 | unbundle=HG10GZ,HG10BZ,HG10UN | |
221 | unbundlehash |
|
217 | unbundlehash | |
222 | Bundle2 capabilities: |
|
218 | Bundle2 capabilities: | |
223 | HG20 |
|
219 | HG20 | |
224 | bookmarks |
|
220 | bookmarks | |
225 | changegroup |
|
221 | changegroup | |
226 | 01 |
|
222 | 01 | |
227 | 02 |
|
223 | 02 | |
228 | checkheads |
|
224 | checkheads | |
229 | related |
|
225 | related | |
230 | digests |
|
226 | digests | |
231 | md5 |
|
227 | md5 | |
232 | sha1 |
|
228 | sha1 | |
233 | sha512 |
|
229 | sha512 | |
234 | error |
|
230 | error | |
235 | abort |
|
231 | abort | |
236 | unsupportedcontent |
|
232 | unsupportedcontent | |
237 | pushraced |
|
233 | pushraced | |
238 | pushkey |
|
234 | pushkey | |
239 | hgtagsfnodes |
|
235 | hgtagsfnodes | |
240 | listkeys |
|
236 | listkeys | |
241 | phases |
|
237 | phases | |
242 | heads |
|
238 | heads | |
243 | pushkey |
|
239 | pushkey | |
244 | remote-changegroup |
|
240 | remote-changegroup | |
245 | http |
|
241 | http | |
246 | https |
|
242 | https | |
247 |
|
243 | |||
248 | $ hg clone --stream -U http://localhost:$HGPORT server-disabled |
|
244 | $ hg clone --stream -U http://localhost:$HGPORT server-disabled | |
249 | warning: stream clone requested but server has them disabled |
|
245 | warning: stream clone requested but server has them disabled | |
250 | requesting all changes |
|
246 | requesting all changes | |
251 | adding changesets |
|
247 | adding changesets | |
252 | adding manifests |
|
248 | adding manifests | |
253 | adding file changes |
|
249 | adding file changes | |
254 | added 3 changesets with 1088 changes to 1088 files |
|
250 | added 3 changesets with 1088 changes to 1088 files | |
255 | new changesets 96ee1d7354c4:5223b5e3265f |
|
251 | new changesets 96ee1d7354c4:5223b5e3265f | |
256 |
|
252 | |||
257 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" |
|
253 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" | |
258 | 200 Script output follows |
|
254 | 200 Script output follows | |
259 | content-type: application/mercurial-0.2 |
|
255 | content-type: application/mercurial-0.2 | |
260 |
|
256 | |||
261 |
|
257 | |||
262 | $ f --size body --hexdump --bytes 100 |
|
258 | $ f --size body --hexdump --bytes 100 | |
263 | body: size=232 |
|
259 | body: size=232 | |
264 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
260 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| | |
265 | 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| |
|
261 | 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| | |
266 | 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| |
|
262 | 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| | |
267 | 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| |
|
263 | 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| | |
268 | 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| |
|
264 | 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| | |
269 | 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| |
|
265 | 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| | |
270 | 0060: 69 73 20 66 |is f| |
|
266 | 0060: 69 73 20 66 |is f| | |
271 |
|
267 | |||
272 | #endif |
|
268 | #endif | |
273 | #if stream-bundle2 |
|
269 | #if stream-bundle2 | |
274 | $ hg debugcapabilities http://localhost:$HGPORT |
|
270 | $ hg debugcapabilities http://localhost:$HGPORT | |
275 | Main capabilities: |
|
271 | Main capabilities: | |
276 | batch |
|
272 | batch | |
277 | branchmap |
|
273 | branchmap | |
278 | $USUAL_BUNDLE2_CAPS_SERVER$ |
|
274 | $USUAL_BUNDLE2_CAPS_SERVER$ | |
279 | changegroupsubset |
|
275 | changegroupsubset | |
280 | compression=$BUNDLE2_COMPRESSIONS$ |
|
276 | compression=$BUNDLE2_COMPRESSIONS$ | |
281 | getbundle |
|
277 | getbundle | |
282 | httpheader=1024 |
|
278 | httpheader=1024 | |
283 | httpmediatype=0.1rx,0.1tx,0.2tx |
|
279 | httpmediatype=0.1rx,0.1tx,0.2tx | |
284 | known |
|
280 | known | |
285 | lookup |
|
281 | lookup | |
286 | pushkey |
|
282 | pushkey | |
287 | unbundle=HG10GZ,HG10BZ,HG10UN |
|
283 | unbundle=HG10GZ,HG10BZ,HG10UN | |
288 | unbundlehash |
|
284 | unbundlehash | |
289 | Bundle2 capabilities: |
|
285 | Bundle2 capabilities: | |
290 | HG20 |
|
286 | HG20 | |
291 | bookmarks |
|
287 | bookmarks | |
292 | changegroup |
|
288 | changegroup | |
293 | 01 |
|
289 | 01 | |
294 | 02 |
|
290 | 02 | |
295 | checkheads |
|
291 | checkheads | |
296 | related |
|
292 | related | |
297 | digests |
|
293 | digests | |
298 | md5 |
|
294 | md5 | |
299 | sha1 |
|
295 | sha1 | |
300 | sha512 |
|
296 | sha512 | |
301 | error |
|
297 | error | |
302 | abort |
|
298 | abort | |
303 | unsupportedcontent |
|
299 | unsupportedcontent | |
304 | pushraced |
|
300 | pushraced | |
305 | pushkey |
|
301 | pushkey | |
306 | hgtagsfnodes |
|
302 | hgtagsfnodes | |
307 | listkeys |
|
303 | listkeys | |
308 | phases |
|
304 | phases | |
309 | heads |
|
305 | heads | |
310 | pushkey |
|
306 | pushkey | |
311 | remote-changegroup |
|
307 | remote-changegroup | |
312 | http |
|
308 | http | |
313 | https |
|
309 | https | |
314 |
|
310 | |||
315 | $ hg clone --stream -U http://localhost:$HGPORT server-disabled |
|
311 | $ hg clone --stream -U http://localhost:$HGPORT server-disabled | |
316 | warning: stream clone requested but server has them disabled |
|
312 | warning: stream clone requested but server has them disabled | |
317 | requesting all changes |
|
313 | requesting all changes | |
318 | adding changesets |
|
314 | adding changesets | |
319 | adding manifests |
|
315 | adding manifests | |
320 | adding file changes |
|
316 | adding file changes | |
321 | added 3 changesets with 1088 changes to 1088 files |
|
317 | added 3 changesets with 1088 changes to 1088 files | |
322 | new changesets 96ee1d7354c4:5223b5e3265f |
|
318 | new changesets 96ee1d7354c4:5223b5e3265f | |
323 |
|
319 | |||
324 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" |
|
320 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" | |
325 | 200 Script output follows |
|
321 | 200 Script output follows | |
326 | content-type: application/mercurial-0.2 |
|
322 | content-type: application/mercurial-0.2 | |
327 |
|
323 | |||
328 |
|
324 | |||
329 | $ f --size body --hexdump --bytes 100 |
|
325 | $ f --size body --hexdump --bytes 100 | |
330 | body: size=232 |
|
326 | body: size=232 | |
331 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
327 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| | |
332 | 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| |
|
328 | 0010: cf 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |..ERROR:ABORT...| | |
333 | 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| |
|
329 | 0020: 00 01 01 07 3c 04 72 6d 65 73 73 61 67 65 73 74 |....<.rmessagest| | |
334 | 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| |
|
330 | 0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques| | |
335 | 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| |
|
331 | 0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d| | |
336 | 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| |
|
332 | 0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th| | |
337 | 0060: 69 73 20 66 |is f| |
|
333 | 0060: 69 73 20 66 |is f| | |
338 |
|
334 | |||
339 | #endif |
|
335 | #endif | |
340 |
|
336 | |||
341 | $ killdaemons.py |
|
337 | $ killdaemons.py | |
342 | $ cd server |
|
338 | $ cd server | |
343 | $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt |
|
339 | $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt | |
344 | $ cat hg.pid > $DAEMON_PIDS |
|
340 | $ cat hg.pid > $DAEMON_PIDS | |
345 | $ cd .. |
|
341 | $ cd .. | |
346 |
|
342 | |||
347 | Basic clone |
|
343 | Basic clone | |
348 |
|
344 | |||
349 | #if stream-legacy |
|
345 | #if stream-legacy | |
350 | $ hg clone --stream -U http://localhost:$HGPORT clone1 |
|
346 | $ hg clone --stream -U http://localhost:$HGPORT clone1 | |
351 | streaming all changes |
|
347 | streaming all changes | |
352 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
348 | 1090 files to transfer, 102 KB of data (no-zstd !) | |
353 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
349 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) | |
354 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
350 | 1090 files to transfer, 98.8 KB of data (zstd !) | |
355 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
351 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) | |
356 | searching for changes |
|
352 | searching for changes | |
357 | no changes found |
|
353 | no changes found | |
358 | $ cat server/errors.txt |
|
354 | $ cat server/errors.txt | |
359 | #endif |
|
355 | #endif | |
360 | #if stream-bundle2 |
|
356 | #if stream-bundle2 | |
361 | $ hg clone --stream -U http://localhost:$HGPORT clone1 |
|
357 | $ hg clone --stream -U http://localhost:$HGPORT clone1 | |
362 | streaming all changes |
|
358 | streaming all changes | |
363 | 1093 files to transfer, 102 KB of data (no-zstd !) |
|
359 | 1093 files to transfer, 102 KB of data (no-zstd !) | |
364 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
360 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) | |
365 | 1093 files to transfer, 98.9 KB of data (zstd !) |
|
361 | 1093 files to transfer, 98.9 KB of data (zstd !) | |
366 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) |
|
362 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) | |
367 |
|
363 | |||
368 | $ ls -1 clone1/.hg/cache |
|
364 | $ ls -1 clone1/.hg/cache | |
369 | branch2-base |
|
365 | branch2-base | |
370 | branch2-immutable |
|
366 | branch2-immutable | |
371 | branch2-served |
|
367 | branch2-served | |
372 | branch2-served.hidden |
|
368 | branch2-served.hidden | |
373 | branch2-visible |
|
369 | branch2-visible | |
374 | branch2-visible-hidden |
|
370 | branch2-visible-hidden | |
375 | rbc-names-v1 |
|
371 | rbc-names-v1 | |
376 | rbc-revs-v1 |
|
372 | rbc-revs-v1 | |
377 | tags2 |
|
373 | tags2 | |
378 | tags2-served |
|
374 | tags2-served | |
379 | $ cat server/errors.txt |
|
375 | $ cat server/errors.txt | |
380 | #endif |
|
376 | #endif | |
381 |
|
377 | |||
382 | getbundle requests with stream=1 are uncompressed |
|
378 | getbundle requests with stream=1 are uncompressed | |
383 |
|
379 | |||
384 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" |
|
380 | $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1" | |
385 | 200 Script output follows |
|
381 | 200 Script output follows | |
386 | content-type: application/mercurial-0.2 |
|
382 | content-type: application/mercurial-0.2 | |
387 |
|
383 | |||
388 |
|
384 | |||
389 | #if no-zstd no-rust |
|
385 | #if no-zstd no-rust | |
390 | $ f --size --hex --bytes 256 body |
|
386 | $ f --size --hex --bytes 256 body | |
391 | body: size=119153 |
|
387 | body: size=119153 | |
392 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
388 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| | |
393 | 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
389 | 0010: 80 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| | |
394 | 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10| |
|
390 | 0020: 06 09 04 0c 44 62 79 74 65 63 6f 75 6e 74 31 30 |....Dbytecount10| | |
395 | 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109| |
|
391 | 0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109| | |
396 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| |
|
392 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| | |
397 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| |
|
393 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| | |
398 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| |
|
394 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| | |
399 | 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| |
|
395 | 0070: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| | |
400 | 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| |
|
396 | 0080: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| | |
401 | 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| |
|
397 | 0090: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| | |
402 | 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| |
|
398 | 00a0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| | |
403 | 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| |
|
399 | 00b0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| | |
404 | 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,| |
|
400 | 00c0: 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c |.)c.I.#....Vg.g,| | |
405 | 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............| |
|
401 | 00d0: 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 00 00 |i..9............| | |
406 | 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan| |
|
402 | 00e0: 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 61 6e |u0s&Edata/00chan| | |
407 | 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0| |
|
403 | 00f0: 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 61 30 |gelog-ab349180a0| | |
408 | #endif |
|
404 | #endif | |
409 | #if zstd no-rust |
|
405 | #if zstd no-rust | |
410 | $ f --size --hex --bytes 256 body |
|
406 | $ f --size --hex --bytes 256 body | |
411 | body: size=116340 |
|
407 | body: size=116340 | |
412 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
408 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| | |
413 | 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
409 | 0010: 9a 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| | |
414 | 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10| |
|
410 | 0020: 06 09 04 0c 5e 62 79 74 65 63 6f 75 6e 74 31 30 |....^bytecount10| | |
415 | 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| |
|
411 | 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| | |
416 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| |
|
412 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| | |
417 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| |
|
413 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| | |
418 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| |
|
414 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| | |
419 | 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres| |
|
415 | 0070: 32 43 72 65 76 6c 6f 67 2d 63 6f 6d 70 72 65 73 |2Crevlog-compres| | |
420 | 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl| |
|
416 | 0080: 73 69 6f 6e 2d 7a 73 74 64 25 32 43 72 65 76 6c |sion-zstd%2Crevl| | |
421 | 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev| |
|
417 | 0090: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev| | |
422 | 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s| |
|
418 | 00a0: 6c 6f 67 25 32 43 73 74 6f 72 65 00 00 80 00 73 |log%2Cstore....s| | |
423 | 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......| |
|
419 | 00b0: 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 00 00 |.Bdata/0.i......| | |
424 | 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................| |
|
420 | 00c0: 00 00 00 00 00 02 00 00 00 01 00 00 00 00 00 00 |................| | |
425 | 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.| |
|
421 | 00d0: 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 49 d3 |...........)c.I.| | |
426 | 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..| |
|
422 | 00e0: 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 00 00 |#....Vg.g,i..9..| | |
427 | 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed| |
|
423 | 00f0: 00 00 00 00 00 00 00 00 00 00 75 30 73 26 45 64 |..........u0s&Ed| | |
428 | #endif |
|
424 | #endif | |
429 | #if zstd rust no-dirstate-v2 |
|
425 | #if zstd rust no-dirstate-v2 | |
430 | $ f --size --hex --bytes 256 body |
|
426 | $ f --size --hex --bytes 256 body | |
431 | body: size=116361 |
|
427 | body: size=116361 | |
432 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
428 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| | |
433 | 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
429 | 0010: af 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| | |
434 | 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10| |
|
430 | 0020: 06 09 04 0c 73 62 79 74 65 63 6f 75 6e 74 31 30 |....sbytecount10| | |
435 | 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| |
|
431 | 0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| | |
436 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| |
|
432 | 0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 |3requirementsdot| | |
437 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| |
|
433 | 0050: 65 6e 63 6f 64 65 25 32 43 66 6e 63 61 63 68 65 |encode%2Cfncache| | |
438 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| |
|
434 | 0060: 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 25 |%2Cgeneraldelta%| | |
439 | 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod| |
|
435 | 0070: 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f 64 |2Cpersistent-nod| | |
440 | 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co| |
|
436 | 0080: 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 6f |emap%2Crevlog-co| | |
441 | 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2| |
|
437 | 0090: 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 32 |mpression-zstd%2| | |
442 | 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| |
|
438 | 00a0: 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 72 |Crevlogv1%2Cspar| | |
443 | 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| |
|
439 | 00b0: 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 65 |serevlog%2Cstore| | |
444 | 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| |
|
440 | 00c0: 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 00 |....s.Bdata/0.i.| | |
445 | 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| |
|
441 | 00d0: 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 00 |................| | |
446 | 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| |
|
442 | 00e0: 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff 80 |................| | |
447 | 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| |
|
443 | 00f0: 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 |)c.I.#....Vg.g,i| | |
448 | #endif |
|
444 | #endif | |
449 | #if zstd dirstate-v2 |
|
445 | #if zstd dirstate-v2 | |
450 | $ f --size --hex --bytes 256 body |
|
446 | $ f --size --hex --bytes 256 body | |
451 | body: size=109549 |
|
447 | body: size=109549 | |
452 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| |
|
448 | 0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......| | |
453 | 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| |
|
449 | 0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......| | |
454 | 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95| |
|
450 | 0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95| | |
455 | 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| |
|
451 | 0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030| | |
456 | 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote| |
|
452 | 0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote| | |
457 | 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs| |
|
453 | 0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs| | |
458 | 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach| |
|
454 | 0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach| | |
459 | 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta| |
|
455 | 0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta| | |
460 | 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no| |
|
456 | 0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no| | |
461 | 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c| |
|
457 | 0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c| | |
462 | 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%| |
|
458 | 00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%| | |
463 | 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| |
|
459 | 00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa| | |
464 | 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| |
|
460 | 00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor| | |
465 | 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| |
|
461 | 00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i| | |
466 | 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| |
|
462 | 00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................| | |
467 | 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| |
|
463 | 00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................| | |
468 | #endif |
|
464 | #endif | |
469 |
|
465 | |||
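All four hexdumps above open the same way: a one-byte length followed by the compression engine name (`\x04none` here), the `HG20` bundle2 magic, then a part header whose type is `STREAM2` and whose mandatory parameters carry `bytecount`, `filecount`, and `requirements`. A small sketch that peels off just the first two fields (the layout of the rest of the part header is omitted here):

    # first bytes of the bodies above: 04 6e 6f 6e 65 48 47 32 30 ...
    body = bytes.fromhex('046e6f6e6548473230')

    comp_len = body[0]                              # 0x04
    comp = body[1:1 + comp_len]                     # compression engine name
    magic = body[1 + comp_len:1 + comp_len + 4]     # bundle2 magic

    assert comp == b'none'
    assert magic == b'HG20'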
470 | --uncompressed is an alias for --stream |

466 | --uncompressed is an alias for --stream | |
471 |
|
467 | |||
472 | #if stream-legacy |
|
468 | #if stream-legacy | |
473 | $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed |
|
469 | $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed | |
474 | streaming all changes |
|
470 | streaming all changes | |
475 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
471 | 1090 files to transfer, 102 KB of data (no-zstd !) | |
476 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
472 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) | |
477 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
473 | 1090 files to transfer, 98.8 KB of data (zstd !) | |
478 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
474 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) | |
479 | searching for changes |
|
475 | searching for changes | |
480 | no changes found |
|
476 | no changes found | |
481 | #endif |
|
477 | #endif | |
482 | #if stream-bundle2 |
|
478 | #if stream-bundle2 | |
483 | $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed |
|
479 | $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed | |
484 | streaming all changes |
|
480 | streaming all changes | |
485 | 1093 files to transfer, 102 KB of data (no-zstd !) |
|
481 | 1093 files to transfer, 102 KB of data (no-zstd !) | |
486 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
482 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) | |
487 | 1093 files to transfer, 98.9 KB of data (zstd !) |
|
483 | 1093 files to transfer, 98.9 KB of data (zstd !) | |
488 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) |
|
484 | transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !) | |
489 | #endif |
|
485 | #endif | |
490 |
|
486 | |||
491 | Clone with background file closing enabled |
|
487 | Clone with background file closing enabled | |
492 |
|
488 | |||
493 | #if stream-legacy |
|
489 | #if stream-legacy | |
494 | $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding |
|
490 | $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding | |
495 | using http://localhost:$HGPORT/ |
|
491 | using http://localhost:$HGPORT/ | |
496 | sending capabilities command |
|
492 | sending capabilities command | |
497 | sending branchmap command |
|
493 | sending branchmap command | |
498 | streaming all changes |
|
494 | streaming all changes | |
499 | sending stream_out command |
|
495 | sending stream_out command | |
500 | 1090 files to transfer, 102 KB of data (no-zstd !) |
|
496 | 1090 files to transfer, 102 KB of data (no-zstd !) | |
501 | 1090 files to transfer, 98.8 KB of data (zstd !) |
|
497 | 1090 files to transfer, 98.8 KB of data (zstd !) | |
502 | starting 4 threads for background file closing |
|
498 | starting 4 threads for background file closing | |
503 | updating the branch cache |
|
499 | updating the branch cache | |
504 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) |
|
500 | transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !) | |
505 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) |
|
501 | transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !) | |
506 | query 1; heads |
|
502 | query 1; heads | |
507 | sending batch command |
|
503 | sending batch command | |
508 | searching for changes |
|
504 | searching for changes | |
509 | all remote heads known locally |
|
505 | all remote heads known locally | |
510 | no changes found |
|
506 | no changes found | |
511 | sending getbundle command |
|
507 | sending getbundle command | |
512 | bundle2-input-bundle: with-transaction |
|
508 | bundle2-input-bundle: with-transaction | |
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
  updating the branch cache
  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
#endif
#if stream-bundle2
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  query 1; heads
  sending batch command
  streaming all changes
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "stream2" (params: 3 mandatory) supported
  applying stream bundle
  1093 files to transfer, 102 KB of data (no-zstd !)
  1093 files to transfer, 98.9 KB of data (zstd !)
  starting 4 threads for background file closing
  starting 4 threads for background file closing
  updating the branch cache
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  bundle2-input-part: total payload size 118984 (no-zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
  bundle2-input-part: total payload size 116145 (zstd !)
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
  updating the branch cache
  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
#endif

Cannot stream clone when there are secret changesets

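The refusal exercised below comes from a server-side gate: a stream clone
hands out raw store files, so a repository containing secret changesets
refuses to generate one unless the operator explicitly opts in. A minimal
sketch of that decision, with hypothetical names (the flags mirror the
server.uncompressed and server.uncompressedallowsecret options used in this
test, not the exact Mercurial internals):

    # Sketch only, not the real implementation.
    def allow_stream_generation(uncompressed, has_secret, allow_secret=False):
        if not uncompressed:
            return False          # stream clones disabled outright
        if has_secret:
            return allow_secret   # secrets would leak via a raw file copy
        return True

    assert not allow_stream_generation(True, has_secret=True)
    assert allow_stream_generation(True, has_secret=True, allow_secret=True)
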
  $ hg -R server phase --force --secret -r tip
  $ hg clone --stream -U http://localhost:$HGPORT secret-denied
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

  $ killdaemons.py

Streaming of secrets can be overridden by server config

  $ cd server
  $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

#if stream-legacy
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
#endif
#if stream-bundle2
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1093 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1093 files to transfer, 98.9 KB of data (zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
#endif

  $ killdaemons.py

Verify interaction between preferuncompressed and secret presence

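Here the server prefers uncompressed (stream) clones, but the secret
changeset keeps the gate sketched earlier closed, so a plain clone quietly
falls back to a normal pull. A hedged sketch of the interaction, again with
hypothetical names (prefer_uncompressed mirrors server.preferuncompressed):

    # Sketch only: the server can prefer streaming, but cannot force it
    # when stream generation is not allowed.
    def effective_clone_mode(prefer_uncompressed, stream_allowed):
        if prefer_uncompressed and stream_allowed:
            return "stream"
        return "pull"

    assert effective_clone_mode(True, stream_allowed=False) == "pull"
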
  $ cd server
  $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

  $ killdaemons.py

Clone not allowed when full bundles disabled and can't serve secrets

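With full bundles disabled on top of that, the server is left with no way to
serve the clone at all: the stream path is refused because of the secret
changeset and the pull path is refused by configuration. A sketch of that
dead end (hypothetical names; the error string mirrors the abort below):

    # Sketch only: full_bundles_ok mirrors server.disablefullbundle=false.
    def pick_clone_method(stream_ok, full_bundles_ok):
        if stream_ok:
            return "stream"
        if not full_bundles_ok:
            raise RuntimeError("server has pull-based clones disabled")
        return "pull"
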
  $ cd server
  $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  remote: abort: server has pull-based clones disabled
  abort: pull failed on remote
  (remove --pull if specified or upgrade Mercurial)
  [100]

Local stream clone with secrets involved
(This is just a behavior test: if you have access to the repo's files, there
is no security boundary, so it isn't important to prevent a clone here.)

  $ hg clone -U --stream server local-secret
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

Stream clone while repo is changing:

  $ mkdir changing
  $ cd changing

extension for delaying the server process so we can reliably modify the repo
while cloning

  $ cat > stream_steps.py <<EOF
  > import os
  > import sys
  > from mercurial import (
  >     encoding,
  >     extensions,
  >     streamclone,
  >     testing,
  > )
  > WALKED_FILE_1 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_1']
  > WALKED_FILE_2 = encoding.environ[b'HG_TEST_STREAM_WALKED_FILE_2']
  >
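  > # sync point 1: signal the test that the server has finished walking
  > # the store, so the list of files to send and their sizes are fixed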
  > def _test_sync_point_walk_1(orig, repo):
  >     testing.write_file(WALKED_FILE_1)
  >
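  > # sync point 2: with the server's lock released, block until the test
  > # has made its concurrent commit and created WALKED_FILE_2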
  > def _test_sync_point_walk_2(orig, repo):
  >     assert repo._currentlock(repo._lockref) is None
  >     testing.wait_file(WALKED_FILE_2)
  >
  > extensions.wrapfunction(
  >     streamclone,
  >     '_test_sync_point_walk_1',
  >     _test_sync_point_walk_1
  > )
  > extensions.wrapfunction(
  >     streamclone,
  >     '_test_sync_point_walk_2',
  >     _test_sync_point_walk_2
  > )
  > EOF

prepare a repo with a small and a big file to cover both code paths in
emitrevlogdata

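The point of pairing an empty f1 with a 50000-line f2 is to hit both emission
paths in emitrevlogdata: small files are sent with a single read, while larger
ones are streamed in bounded chunks. A rough sketch of that split, assuming an
illustrative chunk size (the real threshold lives in streamclone.py):

    # Sketch of the two paths; emit_file_data and the 65536 threshold are
    # illustrative, not the exact Mercurial code.
    def emit_file_data(fp, size, chunk=65536):
        if size <= chunk:
            yield fp.read(size)               # small file: one read
        else:
            remaining = size
            while remaining > 0:              # big file: bounded chunks
                data = fp.read(min(chunk, remaining))
                if not data:
                    break
                yield data
                remaining -= len(data)
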
  $ hg init repo
  $ touch repo/f1
  $ $TESTDIR/seq.py 50000 > repo/f2
  $ hg -R repo ci -Aqm "0"
  $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
  $ export HG_TEST_STREAM_WALKED_FILE_1
  $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
  $ export HG_TEST_STREAM_WALKED_FILE_2
  $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
  $ export HG_TEST_STREAM_WALKED_FILE_3
# $ cat << EOF >> $HGRCPATH
# > [hooks]
# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
# > EOF
  $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
  $ cat hg.pid >> $DAEMON_PIDS

clone while modifying the repo between stat'ing files with the write lock
held and actually serving the file content

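Why the concurrent commit below does not corrupt the clone: file sizes are
recorded while the server holds the lock (sync point 1), and each file is
later served up to exactly that recorded size, so bytes appended by the
commit are simply never transmitted. A self-contained sketch of this
snapshot-by-size idea (all names hypothetical):

    import io

    # size captured under lock at walk time (sync point 1)
    recorded_sizes = {"data/f2.i": 8}

    def serve_file(path, store, sizes):
        # send exactly the bytes that existed when the size was recorded;
        # anything appended afterwards falls outside the window
        return store[path].read(sizes[path])

    store = {"data/f2.i": io.BytesIO(b"old-data" + b"+APPENDED-BY-COMMIT")}
    assert serve_file("data/f2.i", store, recorded_sizes) == b"old-data"
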
  $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
  $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
  $ echo >> repo/f1
  $ echo >> repo/f2
  $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
  $ touch $HG_TEST_STREAM_WALKED_FILE_2
  $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
  $ hg -R clone id
  000000000000
  $ cat errors.log
  $ cd ..

Stream repository with bookmarks
--------------------------------

(revert introduction of secret changeset)

  $ hg -R server phase --draft 'secret()'

add a bookmark

  $ hg -R server bookmark -r tip some-bookmark

clone it

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1096 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1096 files to transfer, 99.1 KB of data (zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg verify -R with-bookmarks
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 3 changesets with 1088 changes to 1088 files
  $ hg -R with-bookmarks bookmarks
     some-bookmark             2:5223b5e3265f

Stream repository with phases
-----------------------------

Clone as publishing

  $ hg -R server phase -r 'all()'
  0: draft
  1: draft
  2: draft

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1096 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1096 files to transfer, 99.1 KB of data (zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg verify -R phase-publish
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 3 changesets with 1088 changes to 1088 files
  $ hg -R phase-publish phase -r 'all()'
  0: public
  1: public
  2: public

Clone as non publishing

  $ cat << EOF >> server/.hg/hgrc
  > [phases]
  > publish = False
  > EOF
  $ killdaemons.py
  $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS

#if stream-legacy

With v1 of the stream protocol, changesets are always cloned as public. This
makes stream v1 unsuitable for non-publishing repositories.

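A rough model of why: the v1 stream carries no phase information, so the
cloning client falls back to the default phase for the streamed changesets,
which is public. A sketch under that assumption (hypothetical names; the
bundle2 path below, by contrast, preserves the drafts):

    # Sketch only: models "no phase data transferred" as None.
    def phase_seen_by_client(transferred_phase):
        return transferred_phase if transferred_phase is not None else "public"

    assert phase_seen_by_client(None) == "public"    # stream v1
    assert phase_seen_by_client("draft") == "draft"  # bundle2 stream
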
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: public
  1: public
  2: public
#endif
#if stream-bundle2
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1097 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1097 files to transfer, 99.1 KB of data (zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: draft
  1: draft
  2: draft
#endif
  $ hg verify -R phase-no-publish
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 3 changesets with 1088 changes to 1088 files

  $ killdaemons.py

#if stream-legacy

With v1 of the stream protocol, changesets are always cloned as public. There
is no obsolescence marker exchange in stream v1.

#endif
#if stream-bundle2

Stream repository with obsolescence
-----------------------------------

Clone non-publishing with obsolescence

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution=all
  > EOF

  $ cd server
  $ echo foo > foo
  $ hg -q commit -m 'about to be pruned'
  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
  1 new obsolescence markers
  obsoleted 1 changesets
  $ hg up null -q
  $ hg log -T '{rev}: {phase}\n'
  2: draft
  1: draft
  0: draft
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
  streaming all changes
  1098 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1098 files to transfer, 99.5 KB of data (zstd !)
  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
  2: draft
  1: draft
  0: draft
  $ hg debugobsolete -R with-obsolescence
  8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg verify -R with-obsolescence
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  checked 4 changesets with 1089 changes to 1088 files

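The clone below fails by design: the server's store now contains obsolescence
markers, and a client with evolution disabled cannot receive them over a
stream clone, so the server refuses rather than silently dropping data. A
sketch of that negotiation (hypothetical names; the message mirrors the abort
in the output below):

    # Sketch only: models the server-side capability check.
    def check_stream_clone(server_has_obsmarkers, client_accepts_obsmarkers):
        if server_has_obsmarkers and not client_accepts_obsmarkers:
            raise RuntimeError(
                "server has obsolescence markers, but client cannot "
                "receive them via stream clone"
            )
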
  $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
  streaming all changes
  remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
  abort: pull failed on remote
  [100]

  $ killdaemons.py

#endif