streamclone: also stream caches to the client...
Boris Feld
r35785:5f5fb279 default
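For orientation before the diff: the v2 stream format documented in ``generatev2()`` below frames every entry as a one-byte destination marker ('s' for store, 'c' for cache), a varint-encoded name length, a varint-encoded data length, the file name, and finally the raw file data. Here is a minimal sketch of that framing, not the actual Mercurial code; the ``uvarintencode``/``uvarintdecodestream`` stand-ins assume the conventional 7-bits-per-byte varint layout that the ``util`` helpers of the same name appear to implement (Python 3 syntax for brevity):

    import io

    def uvarintencode(value):
        # assumed stand-in for util.uvarintencode: 7 data bits per byte,
        # most significant bit set on every byte except the last
        out = b''
        while value >= 0x80:
            out += bytes([(value & 0x7f) | 0x80])
            value >>= 7
        return out + bytes([value])

    def uvarintdecodestream(fp):
        # assumed stand-in for util.uvarintdecodestream
        result = shift = 0
        while True:
            byte = fp.read(1)[0]
            result |= (byte & 0x7f) << shift
            if not byte & 0x80:
                return result
            shift += 7

    def encodeentry(src, name, data):
        # one v2 entry: src byte, name length, data length, name, data
        return (src + uvarintencode(len(name)) + uvarintencode(len(data))
                + name + data)

    def decodeentry(fp):
        src = fp.read(1)
        namelen = uvarintdecodestream(fp)
        datalen = uvarintdecodestream(fp)
        return src, fp.read(namelen), fp.read(datalen)

    # round-trip a cache entry
    fp = io.BytesIO(encodeentry(b'c', b'branch2-served', b'cache payload'))
    assert decodeentry(fp) == (b'c', b'branch2-served', b'cache payload')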
@@ -1,595 +1,634 @@ mercurial/streamclone.py
 # streamclone.py - producing and consuming streaming repository data
 #
 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import contextlib
 import os
 import struct
 import tempfile
+import warnings
 
 from .i18n import _
 from . import (
     branchmap,
+    cacheutil,
     error,
     phases,
     store,
     util,
 )
 
 def canperformstreamclone(pullop, bundle2=False):
     """Whether it is possible to perform a streaming clone as part of pull.
 
     ``bundle2`` will cause the function to consider stream clone through
     bundle2 and only through bundle2.
 
     Returns a tuple of (supported, requirements). ``supported`` is True if
     streaming clone is supported and False otherwise. ``requirements`` is
     a set of repo requirements from the remote, or ``None`` if stream clone
     isn't supported.
     """
     repo = pullop.repo
     remote = pullop.remote
 
     bundle2supported = False
     if pullop.canusebundle2:
         if 'v2' in pullop.remotebundle2caps.get('stream', []):
             bundle2supported = True
         # else
             # Server doesn't support bundle2 stream clone or doesn't support
             # the versions we support. Fall back and possibly allow legacy.
 
     # Ensures legacy code path uses available bundle2.
     if bundle2supported and not bundle2:
         return False, None
     # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
     elif bundle2 and not bundle2supported:
         return False, None
 
     # Streaming clone only works on empty repositories.
     if len(repo):
         return False, None
 
     # Streaming clone only works if all data is being requested.
     if pullop.heads:
         return False, None
 
     streamrequested = pullop.streamclonerequested
 
     # If we don't have a preference, let the server decide for us. This
     # likely only comes into play in LANs.
     if streamrequested is None:
         # The server can advertise whether to prefer streaming clone.
         streamrequested = remote.capable('stream-preferred')
 
     if not streamrequested:
         return False, None
 
     # In order for stream clone to work, the client has to support all the
     # requirements advertised by the server.
     #
     # The server advertises its requirements via the "stream" and "streamreqs"
     # capability. "stream" (a value-less capability) is advertised if and only
     # if the only requirement is "revlogv1." Else, the "streamreqs" capability
     # is advertised and contains a comma-delimited list of requirements.
     requirements = set()
     if remote.capable('stream'):
         requirements.add('revlogv1')
     else:
         streamreqs = remote.capable('streamreqs')
         # This is weird and shouldn't happen with modern servers.
         if not streamreqs:
             pullop.repo.ui.warn(_(
                 'warning: stream clone requested but server has them '
                 'disabled\n'))
             return False, None
 
         streamreqs = set(streamreqs.split(','))
         # Server requires something we don't support. Bail.
         missingreqs = streamreqs - repo.supportedformats
         if missingreqs:
             pullop.repo.ui.warn(_(
                 'warning: stream clone requested but client is missing '
                 'requirements: %s\n') % ', '.join(sorted(missingreqs)))
             pullop.repo.ui.warn(
                 _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                   'for more information)\n'))
             return False, None
         requirements = streamreqs
 
     return True, requirements
 
 def maybeperformlegacystreamclone(pullop):
     """Possibly perform a legacy stream clone operation.
 
     Legacy stream clones are performed as part of pull but before all other
     operations.
 
     A legacy stream clone will not be performed if a bundle2 stream clone is
     supported.
     """
     supported, requirements = canperformstreamclone(pullop)
 
     if not supported:
         return
 
     repo = pullop.repo
     remote = pullop.remote
 
     # Save remote branchmap. We will use it later to speed up branchcache
     # creation.
     rbranchmap = None
     if remote.capable('branchmap'):
         rbranchmap = remote.branchmap()
 
     repo.ui.status(_('streaming all changes\n'))
 
     fp = remote.stream_out()
     l = fp.readline()
     try:
         resp = int(l)
     except ValueError:
         raise error.ResponseError(
             _('unexpected response from remote server:'), l)
     if resp == 1:
         raise error.Abort(_('operation forbidden by server'))
     elif resp == 2:
         raise error.Abort(_('locking the remote repository failed'))
     elif resp != 0:
         raise error.Abort(_('the server sent an unknown error code'))
 
     l = fp.readline()
     try:
         filecount, bytecount = map(int, l.split(' ', 1))
     except (ValueError, TypeError):
         raise error.ResponseError(
             _('unexpected response from remote server:'), l)
 
     with repo.lock():
         consumev1(repo, fp, filecount, bytecount)
 
         # new requirements = old non-format requirements +
         #                    new format-related remote requirements
         # requirements from the streamed-in repository
         repo.requirements = requirements | (
                 repo.requirements - repo.supportedformats)
         repo._applyopenerreqs()
         repo._writerequirements()
 
         if rbranchmap:
             branchmap.replacecache(repo, rbranchmap)
 
         repo.invalidate()
 
 def allowservergeneration(repo):
     """Whether streaming clones are allowed from the server."""
     if not repo.ui.configbool('server', 'uncompressed', untrusted=True):
         return False
 
     # The way stream clone works makes it impossible to hide secret changesets.
     # So don't allow this by default.
     secret = phases.hassecret(repo)
     if secret:
         return repo.ui.configbool('server', 'uncompressedallowsecret')
 
     return True
 
 # This is its own function so extensions can override it.
 def _walkstreamfiles(repo):
     return repo.store.walk()
 
 def generatev1(repo):
     """Emit content for version 1 of a streaming clone.
 
     This returns a 3-tuple of (file count, byte size, data iterator).
 
     The data iterator consists of N entries for each file being transferred.
     Each file entry starts as a line with the file name and integer size
     delimited by a null byte.
 
     The raw file data follows. Following the raw file data is the next file
     entry, or EOF.
 
     When used on the wire protocol, an additional line indicating protocol
     success will be prepended to the stream. This function is not responsible
     for adding it.
 
     This function will obtain a repository lock to ensure a consistent view of
     the store is captured. It therefore may raise LockError.
     """
     entries = []
     total_bytes = 0
     # Get consistent snapshot of repo, lock during scan.
     with repo.lock():
         repo.ui.debug('scanning\n')
         for name, ename, size in _walkstreamfiles(repo):
             if size:
                 entries.append((name, size))
                 total_bytes += size
 
     repo.ui.debug('%d files, %d bytes to transfer\n' %
                   (len(entries), total_bytes))
 
     svfs = repo.svfs
     debugflag = repo.ui.debugflag
 
     def emitrevlogdata():
         for name, size in entries:
             if debugflag:
                 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
             # partially encode name over the wire for backwards compat
             yield '%s\0%d\n' % (store.encodedir(name), size)
             # auditing at this stage is both pointless (paths are already
             # trusted by the local repo) and expensive
             with svfs(name, 'rb', auditpath=False) as fp:
                 if size <= 65536:
                     yield fp.read(size)
                 else:
                     for chunk in util.filechunkiter(fp, limit=size):
                         yield chunk
 
     return len(entries), total_bytes, emitrevlogdata()
 
 def generatev1wireproto(repo):
     """Emit content for version 1 of streaming clone suitable for the wire.
 
     This is the data output from ``generatev1()`` with 2 header lines. The
     first line indicates overall success. The 2nd contains the file count and
     byte size of payload.
 
     The success line contains "0" for success, "1" for stream generation not
     allowed, and "2" for error locking the repository (possibly indicating
     a permissions error for the server process).
     """
     if not allowservergeneration(repo):
         yield '1\n'
         return
 
     try:
         filecount, bytecount, it = generatev1(repo)
     except error.LockError:
         yield '2\n'
         return
 
     # Indicates successful response.
     yield '0\n'
     yield '%d %d\n' % (filecount, bytecount)
     for chunk in it:
         yield chunk
 
 def generatebundlev1(repo, compression='UN'):
     """Emit content for version 1 of a stream clone bundle.
 
     The first 4 bytes of the output ("HGS1") denote this as stream clone
     bundle version 1.
 
     The next 2 bytes indicate the compression type. Only "UN" is currently
     supported.
 
     The next 16 bytes are two 64-bit big endian unsigned integers indicating
     file count and byte count, respectively.
 
     The next 2 bytes are a 16-bit big endian unsigned short declaring the
     length of the requirements string, including a trailing \0. The following
     N bytes are the requirements string, which is ASCII containing a
     comma-delimited list of repo requirements that are needed to support the
     data.
 
     The remaining content is the output of ``generatev1()`` (which may be
     compressed in the future).
 
     Returns a tuple of (requirements, data generator).
     """
     if compression != 'UN':
         raise ValueError('we do not support the compression argument yet')
 
     requirements = repo.requirements & repo.supportedformats
     requires = ','.join(sorted(requirements))
 
     def gen():
         yield 'HGS1'
         yield compression
 
         filecount, bytecount, it = generatev1(repo)
         repo.ui.status(_('writing %d bytes for %d files\n') %
                        (bytecount, filecount))
 
         yield struct.pack('>QQ', filecount, bytecount)
         yield struct.pack('>H', len(requires) + 1)
         yield requires + '\0'
 
         # This is where we'll add compression in the future.
         assert compression == 'UN'
 
         seen = 0
         repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))
 
         for chunk in it:
             seen += len(chunk)
             repo.ui.progress(_('bundle'), seen, total=bytecount,
                              unit=_('bytes'))
             yield chunk
 
         repo.ui.progress(_('bundle'), None)
 
     return requirements, gen()
 
 def consumev1(repo, fp, filecount, bytecount):
     """Apply the contents from version 1 of a streaming clone file handle.
 
     This takes the output from "stream_out" and applies it to the specified
     repository.
 
     Like "stream_out," the status line added by the wire protocol is not
     handled by this function.
     """
     with repo.lock():
         repo.ui.status(_('%d files to transfer, %s of data\n') %
                        (filecount, util.bytecount(bytecount)))
         handled_bytes = 0
         repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
         start = util.timer()
 
         # TODO: get rid of (potential) inconsistency
         #
         # If transaction is started and any @filecache property is
         # changed at this point, it causes inconsistency between
         # in-memory cached property and streamclone-ed file on the
         # disk. Nested transaction prevents transaction scope "clone"
         # below from writing in-memory changes out at the end of it,
         # even though in-memory changes are discarded at the end of it
         # regardless of transaction nesting.
         #
         # But transaction nesting can't be simply prohibited, because
         # nesting occurs also in ordinary case (e.g. enabling
         # clonebundles).
 
         with repo.transaction('clone'):
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                 for i in xrange(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
                     l = fp.readline()
                     try:
                         name, size = l.split('\0', 1)
                         size = int(size)
                     except (ValueError, TypeError):
                         raise error.ResponseError(
                             _('unexpected response from remote server:'), l)
                     if repo.ui.debugflag:
                         repo.ui.debug('adding %s (%s)\n' %
                                       (name, util.bytecount(size)))
                     # for backwards compat, name was partially encoded
                     path = store.decodedir(name)
                     with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                         for chunk in util.filechunkiter(fp, limit=size):
                             handled_bytes += len(chunk)
                             repo.ui.progress(_('clone'), handled_bytes,
                                              total=bytecount, unit=_('bytes'))
                             ofp.write(chunk)
 
         # force @filecache properties to be reloaded from
         # streamclone-ed file at next access
         repo.invalidate(clearfilecache=True)
 
         elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
         repo.ui.progress(_('clone'), None)
         repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                        (util.bytecount(bytecount), elapsed,
                         util.bytecount(bytecount / elapsed)))
 
 def readbundle1header(fp):
     compression = fp.read(2)
     if compression != 'UN':
         raise error.Abort(_('only uncompressed stream clone bundles are '
                             'supported; got %s') % compression)
 
     filecount, bytecount = struct.unpack('>QQ', fp.read(16))
     requireslen = struct.unpack('>H', fp.read(2))[0]
     requires = fp.read(requireslen)
 
     if not requires.endswith('\0'):
         raise error.Abort(_('malformed stream clone bundle: '
                             'requirements not properly encoded'))
 
     requirements = set(requires.rstrip('\0').split(','))
 
     return filecount, bytecount, requirements
 
 def applybundlev1(repo, fp):
     """Apply the content from a stream clone bundle version 1.
 
     We assume the 4 byte header has been read and validated and the file handle
     is at the 2 byte compression identifier.
     """
     if len(repo):
         raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                             'repo'))
 
     filecount, bytecount, requirements = readbundle1header(fp)
     missingreqs = requirements - repo.supportedformats
     if missingreqs:
         raise error.Abort(_('unable to apply stream clone: '
                             'unsupported format: %s') %
                           ', '.join(sorted(missingreqs)))
 
     consumev1(repo, fp, filecount, bytecount)
 
 class streamcloneapplier(object):
     """Class to manage applying streaming clone bundles.
 
     We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
     readers to perform bundle type-specific functionality.
     """
     def __init__(self, fh):
         self._fh = fh
 
     def apply(self, repo):
         return applybundlev1(repo, self._fh)
 
 # type of file to stream
 _fileappend = 0 # append only file
 _filefull = 1 # full snapshot file
 
+# Source of the file
+_srcstore = 's' # store (svfs)
+_srccache = 'c' # cache (cachevfs)
+
 # This is its own function so extensions can override it.
 def _walkstreamfullstorefiles(repo):
     """list snapshot files from the store"""
     fnames = []
     if not repo.publishing():
         fnames.append('phaseroots')
     return fnames
 
-def _filterfull(entry, copy, vfs):
+def _filterfull(entry, copy, vfsmap):
     """actually copy the snapshot files"""
-    name, ftype, data = entry
+    src, name, ftype, data = entry
     if ftype != _filefull:
         return entry
-    return (name, ftype, copy(vfs.join(name)))
+    return (src, name, ftype, copy(vfsmap[src].join(name)))
 
 @contextlib.contextmanager
 def maketempcopies():
     """return a function to temporarily copy files"""
     files = []
     try:
         def copy(src):
             fd, dst = tempfile.mkstemp()
             os.close(fd)
             files.append(dst)
             util.copyfiles(src, dst, hardlink=True)
             return dst
         yield copy
     finally:
         for tmp in files:
             util.tryunlink(tmp)
 
+def _makemap(repo):
+    """make a (src -> vfs) map for the repo"""
+    vfsmap = {
+        _srcstore: repo.svfs,
+        _srccache: repo.cachevfs,
+    }
+    # we keep repo.vfs out of the map on purpose: there are too many dangers
+    # there (eg: .hg/hgrc)
+    assert repo.vfs not in vfsmap.values()
+
+    return vfsmap
+
 def _emit(repo, entries, totalfilesize):
     """actually emit the stream bundle"""
-    vfs = repo.svfs
+    vfsmap = _makemap(repo)
     progress = repo.ui.progress
     progress(_('bundle'), 0, total=totalfilesize, unit=_('bytes'))
     with maketempcopies() as copy:
         try:
             # copy is delayed until we are in the try
-            entries = [_filterfull(e, copy, vfs) for e in entries]
+            entries = [_filterfull(e, copy, vfsmap) for e in entries]
             yield None # this releases the lock on the repository
             seen = 0
 
-            for name, ftype, data in entries:
+            for src, name, ftype, data in entries:
+                vfs = vfsmap[src]
+                yield src
                 yield util.uvarintencode(len(name))
                 if ftype == _fileappend:
                     fp = vfs(name)
                     size = data
                 elif ftype == _filefull:
                     fp = open(data, 'rb')
                     size = util.fstat(fp).st_size
                 try:
                     yield util.uvarintencode(size)
                     yield name
                     if size <= 65536:
                         chunks = (fp.read(size),)
                     else:
                         chunks = util.filechunkiter(fp, limit=size)
                     for chunk in chunks:
                         seen += len(chunk)
                         progress(_('bundle'), seen, total=totalfilesize,
                                  unit=_('bytes'))
                         yield chunk
                 finally:
                     fp.close()
         finally:
             progress(_('bundle'), None)
 
 def generatev2(repo):
     """Emit content for version 2 of a streaming clone.
 
     the data stream consists of the following entries:
-    1) A varint containing the length of the filename
-    2) A varint containing the length of file data
-    3) N bytes containing the filename (the internal, store-agnostic form)
-    4) N bytes containing the file data
+    1) A char representing the file destination (eg: store or cache)
+    2) A varint containing the length of the filename
+    3) A varint containing the length of file data
+    4) N bytes containing the filename (the internal, store-agnostic form)
+    5) N bytes containing the file data
 
     Returns a 3-tuple of (file count, file size, data iterator).
     """
 
     with repo.lock():
 
         entries = []
         totalfilesize = 0
 
         repo.ui.debug('scanning\n')
         for name, ename, size in _walkstreamfiles(repo):
             if size:
-                entries.append((name, _fileappend, size))
+                entries.append((_srcstore, name, _fileappend, size))
                 totalfilesize += size
         for name in _walkstreamfullstorefiles(repo):
             if repo.svfs.exists(name):
                 totalfilesize += repo.svfs.lstat(name).st_size
-                entries.append((name, _filefull, None))
+                entries.append((_srcstore, name, _filefull, None))
+        for name in cacheutil.cachetocopy(repo):
+            if repo.cachevfs.exists(name):
+                totalfilesize += repo.cachevfs.lstat(name).st_size
+                entries.append((_srccache, name, _filefull, None))
 
         chunks = _emit(repo, entries, totalfilesize)
         first = next(chunks)
         assert first is None
 
     return len(entries), totalfilesize, chunks
 
+@contextlib.contextmanager
+def nested(*ctxs):
+    with warnings.catch_warnings():
+        # For some reason, Python decided 'nested' was deprecated without
+        # replacement. The officially advertised workaround is to filter the
+        # deprecation warning for people who actually need the feature.
+        warnings.filterwarnings("ignore", category=DeprecationWarning)
+        with contextlib.nested(*ctxs):
+            yield
+
 def consumev2(repo, fp, filecount, filesize):
     """Apply the contents from a version 2 streaming clone.
 
     Data is read from an object that only needs to provide a ``read(size)``
     method.
     """
     with repo.lock():
         repo.ui.status(_('%d files to transfer, %s of data\n') %
                        (filecount, util.bytecount(filesize)))
 
         start = util.timer()
         handledbytes = 0
         progress = repo.ui.progress
 
         progress(_('clone'), handledbytes, total=filesize, unit=_('bytes'))
 
-        vfs = repo.svfs
+        vfsmap = _makemap(repo)
 
         with repo.transaction('clone'):
-            with vfs.backgroundclosing(repo.ui):
+            ctxs = (vfs.backgroundclosing(repo.ui)
+                    for vfs in vfsmap.values())
+            with nested(*ctxs):
                 for i in range(filecount):
+                    src = fp.read(1)
+                    vfs = vfsmap[src]
                     namelen = util.uvarintdecodestream(fp)
                     datalen = util.uvarintdecodestream(fp)
 
                     name = fp.read(namelen)
 
                     if repo.ui.debugflag:
-                        repo.ui.debug('adding %s (%s)\n' %
-                                      (name, util.bytecount(datalen)))
+                        repo.ui.debug('adding [%s] %s (%s)\n' %
+                                      (src, name, util.bytecount(datalen)))
 
                     with vfs(name, 'w') as ofp:
                         for chunk in util.filechunkiter(fp, limit=datalen):
                             handledbytes += len(chunk)
                             progress(_('clone'), handledbytes, total=filesize,
                                      unit=_('bytes'))
                             ofp.write(chunk)
 
         # force @filecache properties to be reloaded from
         # streamclone-ed file at next access
         repo.invalidate(clearfilecache=True)
 
         elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
         progress(_('clone'), None)
         repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                        (util.bytecount(handledbytes), elapsed,
                         util.bytecount(handledbytes / elapsed)))
 
 def applybundlev2(repo, fp, filecount, filesize, requirements):
     missingreqs = [r for r in requirements if r not in repo.supported]
     if missingreqs:
         raise error.Abort(_('unable to apply stream clone: '
                             'unsupported format: %s') %
                           ', '.join(sorted(missingreqs)))
 
     consumev2(repo, fp, filecount, filesize)
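A note on the ``nested()`` helper introduced above: ``contextlib.nested`` was deprecated in Python 2.7 and removed entirely in Python 3, which is why the deprecation warning has to be filtered. On Python 3 the standard replacement is ``contextlib.ExitStack``; a roughly equivalent helper, sketched under that assumption, would be:

    import contextlib

    @contextlib.contextmanager
    def nested(*ctxs):
        # enter an arbitrary number of context managers; ExitStack unwinds
        # them in reverse order, like the old contextlib.nested did
        with contextlib.ExitStack() as stack:
            for ctx in ctxs:
                stack.enter_context(ctx)
            yield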
@@ -1,330 +1,336 @@ tests/test-clone-uncompressed.t
 #require serve
 
 #testcases stream-legacy stream-bundle2
 
 #if stream-bundle2
   $ cat << EOF >> $HGRCPATH
   > [experimental]
   > bundle2.stream = yes
   > EOF
 #endif
 
 Initialize repository
 the status call is to check for issue5130
 
   $ hg init server
   $ cd server
   $ touch foo
   $ hg -q commit -A -m initial
   >>> for i in range(1024):
   ...     with open(str(i), 'wb') as fh:
   ...         fh.write(str(i))
   $ hg -q commit -A -m 'add a lot of files'
   $ hg st
   $ hg serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
   $ cd ..
 
 Basic clone
 
 #if stream-legacy
   $ hg clone --stream -U http://localhost:$HGPORT clone1
   streaming all changes
   1027 files to transfer, 96.3 KB of data
   transferred 96.3 KB in * seconds (*/sec) (glob)
   searching for changes
   no changes found
 #endif
 #if stream-bundle2
   $ hg clone --stream -U http://localhost:$HGPORT clone1
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1030 files to transfer, 96.4 KB of data
+  transferred 96.4 KB in * seconds (* */sec) (glob)
+
+  $ ls -1 clone1/.hg/cache
+  branch2-served
+  rbc-names-v1
+  rbc-revs-v1
 #endif
 
 --uncompressed is an alias to --stream
 
 #if stream-legacy
   $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
   streaming all changes
   1027 files to transfer, 96.3 KB of data
   transferred 96.3 KB in * seconds (*/sec) (glob)
   searching for changes
   no changes found
 #endif
 #if stream-bundle2
   $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1030 files to transfer, 96.4 KB of data
+  transferred 96.4 KB in * seconds (* */sec) (glob)
 #endif
 
 Clone with background file closing enabled
 
 #if stream-legacy
   $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
   using http://localhost:$HGPORT/
   sending capabilities command
   sending branchmap command
   streaming all changes
   sending stream_out command
   1027 files to transfer, 96.3 KB of data
   starting 4 threads for background file closing
   transferred 96.3 KB in * seconds (*/sec) (glob)
   query 1; heads
   sending batch command
   searching for changes
   all remote heads known locally
   no changes found
   sending getbundle command
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-part: "phase-heads" supported
   bundle2-input-part: total payload size 24
   bundle2-input-bundle: 1 parts total
   checking for updated bookmarks
 #endif
 #if stream-bundle2
   $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
   using http://localhost:$HGPORT/
   sending capabilities command
   query 1; heads
   sending batch command
   streaming all changes
   sending getbundle command
   bundle2-input-bundle: with-transaction
   bundle2-input-part: "stream" (params: 4 mandatory) supported
   applying stream bundle
-  1027 files to transfer, 96.3 KB of data
+  1030 files to transfer, 96.4 KB of data
+  starting 4 threads for background file closing
   starting 4 threads for background file closing
-  transferred 96.3 KB in * seconds (* */sec) (glob)
-  bundle2-input-part: total payload size 110887
+  transferred 96.4 KB in * seconds (* */sec) (glob)
+  bundle2-input-part: total payload size 112077
   bundle2-input-part: "listkeys" (params: 1 mandatory) supported
   bundle2-input-bundle: 1 parts total
   checking for updated bookmarks
 #endif
 
 Cannot stream clone when there are secret changesets
 
   $ hg -R server phase --force --secret -r tip
   $ hg clone --stream -U http://localhost:$HGPORT secret-denied
   warning: stream clone requested but server has them disabled
   requesting all changes
   adding changesets
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
   new changesets 96ee1d7354c4
 
   $ killdaemons.py
 
 Streaming of secrets can be overridden by server config
 
   $ cd server
   $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
   $ cd ..
 
 #if stream-legacy
   $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
   streaming all changes
   1027 files to transfer, 96.3 KB of data
   transferred 96.3 KB in * seconds (*/sec) (glob)
   searching for changes
   no changes found
 #endif
 #if stream-bundle2
   $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1030 files to transfer, 96.4 KB of data
+  transferred 96.4 KB in * seconds (* */sec) (glob)
 #endif
 
   $ killdaemons.py
 
 Verify interaction between preferuncompressed and secret presence
 
   $ cd server
   $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
   $ cd ..
 
   $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
   requesting all changes
   adding changesets
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
   new changesets 96ee1d7354c4
 
   $ killdaemons.py
 
 Clone not allowed when full bundles disabled and can't serve secrets
 
   $ cd server
   $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid > $DAEMON_PIDS
   $ cd ..
 
   $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
   warning: stream clone requested but server has them disabled
   requesting all changes
   remote: abort: server has pull-based clones disabled
   abort: pull failed on remote
   (remove --pull if specified or upgrade Mercurial)
   [255]
 
 Local stream clone with secrets involved
 (This is just a test over behavior: if you have access to the repo's files,
 there is no security so it isn't important to prevent a clone here.)
 
   $ hg clone -U --stream server local-secret
   warning: stream clone requested but server has them disabled
   requesting all changes
   adding changesets
   adding manifests
   adding file changes
   added 1 changesets with 1 changes to 1 files
   new changesets 96ee1d7354c4
 
 Stream clone while repo is changing:
 
   $ mkdir changing
   $ cd changing
 
 extension for delaying the server process so we reliably can modify the repo
 while cloning
 
   $ cat > delayer.py <<EOF
   > import time
   > from mercurial import extensions, vfs
   > def __call__(orig, self, path, *args, **kwargs):
   >     if path == 'data/f1.i':
   >         time.sleep(2)
   >     return orig(self, path, *args, **kwargs)
   > extensions.wrapfunction(vfs.vfs, '__call__', __call__)
   > EOF
 
 prepare repo with small and big file to cover both code paths in emitrevlogdata
 
   $ hg init repo
   $ touch repo/f1
   $ $TESTDIR/seq.py 50000 > repo/f2
   $ hg -R repo ci -Aqm "0"
   $ hg serve -R repo -p $HGPORT1 -d --pid-file=hg.pid --config extensions.delayer=delayer.py
   $ cat hg.pid >> $DAEMON_PIDS
 
 clone while modifying the repo between stating file with write lock and
 actually serving file content
 
   $ hg clone -q --stream -U http://localhost:$HGPORT1 clone &
   $ sleep 1
   $ echo >> repo/f1
   $ echo >> repo/f2
   $ hg -R repo ci -m "1"
   $ wait
   $ hg -R clone id
   000000000000
   $ cd ..
 
 Stream repository with bookmarks
 --------------------------------
 
 (revert introduction of secret changeset)
 
   $ hg -R server phase --draft 'secret()'
 
 add a bookmark
 
   $ hg -R server bookmark -r tip some-bookmark
 
 clone it
 
 #if stream-legacy
   $ hg clone --stream http://localhost:$HGPORT with-bookmarks
   streaming all changes
   1027 files to transfer, 96.3 KB of data
   transferred 96.3 KB in * seconds (*) (glob)
   searching for changes
   no changes found
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
 #if stream-bundle2
   $ hg clone --stream http://localhost:$HGPORT with-bookmarks
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1033 files to transfer, 96.6 KB of data
+  transferred 96.6 KB in * seconds (* */sec) (glob)
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
   $ hg -R with-bookmarks bookmarks
      some-bookmark             1:c17445101a72
 
 Stream repository with phases
 -----------------------------
 
 Clone as publishing
 
   $ hg -R server phase -r 'all()'
   0: draft
   1: draft
 
 #if stream-legacy
   $ hg clone --stream http://localhost:$HGPORT phase-publish
   streaming all changes
   1027 files to transfer, 96.3 KB of data
   transferred 96.3 KB in * seconds (*) (glob)
   searching for changes
   no changes found
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
 #if stream-bundle2
   $ hg clone --stream http://localhost:$HGPORT phase-publish
   streaming all changes
-  1027 files to transfer, 96.3 KB of data
-  transferred 96.3 KB in * seconds (* */sec) (glob)
+  1033 files to transfer, 96.6 KB of data
+  transferred 96.6 KB in * seconds (* */sec) (glob)
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
 #endif
   $ hg -R phase-publish phase -r 'all()'
   0: public
   1: public
 
 Clone as non publishing
 
   $ cat << EOF >> server/.hg/hgrc
   > [phases]
   > publish = False
   > EOF
   $ killdaemons.py
   $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
   $ cat hg.pid >> $DAEMON_PIDS
 
 #if stream-legacy
   $ hg clone --stream http://localhost:$HGPORT phase-no-publish
   streaming all changes
   1027 files to transfer, 96.3 KB of data
   transferred 96.3 KB in * seconds (*) (glob)
   searching for changes
   no changes found
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R phase-no-publish phase -r 'all()'
   0: public
   1: public
 #endif
 #if stream-bundle2
   $ hg clone --stream http://localhost:$HGPORT phase-no-publish
   streaming all changes
-  1028 files to transfer, 96.4 KB of data
-  transferred 96.4 KB in * seconds (* */sec) (glob)
+  1034 files to transfer, 96.7 KB of data
+  transferred 96.7 KB in * seconds (* */sec) (glob)
   updating to branch default
   1025 files updated, 0 files merged, 0 files removed, 0 files unresolved
   $ hg -R phase-no-publish phase -r 'all()'
   0: draft
   1: draft
 #endif
 
   $ killdaemons.py
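For context, ``cacheutil.cachetocopy()`` itself is not part of this diff. Judging from the ``ls -1 clone1/.hg/cache`` output in the test above, a hypothetical stand-in returning the cache files seen there would look like:

    def cachetocopy(repo):
        # hypothetical stand-in: the real mercurial.cacheutil module is not
        # shown in this changeset; these names are simply the cache files
        # visible in .hg/cache after the stream-bundle2 clone above
        return ['branch2-served', 'rbc-names-v1', 'rbc-revs-v1']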