##// END OF EJS Templates
configitems: register the 'server.uncompressedallowsecret' config
marmoute -
r33223:b045344f default
parent child Browse files
Show More
@@ -1,145 +1,148 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11
12 12 from . import (
13 13 error,
14 14 )
15 15
def loadconfigtable(ui, extname, configtable):
    """Merge an extension's declared config items into the ui's known set.

    Emits a develwarn for every item that would overwrite a config item
    already registered (by core or by a previously loaded extension).
    """
    for section, items in configtable.items():
        registered = ui._knownconfig.setdefault(section, {})
        # warn (deterministically, hence sorted) about clashing declarations
        for key in sorted(set(registered) & set(items)):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')
        registered.update(items)
28 28
class configitem(object):
    """Description of a single known config option.

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item (``None`` when unspecified),
    """

    def __init__(self, section, name, default=None):
        self.default = default
        self.name = name
        self.section = section
41 41
# table of every config item declared by mercurial core,
# keyed by section then by item name
coreitems = {}

def _register(configtable, *args, **kwargs):
    """Instantiate a configitem from the arguments and record it.

    Raises ProgrammingError if the same section/name pair is declared
    twice in the same table.
    """
    entry = configitem(*args, **kwargs)
    bysection = configtable.setdefault(entry.section, {})
    if entry.name in bysection:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (entry.section, entry.name))
    bysection[entry.name] = entry

# Registering actual config items

def getitemregister(configtable):
    """Return a registration function bound to ``configtable``."""
    return functools.partial(_register, configtable)

# register function used for all of core's own config items below
coreconfigitem = getitemregister(coreitems)
58 58
# Declarations of core config items: section, name, default.
# NOTE(review): mutable defaults are registered as factories (e.g. ``list``)
# rather than instances — presumably called by the config reader to get a
# fresh value; confirm against the ui.config* implementations.
coreconfigitem('auth', 'cookiefile',
    default=None,
)
# bookmarks.pushing: internal hack for discovery
coreconfigitem('bookmarks', 'pushing',
    default=list,
)
# bundle.mainreporoot: internal hack for bundlerepo
coreconfigitem('bundle', 'mainreporoot',
    default='',
)
# bundle.reorder: experimental config
coreconfigitem('bundle', 'reorder',
    default='auto',
)
coreconfigitem('color', 'mode',
    default='auto',
)
# 'devel' section: developer/debugging knobs, not user-facing
coreconfigitem('devel', 'all-warnings',
    default=False,
)
coreconfigitem('devel', 'bundle2.debug',
    default=False,
)
coreconfigitem('devel', 'check-locks',
    default=False,
)
coreconfigitem('devel', 'check-relroot',
    default=False,
)
coreconfigitem('devel', 'disableloaddefaultcerts',
    default=False,
)
coreconfigitem('devel', 'legacy.exchange',
    default=list,
)
coreconfigitem('devel', 'servercafile',
    default='',
)
coreconfigitem('devel', 'serverexactprotocol',
    default='',
)
coreconfigitem('devel', 'serverrequirecert',
    default=False,
)
coreconfigitem('devel', 'strip-obsmarkers',
    default=True,
)
coreconfigitem('hostsecurity', 'ciphers',
    default=None,
)
coreconfigitem('hostsecurity', 'disabletls10warning',
    default=False,
)
coreconfigitem('patch', 'fuzz',
    default=2,
)
# 'server' section: behavior of this repository when serving others
coreconfigitem('server', 'bundle1',
    default=True,
)
coreconfigitem('server', 'bundle1gd',
    default=None,
)
coreconfigitem('server', 'compressionengines',
    default=list,
)
coreconfigitem('server', 'concurrent-push-mode',
    default='strict',
)
coreconfigitem('server', 'disablefullbundle',
    default=False,
)
coreconfigitem('server', 'maxhttpheaderlen',
    default=1024,
)
coreconfigitem('server', 'preferuncompressed',
    default=False,
)
# stream clones expose secret changesets, so serving them with secret
# changesets present is opt-in (see streamclone.allowservergeneration)
coreconfigitem('server', 'uncompressedallowsecret',
    default=False,
)
coreconfigitem('ui', 'clonebundleprefers',
    default=list,
)
coreconfigitem('ui', 'interactive',
    default=None,
)
coreconfigitem('ui', 'quiet',
    default=False,
)
@@ -1,417 +1,417 b''
1 1 # streamclone.py - producing and consuming streaming repository data
2 2 #
3 3 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11
12 12 from .i18n import _
13 13 from . import (
14 14 branchmap,
15 15 error,
16 16 phases,
17 17 store,
18 18 util,
19 19 )
20 20
def canperformstreamclone(pullop, bailifbundle2supported=False):
    """Whether it is possible to perform a streaming clone as part of pull.

    ``bailifbundle2supported`` will cause the function to return False if
    bundle2 stream clones are supported. It should only be called by the
    legacy stream clone code path.

    Returns a tuple of (supported, requirements). ``supported`` is True if
    streaming clone is supported and False otherwise. ``requirements`` is
    a set of repo requirements from the remote, or ``None`` if stream clone
    isn't supported.
    """
    repo = pullop.repo
    remote = pullop.remote

    bundle2supported = False
    if pullop.canusebundle2:
        if 'v1' in pullop.remotebundle2caps.get('stream', []):
            bundle2supported = True
        # else
            # Server doesn't support bundle2 stream clone or doesn't support
            # the versions we support. Fall back and possibly allow legacy.

    # Ensures legacy code path uses available bundle2.
    if bailifbundle2supported and bundle2supported:
        return False, None
    # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
    #elif not bailifbundle2supported and not bundle2supported:
    #    return False, None

    # Streaming clone only works on empty repositories.
    if len(repo):
        return False, None

    # Streaming clone only works if all data is being requested.
    if pullop.heads:
        return False, None

    streamrequested = pullop.streamclonerequested

    # If we don't have a preference, let the server decide for us. This
    # likely only comes into play in LANs.
    if streamrequested is None:
        # The server can advertise whether to prefer streaming clone.
        # NOTE(review): capable() result is used for truthiness here —
        # presumably it returns the capability value or False; confirm
        # against the peer/wireproto implementation.
        streamrequested = remote.capable('stream-preferred')

    if not streamrequested:
        return False, None

    # In order for stream clone to work, the client has to support all the
    # requirements advertised by the server.
    #
    # The server advertises its requirements via the "stream" and "streamreqs"
    # capability. "stream" (a value-less capability) is advertised if and only
    # if the only requirement is "revlogv1." Else, the "streamreqs" capability
    # is advertised and contains a comma-delimited list of requirements.
    requirements = set()
    if remote.capable('stream'):
        requirements.add('revlogv1')
    else:
        streamreqs = remote.capable('streamreqs')
        # This is weird and shouldn't happen with modern servers.
        if not streamreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but server has them '
                'disabled\n'))
            return False, None

        streamreqs = set(streamreqs.split(','))
        # Server requires something we don't support. Bail.
        missingreqs = streamreqs - repo.supportedformats
        if missingreqs:
            pullop.repo.ui.warn(_(
                'warning: stream clone requested but client is missing '
                'requirements: %s\n') % ', '.join(sorted(missingreqs)))
            pullop.repo.ui.warn(
                _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                  'for more information)\n'))
            return False, None
        requirements = streamreqs

    return True, requirements
103 103
def maybeperformlegacystreamclone(pullop):
    """Possibly perform a legacy stream clone operation.

    Legacy stream clones are performed as part of pull but before all other
    operations.

    A legacy stream clone will not be performed if a bundle2 stream clone is
    supported.
    """
    supported, requirements = canperformstreamclone(pullop)

    if not supported:
        return

    repo = pullop.repo
    remote = pullop.remote

    # Save remote branchmap. We will use it later to speed up branchcache
    # creation.
    rbranchmap = None
    if remote.capable('branchmap'):
        rbranchmap = remote.branchmap()

    repo.ui.status(_('streaming all changes\n'))

    fp = remote.stream_out()
    # First line of the stream is a status code: 0 means OK, 1 means the
    # server forbids the operation, 2 means remote locking failed (see the
    # checks below); anything else is an unknown error.
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)
    if resp == 1:
        raise error.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise error.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise error.Abort(_('the server sent an unknown error code'))

    # Second line is "<filecount> <bytecount>".
    l = fp.readline()
    try:
        filecount, bytecount = map(int, l.split(' ', 1))
    except (ValueError, TypeError):
        raise error.ResponseError(
            _('unexpected response from remote server:'), l)

    with repo.lock():
        consumev1(repo, fp, filecount, bytecount)

        # new requirements = old non-format requirements +
        #                    new format-related remote requirements
        # requirements from the streamed-in repository
        repo.requirements = requirements | (
                repo.requirements - repo.supportedformats)
        repo._applyopenerreqs()
        repo._writerequirements()

        if rbranchmap:
            branchmap.replacecache(repo, rbranchmap)

        repo.invalidate()
165 165
def allowservergeneration(repo):
    """Whether streaming clones are allowed from the server."""
    # The operator can disable stream clone service entirely.
    if not repo.ui.configbool('server', 'uncompressed', True, untrusted=True):
        return False

    # The way stream clone works makes it impossible to hide secret
    # changesets. So don't allow this by default.
    if phases.hassecret(repo):
        return repo.ui.configbool('server', 'uncompressedallowsecret')

    return True
178 178
# This is its own function so extensions can override it.
def _walkstreamfiles(repo):
    """Walk the store, yielding (unencoded name, encoded name, size) tuples."""
    return repo.store.walk()
182 182
def generatev1(repo):
    """Emit content for version 1 of a streaming clone.

    This returns a 3-tuple of (file count, byte size, data iterator).

    The data iterator consists of N entries for each file being transferred.
    Each file entry starts as a line with the file name and integer size
    delimited by a null byte.

    The raw file data follows. Following the raw file data is the next file
    entry, or EOF.

    When used on the wire protocol, an additional line indicating protocol
    success will be prepended to the stream. This function is not responsible
    for adding it.

    This function will obtain a repository lock to ensure a consistent view of
    the store is captured. It therefore may raise LockError.
    """
    entries = []
    total_bytes = 0
    # Get consistent snapshot of repo, lock during scan.
    with repo.lock():
        repo.ui.debug('scanning\n')
        # zero-size store files are skipped entirely
        for name, ename, size in _walkstreamfiles(repo):
            if size:
                entries.append((name, size))
                total_bytes += size

    repo.ui.debug('%d files, %d bytes to transfer\n' %
                  (len(entries), total_bytes))

    svfs = repo.svfs
    oldaudit = svfs.mustaudit
    debugflag = repo.ui.debugflag
    # NOTE(review): auditing is turned off for the duration of the transfer
    # and restored in the generator's finally — presumably to avoid
    # per-open path-audit overhead for names we just walked; confirm.
    svfs.mustaudit = False

    def emitrevlogdata():
        try:
            for name, size in entries:
                if debugflag:
                    repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
                # partially encode name over the wire for backwards compat
                yield '%s\0%d\n' % (store.encodedir(name), size)
                # small files are read in one call; larger ones are chunked
                if size <= 65536:
                    with svfs(name, 'rb') as fp:
                        yield fp.read(size)
                else:
                    for chunk in util.filechunkiter(svfs(name), limit=size):
                        yield chunk
        finally:
            svfs.mustaudit = oldaudit

    return len(entries), total_bytes, emitrevlogdata()
237 237
def generatev1wireproto(repo):
    """Emit content for version 1 of streaming clone suitable for the wire.

    This is the data output from ``generatev1()`` with a header line
    indicating file count and byte size.
    """
    filecount, bytecount, chunks = generatev1(repo)
    yield '%d %d\n' % (filecount, bytecount)
    for data in chunks:
        yield data
248 248
def generatebundlev1(repo, compression='UN'):
    """Emit content for version 1 of a stream clone bundle.

    The first 4 bytes of the output ("HGS1") denote this as stream clone
    bundle version 1.

    The next 2 bytes indicate the compression type. Only "UN" is currently
    supported.

    The next 16 bytes are two 64-bit big endian unsigned integers indicating
    file count and byte count, respectively.

    The next 2 bytes is a 16-bit big endian unsigned short declaring the length
    of the requirements string, including a trailing \0. The following N bytes
    are the requirements string, which is ASCII containing a comma-delimited
    list of repo requirements that are needed to support the data.

    The remaining content is the output of ``generatev1()`` (which may be
    compressed in the future).

    Returns a tuple of (requirements, data generator).
    """
    if compression != 'UN':
        raise ValueError('we do not support the compression argument yet')

    # Only format-related requirements matter for reading the streamed data.
    requirements = repo.requirements & repo.supportedformats
    requires = ','.join(sorted(requirements))

    def gen():
        yield 'HGS1'
        yield compression

        filecount, bytecount, it = generatev1(repo)
        repo.ui.status(_('writing %d bytes for %d files\n') %
                       (bytecount, filecount))

        # header: counts, then length-prefixed NUL-terminated requirements
        yield struct.pack('>QQ', filecount, bytecount)
        yield struct.pack('>H', len(requires) + 1)
        yield requires + '\0'

        # This is where we'll add compression in the future.
        assert compression == 'UN'

        seen = 0
        repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))

        for chunk in it:
            seen += len(chunk)
            repo.ui.progress(_('bundle'), seen, total=bytecount,
                             unit=_('bytes'))
            yield chunk

        repo.ui.progress(_('bundle'), None)

    return requirements, gen()
304 304
def consumev1(repo, fp, filecount, bytecount):
    """Apply the contents from version 1 of a streaming clone file handle.

    This takes the output from "stream_out" and applies it to the specified
    repository.

    Like "stream_out," the status line added by the wire protocol is not
    handled by this function.
    """
    with repo.lock():
        repo.ui.status(_('%d files to transfer, %s of data\n') %
                       (filecount, util.bytecount(bytecount)))
        handled_bytes = 0
        repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
        start = util.timer()

        # TODO: get rid of (potential) inconsistency
        #
        # If transaction is started and any @filecache property is
        # changed at this point, it causes inconsistency between
        # in-memory cached property and streamclone-ed file on the
        # disk. Nested transaction prevents transaction scope "clone"
        # below from writing in-memory changes out at the end of it,
        # even though in-memory changes are discarded at the end of it
        # regardless of transaction nesting.
        #
        # But transaction nesting can't be simply prohibited, because
        # nesting occurs also in ordinary case (e.g. enabling
        # clonebundles).

        with repo.transaction('clone'):
            with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                # Each entry in the stream is "<name>\0<size>\n" followed by
                # exactly <size> raw bytes of file content.
                for i in xrange(filecount):
                    # XXX doesn't support '\n' or '\r' in filenames
                    l = fp.readline()
                    try:
                        name, size = l.split('\0', 1)
                        size = int(size)
                    except (ValueError, TypeError):
                        raise error.ResponseError(
                            _('unexpected response from remote server:'), l)
                    if repo.ui.debugflag:
                        repo.ui.debug('adding %s (%s)\n' %
                                      (name, util.bytecount(size)))
                    # for backwards compat, name was partially encoded
                    path = store.decodedir(name)
                    with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                        for chunk in util.filechunkiter(fp, limit=size):
                            handled_bytes += len(chunk)
                            repo.ui.progress(_('clone'), handled_bytes,
                                             total=bytecount, unit=_('bytes'))
                            ofp.write(chunk)

        # force @filecache properties to be reloaded from
        # streamclone-ed file at next access
        repo.invalidate(clearfilecache=True)

        elapsed = util.timer() - start
        # guard against a zero/negative clock delta before dividing below
        if elapsed <= 0:
            elapsed = 0.001
        repo.ui.progress(_('clone'), None)
        repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(bytecount), elapsed,
                        util.bytecount(bytecount / elapsed)))
369 369
def readbundle1header(fp):
    """Parse the header of a version 1 stream clone bundle.

    Expects ``fp`` to be positioned at the 2-byte compression identifier
    (i.e. past the 4-byte "HGS1" magic). Returns a tuple of
    (file count, byte count, set of requirements).
    """
    comptype = fp.read(2)
    if comptype != 'UN':
        raise error.Abort(_('only uncompressed stream clone bundles are '
                            'supported; got %s') % comptype)

    # two big-endian u64s: file count and total byte count
    filecount, bytecount = struct.unpack('>QQ', fp.read(16))
    # big-endian u16 length of the NUL-terminated requirements string
    reqlen = struct.unpack('>H', fp.read(2))[0]
    reqdata = fp.read(reqlen)

    if not reqdata.endswith('\0'):
        raise error.Abort(_('malformed stream clone bundle: '
                            'requirements not properly encoded'))

    requirements = set(reqdata.rstrip('\0').split(','))

    return filecount, bytecount, requirements
387 387
def applybundlev1(repo, fp):
    """Apply the content from a stream clone bundle version 1.

    We assume the 4 byte header has been read and validated and the file
    handle is at the 2 byte compression identifier.
    """
    # Stream clone writes raw store files, so only an empty repo can
    # receive one.
    if len(repo):
        raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                            'repo'))

    filecount, bytecount, requirements = readbundle1header(fp)
    unsupported = requirements - repo.supportedformats
    if unsupported:
        raise error.Abort(_('unable to apply stream clone: '
                            'unsupported format: %s') %
                          ', '.join(sorted(unsupported)))

    consumev1(repo, fp, filecount, bytecount)
406 406
class streamcloneapplier(object):
    """Manage application of a streaming clone bundle to a repository.

    Wrapping ``applybundlev1()`` in a dedicated type lets bundle readers
    attach bundle-type-specific functionality.
    """

    def __init__(self, fh):
        # file handle positioned at the 2-byte compression identifier
        self._fh = fh

    def apply(self, repo):
        return applybundlev1(repo, self._fh)
General Comments 0
You need to be logged in to leave comments. Login now