configitems: register the 'server.uncompressedallowsecret' config
Author: marmoute
Changeset: r33223:b045344f (branch: default)
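In short: the changeset declares 'server.uncompressedallowsecret' (default False) in the central configitems table, then drops the now-redundant inline default from the caller in streamclone.py. For illustration only, here is a minimal sketch of what the registered default buys the caller; it assumes a Mercurial checkout from around this revision is importable and is not part of the change itself:

from mercurial import ui as uimod

u = uimod.ui.load()

# Nothing set in any hgrc: configbool() falls back to the registered
# default declared in configitems.py below, i.e. False.
print(u.configbool('server', 'uncompressedallowsecret'))

# A server operator would opt in from an hgrc, e.g.
#   [server]
#   uncompressedallowsecret = true
u.setconfig('server', 'uncompressedallowsecret', 'true', 'example')
print(u.configbool('server', 'uncompressedallowsecret'))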
configitems.py
@@ -1,145 +1,148 @@
 # configitems.py - centralized declaration of configuration option
 #
 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import functools
 
 from . import (
     error,
 )
 
 def loadconfigtable(ui, extname, configtable):
     """update config item known to the ui with the extension ones"""
     for section, items in configtable.items():
         knownitems = ui._knownconfig.setdefault(section, {})
         knownkeys = set(knownitems)
         newkeys = set(items)
         for key in sorted(knownkeys & newkeys):
             msg = "extension '%s' overwrite config item '%s.%s'"
             msg %= (extname, section, key)
             ui.develwarn(msg, config='warn-config')
 
         knownitems.update(items)
 
 class configitem(object):
     """represent a known config item
 
     :section: the official config section where to find this item,
     :name: the official name within the section,
     :default: default value for this item,
     """
 
     def __init__(self, section, name, default=None):
         self.section = section
         self.name = name
         self.default = default
 
 coreitems = {}
 
 def _register(configtable, *args, **kwargs):
     item = configitem(*args, **kwargs)
     section = configtable.setdefault(item.section, {})
     if item.name in section:
         msg = "duplicated config item registration for '%s.%s'"
         raise error.ProgrammingError(msg % (item.section, item.name))
     section[item.name] = item
 
 # Registering actual config items
 
 def getitemregister(configtable):
     return functools.partial(_register, configtable)
 
 coreconfigitem = getitemregister(coreitems)
 
 coreconfigitem('auth', 'cookiefile',
     default=None,
 )
 # bookmarks.pushing: internal hack for discovery
 coreconfigitem('bookmarks', 'pushing',
     default=list,
 )
 # bundle.mainreporoot: internal hack for bundlerepo
 coreconfigitem('bundle', 'mainreporoot',
     default='',
 )
 # bundle.reorder: experimental config
 coreconfigitem('bundle', 'reorder',
     default='auto',
 )
 coreconfigitem('color', 'mode',
     default='auto',
 )
 coreconfigitem('devel', 'all-warnings',
     default=False,
 )
 coreconfigitem('devel', 'bundle2.debug',
     default=False,
 )
 coreconfigitem('devel', 'check-locks',
     default=False,
 )
 coreconfigitem('devel', 'check-relroot',
     default=False,
 )
 coreconfigitem('devel', 'disableloaddefaultcerts',
     default=False,
 )
 coreconfigitem('devel', 'legacy.exchange',
     default=list,
 )
 coreconfigitem('devel', 'servercafile',
     default='',
 )
 coreconfigitem('devel', 'serverexactprotocol',
     default='',
 )
 coreconfigitem('devel', 'serverrequirecert',
     default=False,
 )
 coreconfigitem('devel', 'strip-obsmarkers',
     default=True,
 )
 coreconfigitem('hostsecurity', 'ciphers',
     default=None,
 )
 coreconfigitem('hostsecurity', 'disabletls10warning',
     default=False,
 )
 coreconfigitem('patch', 'fuzz',
     default=2,
 )
 coreconfigitem('server', 'bundle1',
     default=True,
 )
 coreconfigitem('server', 'bundle1gd',
     default=None,
 )
 coreconfigitem('server', 'compressionengines',
     default=list,
 )
 coreconfigitem('server', 'concurrent-push-mode',
     default='strict',
 )
 coreconfigitem('server', 'disablefullbundle',
     default=False,
 )
 coreconfigitem('server', 'maxhttpheaderlen',
     default=1024,
 )
 coreconfigitem('server', 'preferuncompressed',
     default=False,
 )
+coreconfigitem('server', 'uncompressedallowsecret',
+    default=False,
+)
 coreconfigitem('ui', 'clonebundleprefers',
     default=list,
 )
 coreconfigitem('ui', 'interactive',
     default=None,
 )
 coreconfigitem('ui', 'quiet',
     default=False,
 )
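The helpers above are also the path extensions take to declare their options. The following sketch is hypothetical (the extension name 'myext' and its flag are invented) and only mirrors how getitemregister() and loadconfigtable() fit together; in the real flow the extension loading machinery is what passes an extension's configtable to loadconfigtable().

# Hypothetical extension module, illustration only.
from __future__ import absolute_import

from mercurial import configitems

configtable = {}
configitem = configitems.getitemregister(configtable)

# 'myext' and 'experimental-flag' are invented names.
configitem('myext', 'experimental-flag',
    default=False,
)

def uisetup(ui):
    # Called by hand here purely to illustrate what registration does:
    # the items get merged into ui._knownconfig, and collisions with
    # already-known keys trigger a develwarn().
    configitems.loadconfigtable(ui, 'myext', configtable)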
streamclone.py
@@ -1,417 +1,417 @@
 # streamclone.py - producing and consuming streaming repository data
 #
 # Copyright 2015 Gregory Szorc <gregory.szorc@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import struct
 
 from .i18n import _
 from . import (
     branchmap,
     error,
     phases,
     store,
     util,
 )
 
 def canperformstreamclone(pullop, bailifbundle2supported=False):
     """Whether it is possible to perform a streaming clone as part of pull.
 
     ``bailifbundle2supported`` will cause the function to return False if
     bundle2 stream clones are supported. It should only be called by the
     legacy stream clone code path.
 
     Returns a tuple of (supported, requirements). ``supported`` is True if
     streaming clone is supported and False otherwise. ``requirements`` is
     a set of repo requirements from the remote, or ``None`` if stream clone
     isn't supported.
     """
     repo = pullop.repo
     remote = pullop.remote
 
     bundle2supported = False
     if pullop.canusebundle2:
         if 'v1' in pullop.remotebundle2caps.get('stream', []):
             bundle2supported = True
         # else
             # Server doesn't support bundle2 stream clone or doesn't support
             # the versions we support. Fall back and possibly allow legacy.
 
     # Ensures legacy code path uses available bundle2.
     if bailifbundle2supported and bundle2supported:
         return False, None
     # Ensures bundle2 doesn't try to do a stream clone if it isn't supported.
     #elif not bailifbundle2supported and not bundle2supported:
     #    return False, None
 
     # Streaming clone only works on empty repositories.
     if len(repo):
         return False, None
 
     # Streaming clone only works if all data is being requested.
     if pullop.heads:
         return False, None
 
     streamrequested = pullop.streamclonerequested
 
     # If we don't have a preference, let the server decide for us. This
     # likely only comes into play in LANs.
     if streamrequested is None:
         # The server can advertise whether to prefer streaming clone.
         streamrequested = remote.capable('stream-preferred')
 
     if not streamrequested:
         return False, None
 
     # In order for stream clone to work, the client has to support all the
     # requirements advertised by the server.
     #
     # The server advertises its requirements via the "stream" and "streamreqs"
     # capability. "stream" (a value-less capability) is advertised if and only
     # if the only requirement is "revlogv1." Else, the "streamreqs" capability
     # is advertised and contains a comma-delimited list of requirements.
     requirements = set()
     if remote.capable('stream'):
         requirements.add('revlogv1')
     else:
         streamreqs = remote.capable('streamreqs')
         # This is weird and shouldn't happen with modern servers.
         if not streamreqs:
             pullop.repo.ui.warn(_(
                 'warning: stream clone requested but server has them '
                 'disabled\n'))
             return False, None
 
         streamreqs = set(streamreqs.split(','))
         # Server requires something we don't support. Bail.
         missingreqs = streamreqs - repo.supportedformats
         if missingreqs:
             pullop.repo.ui.warn(_(
                 'warning: stream clone requested but client is missing '
                 'requirements: %s\n') % ', '.join(sorted(missingreqs)))
             pullop.repo.ui.warn(
                 _('(see https://www.mercurial-scm.org/wiki/MissingRequirement '
                   'for more information)\n'))
             return False, None
         requirements = streamreqs
 
     return True, requirements
 
 def maybeperformlegacystreamclone(pullop):
     """Possibly perform a legacy stream clone operation.
 
     Legacy stream clones are performed as part of pull but before all other
     operations.
 
     A legacy stream clone will not be performed if a bundle2 stream clone is
     supported.
     """
     supported, requirements = canperformstreamclone(pullop)
 
     if not supported:
         return
 
     repo = pullop.repo
     remote = pullop.remote
 
     # Save remote branchmap. We will use it later to speed up branchcache
     # creation.
     rbranchmap = None
     if remote.capable('branchmap'):
         rbranchmap = remote.branchmap()
 
     repo.ui.status(_('streaming all changes\n'))
 
     fp = remote.stream_out()
     l = fp.readline()
     try:
         resp = int(l)
     except ValueError:
         raise error.ResponseError(
             _('unexpected response from remote server:'), l)
     if resp == 1:
         raise error.Abort(_('operation forbidden by server'))
     elif resp == 2:
         raise error.Abort(_('locking the remote repository failed'))
     elif resp != 0:
         raise error.Abort(_('the server sent an unknown error code'))
 
     l = fp.readline()
     try:
         filecount, bytecount = map(int, l.split(' ', 1))
     except (ValueError, TypeError):
         raise error.ResponseError(
             _('unexpected response from remote server:'), l)
 
     with repo.lock():
         consumev1(repo, fp, filecount, bytecount)
 
         # new requirements = old non-format requirements +
         #                    new format-related remote requirements
         # requirements from the streamed-in repository
         repo.requirements = requirements | (
                 repo.requirements - repo.supportedformats)
         repo._applyopenerreqs()
         repo._writerequirements()
 
         if rbranchmap:
             branchmap.replacecache(repo, rbranchmap)
 
         repo.invalidate()
 
 def allowservergeneration(repo):
     """Whether streaming clones are allowed from the server."""
     if not repo.ui.configbool('server', 'uncompressed', True, untrusted=True):
         return False
 
     # The way stream clone works makes it impossible to hide secret changesets.
     # So don't allow this by default.
     secret = phases.hassecret(repo)
     if secret:
-        return repo.ui.configbool('server', 'uncompressedallowsecret', False)
+        return repo.ui.configbool('server', 'uncompressedallowsecret')
 
     return True
 
 # This is it's own function so extensions can override it.
 def _walkstreamfiles(repo):
     return repo.store.walk()
 
 def generatev1(repo):
     """Emit content for version 1 of a streaming clone.
 
     This returns a 3-tuple of (file count, byte size, data iterator).
 
     The data iterator consists of N entries for each file being transferred.
     Each file entry starts as a line with the file name and integer size
     delimited by a null byte.
 
     The raw file data follows. Following the raw file data is the next file
     entry, or EOF.
 
     When used on the wire protocol, an additional line indicating protocol
     success will be prepended to the stream. This function is not responsible
     for adding it.
 
     This function will obtain a repository lock to ensure a consistent view of
     the store is captured. It therefore may raise LockError.
     """
     entries = []
     total_bytes = 0
     # Get consistent snapshot of repo, lock during scan.
     with repo.lock():
         repo.ui.debug('scanning\n')
         for name, ename, size in _walkstreamfiles(repo):
             if size:
                 entries.append((name, size))
                 total_bytes += size
 
     repo.ui.debug('%d files, %d bytes to transfer\n' %
                   (len(entries), total_bytes))
 
     svfs = repo.svfs
     oldaudit = svfs.mustaudit
     debugflag = repo.ui.debugflag
     svfs.mustaudit = False
 
     def emitrevlogdata():
         try:
             for name, size in entries:
                 if debugflag:
                     repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
                 # partially encode name over the wire for backwards compat
                 yield '%s\0%d\n' % (store.encodedir(name), size)
                 if size <= 65536:
                     with svfs(name, 'rb') as fp:
                         yield fp.read(size)
                 else:
                     for chunk in util.filechunkiter(svfs(name), limit=size):
                         yield chunk
         finally:
             svfs.mustaudit = oldaudit
 
     return len(entries), total_bytes, emitrevlogdata()
 
 def generatev1wireproto(repo):
     """Emit content for version 1 of streaming clone suitable for the wire.
 
     This is the data output from ``generatev1()`` with a header line
     indicating file count and byte size.
     """
     filecount, bytecount, it = generatev1(repo)
     yield '%d %d\n' % (filecount, bytecount)
     for chunk in it:
         yield chunk
 
 def generatebundlev1(repo, compression='UN'):
     """Emit content for version 1 of a stream clone bundle.
 
     The first 4 bytes of the output ("HGS1") denote this as stream clone
     bundle version 1.
 
     The next 2 bytes indicate the compression type. Only "UN" is currently
     supported.
 
     The next 16 bytes are two 64-bit big endian unsigned integers indicating
     file count and byte count, respectively.
 
     The next 2 bytes is a 16-bit big endian unsigned short declaring the length
     of the requirements string, including a trailing \0. The following N bytes
     are the requirements string, which is ASCII containing a comma-delimited
     list of repo requirements that are needed to support the data.
 
     The remaining content is the output of ``generatev1()`` (which may be
     compressed in the future).
 
     Returns a tuple of (requirements, data generator).
     """
     if compression != 'UN':
         raise ValueError('we do not support the compression argument yet')
 
     requirements = repo.requirements & repo.supportedformats
     requires = ','.join(sorted(requirements))
 
     def gen():
         yield 'HGS1'
         yield compression
 
         filecount, bytecount, it = generatev1(repo)
         repo.ui.status(_('writing %d bytes for %d files\n') %
                        (bytecount, filecount))
 
         yield struct.pack('>QQ', filecount, bytecount)
         yield struct.pack('>H', len(requires) + 1)
         yield requires + '\0'
 
         # This is where we'll add compression in the future.
         assert compression == 'UN'
 
         seen = 0
         repo.ui.progress(_('bundle'), 0, total=bytecount, unit=_('bytes'))
 
         for chunk in it:
             seen += len(chunk)
             repo.ui.progress(_('bundle'), seen, total=bytecount,
                              unit=_('bytes'))
             yield chunk
 
         repo.ui.progress(_('bundle'), None)
 
     return requirements, gen()
 
 def consumev1(repo, fp, filecount, bytecount):
     """Apply the contents from version 1 of a streaming clone file handle.
 
     This takes the output from "stream_out" and applies it to the specified
     repository.
 
     Like "stream_out," the status line added by the wire protocol is not
     handled by this function.
     """
     with repo.lock():
         repo.ui.status(_('%d files to transfer, %s of data\n') %
                        (filecount, util.bytecount(bytecount)))
         handled_bytes = 0
         repo.ui.progress(_('clone'), 0, total=bytecount, unit=_('bytes'))
         start = util.timer()
 
         # TODO: get rid of (potential) inconsistency
         #
         # If transaction is started and any @filecache property is
         # changed at this point, it causes inconsistency between
         # in-memory cached property and streamclone-ed file on the
         # disk. Nested transaction prevents transaction scope "clone"
         # below from writing in-memory changes out at the end of it,
         # even though in-memory changes are discarded at the end of it
         # regardless of transaction nesting.
         #
         # But transaction nesting can't be simply prohibited, because
         # nesting occurs also in ordinary case (e.g. enabling
         # clonebundles).
 
         with repo.transaction('clone'):
             with repo.svfs.backgroundclosing(repo.ui, expectedcount=filecount):
                 for i in xrange(filecount):
                     # XXX doesn't support '\n' or '\r' in filenames
                     l = fp.readline()
                     try:
                         name, size = l.split('\0', 1)
                         size = int(size)
                     except (ValueError, TypeError):
                         raise error.ResponseError(
                             _('unexpected response from remote server:'), l)
                     if repo.ui.debugflag:
                         repo.ui.debug('adding %s (%s)\n' %
                                       (name, util.bytecount(size)))
                     # for backwards compat, name was partially encoded
                     path = store.decodedir(name)
                     with repo.svfs(path, 'w', backgroundclose=True) as ofp:
                         for chunk in util.filechunkiter(fp, limit=size):
                             handled_bytes += len(chunk)
                             repo.ui.progress(_('clone'), handled_bytes,
                                              total=bytecount, unit=_('bytes'))
                             ofp.write(chunk)
 
         # force @filecache properties to be reloaded from
         # streamclone-ed file at next access
         repo.invalidate(clearfilecache=True)
 
         elapsed = util.timer() - start
         if elapsed <= 0:
             elapsed = 0.001
         repo.ui.progress(_('clone'), None)
         repo.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                        (util.bytecount(bytecount), elapsed,
                         util.bytecount(bytecount / elapsed)))
 
 def readbundle1header(fp):
     compression = fp.read(2)
     if compression != 'UN':
         raise error.Abort(_('only uncompressed stream clone bundles are '
                             'supported; got %s') % compression)
 
     filecount, bytecount = struct.unpack('>QQ', fp.read(16))
     requireslen = struct.unpack('>H', fp.read(2))[0]
     requires = fp.read(requireslen)
 
     if not requires.endswith('\0'):
         raise error.Abort(_('malformed stream clone bundle: '
                             'requirements not properly encoded'))
 
     requirements = set(requires.rstrip('\0').split(','))
 
     return filecount, bytecount, requirements
 
 def applybundlev1(repo, fp):
     """Apply the content from a stream clone bundle version 1.
 
     We assume the 4 byte header has been read and validated and the file handle
     is at the 2 byte compression identifier.
     """
     if len(repo):
         raise error.Abort(_('cannot apply stream clone bundle on non-empty '
                             'repo'))
 
     filecount, bytecount, requirements = readbundle1header(fp)
     missingreqs = requirements - repo.supportedformats
     if missingreqs:
         raise error.Abort(_('unable to apply stream clone: '
                             'unsupported format: %s') %
                           ', '.join(sorted(missingreqs)))
 
     consumev1(repo, fp, filecount, bytecount)
 
 class streamcloneapplier(object):
     """Class to manage applying streaming clone bundles.
 
     We need to wrap ``applybundlev1()`` in a dedicated type to enable bundle
     readers to perform bundle type-specific functionality.
     """
     def __init__(self, fh):
         self._fh = fh
 
     def apply(self, repo):
         return applybundlev1(repo, self._fh)
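As a side note on the surrounding (unchanged) code: the stream clone bundle v1 header that generatebundlev1() emits and readbundle1header() parses can be reproduced by hand from the docstrings above. The sketch below uses made-up counts and requirement names and is not part of the changeset:

import struct
from io import BytesIO

# Build a header the way generatebundlev1() lays it out: 'HGS1' magic,
# 2-byte compression id, >QQ file/byte counts, >H requirements length,
# then the NUL-terminated requirements string.
requires = b'generaldelta,revlogv1'
header = (b'HGS1' + b'UN'
          + struct.pack('>QQ', 3, 12345)
          + struct.pack('>H', len(requires) + 1)
          + requires + b'\0')

# Parse it back the way readbundle1header() does, after the 4-byte magic.
fp = BytesIO(header)
assert fp.read(4) == b'HGS1'
assert fp.read(2) == b'UN'
filecount, bytecount = struct.unpack('>QQ', fp.read(16))
requireslen = struct.unpack('>H', fp.read(2))[0]
requirements = set(fp.read(requireslen).rstrip(b'\0').split(b','))
print('%d files, %d bytes, requirements: %s'
      % (filecount, bytecount,
         b','.join(sorted(requirements)).decode('ascii')))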