wireproto: properly call clonebundles command...
Gregory Szorc - r37667:a1687996 default
@@ -1,2409 +1,2410 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from .thirdparty import (
    attr,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    obsolete,
    phases,
    pushkey,
    pycompat,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)
from .utils import (
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps bundle versions to content options, used to choose which parts to
# include in the bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

@attr.s
class bundlespec(object):
    compression = attr.ib()
    version = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]

    return bundlespec(compression, version, params, contentopts)

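# Illustrative sketch (not part of the original module): how a bundle
# specification string decomposes under parsebundlespec(). The spec value and
# the expected fields below are assumptions derived from the tables above
# ('gzip' maps to internal engine name 'GZ', 'v2' to changegroup version '02').
#
#   bs = parsebundlespec(repo, 'gzip-v2;obsolescence=true')
#   # bs.compression == 'GZ'                    internal engine name
#   # bs.version == '02'                        internal changegroup version
#   # bs.params == {'obsolescence': 'true'}     URI-decoded key=value pairs
#   # bs.contentopts['changegroup'] is True     copied from _bundlespeccontentopts
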
def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

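# Summary sketch of the 4-byte header dispatch performed by readbundle()
# above (a restatement of the code paths, not an exhaustive list):
#
#   'HG10' + 2-byte compression  -> changegroup.cg1unpacker
#   'HG2x' (e.g. 'HG20')         -> bundle2.getunbundler
#   'HGS1'                       -> streamclone.streamcloneapplier
#   leading '\0' (stream input)  -> headerless bundle, fixed up as HG10 + 'UN'
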
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    This seeks the input file handle; the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

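# Example return values of getbundlespec(), inferred from the branches above
# (illustrative, not exhaustive):
#
#   cg1 bundle compressed with bzip2     -> 'bzip2-v1'
#   bundle2 without a Compression param  -> 'none-v2'
#   bundle2 carrying a stream2 part      -> 'none-v2;stream=v2;<requirements>'
#   legacy stream clone bundle           -> 'none-packed1;<requirements>'
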
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # Value is a list of bundle versions to pick from; the highest version
    # should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

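# Example (sketch): forcing the legacy bundle1 exchange during tests via the
# developer config read above, e.g. in an hgrc:
#
#   [devel]
#   legacy.exchange = bundle1
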
class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

    # mapping of messages used when pushing bookmarks
    bookmsgmap = {'update': (_("updating bookmark %s\n"),
                             _('updating bookmark %s failed!\n')),
                  'export': (_("exporting bookmark %s\n"),
                             _('exporting bookmark %s failed!\n')),
                  'delete': (_("deleting remote bookmark %s\n"),
                             _('deleting remote bookmark %s failed!\n')),
                  }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

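# Minimal usage sketch (assumptions: `repo` is a local repository object and
# `remote` a peer, e.g. obtained via hg.peer(); names are illustrative):
#
#   pushop = push(repo, remote, revs=[repo['tip'].node()])
#   if pushop.cgresult is None:
#       repo.ui.status('nothing to push\n')
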
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

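# Sketch: how an extension could register an additional discovery step using
# the decorator above (the step name and helper below are hypothetical):
#
#   @pushdiscovery('mydata')
#   def _pushdiscoverymydata(pushop):
#       pushop.mydata = computemydata(pushop.repo)   # hypothetical helper
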
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
            and remotephases    # server supports phases
            and not pushop.outgoing.missing # no changesets to be pushed
            and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote by public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
            and pushop.repo.obsstore
            and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmarks to push based on the comparison with the
    remote bookmarks

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to stay within the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are pushing and there is at least one obsolete or
            # unstable changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking heads only
            # is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

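# Sketch: registering a bundle2 part generator at an explicit position via
# the `idx` parameter of the decorator above (the part name and payload are
# hypothetical):
#
#   @b2partsgenerator('my-part', idx=0)   # run before the other steps
#   def _pushb2mypart(pushop, bundler):
#       if 'my-part' in pushop.stepsdone:
#           return
#       pushop.stepsdone.add('my-part')
#       bundler.newpart('my-part', data=b'...')
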
786 def _pushb2ctxcheckheads(pushop, bundler):
786 def _pushb2ctxcheckheads(pushop, bundler):
787 """Generate race condition checking parts
787 """Generate race condition checking parts
788
788
789 Exists as an independent function to aid extensions
789 Exists as an independent function to aid extensions
790 """
790 """
791 # * 'force' do not check for push race,
791 # * 'force' do not check for push race,
792 # * if we don't push anything, there are nothing to check.
792 # * if we don't push anything, there are nothing to check.
793 if not pushop.force and pushop.outgoing.missingheads:
793 if not pushop.force and pushop.outgoing.missingheads:
794 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
794 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
795 emptyremote = pushop.pushbranchmap is None
795 emptyremote = pushop.pushbranchmap is None
796 if not allowunrelated or emptyremote:
796 if not allowunrelated or emptyremote:
797 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
797 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
798 else:
798 else:
799 affected = set()
799 affected = set()
800 for branch, heads in pushop.pushbranchmap.iteritems():
800 for branch, heads in pushop.pushbranchmap.iteritems():
801 remoteheads, newheads, unsyncedheads, discardedheads = heads
801 remoteheads, newheads, unsyncedheads, discardedheads = heads
802 if remoteheads is not None:
802 if remoteheads is not None:
803 remote = set(remoteheads)
803 remote = set(remoteheads)
804 affected |= set(discardedheads) & remote
804 affected |= set(discardedheads) & remote
805 affected |= remote - set(newheads)
805 affected |= remote - set(newheads)
806 if affected:
806 if affected:
807 data = iter(sorted(affected))
807 data = iter(sorted(affected))
808 bundler.newpart('check:updated-heads', data=data)
808 bundler.newpart('check:updated-heads', data=data)
809
809
810 def _pushing(pushop):
810 def _pushing(pushop):
811 """return True if we are pushing anything"""
811 """return True if we are pushing anything"""
812 return bool(pushop.outgoing.missing
812 return bool(pushop.outgoing.missing
813 or pushop.outdatedphases
813 or pushop.outdatedphases
814 or pushop.outobsmarkers
814 or pushop.outobsmarkers
815 or pushop.outbookmarks)
815 or pushop.outbookmarks)
816
816
817 @b2partsgenerator('check-bookmarks')
817 @b2partsgenerator('check-bookmarks')
818 def _pushb2checkbookmarks(pushop, bundler):
818 def _pushb2checkbookmarks(pushop, bundler):
819 """insert bookmark move checking"""
819 """insert bookmark move checking"""
820 if not _pushing(pushop) or pushop.force:
820 if not _pushing(pushop) or pushop.force:
821 return
821 return
822 b2caps = bundle2.bundle2caps(pushop.remote)
822 b2caps = bundle2.bundle2caps(pushop.remote)
823 hasbookmarkcheck = 'bookmarks' in b2caps
823 hasbookmarkcheck = 'bookmarks' in b2caps
824 if not (pushop.outbookmarks and hasbookmarkcheck):
824 if not (pushop.outbookmarks and hasbookmarkcheck):
825 return
825 return
826 data = []
826 data = []
827 for book, old, new in pushop.outbookmarks:
827 for book, old, new in pushop.outbookmarks:
828 old = bin(old)
828 old = bin(old)
829 data.append((book, old))
829 data.append((book, old))
830 checkdata = bookmod.binaryencode(data)
830 checkdata = bookmod.binaryencode(data)
831 bundler.newpart('check:bookmarks', data=checkdata)
831 bundler.newpart('check:bookmarks', data=checkdata)
832
832
@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

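# Version negotiation above, by example (illustrative values): a server
# advertising changegroup versions {'01', '02'} against a local repo
# supporting both yields version = max(['01', '02']) == '02'; an empty
# advertisement (hg 3.1/3.2 servers) keeps the implicit '01' default.
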
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

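# For reference, each pushkey part generated above carries parameters
# roughly like (node value illustrative):
#   namespace='phases', key='<40-hex node>',
#   old='1' (phases.draft), new='0' (phases.public)
# and handlereply() treats a missing or zero 'return' record as failure.
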
@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

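# _bmaction() at a glance: no old node -> 'export' (new bookmark),
# no new node -> 'delete', both present -> 'update'; both empty is not
# expected for an outgoing bookmark.
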
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

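# Illustration (assumed invocation, not part of this module): a client
# running `hg push --pushvars "DEBUG=1"` arrives here with
# pushop.pushvars = ['DEBUG=1'], producing an advisory 'pushvars' part
# whose DEBUG parameter server-side hooks can consume (typically exposed
# as HG_USERVAR_DEBUG in the hook environment).
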
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

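# Flow note for _pushbundle2(): each part generator may return a
# callable; once the server reply is processed into the bundleoperation
# `op`, every collected handler runs against it to harvest its own
# records (e.g. the changegroup handler sets pushop.cgresult).
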
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

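# Typical use, as in pull() below: instantiate the manager eagerly, let
# transaction() open the real transaction lazily only when something is
# written, and rely on util.transactional's context-manager protocol to
# close() on success or release() on failure:
#   pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
#   with repo.wlock(), repo.lock(), pullop.trmanager:
#       ...
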
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if changegroup.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

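# The revset helpers above, by example (illustrative): if old_heads was
# {A} and a partial reply grew the unfiltered heads to {A, B}, then
# headsofdiff() yields {B}, which headsofunion() folds into
# pullop.common so the next getbundle round only requests what is still
# missing.
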
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requesting to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop

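# Minimal caller sketch (assumes an existing local `repo`; the URL is a
# placeholder):
#   from mercurial import hg, exchange
#   other = hg.peer(repo, {}, 'https://example.com/repo')
#   pullop = exchange.pull(repo, other, heads=None)  # pull everything
#   if pullop.cgresult:
#       repo.ui.status('changegroup pulled\n')
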
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

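# Registration sketch for a hypothetical extension-defined step (names
# assumed, not part of this module):
#   @pulldiscovery('myext:prefetch')
#   def _myextdiscovery(pullop):
#       pullop.repo.ui.debug('myext: ran before changegroup discovery\n')
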
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it ends up doing a pathological amount of round
        # trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

1569 def _pullbundle2(pullop):
1569 def _pullbundle2(pullop):
1570 """pull data using bundle2
1570 """pull data using bundle2
1571
1571
1572 For now, the only supported data are changegroup."""
1572 For now, the only supported data are changegroup."""
1573 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1573 kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}
1574
1574
1575 # make ui easier to access
1575 # make ui easier to access
1576 ui = pullop.repo.ui
1576 ui = pullop.repo.ui
1577
1577
1578 # At the moment we don't do stream clones over bundle2. If that is
1578 # At the moment we don't do stream clones over bundle2. If that is
1579 # implemented then here's where the check for that will go.
1579 # implemented then here's where the check for that will go.
1580 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1580 streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]
1581
1581
1582 # declare pull perimeters
1582 # declare pull perimeters
1583 kwargs['common'] = pullop.common
1583 kwargs['common'] = pullop.common
1584 kwargs['heads'] = pullop.heads or pullop.rheads
1584 kwargs['heads'] = pullop.heads or pullop.rheads
1585
1585
1586 if streaming:
1586 if streaming:
1587 kwargs['cg'] = False
1587 kwargs['cg'] = False
1588 kwargs['stream'] = True
1588 kwargs['stream'] = True
1589 pullop.stepsdone.add('changegroup')
1589 pullop.stepsdone.add('changegroup')
1590 pullop.stepsdone.add('phases')
1590 pullop.stepsdone.add('phases')
1591
1591
1592 else:
1592 else:
1593 # pulling changegroup
1593 # pulling changegroup
1594 pullop.stepsdone.add('changegroup')
1594 pullop.stepsdone.add('changegroup')
1595
1595
1596 kwargs['cg'] = pullop.fetch
1596 kwargs['cg'] = pullop.fetch
1597
1597
1598 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1598 legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
1599 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1599 hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
1600 if (not legacyphase and hasbinaryphase):
1600 if (not legacyphase and hasbinaryphase):
1601 kwargs['phases'] = True
1601 kwargs['phases'] = True
1602 pullop.stepsdone.add('phases')
1602 pullop.stepsdone.add('phases')
1603
1603
1604 if 'listkeys' in pullop.remotebundle2caps:
1604 if 'listkeys' in pullop.remotebundle2caps:
1605 if 'phases' not in pullop.stepsdone:
1605 if 'phases' not in pullop.stepsdone:
1606 kwargs['listkeys'] = ['phases']
1606 kwargs['listkeys'] = ['phases']
1607
1607
1608 bookmarksrequested = False
1608 bookmarksrequested = False
1609 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1609 legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
1610 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1610 hasbinarybook = 'bookmarks' in pullop.remotebundle2caps
1611
1611
1612 if pullop.remotebookmarks is not None:
1612 if pullop.remotebookmarks is not None:
1613 pullop.stepsdone.add('request-bookmarks')
1613 pullop.stepsdone.add('request-bookmarks')
1614
1614
1615 if ('request-bookmarks' not in pullop.stepsdone
1615 if ('request-bookmarks' not in pullop.stepsdone
1616 and pullop.remotebookmarks is None
1616 and pullop.remotebookmarks is None
1617 and not legacybookmark and hasbinarybook):
1617 and not legacybookmark and hasbinarybook):
1618 kwargs['bookmarks'] = True
1618 kwargs['bookmarks'] = True
1619 bookmarksrequested = True
1619 bookmarksrequested = True
1620
1620
1621 if 'listkeys' in pullop.remotebundle2caps:
1621 if 'listkeys' in pullop.remotebundle2caps:
1622 if 'request-bookmarks' not in pullop.stepsdone:
1622 if 'request-bookmarks' not in pullop.stepsdone:
1623 # make sure to always includes bookmark data when migrating
1623 # make sure to always includes bookmark data when migrating
1624 # `hg incoming --bundle` to using this function.
1624 # `hg incoming --bundle` to using this function.
1625 pullop.stepsdone.add('request-bookmarks')
1625 pullop.stepsdone.add('request-bookmarks')
1626 kwargs.setdefault('listkeys', []).append('bookmarks')
1626 kwargs.setdefault('listkeys', []).append('bookmarks')
1627
1627
1628 # If this is a full pull / clone and the server supports the clone bundles
1628 # If this is a full pull / clone and the server supports the clone bundles
1629 # feature, tell the server whether we attempted a clone bundle. The
1629 # feature, tell the server whether we attempted a clone bundle. The
1630 # presence of this flag indicates the client supports clone bundles. This
1630 # presence of this flag indicates the client supports clone bundles. This
1631 # will enable the server to treat clients that support clone bundles
1631 # will enable the server to treat clients that support clone bundles
1632 # differently from those that don't.
1632 # differently from those that don't.
1633 if (pullop.remote.capable('clonebundles')
1633 if (pullop.remote.capable('clonebundles')
1634 and pullop.heads is None and list(pullop.common) == [nullid]):
1634 and pullop.heads is None and list(pullop.common) == [nullid]):
1635 kwargs['cbattempted'] = pullop.clonebundleattempted
1635 kwargs['cbattempted'] = pullop.clonebundleattempted
1636
1636
1637 if streaming:
1637 if streaming:
1638 pullop.repo.ui.status(_('streaming all changes\n'))
1638 pullop.repo.ui.status(_('streaming all changes\n'))
1639 elif not pullop.fetch:
1639 elif not pullop.fetch:
1640 pullop.repo.ui.status(_("no changes found\n"))
1640 pullop.repo.ui.status(_("no changes found\n"))
1641 pullop.cgresult = 0
1641 pullop.cgresult = 0
1642 else:
1642 else:
1643 if pullop.heads is None and list(pullop.common) == [nullid]:
1643 if pullop.heads is None and list(pullop.common) == [nullid]:
1644 pullop.repo.ui.status(_("requesting all changes\n"))
1644 pullop.repo.ui.status(_("requesting all changes\n"))
1645 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1645 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1646 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1646 remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
1647 if obsolete.commonversion(remoteversions) is not None:
1647 if obsolete.commonversion(remoteversions) is not None:
1648 kwargs['obsmarkers'] = True
1648 kwargs['obsmarkers'] = True
1649 pullop.stepsdone.add('obsmarkers')
1649 pullop.stepsdone.add('obsmarkers')
1650 _pullbundle2extraprepare(pullop, kwargs)
1650 _pullbundle2extraprepare(pullop, kwargs)
1651
1651
1652 with pullop.remote.commandexecutor() as e:
1652 with pullop.remote.commandexecutor() as e:
1653 args = dict(kwargs)
1653 args = dict(kwargs)
1654 args['source'] = 'pull'
1654 args['source'] = 'pull'
1655 bundle = e.callcommand('getbundle', args).result()
1655 bundle = e.callcommand('getbundle', args).result()
1656
1656
1657 try:
1657 try:
1658 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1658 op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
1659 source='pull')
1659 source='pull')
1660 op.modes['bookmarks'] = 'records'
1660 op.modes['bookmarks'] = 'records'
1661 bundle2.processbundle(pullop.repo, bundle, op=op)
1661 bundle2.processbundle(pullop.repo, bundle, op=op)
1662 except bundle2.AbortFromPart as exc:
1662 except bundle2.AbortFromPart as exc:
1663 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1663 pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
1664 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1664 raise error.Abort(_('pull failed on remote'), hint=exc.hint)
1665 except error.BundleValueError as exc:
1665 except error.BundleValueError as exc:
1666 raise error.Abort(_('missing support for %s') % exc)
1666 raise error.Abort(_('missing support for %s') % exc)
1667
1667
1668 if pullop.fetch:
1668 if pullop.fetch:
1669 pullop.cgresult = bundle2.combinechangegroupresults(op)
1669 pullop.cgresult = bundle2.combinechangegroupresults(op)
1670
1670
1671 # process phase changes
1671 # process phase changes
1672 for namespace, value in op.records['listkeys']:
1672 for namespace, value in op.records['listkeys']:
1673 if namespace == 'phases':
1673 if namespace == 'phases':
1674 _pullapplyphases(pullop, value)
1674 _pullapplyphases(pullop, value)
1675
1675
1676 # process bookmark updates
1676 # process bookmark updates
1677 if bookmarksrequested:
1677 if bookmarksrequested:
1678 books = {}
1678 books = {}
1679 for record in op.records['bookmarks']:
1679 for record in op.records['bookmarks']:
1680 books[record['bookmark']] = record["node"]
1680 books[record['bookmark']] = record["node"]
1681 pullop.remotebookmarks = books
1681 pullop.remotebookmarks = books
1682 else:
1682 else:
1683 for namespace, value in op.records['listkeys']:
1683 for namespace, value in op.records['listkeys']:
1684 if namespace == 'bookmarks':
1684 if namespace == 'bookmarks':
1685 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1685 pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)
1686
1686
1687 # bookmark data was either already there or pulled in the bundle
1687 # bookmark data was either already there or pulled in the bundle
1688 if pullop.remotebookmarks is not None:
1688 if pullop.remotebookmarks is not None:
1689 _pullbookmarks(pullop)
1689 _pullbookmarks(pullop)
1690
1690
1691 def _pullbundle2extraprepare(pullop, kwargs):
1691 def _pullbundle2extraprepare(pullop, kwargs):
1692 """hook function so that extensions can extend the getbundle call"""
1692 """hook function so that extensions can extend the getbundle call"""
1693
1693
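# A minimal sketch of how an extension might use this hook, assuming the
# standard extensions.wrapfunction() mechanism. The 'example-arg' getbundle
# argument is invented for illustration; a real extension would only add
# arguments the server knows how to handle:
#
#     from mercurial import exchange, extensions
#
#     def _extraprepare(orig, pullop, kwargs):
#         orig(pullop, kwargs)
#         kwargs['example-arg'] = True  # hypothetical extra argument
#
#     def extsetup(ui):
#         extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                                 _extraprepare)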
1694 def _pullchangeset(pullop):
1694 def _pullchangeset(pullop):
1695 """pull changeset from unbundle into the local repo"""
1695 """pull changeset from unbundle into the local repo"""
1696 # We delay opening the transaction as long as possible so we
1696 # We delay opening the transaction as long as possible so we
1697 # don't open a transaction for nothing and don't break a future
1697 # don't open a transaction for nothing and don't break a future
1698 # useful rollback call
1698 # useful rollback call
1699 if 'changegroup' in pullop.stepsdone:
1699 if 'changegroup' in pullop.stepsdone:
1700 return
1700 return
1701 pullop.stepsdone.add('changegroup')
1701 pullop.stepsdone.add('changegroup')
1702 if not pullop.fetch:
1702 if not pullop.fetch:
1703 pullop.repo.ui.status(_("no changes found\n"))
1703 pullop.repo.ui.status(_("no changes found\n"))
1704 pullop.cgresult = 0
1704 pullop.cgresult = 0
1705 return
1705 return
1706 tr = pullop.gettransaction()
1706 tr = pullop.gettransaction()
1707 if pullop.heads is None and list(pullop.common) == [nullid]:
1707 if pullop.heads is None and list(pullop.common) == [nullid]:
1708 pullop.repo.ui.status(_("requesting all changes\n"))
1708 pullop.repo.ui.status(_("requesting all changes\n"))
1709 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1709 elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
1710 # issue1320, avoid a race if remote changed after discovery
1710 # issue1320, avoid a race if remote changed after discovery
1711 pullop.heads = pullop.rheads
1711 pullop.heads = pullop.rheads
1712
1712
1713 if pullop.remote.capable('getbundle'):
1713 if pullop.remote.capable('getbundle'):
1714 # TODO: get bundlecaps from remote
1714 # TODO: get bundlecaps from remote
1715 cg = pullop.remote.getbundle('pull', common=pullop.common,
1715 cg = pullop.remote.getbundle('pull', common=pullop.common,
1716 heads=pullop.heads or pullop.rheads)
1716 heads=pullop.heads or pullop.rheads)
1717 elif pullop.heads is None:
1717 elif pullop.heads is None:
1718 with pullop.remote.commandexecutor() as e:
1718 with pullop.remote.commandexecutor() as e:
1719 cg = e.callcommand('changegroup', {
1719 cg = e.callcommand('changegroup', {
1720 'nodes': pullop.fetch,
1720 'nodes': pullop.fetch,
1721 'source': 'pull',
1721 'source': 'pull',
1722 }).result()
1722 }).result()
1723
1723
1724 elif not pullop.remote.capable('changegroupsubset'):
1724 elif not pullop.remote.capable('changegroupsubset'):
1725 raise error.Abort(_("partial pull cannot be done because "
1725 raise error.Abort(_("partial pull cannot be done because "
1726 "other repository doesn't support "
1726 "other repository doesn't support "
1727 "changegroupsubset."))
1727 "changegroupsubset."))
1728 else:
1728 else:
1729 with pullop.remote.commandexecutor() as e:
1729 with pullop.remote.commandexecutor() as e:
1730 cg = e.callcommand('changegroupsubset', {
1730 cg = e.callcommand('changegroupsubset', {
1731 'bases': pullop.fetch,
1731 'bases': pullop.fetch,
1732 'heads': pullop.heads,
1732 'heads': pullop.heads,
1733 'source': 'pull',
1733 'source': 'pull',
1734 }).result()
1734 }).result()
1735
1735
1736 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1736 bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
1737 pullop.remote.url())
1737 pullop.remote.url())
1738 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1738 pullop.cgresult = bundle2.combinechangegroupresults(bundleop)
1739
1739
1740 def _pullphase(pullop):
1740 def _pullphase(pullop):
1741 # Get remote phases data from remote
1741 # Get remote phases data from remote
1742 if 'phases' in pullop.stepsdone:
1742 if 'phases' in pullop.stepsdone:
1743 return
1743 return
1744 remotephases = pullop.remote.listkeys('phases')
1744 remotephases = pullop.remote.listkeys('phases')
1745 _pullapplyphases(pullop, remotephases)
1745 _pullapplyphases(pullop, remotephases)
1746
1746
1747 def _pullapplyphases(pullop, remotephases):
1747 def _pullapplyphases(pullop, remotephases):
1748 """apply phase movement from observed remote state"""
1748 """apply phase movement from observed remote state"""
1749 if 'phases' in pullop.stepsdone:
1749 if 'phases' in pullop.stepsdone:
1750 return
1750 return
1751 pullop.stepsdone.add('phases')
1751 pullop.stepsdone.add('phases')
1752 publishing = bool(remotephases.get('publishing', False))
1752 publishing = bool(remotephases.get('publishing', False))
1753 if remotephases and not publishing:
1753 if remotephases and not publishing:
1754 # remote is new and non-publishing
1754 # remote is new and non-publishing
1755 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1755 pheads, _dr = phases.analyzeremotephases(pullop.repo,
1756 pullop.pulledsubset,
1756 pullop.pulledsubset,
1757 remotephases)
1757 remotephases)
1758 dheads = pullop.pulledsubset
1758 dheads = pullop.pulledsubset
1759 else:
1759 else:
1760 # Remote is old or publishing all common changesets
1760 # Remote is old or publishing all common changesets
1761 # should be seen as public
1761 # should be seen as public
1762 pheads = pullop.pulledsubset
1762 pheads = pullop.pulledsubset
1763 dheads = []
1763 dheads = []
1764 unfi = pullop.repo.unfiltered()
1764 unfi = pullop.repo.unfiltered()
1765 phase = unfi._phasecache.phase
1765 phase = unfi._phasecache.phase
1766 rev = unfi.changelog.nodemap.get
1766 rev = unfi.changelog.nodemap.get
1767 public = phases.public
1767 public = phases.public
1768 draft = phases.draft
1768 draft = phases.draft
1769
1769
1770 # exclude changesets already public locally and update the others
1770 # exclude changesets already public locally and update the others
1771 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1771 pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
1772 if pheads:
1772 if pheads:
1773 tr = pullop.gettransaction()
1773 tr = pullop.gettransaction()
1774 phases.advanceboundary(pullop.repo, tr, public, pheads)
1774 phases.advanceboundary(pullop.repo, tr, public, pheads)
1775
1775
1776 # exclude changesets already draft locally and update the others
1776 # exclude changesets already draft locally and update the others
1777 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1777 dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
1778 if dheads:
1778 if dheads:
1779 tr = pullop.gettransaction()
1779 tr = pullop.gettransaction()
1780 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1780 phases.advanceboundary(pullop.repo, tr, draft, dheads)
1781
1781
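# For reference, the remotephases mapping consumed above is the 'phases'
# pushkey namespace: hex node -> phase number for the remote draft roots,
# plus an optional 'publishing' flag. Illustrative shapes (values invented,
# node truncated):
#
#     {'publishing': 'True'}            # publishing server
#     {'6a3df4de388f...': '1'}          # non-publishing server: a draft root
#
# which is why the else branch treats every pulled changeset as public.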
1782 def _pullbookmarks(pullop):
1782 def _pullbookmarks(pullop):
1783 """process the remote bookmark information to update the local one"""
1783 """process the remote bookmark information to update the local one"""
1784 if 'bookmarks' in pullop.stepsdone:
1784 if 'bookmarks' in pullop.stepsdone:
1785 return
1785 return
1786 pullop.stepsdone.add('bookmarks')
1786 pullop.stepsdone.add('bookmarks')
1787 repo = pullop.repo
1787 repo = pullop.repo
1788 remotebookmarks = pullop.remotebookmarks
1788 remotebookmarks = pullop.remotebookmarks
1789 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1789 bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
1790 pullop.remote.url(),
1790 pullop.remote.url(),
1791 pullop.gettransaction,
1791 pullop.gettransaction,
1792 explicit=pullop.explicitbookmarks)
1792 explicit=pullop.explicitbookmarks)
1793
1793
1794 def _pullobsolete(pullop):
1794 def _pullobsolete(pullop):
1795 """utility function to pull obsolete markers from a remote
1795 """utility function to pull obsolete markers from a remote
1796
1796
1797 The `gettransaction` argument is a function that returns the pull transaction,
1797 The `gettransaction` argument is a function that returns the pull transaction,
1798 creating one if necessary. We return the transaction to inform the calling
1798 creating one if necessary. We return the transaction to inform the calling
1799 code that a new transaction has been created (when applicable).
1799 code that a new transaction has been created (when applicable).
1800 
1800 
1801 Exists mostly to allow overriding for experimentation purposes"""
1801 Exists mostly to allow overriding for experimentation purposes"""
1802 if 'obsmarkers' in pullop.stepsdone:
1802 if 'obsmarkers' in pullop.stepsdone:
1803 return
1803 return
1804 pullop.stepsdone.add('obsmarkers')
1804 pullop.stepsdone.add('obsmarkers')
1805 tr = None
1805 tr = None
1806 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1806 if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
1807 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1807 pullop.repo.ui.debug('fetching remote obsolete markers\n')
1808 remoteobs = pullop.remote.listkeys('obsolete')
1808 remoteobs = pullop.remote.listkeys('obsolete')
1809 if 'dump0' in remoteobs:
1809 if 'dump0' in remoteobs:
1810 tr = pullop.gettransaction()
1810 tr = pullop.gettransaction()
1811 markers = []
1811 markers = []
1812 for key in sorted(remoteobs, reverse=True):
1812 for key in sorted(remoteobs, reverse=True):
1813 if key.startswith('dump'):
1813 if key.startswith('dump'):
1814 data = util.b85decode(remoteobs[key])
1814 data = util.b85decode(remoteobs[key])
1815 version, newmarks = obsolete._readmarkers(data)
1815 version, newmarks = obsolete._readmarkers(data)
1816 markers += newmarks
1816 markers += newmarks
1817 if markers:
1817 if markers:
1818 pullop.repo.obsstore.add(tr, markers)
1818 pullop.repo.obsstore.add(tr, markers)
1819 pullop.repo.invalidatevolatilesets()
1819 pullop.repo.invalidatevolatilesets()
1820 return tr
1820 return tr
1821
1821
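# A sketch of the 'obsolete' pushkey namespace handled above, under the
# assumption (matching the loop) that markers arrive base85-encoded under
# keys named 'dump0', 'dump1', and so on:
#
#     data = util.b85decode(remoteobs['dump0'])
#     version, markers = obsolete._readmarkers(data)
#     # 'markers' can then be fed to repo.obsstore.add(tr, markers)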
1822 def caps20to10(repo, role):
1822 def caps20to10(repo, role):
1823 """return a set with appropriate options to use bundle20 during getbundle"""
1823 """return a set with appropriate options to use bundle20 during getbundle"""
1824 caps = {'HG20'}
1824 caps = {'HG20'}
1825 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1825 capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
1826 caps.add('bundle2=' + urlreq.quote(capsblob))
1826 caps.add('bundle2=' + urlreq.quote(capsblob))
1827 return caps
1827 return caps
1828
1828
1829 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1829 # List of names of steps to perform for a bundle2 for getbundle, order matters.
1830 getbundle2partsorder = []
1830 getbundle2partsorder = []
1831
1831
1832 # Mapping between step name and function
1832 # Mapping between step name and function
1833 #
1833 #
1834 # This exists to help extensions wrap steps if necessary
1834 # This exists to help extensions wrap steps if necessary
1835 getbundle2partsmapping = {}
1835 getbundle2partsmapping = {}
1836
1836
1837 def getbundle2partsgenerator(stepname, idx=None):
1837 def getbundle2partsgenerator(stepname, idx=None):
1838 """decorator for function generating bundle2 part for getbundle
1838 """decorator for function generating bundle2 part for getbundle
1839
1839
1840 The function is added to the step -> function mapping and appended to the
1840 The function is added to the step -> function mapping and appended to the
1841 list of steps. Beware that decorated functions will be added in order
1841 list of steps. Beware that decorated functions will be added in order
1842 (this may matter).
1842 (this may matter).
1843
1843
1844 You can only use this decorator for new steps; if you want to wrap a step
1844 You can only use this decorator for new steps; if you want to wrap a step
1845 from an extension, modify the getbundle2partsmapping dictionary directly."""
1845 from an extension, modify the getbundle2partsmapping dictionary directly."""
1846 def dec(func):
1846 def dec(func):
1847 assert stepname not in getbundle2partsmapping
1847 assert stepname not in getbundle2partsmapping
1848 getbundle2partsmapping[stepname] = func
1848 getbundle2partsmapping[stepname] = func
1849 if idx is None:
1849 if idx is None:
1850 getbundle2partsorder.append(stepname)
1850 getbundle2partsorder.append(stepname)
1851 else:
1851 else:
1852 getbundle2partsorder.insert(idx, stepname)
1852 getbundle2partsorder.insert(idx, stepname)
1853 return func
1853 return func
1854 return dec
1854 return dec
1855
1855
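# A hedged sketch of registering a new part generator with the decorator
# above. The part name 'myext:example' and its payload are hypothetical,
# and a real extension must also register a handler for the part on the
# receiving side (see bundle2.parthandler):
#
#     @getbundle2partsgenerator('myext:example')
#     def _getbundleexamplepart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         if 'myext:example' in b2caps:
#             bundler.newpart('myext:example', data='payload',
#                             mandatory=False)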
1856 def bundle2requested(bundlecaps):
1856 def bundle2requested(bundlecaps):
1857 if bundlecaps is not None:
1857 if bundlecaps is not None:
1858 return any(cap.startswith('HG2') for cap in bundlecaps)
1858 return any(cap.startswith('HG2') for cap in bundlecaps)
1859 return False
1859 return False
1860
1860
1861 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1861 def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
1862 **kwargs):
1862 **kwargs):
1863 """Return chunks constituting a bundle's raw data.
1863 """Return chunks constituting a bundle's raw data.
1864
1864
1865 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1865 Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
1866 passed.
1866 passed.
1867
1867
1868 Returns a 2-tuple of a dict with metadata about the generated bundle
1868 Returns a 2-tuple of a dict with metadata about the generated bundle
1869 and an iterator over raw chunks (of varying sizes).
1869 and an iterator over raw chunks (of varying sizes).
1870 """
1870 """
1871 kwargs = pycompat.byteskwargs(kwargs)
1871 kwargs = pycompat.byteskwargs(kwargs)
1872 info = {}
1872 info = {}
1873 usebundle2 = bundle2requested(bundlecaps)
1873 usebundle2 = bundle2requested(bundlecaps)
1874 # bundle10 case
1874 # bundle10 case
1875 if not usebundle2:
1875 if not usebundle2:
1876 if bundlecaps and not kwargs.get('cg', True):
1876 if bundlecaps and not kwargs.get('cg', True):
1877 raise ValueError(_('request for bundle10 must include changegroup'))
1877 raise ValueError(_('request for bundle10 must include changegroup'))
1878
1878
1879 if kwargs:
1879 if kwargs:
1880 raise ValueError(_('unsupported getbundle arguments: %s')
1880 raise ValueError(_('unsupported getbundle arguments: %s')
1881 % ', '.join(sorted(kwargs.keys())))
1881 % ', '.join(sorted(kwargs.keys())))
1882 outgoing = _computeoutgoing(repo, heads, common)
1882 outgoing = _computeoutgoing(repo, heads, common)
1883 info['bundleversion'] = 1
1883 info['bundleversion'] = 1
1884 return info, changegroup.makestream(repo, outgoing, '01', source,
1884 return info, changegroup.makestream(repo, outgoing, '01', source,
1885 bundlecaps=bundlecaps)
1885 bundlecaps=bundlecaps)
1886
1886
1887 # bundle20 case
1887 # bundle20 case
1888 info['bundleversion'] = 2
1888 info['bundleversion'] = 2
1889 b2caps = {}
1889 b2caps = {}
1890 for bcaps in bundlecaps:
1890 for bcaps in bundlecaps:
1891 if bcaps.startswith('bundle2='):
1891 if bcaps.startswith('bundle2='):
1892 blob = urlreq.unquote(bcaps[len('bundle2='):])
1892 blob = urlreq.unquote(bcaps[len('bundle2='):])
1893 b2caps.update(bundle2.decodecaps(blob))
1893 b2caps.update(bundle2.decodecaps(blob))
1894 bundler = bundle2.bundle20(repo.ui, b2caps)
1894 bundler = bundle2.bundle20(repo.ui, b2caps)
1895
1895
1896 kwargs['heads'] = heads
1896 kwargs['heads'] = heads
1897 kwargs['common'] = common
1897 kwargs['common'] = common
1898
1898
1899 for name in getbundle2partsorder:
1899 for name in getbundle2partsorder:
1900 func = getbundle2partsmapping[name]
1900 func = getbundle2partsmapping[name]
1901 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1901 func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
1902 **pycompat.strkwargs(kwargs))
1902 **pycompat.strkwargs(kwargs))
1903
1903
1904 info['prefercompressed'] = bundler.prefercompressed
1904 info['prefercompressed'] = bundler.prefercompressed
1905
1905
1906 return info, bundler.getchunks()
1906 return info, bundler.getchunks()
1907
1907
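# A sketch of consuming the return value documented above, assuming a
# caller that wants the raw bundle in one buffer (real callers stream the
# chunks rather than joining them):
#
#     info, gen = getbundlechunks(repo, 'serve', heads=heads,
#                                 common=common, bundlecaps=bundlecaps)
#     raw = ''.join(gen)  # info['bundleversion'] is 1 or 2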
1908 @getbundle2partsgenerator('stream2')
1908 @getbundle2partsgenerator('stream2')
1909 def _getbundlestream2(bundler, repo, *args, **kwargs):
1909 def _getbundlestream2(bundler, repo, *args, **kwargs):
1910 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1910 return bundle2.addpartbundlestream2(bundler, repo, **kwargs)
1911
1911
1912 @getbundle2partsgenerator('changegroup')
1912 @getbundle2partsgenerator('changegroup')
1913 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1913 def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
1914 b2caps=None, heads=None, common=None, **kwargs):
1914 b2caps=None, heads=None, common=None, **kwargs):
1915 """add a changegroup part to the requested bundle"""
1915 """add a changegroup part to the requested bundle"""
1916 cgstream = None
1916 cgstream = None
1917 if kwargs.get(r'cg', True):
1917 if kwargs.get(r'cg', True):
1918 # build changegroup bundle here.
1918 # build changegroup bundle here.
1919 version = '01'
1919 version = '01'
1920 cgversions = b2caps.get('changegroup')
1920 cgversions = b2caps.get('changegroup')
1921 if cgversions: # 3.1 and 3.2 ship with an empty value
1921 if cgversions: # 3.1 and 3.2 ship with an empty value
1922 cgversions = [v for v in cgversions
1922 cgversions = [v for v in cgversions
1923 if v in changegroup.supportedoutgoingversions(repo)]
1923 if v in changegroup.supportedoutgoingversions(repo)]
1924 if not cgversions:
1924 if not cgversions:
1925 raise ValueError(_('no common changegroup version'))
1925 raise ValueError(_('no common changegroup version'))
1926 version = max(cgversions)
1926 version = max(cgversions)
1927 outgoing = _computeoutgoing(repo, heads, common)
1927 outgoing = _computeoutgoing(repo, heads, common)
1928 if outgoing.missing:
1928 if outgoing.missing:
1929 cgstream = changegroup.makestream(repo, outgoing, version, source,
1929 cgstream = changegroup.makestream(repo, outgoing, version, source,
1930 bundlecaps=bundlecaps)
1930 bundlecaps=bundlecaps)
1931
1931
1932 if cgstream:
1932 if cgstream:
1933 part = bundler.newpart('changegroup', data=cgstream)
1933 part = bundler.newpart('changegroup', data=cgstream)
1934 if cgversions:
1934 if cgversions:
1935 part.addparam('version', version)
1935 part.addparam('version', version)
1936 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1936 part.addparam('nbchanges', '%d' % len(outgoing.missing),
1937 mandatory=False)
1937 mandatory=False)
1938 if 'treemanifest' in repo.requirements:
1938 if 'treemanifest' in repo.requirements:
1939 part.addparam('treemanifest', '1')
1939 part.addparam('treemanifest', '1')
1940
1940
1941 @getbundle2partsgenerator('bookmarks')
1941 @getbundle2partsgenerator('bookmarks')
1942 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1942 def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
1943 b2caps=None, **kwargs):
1943 b2caps=None, **kwargs):
1944 """add a bookmark part to the requested bundle"""
1944 """add a bookmark part to the requested bundle"""
1945 if not kwargs.get(r'bookmarks', False):
1945 if not kwargs.get(r'bookmarks', False):
1946 return
1946 return
1947 if 'bookmarks' not in b2caps:
1947 if 'bookmarks' not in b2caps:
1948 raise ValueError(_('no common bookmarks exchange method'))
1948 raise ValueError(_('no common bookmarks exchange method'))
1949 books = bookmod.listbinbookmarks(repo)
1949 books = bookmod.listbinbookmarks(repo)
1950 data = bookmod.binaryencode(books)
1950 data = bookmod.binaryencode(books)
1951 if data:
1951 if data:
1952 bundler.newpart('bookmarks', data=data)
1952 bundler.newpart('bookmarks', data=data)
1953
1953
1954 @getbundle2partsgenerator('listkeys')
1954 @getbundle2partsgenerator('listkeys')
1955 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1955 def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
1956 b2caps=None, **kwargs):
1956 b2caps=None, **kwargs):
1957 """add parts containing listkeys namespaces to the requested bundle"""
1957 """add parts containing listkeys namespaces to the requested bundle"""
1958 listkeys = kwargs.get(r'listkeys', ())
1958 listkeys = kwargs.get(r'listkeys', ())
1959 for namespace in listkeys:
1959 for namespace in listkeys:
1960 part = bundler.newpart('listkeys')
1960 part = bundler.newpart('listkeys')
1961 part.addparam('namespace', namespace)
1961 part.addparam('namespace', namespace)
1962 keys = repo.listkeys(namespace).items()
1962 keys = repo.listkeys(namespace).items()
1963 part.data = pushkey.encodekeys(keys)
1963 part.data = pushkey.encodekeys(keys)
1964
1964
1965 @getbundle2partsgenerator('obsmarkers')
1965 @getbundle2partsgenerator('obsmarkers')
1966 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1966 def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
1967 b2caps=None, heads=None, **kwargs):
1967 b2caps=None, heads=None, **kwargs):
1968 """add an obsolescence markers part to the requested bundle"""
1968 """add an obsolescence markers part to the requested bundle"""
1969 if kwargs.get(r'obsmarkers', False):
1969 if kwargs.get(r'obsmarkers', False):
1970 if heads is None:
1970 if heads is None:
1971 heads = repo.heads()
1971 heads = repo.heads()
1972 subset = [c.node() for c in repo.set('::%ln', heads)]
1972 subset = [c.node() for c in repo.set('::%ln', heads)]
1973 markers = repo.obsstore.relevantmarkers(subset)
1973 markers = repo.obsstore.relevantmarkers(subset)
1974 markers = sorted(markers)
1974 markers = sorted(markers)
1975 bundle2.buildobsmarkerspart(bundler, markers)
1975 bundle2.buildobsmarkerspart(bundler, markers)
1976
1976
1977 @getbundle2partsgenerator('phases')
1977 @getbundle2partsgenerator('phases')
1978 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1978 def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
1979 b2caps=None, heads=None, **kwargs):
1979 b2caps=None, heads=None, **kwargs):
1980 """add phase heads part to the requested bundle"""
1980 """add phase heads part to the requested bundle"""
1981 if kwargs.get(r'phases', False):
1981 if kwargs.get(r'phases', False):
1982 if 'heads' not in b2caps.get('phases'):
1982 if 'heads' not in b2caps.get('phases'):
1983 raise ValueError(_('no common phases exchange method'))
1983 raise ValueError(_('no common phases exchange method'))
1984 if heads is None:
1984 if heads is None:
1985 heads = repo.heads()
1985 heads = repo.heads()
1986
1986
1987 headsbyphase = collections.defaultdict(set)
1987 headsbyphase = collections.defaultdict(set)
1988 if repo.publishing():
1988 if repo.publishing():
1989 headsbyphase[phases.public] = heads
1989 headsbyphase[phases.public] = heads
1990 else:
1990 else:
1991 # find the appropriate heads to move
1991 # find the appropriate heads to move
1992
1992
1993 phase = repo._phasecache.phase
1993 phase = repo._phasecache.phase
1994 node = repo.changelog.node
1994 node = repo.changelog.node
1995 rev = repo.changelog.rev
1995 rev = repo.changelog.rev
1996 for h in heads:
1996 for h in heads:
1997 headsbyphase[phase(repo, rev(h))].add(h)
1997 headsbyphase[phase(repo, rev(h))].add(h)
1998 seenphases = list(headsbyphase.keys())
1998 seenphases = list(headsbyphase.keys())
1999
1999
2000 # We do not handle anything but public and draft phases for now
2000 # We do not handle anything but public and draft phases for now
2001 if seenphases:
2001 if seenphases:
2002 assert max(seenphases) <= phases.draft
2002 assert max(seenphases) <= phases.draft
2003
2003
2004 # if client is pulling non-public changesets, we need to find
2004 # if client is pulling non-public changesets, we need to find
2005 # intermediate public heads.
2005 # intermediate public heads.
2006 draftheads = headsbyphase.get(phases.draft, set())
2006 draftheads = headsbyphase.get(phases.draft, set())
2007 if draftheads:
2007 if draftheads:
2008 publicheads = headsbyphase.get(phases.public, set())
2008 publicheads = headsbyphase.get(phases.public, set())
2009
2009
2010 revset = 'heads(only(%ln, %ln) and public())'
2010 revset = 'heads(only(%ln, %ln) and public())'
2011 extraheads = repo.revs(revset, draftheads, publicheads)
2011 extraheads = repo.revs(revset, draftheads, publicheads)
2012 for r in extraheads:
2012 for r in extraheads:
2013 headsbyphase[phases.public].add(node(r))
2013 headsbyphase[phases.public].add(node(r))
2014
2014
2015 # transform data in a format used by the encoding function
2015 # transform data in a format used by the encoding function
2016 phasemapping = []
2016 phasemapping = []
2017 for phase in phases.allphases:
2017 for phase in phases.allphases:
2018 phasemapping.append(sorted(headsbyphase[phase]))
2018 phasemapping.append(sorted(headsbyphase[phase]))
2019
2019
2020 # generate the actual part
2020 # generate the actual part
2021 phasedata = phases.binaryencode(phasemapping)
2021 phasedata = phases.binaryencode(phasemapping)
2022 bundler.newpart('phase-heads', data=phasedata)
2022 bundler.newpart('phase-heads', data=phasedata)
2023
2023
2024 @getbundle2partsgenerator('hgtagsfnodes')
2024 @getbundle2partsgenerator('hgtagsfnodes')
2025 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2025 def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
2026 b2caps=None, heads=None, common=None,
2026 b2caps=None, heads=None, common=None,
2027 **kwargs):
2027 **kwargs):
2028 """Transfer the .hgtags filenodes mapping.
2028 """Transfer the .hgtags filenodes mapping.
2029
2029
2030 Only values for heads in this bundle will be transferred.
2030 Only values for heads in this bundle will be transferred.
2031
2031
2032 The part data consists of pairs of a 20-byte changeset node and the raw
2032 The part data consists of pairs of a 20-byte changeset node and the raw
2033 .hgtags filenode value.
2033 .hgtags filenode value.
2034 """
2034 """
2035 # Don't send unless:
2035 # Don't send unless:
2036 # - changesets are being exchanged,
2036 # - changesets are being exchanged,
2037 # - the client supports it.
2037 # - the client supports it.
2038 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2038 if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
2039 return
2039 return
2040
2040
2041 outgoing = _computeoutgoing(repo, heads, common)
2041 outgoing = _computeoutgoing(repo, heads, common)
2042 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2042 bundle2.addparttagsfnodescache(repo, bundler, outgoing)
2043
2043
2044 @getbundle2partsgenerator('cache:rev-branch-cache')
2044 @getbundle2partsgenerator('cache:rev-branch-cache')
2045 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2045 def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
2046 b2caps=None, heads=None, common=None,
2046 b2caps=None, heads=None, common=None,
2047 **kwargs):
2047 **kwargs):
2048 """Transfer the rev-branch-cache mapping
2048 """Transfer the rev-branch-cache mapping
2049
2049
2050 The payload is a series of records, one per branch:
2050 The payload is a series of records, one per branch:
2051
2051
2052 1) branch name length
2052 1) branch name length
2053 2) number of open heads
2053 2) number of open heads
2054 3) number of closed heads
2054 3) number of closed heads
2055 4) open heads nodes
2055 4) open heads nodes
2056 5) closed heads nodes
2056 5) closed heads nodes
2057 """
2057 """
2058 # Don't send unless:
2058 # Don't send unless:
2059 # - changesets are being exchanged,
2059 # - changesets are being exchanged,
2060 # - the client supports it.
2060 # - the client supports it.
2061 if not kwargs.get(r'cg', True) or 'rev-branch-cache' not in b2caps:
2061 if not kwargs.get(r'cg', True) or 'rev-branch-cache' not in b2caps:
2062 return
2062 return
2063 outgoing = _computeoutgoing(repo, heads, common)
2063 outgoing = _computeoutgoing(repo, heads, common)
2064 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2064 bundle2.addpartrevbranchcache(repo, bundler, outgoing)
2065
2065
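# A self-contained sketch of decoding the payload described above,
# assuming (as bundle2.addpartrevbranchcache does at the time of writing)
# a big-endian '>III' record header and 20-byte binary nodes:
#
#     import struct
#
#     def decoderevbranchcache(data):
#         offset = 0
#         while offset < len(data):
#             namelen, nopen, nclosed = struct.unpack_from(
#                 '>III', data, offset)
#             offset += 12
#             name = data[offset:offset + namelen]
#             offset += namelen
#             nodes = [data[offset + 20 * i:offset + 20 * (i + 1)]
#                      for i in range(nopen + nclosed)]
#             offset += 20 * (nopen + nclosed)
#             yield name, nodes[:nopen], nodes[nopen:]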
2066 def check_heads(repo, their_heads, context):
2066 def check_heads(repo, their_heads, context):
2067 """check if the heads of a repo have been modified
2067 """check if the heads of a repo have been modified
2068
2068
2069 Used by peer for unbundling.
2069 Used by peer for unbundling.
2070 """
2070 """
2071 heads = repo.heads()
2071 heads = repo.heads()
2072 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2072 heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
2073 if not (their_heads == ['force'] or their_heads == heads or
2073 if not (their_heads == ['force'] or their_heads == heads or
2074 their_heads == ['hashed', heads_hash]):
2074 their_heads == ['hashed', heads_hash]):
2075 # someone else committed/pushed/unbundled while we
2075 # someone else committed/pushed/unbundled while we
2076 # were transferring data
2076 # were transferring data
2077 raise error.PushRaced('repository changed while %s - '
2077 raise error.PushRaced('repository changed while %s - '
2078 'please try again' % context)
2078 'please try again' % context)
2079
2079
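# A sketch of how a client computes the hashed form checked above; it
# mirrors the digest computed in check_heads(), with 'local_heads'
# standing in for whatever head list the client observed:
#
#     import hashlib
#
#     heads_hash = hashlib.sha1(''.join(sorted(local_heads))).digest()
#     their_heads = ['hashed', heads_hash]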
2080 def unbundle(repo, cg, heads, source, url):
2080 def unbundle(repo, cg, heads, source, url):
2081 """Apply a bundle to a repo.
2081 """Apply a bundle to a repo.
2082
2082
2083 This function makes sure the repo is locked during the application and has a
2083 This function makes sure the repo is locked during the application and has a
2084 mechanism to check that no push race occurred between the creation of the
2084 mechanism to check that no push race occurred between the creation of the
2085 bundle and its application.
2085 bundle and its application.
2086 
2086 
2087 If the push was raced, a PushRaced exception is raised."""
2087 If the push was raced, a PushRaced exception is raised."""
2088 r = 0
2088 r = 0
2089 # need a transaction when processing a bundle2 stream
2089 # need a transaction when processing a bundle2 stream
2090 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2090 # [wlock, lock, tr] - needs to be an array so nested functions can modify it
2091 lockandtr = [None, None, None]
2091 lockandtr = [None, None, None]
2092 recordout = None
2092 recordout = None
2093 # quick fix for output mismatch with bundle2 in 3.4
2093 # quick fix for output mismatch with bundle2 in 3.4
2094 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2094 captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
2095 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2095 if url.startswith('remote:http:') or url.startswith('remote:https:'):
2096 captureoutput = True
2096 captureoutput = True
2097 try:
2097 try:
2098 # note: outside bundle1, 'heads' is expected to be empty and this
2098 # note: outside bundle1, 'heads' is expected to be empty and this
2099 # 'check_heads' call will be a no-op
2099 # 'check_heads' call will be a no-op
2100 check_heads(repo, heads, 'uploading changes')
2100 check_heads(repo, heads, 'uploading changes')
2101 # push can proceed
2101 # push can proceed
2102 if not isinstance(cg, bundle2.unbundle20):
2102 if not isinstance(cg, bundle2.unbundle20):
2103 # legacy case: bundle1 (changegroup 01)
2103 # legacy case: bundle1 (changegroup 01)
2104 txnname = "\n".join([source, util.hidepassword(url)])
2104 txnname = "\n".join([source, util.hidepassword(url)])
2105 with repo.lock(), repo.transaction(txnname) as tr:
2105 with repo.lock(), repo.transaction(txnname) as tr:
2106 op = bundle2.applybundle(repo, cg, tr, source, url)
2106 op = bundle2.applybundle(repo, cg, tr, source, url)
2107 r = bundle2.combinechangegroupresults(op)
2107 r = bundle2.combinechangegroupresults(op)
2108 else:
2108 else:
2109 r = None
2109 r = None
2110 try:
2110 try:
2111 def gettransaction():
2111 def gettransaction():
2112 if not lockandtr[2]:
2112 if not lockandtr[2]:
2113 lockandtr[0] = repo.wlock()
2113 lockandtr[0] = repo.wlock()
2114 lockandtr[1] = repo.lock()
2114 lockandtr[1] = repo.lock()
2115 lockandtr[2] = repo.transaction(source)
2115 lockandtr[2] = repo.transaction(source)
2116 lockandtr[2].hookargs['source'] = source
2116 lockandtr[2].hookargs['source'] = source
2117 lockandtr[2].hookargs['url'] = url
2117 lockandtr[2].hookargs['url'] = url
2118 lockandtr[2].hookargs['bundle2'] = '1'
2118 lockandtr[2].hookargs['bundle2'] = '1'
2119 return lockandtr[2]
2119 return lockandtr[2]
2120
2120
2121 # Do greedy locking by default until we're satisfied with lazy
2121 # Do greedy locking by default until we're satisfied with lazy
2122 # locking.
2122 # locking.
2123 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2123 if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
2124 gettransaction()
2124 gettransaction()
2125
2125
2126 op = bundle2.bundleoperation(repo, gettransaction,
2126 op = bundle2.bundleoperation(repo, gettransaction,
2127 captureoutput=captureoutput,
2127 captureoutput=captureoutput,
2128 source='push')
2128 source='push')
2129 try:
2129 try:
2130 op = bundle2.processbundle(repo, cg, op=op)
2130 op = bundle2.processbundle(repo, cg, op=op)
2131 finally:
2131 finally:
2132 r = op.reply
2132 r = op.reply
2133 if captureoutput and r is not None:
2133 if captureoutput and r is not None:
2134 repo.ui.pushbuffer(error=True, subproc=True)
2134 repo.ui.pushbuffer(error=True, subproc=True)
2135 def recordout(output):
2135 def recordout(output):
2136 r.newpart('output', data=output, mandatory=False)
2136 r.newpart('output', data=output, mandatory=False)
2137 if lockandtr[2] is not None:
2137 if lockandtr[2] is not None:
2138 lockandtr[2].close()
2138 lockandtr[2].close()
2139 except BaseException as exc:
2139 except BaseException as exc:
2140 exc.duringunbundle2 = True
2140 exc.duringunbundle2 = True
2141 if captureoutput and r is not None:
2141 if captureoutput and r is not None:
2142 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2142 parts = exc._bundle2salvagedoutput = r.salvageoutput()
2143 def recordout(output):
2143 def recordout(output):
2144 part = bundle2.bundlepart('output', data=output,
2144 part = bundle2.bundlepart('output', data=output,
2145 mandatory=False)
2145 mandatory=False)
2146 parts.append(part)
2146 parts.append(part)
2147 raise
2147 raise
2148 finally:
2148 finally:
2149 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2149 lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
2150 if recordout is not None:
2150 if recordout is not None:
2151 recordout(repo.ui.popbuffer())
2151 recordout(repo.ui.popbuffer())
2152 return r
2152 return r
2153
2153
2154 def _maybeapplyclonebundle(pullop):
2154 def _maybeapplyclonebundle(pullop):
2155 """Apply a clone bundle from a remote, if possible."""
2155 """Apply a clone bundle from a remote, if possible."""
2156
2156
2157 repo = pullop.repo
2157 repo = pullop.repo
2158 remote = pullop.remote
2158 remote = pullop.remote
2159
2159
2160 if not repo.ui.configbool('ui', 'clonebundles'):
2160 if not repo.ui.configbool('ui', 'clonebundles'):
2161 return
2161 return
2162
2162
2163 # Only run if local repo is empty.
2163 # Only run if local repo is empty.
2164 if len(repo):
2164 if len(repo):
2165 return
2165 return
2166
2166
2167 if pullop.heads:
2167 if pullop.heads:
2168 return
2168 return
2169
2169
2170 if not remote.capable('clonebundles'):
2170 if not remote.capable('clonebundles'):
2171 return
2171 return
2172
2172
2173 res = remote._call('clonebundles')
2173 with remote.commandexecutor() as e:
2174 res = e.callcommand('clonebundles', {}).result()
2174
2175
2175 # If we call the wire protocol command, that's good enough to record the
2176 # If we call the wire protocol command, that's good enough to record the
2176 # attempt.
2177 # attempt.
2177 pullop.clonebundleattempted = True
2178 pullop.clonebundleattempted = True
2178
2179
2179 entries = parseclonebundlesmanifest(repo, res)
2180 entries = parseclonebundlesmanifest(repo, res)
2180 if not entries:
2181 if not entries:
2181 repo.ui.note(_('no clone bundles available on remote; '
2182 repo.ui.note(_('no clone bundles available on remote; '
2182 'falling back to regular clone\n'))
2183 'falling back to regular clone\n'))
2183 return
2184 return
2184
2185
2185 entries = filterclonebundleentries(
2186 entries = filterclonebundleentries(
2186 repo, entries, streamclonerequested=pullop.streamclonerequested)
2187 repo, entries, streamclonerequested=pullop.streamclonerequested)
2187
2188
2188 if not entries:
2189 if not entries:
2189 # There is a thundering herd concern here. However, if a server
2190 # There is a thundering herd concern here. However, if a server
2190 # operator doesn't advertise bundles appropriate for its clients,
2191 # operator doesn't advertise bundles appropriate for its clients,
2191 # they deserve what's coming. Furthermore, from a client's
2192 # they deserve what's coming. Furthermore, from a client's
2192 # perspective, no automatic fallback would mean not being able to
2193 # perspective, no automatic fallback would mean not being able to
2193 # clone!
2194 # clone!
2194 repo.ui.warn(_('no compatible clone bundles available on server; '
2195 repo.ui.warn(_('no compatible clone bundles available on server; '
2195 'falling back to regular clone\n'))
2196 'falling back to regular clone\n'))
2196 repo.ui.warn(_('(you may want to report this to the server '
2197 repo.ui.warn(_('(you may want to report this to the server '
2197 'operator)\n'))
2198 'operator)\n'))
2198 return
2199 return
2199
2200
2200 entries = sortclonebundleentries(repo.ui, entries)
2201 entries = sortclonebundleentries(repo.ui, entries)
2201
2202
2202 url = entries[0]['URL']
2203 url = entries[0]['URL']
2203 repo.ui.status(_('applying clone bundle from %s\n') % url)
2204 repo.ui.status(_('applying clone bundle from %s\n') % url)
2204 if trypullbundlefromurl(repo.ui, repo, url):
2205 if trypullbundlefromurl(repo.ui, repo, url):
2205 repo.ui.status(_('finished applying clone bundle\n'))
2206 repo.ui.status(_('finished applying clone bundle\n'))
2206 # Bundle failed.
2207 # Bundle failed.
2207 #
2208 #
2208 # We abort by default to avoid the thundering herd of
2209 # We abort by default to avoid the thundering herd of
2209 # clients flooding a server that was expecting expensive
2210 # clients flooding a server that was expecting expensive
2210 # clone load to be offloaded.
2211 # clone load to be offloaded.
2211 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2212 elif repo.ui.configbool('ui', 'clonebundlefallback'):
2212 repo.ui.warn(_('falling back to normal clone\n'))
2213 repo.ui.warn(_('falling back to normal clone\n'))
2213 else:
2214 else:
2214 raise error.Abort(_('error applying bundle'),
2215 raise error.Abort(_('error applying bundle'),
2215 hint=_('if this error persists, consider contacting '
2216 hint=_('if this error persists, consider contacting '
2216 'the server operator or disable clone '
2217 'the server operator or disable clone '
2217 'bundles via '
2218 'bundles via '
2218 '"--config ui.clonebundles=false"'))
2219 '"--config ui.clonebundles=false"'))
2219
2220
2220 def parseclonebundlesmanifest(repo, s):
2221 def parseclonebundlesmanifest(repo, s):
2221 """Parses the raw text of a clone bundles manifest.
2222 """Parses the raw text of a clone bundles manifest.
2222
2223
2223 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2224 Returns a list of dicts. The dicts have a ``URL`` key corresponding
2224 to the URL; the other keys are the attributes for the entry.
2225 to the URL; the other keys are the attributes for the entry.
2225 """
2226 """
2226 m = []
2227 m = []
2227 for line in s.splitlines():
2228 for line in s.splitlines():
2228 fields = line.split()
2229 fields = line.split()
2229 if not fields:
2230 if not fields:
2230 continue
2231 continue
2231 attrs = {'URL': fields[0]}
2232 attrs = {'URL': fields[0]}
2232 for rawattr in fields[1:]:
2233 for rawattr in fields[1:]:
2233 key, value = rawattr.split('=', 1)
2234 key, value = rawattr.split('=', 1)
2234 key = urlreq.unquote(key)
2235 key = urlreq.unquote(key)
2235 value = urlreq.unquote(value)
2236 value = urlreq.unquote(value)
2236 attrs[key] = value
2237 attrs[key] = value
2237
2238
2238 # Parse BUNDLESPEC into components. This makes client-side
2239 # Parse BUNDLESPEC into components. This makes client-side
2239 # preferences easier to specify since you can prefer a single
2240 # preferences easier to specify since you can prefer a single
2240 # component of the BUNDLESPEC.
2241 # component of the BUNDLESPEC.
2241 if key == 'BUNDLESPEC':
2242 if key == 'BUNDLESPEC':
2242 try:
2243 try:
2243 bundlespec = parsebundlespec(repo, value,
2244 bundlespec = parsebundlespec(repo, value,
2244 externalnames=True)
2245 externalnames=True)
2245 attrs['COMPRESSION'] = bundlespec.compression
2246 attrs['COMPRESSION'] = bundlespec.compression
2246 attrs['VERSION'] = bundlespec.version
2247 attrs['VERSION'] = bundlespec.version
2247 except error.InvalidBundleSpecification:
2248 except error.InvalidBundleSpecification:
2248 pass
2249 pass
2249 except error.UnsupportedBundleSpecification:
2250 except error.UnsupportedBundleSpecification:
2250 pass
2251 pass
2251
2252
2252 m.append(attrs)
2253 m.append(attrs)
2253
2254
2254 return m
2255 return m
2255
2256
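# A worked example of the format parsed above, using invented URLs. Given
# this two-line manifest text:
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2
#     https://example.com/packed.hg BUNDLESPEC=none-packed1 REQUIRESNI=true
#
# parseclonebundlesmanifest() returns, roughly:
#
#     [{'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#       'COMPRESSION': 'gzip', 'VERSION': 'v2'},
#      {'URL': 'https://example.com/packed.hg',
#       'BUNDLESPEC': 'none-packed1', 'REQUIRESNI': 'true',
#       'COMPRESSION': 'none', 'VERSION': 'packed1'}]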
2256 def isstreamclonespec(bundlespec):
2257 def isstreamclonespec(bundlespec):
2257 # Stream clone v1
2258 # Stream clone v1
2258 if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
2259 if (bundlespec.compression == 'UN' and bundlespec.version == 's1'):
2259 return True
2260 return True
2260
2261
2261 # Stream clone v2
2262 # Stream clone v2
2262 if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
2263 if (bundlespec.compression == 'UN' and bundlespec.version == '02' and
2263 bundlespec.contentopts.get('streamv2')):
2264 bundlespec.contentopts.get('streamv2')):
2264 return True
2265 return True
2265
2266
2266 return False
2267 return False
2267
2268
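# Concrete illustrations of the two branches above. The 'none-v2;stream=v2'
# spec string for the v2 form is an assumption about how stream-v2 bundles
# are spelled; the other two follow directly from the checks:
#
#     isstreamclonespec(parsebundlespec(repo, 'none-packed1'))       # v1: True
#     isstreamclonespec(parsebundlespec(repo, 'none-v2;stream=v2'))  # assumed v2: True
#     isstreamclonespec(parsebundlespec(repo, 'gzip-v2'))            # False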
2268 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2269 def filterclonebundleentries(repo, entries, streamclonerequested=False):
2269 """Remove incompatible clone bundle manifest entries.
2270 """Remove incompatible clone bundle manifest entries.
2270
2271
2271 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2272 Accepts a list of entries parsed with ``parseclonebundlesmanifest``
2272 and returns a new list consisting of only the entries that this client
2273 and returns a new list consisting of only the entries that this client
2273 should be able to apply.
2274 should be able to apply.
2274
2275
2275 There is no guarantee we'll be able to apply all returned entries because
2276 There is no guarantee we'll be able to apply all returned entries because
2276 the metadata we use to filter on may be missing or wrong.
2277 the metadata we use to filter on may be missing or wrong.
2277 """
2278 """
2278 newentries = []
2279 newentries = []
2279 for entry in entries:
2280 for entry in entries:
2280 spec = entry.get('BUNDLESPEC')
2281 spec = entry.get('BUNDLESPEC')
2281 if spec:
2282 if spec:
2282 try:
2283 try:
2283 bundlespec = parsebundlespec(repo, spec, strict=True)
2284 bundlespec = parsebundlespec(repo, spec, strict=True)
2284
2285
2285 # If a stream clone was requested, filter out non-streamclone
2286 # If a stream clone was requested, filter out non-streamclone
2286 # entries.
2287 # entries.
2287 if streamclonerequested and not isstreamclonespec(bundlespec):
2288 if streamclonerequested and not isstreamclonespec(bundlespec):
2288 repo.ui.debug('filtering %s because not a stream clone\n' %
2289 repo.ui.debug('filtering %s because not a stream clone\n' %
2289 entry['URL'])
2290 entry['URL'])
2290 continue
2291 continue
2291
2292
2292 except error.InvalidBundleSpecification as e:
2293 except error.InvalidBundleSpecification as e:
2293 repo.ui.debug(str(e) + '\n')
2294 repo.ui.debug(str(e) + '\n')
2294 continue
2295 continue
2295 except error.UnsupportedBundleSpecification as e:
2296 except error.UnsupportedBundleSpecification as e:
2296 repo.ui.debug('filtering %s because unsupported bundle '
2297 repo.ui.debug('filtering %s because unsupported bundle '
2297 'spec: %s\n' % (
2298 'spec: %s\n' % (
2298 entry['URL'], stringutil.forcebytestr(e)))
2299 entry['URL'], stringutil.forcebytestr(e)))
2299 continue
2300 continue
2300 # If we don't have a spec and requested a stream clone, we don't know
2301 # If we don't have a spec and requested a stream clone, we don't know
2301 # what the entry is so don't attempt to apply it.
2302 # what the entry is so don't attempt to apply it.
2302 elif streamclonerequested:
2303 elif streamclonerequested:
2303 repo.ui.debug('filtering %s because cannot determine if a stream '
2304 repo.ui.debug('filtering %s because cannot determine if a stream '
2304 'clone bundle\n' % entry['URL'])
2305 'clone bundle\n' % entry['URL'])
2305 continue
2306 continue
2306
2307
2307 if 'REQUIRESNI' in entry and not sslutil.hassni:
2308 if 'REQUIRESNI' in entry and not sslutil.hassni:
2308 repo.ui.debug('filtering %s because SNI not supported\n' %
2309 repo.ui.debug('filtering %s because SNI not supported\n' %
2309 entry['URL'])
2310 entry['URL'])
2310 continue
2311 continue
2311
2312
2312 newentries.append(entry)
2313 newentries.append(entry)
2313
2314
2314 return newentries
2315 return newentries
2315
2316
2316 class clonebundleentry(object):
2317 class clonebundleentry(object):
2317 """Represents an item in a clone bundles manifest.
2318 """Represents an item in a clone bundles manifest.
2318
2319
2319 This rich class is needed to support sorting since sorted() in Python 3
2320 This rich class is needed to support sorting since sorted() in Python 3
2320 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2321 doesn't support ``cmp`` and our comparison is complex enough that ``key=``
2321 won't work.
2322 won't work.
2322 """
2323 """
2323
2324
2324 def __init__(self, value, prefers):
2325 def __init__(self, value, prefers):
2325 self.value = value
2326 self.value = value
2326 self.prefers = prefers
2327 self.prefers = prefers
2327
2328
2328 def _cmp(self, other):
2329 def _cmp(self, other):
2329 for prefkey, prefvalue in self.prefers:
2330 for prefkey, prefvalue in self.prefers:
2330 avalue = self.value.get(prefkey)
2331 avalue = self.value.get(prefkey)
2331 bvalue = other.value.get(prefkey)
2332 bvalue = other.value.get(prefkey)
2332
2333
2333 # Special case for b missing attribute and a matches exactly.
2334 # Special case for b missing attribute and a matches exactly.
2334 if avalue is not None and bvalue is None and avalue == prefvalue:
2335 if avalue is not None and bvalue is None and avalue == prefvalue:
2335 return -1
2336 return -1
2336
2337
2337 # Special case for a missing attribute and b matches exactly.
2338 # Special case for a missing attribute and b matches exactly.
2338 if bvalue is not None and avalue is None and bvalue == prefvalue:
2339 if bvalue is not None and avalue is None and bvalue == prefvalue:
2339 return 1
2340 return 1
2340
2341
2341 # We can't compare unless attribute present on both.
2342 # We can't compare unless attribute present on both.
2342 if avalue is None or bvalue is None:
2343 if avalue is None or bvalue is None:
2343 continue
2344 continue
2344
2345
2345 # Same values should fall back to next attribute.
2346 # Same values should fall back to next attribute.
2346 if avalue == bvalue:
2347 if avalue == bvalue:
2347 continue
2348 continue
2348
2349
2349 # Exact matches come first.
2350 # Exact matches come first.
2350 if avalue == prefvalue:
2351 if avalue == prefvalue:
2351 return -1
2352 return -1
2352 if bvalue == prefvalue:
2353 if bvalue == prefvalue:
2353 return 1
2354 return 1
2354
2355
2355 # Fall back to next attribute.
2356 # Fall back to next attribute.
2356 continue
2357 continue
2357
2358
2358 # If we got here we couldn't sort by attributes and prefers. Fall
2359 # If we got here we couldn't sort by attributes and prefers. Fall
2359 # back to index order.
2360 # back to index order.
2360 return 0
2361 return 0
2361
2362
2362 def __lt__(self, other):
2363 def __lt__(self, other):
2363 return self._cmp(other) < 0
2364 return self._cmp(other) < 0
2364
2365
2365 def __gt__(self, other):
2366 def __gt__(self, other):
2366 return self._cmp(other) > 0
2367 return self._cmp(other) > 0
2367
2368
2368 def __eq__(self, other):
2369 def __eq__(self, other):
2369 return self._cmp(other) == 0
2370 return self._cmp(other) == 0
2370
2371
2371 def __le__(self, other):
2372 def __le__(self, other):
2372 return self._cmp(other) <= 0
2373 return self._cmp(other) <= 0
2373
2374
2374 def __ge__(self, other):
2375 def __ge__(self, other):
2375 return self._cmp(other) >= 0
2376 return self._cmp(other) >= 0
2376
2377
2377 def __ne__(self, other):
2378 def __ne__(self, other):
2378 return self._cmp(other) != 0
2379 return self._cmp(other) != 0
2379
2380
2380 def sortclonebundleentries(ui, entries):
2381 def sortclonebundleentries(ui, entries):
2381 prefers = ui.configlist('ui', 'clonebundleprefers')
2382 prefers = ui.configlist('ui', 'clonebundleprefers')
2382 if not prefers:
2383 if not prefers:
2383 return list(entries)
2384 return list(entries)
2384
2385
2385 prefers = [p.split('=', 1) for p in prefers]
2386 prefers = [p.split('=', 1) for p in prefers]
2386
2387
2387 items = sorted(clonebundleentry(v, prefers) for v in entries)
2388 items = sorted(clonebundleentry(v, prefers) for v in entries)
2388 return [i.value for i in items]
2389 return [i.value for i in items]
2389
2390
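# An illustration of the preference sort above. With a configuration such
# as
#
#     [ui]
#     clonebundleprefers = VERSION=packed1, COMPRESSION=gzip
#
# entries whose VERSION is 'packed1' sort first, ties are then broken by
# COMPRESSION=gzip, and anything still tied keeps its manifest order
# (the _cmp() fallback of 0).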
2390 def trypullbundlefromurl(ui, repo, url):
2391 def trypullbundlefromurl(ui, repo, url):
2391 """Attempt to apply a bundle from a URL."""
2392 """Attempt to apply a bundle from a URL."""
2392 with repo.lock(), repo.transaction('bundleurl') as tr:
2393 with repo.lock(), repo.transaction('bundleurl') as tr:
2393 try:
2394 try:
2394 fh = urlmod.open(ui, url)
2395 fh = urlmod.open(ui, url)
2395 cg = readbundle(ui, fh, 'stream')
2396 cg = readbundle(ui, fh, 'stream')
2396
2397
2397 if isinstance(cg, streamclone.streamcloneapplier):
2398 if isinstance(cg, streamclone.streamcloneapplier):
2398 cg.apply(repo)
2399 cg.apply(repo)
2399 else:
2400 else:
2400 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2401 bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
2401 return True
2402 return True
2402 except urlerr.httperror as e:
2403 except urlerr.httperror as e:
2403 ui.warn(_('HTTP error fetching bundle: %s\n') %
2404 ui.warn(_('HTTP error fetching bundle: %s\n') %
2404 stringutil.forcebytestr(e))
2405 stringutil.forcebytestr(e))
2405 except urlerr.urlerror as e:
2406 except urlerr.urlerror as e:
2406 ui.warn(_('error fetching bundle: %s\n') %
2407 ui.warn(_('error fetching bundle: %s\n') %
2407 stringutil.forcebytestr(e.reason))
2408 stringutil.forcebytestr(e.reason))
2408
2409
2409 return False
2410 return False
@@ -1,2375 +1,2378 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered"""
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

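# Illustrative sketch, not part of the original module: peeking at a cached
# property without forcing its computation.
def _examplepeekchangelog(repo):
    # isfilecached() never populates the cache; it only reports what is
    # already loaded.
    cl, cached = isfilecached(repo, 'changelog')
    return cl if cached else None
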
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@zi.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**args)
        except Exception:
            f.set_exception_info(*sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

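# Illustrative sketch, not part of the original module: driving the executor
# through the peer interface (`peer` is assumed to be a localpeer).
def _examplepeerheads(peer):
    with peer.commandexecutor() as e:
        f = e.callcommand('heads', {})
        e.sendcommands()
        # localcommandexecutor resolves futures eagerly in callcommand(),
        # so result() returns immediately.
        return f.result()
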
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

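    # Example (sketch, not part of the original class): a caller holding a
    # peer could fetch the advertised manifest with
    #
    #   manifest = peer.clonebundles()
    #
    # Each manifest line is a bundle URL optionally followed by key=value
    # attributes; trypullbundlefromurl() in exchange.py shows how a bundle
    # at such a URL is applied.
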
    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@zi.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

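# Illustrative sketch (hypothetical extension code, not part of this
# module): an extension advertising support for a custom requirement.
#
#   from mercurial import localrepo
#
#   def featuresetup(ui, supported):
#       supported.add('exp-myfeature')
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
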
@zi.implementer(repository.completelocalrepository)
class localrepository(object):

    # obsolete experimental requirements:
    # - manifestv2: An experimental new manifest format that allowed
    #   for stem compression of long paths. Experiment ended up not
    #   being successful (repository sizes went up due to worse delta
    #   chains), and the code was deleted in 4.6.
    supportedformats = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
        REVLOGV2_REQUIREMENT,
    }
    _basesupported = supportedformats | {
        'store',
        'fncache',
        'shared',
        'relshared',
        'dotencode',
        'exp-sparse',
    }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't the dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. what we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except (error.RepoLookupError, error.FilteredIndexError,
                error.FilteredLookupError):
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

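    # Example (sketch): %-formatting escapes arguments safely, e.g.
    #   repo.revs('ancestors(%d) and not public()', 42)
    #   repo.revs('branch(%s)', 'default')
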
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs, repo=self,
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

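    # Example (sketch): repo.anyrevs(['heads(public())', '.'], user=True)
    # expands user-configured revset aliases; passing a localalias mapping
    # such as {'mine': 'author(alice)'} supplies or overrides an alias
    # locally for the given specs.
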
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

922 @filteredpropertycache
925 @filteredpropertycache
923 def _tagscache(self):
926 def _tagscache(self):
924 '''Returns a tagscache object that contains various tags related
927 '''Returns a tagscache object that contains various tags related
925 caches.'''
928 caches.'''
926
929
927 # This simplifies its cache management by having one decorated
930 # This simplifies its cache management by having one decorated
928 # function (this one) and the rest simply fetch things from it.
931 # function (this one) and the rest simply fetch things from it.
929 class tagscache(object):
932 class tagscache(object):
930 def __init__(self):
933 def __init__(self):
931 # These two define the set of tags for this repository. tags
934 # These two define the set of tags for this repository. tags
932 # maps tag name to node; tagtypes maps tag name to 'global' or
935 # maps tag name to node; tagtypes maps tag name to 'global' or
933 # 'local'. (Global tags are defined by .hgtags across all
936 # 'local'. (Global tags are defined by .hgtags across all
934 # heads, and local tags are defined in .hg/localtags.)
937 # heads, and local tags are defined in .hg/localtags.)
935 # They constitute the in-memory cache of tags.
938 # They constitute the in-memory cache of tags.
936 self.tags = self.tagtypes = None
939 self.tags = self.tagtypes = None
937
940
938 self.nodetagscache = self.tagslist = None
941 self.nodetagscache = self.tagslist = None
939
942
940 cache = tagscache()
943 cache = tagscache()
941 cache.tags, cache.tagtypes = self._findtags()
944 cache.tags, cache.tagtypes = self._findtags()
942
945
943 return cache
946 return cache
944
947
945 def tags(self):
948 def tags(self):
946 '''return a mapping of tag to node'''
949 '''return a mapping of tag to node'''
947 t = {}
950 t = {}
948 if self.changelog.filteredrevs:
951 if self.changelog.filteredrevs:
949 tags, tt = self._findtags()
952 tags, tt = self._findtags()
950 else:
953 else:
951 tags = self._tagscache.tags
954 tags = self._tagscache.tags
952 for k, v in tags.iteritems():
955 for k, v in tags.iteritems():
953 try:
956 try:
954 # ignore tags to unknown nodes
957 # ignore tags to unknown nodes
955 self.changelog.rev(v)
958 self.changelog.rev(v)
956 t[k] = v
959 t[k] = v
957 except (error.LookupError, ValueError):
960 except (error.LookupError, ValueError):
958 pass
961 pass
959 return t
962 return t
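# Sketch of consuming the returned mapping (``repo``, ``ui`` and
# hex() assumed in scope; nodes are binary nodeids):
#
#     for name, node in sorted(repo.tags().iteritems()):
#         ui.write('%s -> %s\n' % (name, hex(node)))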
960
963
961 def _findtags(self):
964 def _findtags(self):
962 '''Do the hard work of finding tags. Return a pair of dicts
965 '''Do the hard work of finding tags. Return a pair of dicts
963 (tags, tagtypes) where tags maps tag name to node, and tagtypes
966 (tags, tagtypes) where tags maps tag name to node, and tagtypes
964 maps tag name to a string like \'global\' or \'local\'.
967 maps tag name to a string like \'global\' or \'local\'.
965 Subclasses or extensions are free to add their own tags, but
968 Subclasses or extensions are free to add their own tags, but
966 should be aware that the returned dicts will be retained for the
969 should be aware that the returned dicts will be retained for the
967 duration of the localrepo object.'''
970 duration of the localrepo object.'''
968
971
969 # XXX what tagtype should subclasses/extensions use? Currently
972 # XXX what tagtype should subclasses/extensions use? Currently
970 # mq and bookmarks add tags, but do not set the tagtype at all.
973 # mq and bookmarks add tags, but do not set the tagtype at all.
971 # Should each extension invent its own tag type? Should there
974 # Should each extension invent its own tag type? Should there
972 # be one tagtype for all such "virtual" tags? Or is the status
975 # be one tagtype for all such "virtual" tags? Or is the status
973 # quo fine?
976 # quo fine?
974
977
975
978
976 # map tag name to (node, hist)
979 # map tag name to (node, hist)
977 alltags = tagsmod.findglobaltags(self.ui, self)
980 alltags = tagsmod.findglobaltags(self.ui, self)
978 # map tag name to tag type
981 # map tag name to tag type
979 tagtypes = dict((tag, 'global') for tag in alltags)
982 tagtypes = dict((tag, 'global') for tag in alltags)
980
983
981 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
984 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
982
985
983 # Build the return dicts. Have to re-encode tag names because
986 # Build the return dicts. Have to re-encode tag names because
984 # the tags module always uses UTF-8 (in order not to lose info
987 # the tags module always uses UTF-8 (in order not to lose info
985 # writing to the cache), but the rest of Mercurial wants them in
988 # writing to the cache), but the rest of Mercurial wants them in
986 # local encoding.
989 # local encoding.
987 tags = {}
990 tags = {}
988 for (name, (node, hist)) in alltags.iteritems():
991 for (name, (node, hist)) in alltags.iteritems():
989 if node != nullid:
992 if node != nullid:
990 tags[encoding.tolocal(name)] = node
993 tags[encoding.tolocal(name)] = node
991 tags['tip'] = self.changelog.tip()
994 tags['tip'] = self.changelog.tip()
992 tagtypes = dict([(encoding.tolocal(name), value)
995 tagtypes = dict([(encoding.tolocal(name), value)
993 for (name, value) in tagtypes.iteritems()])
996 for (name, value) in tagtypes.iteritems()])
994 return (tags, tagtypes)
997 return (tags, tagtypes)
995
998
996 def tagtype(self, tagname):
999 def tagtype(self, tagname):
997 '''
1000 '''
998 return the type of the given tag. result can be:
1001 return the type of the given tag. result can be:
999
1002
1000 'local' : a local tag
1003 'local' : a local tag
1001 'global' : a global tag
1004 'global' : a global tag
1002 None : tag does not exist
1005 None : tag does not exist
1003 '''
1006 '''
1004
1007
1005 return self._tagscache.tagtypes.get(tagname)
1008 return self._tagscache.tagtypes.get(tagname)
1006
1009
1007 def tagslist(self):
1010 def tagslist(self):
1008 '''return a list of tags ordered by revision'''
1011 '''return a list of tags ordered by revision'''
1009 if not self._tagscache.tagslist:
1012 if not self._tagscache.tagslist:
1010 l = []
1013 l = []
1011 for t, n in self.tags().iteritems():
1014 for t, n in self.tags().iteritems():
1012 l.append((self.changelog.rev(n), t, n))
1015 l.append((self.changelog.rev(n), t, n))
1013 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1016 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
1014
1017
1015 return self._tagscache.tagslist
1018 return self._tagscache.tagslist
1016
1019
1017 def nodetags(self, node):
1020 def nodetags(self, node):
1018 '''return the tags associated with a node'''
1021 '''return the tags associated with a node'''
1019 if not self._tagscache.nodetagscache:
1022 if not self._tagscache.nodetagscache:
1020 nodetagscache = {}
1023 nodetagscache = {}
1021 for t, n in self._tagscache.tags.iteritems():
1024 for t, n in self._tagscache.tags.iteritems():
1022 nodetagscache.setdefault(n, []).append(t)
1025 nodetagscache.setdefault(n, []).append(t)
1023 for tags in nodetagscache.itervalues():
1026 for tags in nodetagscache.itervalues():
1024 tags.sort()
1027 tags.sort()
1025 self._tagscache.nodetagscache = nodetagscache
1028 self._tagscache.nodetagscache = nodetagscache
1026 return self._tagscache.nodetagscache.get(node, [])
1029 return self._tagscache.nodetagscache.get(node, [])
1027
1030
1028 def nodebookmarks(self, node):
1031 def nodebookmarks(self, node):
1029 """return the list of bookmarks pointing to the specified node"""
1032 """return the list of bookmarks pointing to the specified node"""
1030 marks = []
1033 marks = []
1031 for bookmark, n in self._bookmarks.iteritems():
1034 for bookmark, n in self._bookmarks.iteritems():
1032 if n == node:
1035 if n == node:
1033 marks.append(bookmark)
1036 marks.append(bookmark)
1034 return sorted(marks)
1037 return sorted(marks)
1035
1038
1036 def branchmap(self):
1039 def branchmap(self):
1037 '''returns a dictionary {branch: [branchheads]} with branchheads
1040 '''returns a dictionary {branch: [branchheads]} with branchheads
1038 ordered by increasing revision number'''
1041 ordered by increasing revision number'''
1039 branchmap.updatecache(self)
1042 branchmap.updatecache(self)
1040 return self._branchcaches[self.filtername]
1043 return self._branchcaches[self.filtername]
1041
1044
1042 @unfilteredmethod
1045 @unfilteredmethod
1043 def revbranchcache(self):
1046 def revbranchcache(self):
1044 if not self._revbranchcache:
1047 if not self._revbranchcache:
1045 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1048 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
1046 return self._revbranchcache
1049 return self._revbranchcache
1047
1050
1048 def branchtip(self, branch, ignoremissing=False):
1051 def branchtip(self, branch, ignoremissing=False):
1049 '''return the tip node for a given branch
1052 '''return the tip node for a given branch
1050
1053
1051 If ignoremissing is True, then this method will not raise an error.
1054 If ignoremissing is True, then this method will not raise an error.
1052 This is helpful for callers that only expect None for a missing branch
1055 This is helpful for callers that only expect None for a missing branch
1053 (e.g. namespace).
1056 (e.g. namespace).
1054
1057
1055 '''
1058 '''
1056 try:
1059 try:
1057 return self.branchmap().branchtip(branch)
1060 return self.branchmap().branchtip(branch)
1058 except KeyError:
1061 except KeyError:
1059 if not ignoremissing:
1062 if not ignoremissing:
1060 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1063 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
1061 else:
1064 else:
1062 pass
1065 pass
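# Sketch of the ignoremissing behavior (branch name hypothetical):
#
#     node = repo.branchtip('maybe-missing', ignoremissing=True)
#     if node is None:
#         pass  # branch absent; no RepoLookupError was raised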
1063
1066
1064 def lookup(self, key):
1067 def lookup(self, key):
1065 return scmutil.revsymbol(self, key).node()
1068 return scmutil.revsymbol(self, key).node()
1066
1069
1067 def lookupbranch(self, key):
1070 def lookupbranch(self, key):
1068 if key in self.branchmap():
1071 if key in self.branchmap():
1069 return key
1072 return key
1070
1073
1071 return scmutil.revsymbol(self, key).branch()
1074 return scmutil.revsymbol(self, key).branch()
1072
1075
1073 def known(self, nodes):
1076 def known(self, nodes):
1074 cl = self.changelog
1077 cl = self.changelog
1075 nm = cl.nodemap
1078 nm = cl.nodemap
1076 filtered = cl.filteredrevs
1079 filtered = cl.filteredrevs
1077 result = []
1080 result = []
1078 for n in nodes:
1081 for n in nodes:
1079 r = nm.get(n)
1082 r = nm.get(n)
1080 resp = not (r is None or r in filtered)
1083 resp = not (r is None or r in filtered)
1081 result.append(resp)
1084 result.append(resp)
1082 return result
1085 return result
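# Sketch: ``known()`` answers per-node membership and treats filtered
# (e.g. hidden) revisions as unknown (node1/node2 are hypothetical
# binary nodeids):
#
#     flags = repo.known([node1, node2])  # e.g. [True, False]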
1083
1086
1084 def local(self):
1087 def local(self):
1085 return self
1088 return self
1086
1089
1087 def publishing(self):
1090 def publishing(self):
1088 # it's safe (and desirable) to trust the publish flag unconditionally
1091 # it's safe (and desirable) to trust the publish flag unconditionally
1089 # so that we don't finalize changes shared between users via ssh or nfs
1092 # so that we don't finalize changes shared between users via ssh or nfs
1090 return self.ui.configbool('phases', 'publish', untrusted=True)
1093 return self.ui.configbool('phases', 'publish', untrusted=True)
1091
1094
1092 def cancopy(self):
1095 def cancopy(self):
1093 # so statichttprepo's override of local() works
1096 # so statichttprepo's override of local() works
1094 if not self.local():
1097 if not self.local():
1095 return False
1098 return False
1096 if not self.publishing():
1099 if not self.publishing():
1097 return True
1100 return True
1098 # if publishing we can't copy if there is filtered content
1101 # if publishing we can't copy if there is filtered content
1099 return not self.filtered('visible').changelog.filteredrevs
1102 return not self.filtered('visible').changelog.filteredrevs
1100
1103
1101 def shared(self):
1104 def shared(self):
1102 '''the type of shared repository (None if not shared)'''
1105 '''the type of shared repository (None if not shared)'''
1103 if self.sharedpath != self.path:
1106 if self.sharedpath != self.path:
1104 return 'store'
1107 return 'store'
1105 return None
1108 return None
1106
1109
1107 def wjoin(self, f, *insidef):
1110 def wjoin(self, f, *insidef):
1108 return self.vfs.reljoin(self.root, f, *insidef)
1111 return self.vfs.reljoin(self.root, f, *insidef)
1109
1112
1110 def file(self, f):
1113 def file(self, f):
1111 if f[0] == '/':
1114 if f[0] == '/':
1112 f = f[1:]
1115 f = f[1:]
1113 return filelog.filelog(self.svfs, f)
1116 return filelog.filelog(self.svfs, f)
1114
1117
1115 def setparents(self, p1, p2=nullid):
1118 def setparents(self, p1, p2=nullid):
1116 with self.dirstate.parentchange():
1119 with self.dirstate.parentchange():
1117 copies = self.dirstate.setparents(p1, p2)
1120 copies = self.dirstate.setparents(p1, p2)
1118 pctx = self[p1]
1121 pctx = self[p1]
1119 if copies:
1122 if copies:
1120 # Adjust copy records, the dirstate cannot do it, it
1123 # Adjust copy records, the dirstate cannot do it, it
1121 # requires access to parents manifests. Preserve them
1124 # requires access to parents manifests. Preserve them
1122 # only for entries added to first parent.
1125 # only for entries added to first parent.
1123 for f in copies:
1126 for f in copies:
1124 if f not in pctx and copies[f] in pctx:
1127 if f not in pctx and copies[f] in pctx:
1125 self.dirstate.copy(copies[f], f)
1128 self.dirstate.copy(copies[f], f)
1126 if p2 == nullid:
1129 if p2 == nullid:
1127 for f, s in sorted(self.dirstate.copies().items()):
1130 for f, s in sorted(self.dirstate.copies().items()):
1128 if f not in pctx and s not in pctx:
1131 if f not in pctx and s not in pctx:
1129 self.dirstate.copy(None, f)
1132 self.dirstate.copy(None, f)
1130
1133
1131 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1134 def filectx(self, path, changeid=None, fileid=None, changectx=None):
1132 """changeid can be a changeset revision, node, or tag.
1135 """changeid can be a changeset revision, node, or tag.
1133 fileid can be a file revision or node."""
1136 fileid can be a file revision or node."""
1134 return context.filectx(self, path, changeid, fileid,
1137 return context.filectx(self, path, changeid, fileid,
1135 changectx=changectx)
1138 changectx=changectx)
1136
1139
1137 def getcwd(self):
1140 def getcwd(self):
1138 return self.dirstate.getcwd()
1141 return self.dirstate.getcwd()
1139
1142
1140 def pathto(self, f, cwd=None):
1143 def pathto(self, f, cwd=None):
1141 return self.dirstate.pathto(f, cwd)
1144 return self.dirstate.pathto(f, cwd)
1142
1145
1143 def _loadfilter(self, filter):
1146 def _loadfilter(self, filter):
1144 if filter not in self._filterpats:
1147 if filter not in self._filterpats:
1145 l = []
1148 l = []
1146 for pat, cmd in self.ui.configitems(filter):
1149 for pat, cmd in self.ui.configitems(filter):
1147 if cmd == '!':
1150 if cmd == '!':
1148 continue
1151 continue
1149 mf = matchmod.match(self.root, '', [pat])
1152 mf = matchmod.match(self.root, '', [pat])
1150 fn = None
1153 fn = None
1151 params = cmd
1154 params = cmd
1152 for name, filterfn in self._datafilters.iteritems():
1155 for name, filterfn in self._datafilters.iteritems():
1153 if cmd.startswith(name):
1156 if cmd.startswith(name):
1154 fn = filterfn
1157 fn = filterfn
1155 params = cmd[len(name):].lstrip()
1158 params = cmd[len(name):].lstrip()
1156 break
1159 break
1157 if not fn:
1160 if not fn:
1158 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1161 fn = lambda s, c, **kwargs: procutil.filter(s, c)
1159 # Wrap old filters not supporting keyword arguments
1162 # Wrap old filters not supporting keyword arguments
1160 if not pycompat.getargspec(fn)[2]:
1163 if not pycompat.getargspec(fn)[2]:
1161 oldfn = fn
1164 oldfn = fn
1162 fn = lambda s, c, **kwargs: oldfn(s, c)
1165 fn = lambda s, c, **kwargs: oldfn(s, c)
1163 l.append((mf, fn, params))
1166 l.append((mf, fn, params))
1164 self._filterpats[filter] = l
1167 self._filterpats[filter] = l
1165 return self._filterpats[filter]
1168 return self._filterpats[filter]
1166
1169
1167 def _filter(self, filterpats, filename, data):
1170 def _filter(self, filterpats, filename, data):
1168 for mf, fn, cmd in filterpats:
1171 for mf, fn, cmd in filterpats:
1169 if mf(filename):
1172 if mf(filename):
1170 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1173 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
1171 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1174 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
1172 break
1175 break
1173
1176
1174 return data
1177 return data
1175
1178
1176 @unfilteredpropertycache
1179 @unfilteredpropertycache
1177 def _encodefilterpats(self):
1180 def _encodefilterpats(self):
1178 return self._loadfilter('encode')
1181 return self._loadfilter('encode')
1179
1182
1180 @unfilteredpropertycache
1183 @unfilteredpropertycache
1181 def _decodefilterpats(self):
1184 def _decodefilterpats(self):
1182 return self._loadfilter('decode')
1185 return self._loadfilter('decode')
1183
1186
1184 def adddatafilter(self, name, filter):
1187 def adddatafilter(self, name, filter):
1185 self._datafilters[name] = filter
1188 self._datafilters[name] = filter
1186
1189
1187 def wread(self, filename):
1190 def wread(self, filename):
1188 if self.wvfs.islink(filename):
1191 if self.wvfs.islink(filename):
1189 data = self.wvfs.readlink(filename)
1192 data = self.wvfs.readlink(filename)
1190 else:
1193 else:
1191 data = self.wvfs.read(filename)
1194 data = self.wvfs.read(filename)
1192 return self._filter(self._encodefilterpats, filename, data)
1195 return self._filter(self._encodefilterpats, filename, data)
1193
1196
1194 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1197 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1195 """write ``data`` into ``filename`` in the working directory
1198 """write ``data`` into ``filename`` in the working directory
1196
1199
1197 This returns the length of the written (maybe decoded) data.
1200 This returns the length of the written (maybe decoded) data.
1198 """
1201 """
1199 data = self._filter(self._decodefilterpats, filename, data)
1202 data = self._filter(self._decodefilterpats, filename, data)
1200 if 'l' in flags:
1203 if 'l' in flags:
1201 self.wvfs.symlink(data, filename)
1204 self.wvfs.symlink(data, filename)
1202 else:
1205 else:
1203 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1206 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1204 **kwargs)
1207 **kwargs)
1205 if 'x' in flags:
1208 if 'x' in flags:
1206 self.wvfs.setflags(filename, False, True)
1209 self.wvfs.setflags(filename, False, True)
1207 else:
1210 else:
1208 self.wvfs.setflags(filename, False, False)
1211 self.wvfs.setflags(filename, False, False)
1209 return len(data)
1212 return len(data)
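# Sketch (hypothetical names/content): the flags argument selects how
# the data lands in the working directory:
#
#     repo.wwrite('plain.txt', data, '')    # regular file
#     repo.wwrite('run.sh', data, 'x')      # executable bit set
#     repo.wwrite('alias', 'target', 'l')   # symlink whose target is the data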
1210
1213
1211 def wwritedata(self, filename, data):
1214 def wwritedata(self, filename, data):
1212 return self._filter(self._decodefilterpats, filename, data)
1215 return self._filter(self._decodefilterpats, filename, data)
1213
1216
1214 def currenttransaction(self):
1217 def currenttransaction(self):
1215 """return the current transaction or None if none exists"""
1218 """return the current transaction or None if none exists"""
1216 if self._transref:
1219 if self._transref:
1217 tr = self._transref()
1220 tr = self._transref()
1218 else:
1221 else:
1219 tr = None
1222 tr = None
1220
1223
1221 if tr and tr.running():
1224 if tr and tr.running():
1222 return tr
1225 return tr
1223 return None
1226 return None
1224
1227
1225 def transaction(self, desc, report=None):
1228 def transaction(self, desc, report=None):
1226 if (self.ui.configbool('devel', 'all-warnings')
1229 if (self.ui.configbool('devel', 'all-warnings')
1227 or self.ui.configbool('devel', 'check-locks')):
1230 or self.ui.configbool('devel', 'check-locks')):
1228 if self._currentlock(self._lockref) is None:
1231 if self._currentlock(self._lockref) is None:
1229 raise error.ProgrammingError('transaction requires locking')
1232 raise error.ProgrammingError('transaction requires locking')
1230 tr = self.currenttransaction()
1233 tr = self.currenttransaction()
1231 if tr is not None:
1234 if tr is not None:
1232 return tr.nest(name=desc)
1235 return tr.nest(name=desc)
1233
1236
1234 # abort here if the journal already exists
1237 # abort here if the journal already exists
1235 if self.svfs.exists("journal"):
1238 if self.svfs.exists("journal"):
1236 raise error.RepoError(
1239 raise error.RepoError(
1237 _("abandoned transaction found"),
1240 _("abandoned transaction found"),
1238 hint=_("run 'hg recover' to clean up transaction"))
1241 hint=_("run 'hg recover' to clean up transaction"))
1239
1242
1240 idbase = "%.40f#%f" % (random.random(), time.time())
1243 idbase = "%.40f#%f" % (random.random(), time.time())
1241 ha = hex(hashlib.sha1(idbase).digest())
1244 ha = hex(hashlib.sha1(idbase).digest())
1242 txnid = 'TXN:' + ha
1245 txnid = 'TXN:' + ha
1243 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1246 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
1244
1247
1245 self._writejournal(desc)
1248 self._writejournal(desc)
1246 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1249 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
1247 if report:
1250 if report:
1248 rp = report
1251 rp = report
1249 else:
1252 else:
1250 rp = self.ui.warn
1253 rp = self.ui.warn
1251 vfsmap = {'plain': self.vfs} # root of .hg/
1254 vfsmap = {'plain': self.vfs} # root of .hg/
1252 # we must avoid cyclic reference between repo and transaction.
1255 # we must avoid cyclic reference between repo and transaction.
1253 reporef = weakref.ref(self)
1256 reporef = weakref.ref(self)
1254 # Code to track tag movement
1257 # Code to track tag movement
1255 #
1258 #
1256 # Since tags are all handled as file content, it is actually quite hard
1259 # Since tags are all handled as file content, it is actually quite hard
1257 # to track these movements from a code perspective. So we fall back to
1260 # to track these movements from a code perspective. So we fall back to
1258 # tracking at the repository level. One could envision tracking changes
1261 # tracking at the repository level. One could envision tracking changes
1259 # to the '.hgtags' file through changegroup apply but that fails to
1262 # to the '.hgtags' file through changegroup apply but that fails to
1260 # cope with cases where a transaction exposes new heads without a
1263 # cope with cases where a transaction exposes new heads without a
1261 # changegroup being involved (e.g. phase movement).
1264 # changegroup being involved (e.g. phase movement).
1262 #
1265 #
1263 # For now, we gate the feature behind a flag since this likely comes
1266 # For now, we gate the feature behind a flag since this likely comes
1264 # with performance impacts. The current code runs more often than needed
1267 # with performance impacts. The current code runs more often than needed
1265 # and does not use caches as much as it could. The current focus is on
1268 # and does not use caches as much as it could. The current focus is on
1266 # the behavior of the feature so we disable it by default. The flag
1269 # the behavior of the feature so we disable it by default. The flag
1267 # will be removed when we are happy with the performance impact.
1270 # will be removed when we are happy with the performance impact.
1268 #
1271 #
1269 # Once this feature is no longer experimental, move the following
1272 # Once this feature is no longer experimental, move the following
1270 # documentation to the appropriate help section:
1273 # documentation to the appropriate help section:
1271 #
1274 #
1272 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1275 # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
1273 # tags (new or changed or deleted tags). In addition the details of
1276 # tags (new or changed or deleted tags). In addition the details of
1274 # these changes are made available in a file at:
1277 # these changes are made available in a file at:
1275 # ``REPOROOT/.hg/changes/tags.changes``.
1278 # ``REPOROOT/.hg/changes/tags.changes``.
1276 # Make sure you check for HG_TAG_MOVED before reading that file as it
1279 # Make sure you check for HG_TAG_MOVED before reading that file as it
1277 # might exist from a previous transaction even if no tags were touched
1280 # might exist from a previous transaction even if no tags were touched
1278 # in this one. Changes are recorded in a line-based format::
1281 # in this one. Changes are recorded in a line-based format::
1279 #
1282 #
1280 # <action> <hex-node> <tag-name>\n
1283 # <action> <hex-node> <tag-name>\n
1281 #
1284 #
1282 # Actions are defined as follows:
1285 # Actions are defined as follows:
1283 # "-R": tag is removed,
1286 # "-R": tag is removed,
1284 # "+A": tag is added,
1287 # "+A": tag is added,
1285 # "-M": tag is moved (old value),
1288 # "-M": tag is moved (old value),
1286 # "+M": tag is moved (new value),
1289 # "+M": tag is moved (new value),
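# A sketch of a Python hook consuming that file, per the format
# documented above (hook wiring and names are hypothetical):
#
#     def txnclosehook(ui, repo, **kwargs):
#         if kwargs.get('tag_moved') != '1':
#             return
#         for line in repo.vfs.read('changes/tags.changes').splitlines():
#             action, hexnode, tagname = line.split(' ', 2)
#             ui.write('%s %s %s\n' % (action, tagname, hexnode))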
1287 tracktags = lambda x: None
1290 tracktags = lambda x: None
1288 # experimental config: experimental.hook-track-tags
1291 # experimental config: experimental.hook-track-tags
1289 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1292 shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
1290 if desc != 'strip' and shouldtracktags:
1293 if desc != 'strip' and shouldtracktags:
1291 oldheads = self.changelog.headrevs()
1294 oldheads = self.changelog.headrevs()
1292 def tracktags(tr2):
1295 def tracktags(tr2):
1293 repo = reporef()
1296 repo = reporef()
1294 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1297 oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
1295 newheads = repo.changelog.headrevs()
1298 newheads = repo.changelog.headrevs()
1296 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1299 newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
1297 # notes: we compare lists here.
1300 # notes: we compare lists here.
1298 # As we do it only once, building sets would not be cheaper
1301 # As we do it only once, building sets would not be cheaper
1299 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1302 changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
1300 if changes:
1303 if changes:
1301 tr2.hookargs['tag_moved'] = '1'
1304 tr2.hookargs['tag_moved'] = '1'
1302 with repo.vfs('changes/tags.changes', 'w',
1305 with repo.vfs('changes/tags.changes', 'w',
1303 atomictemp=True) as changesfile:
1306 atomictemp=True) as changesfile:
1304 # note: we do not register the file with the transaction
1307 # note: we do not register the file with the transaction
1305 # because we need it to still exist when the transaction
1308 # because we need it to still exist when the transaction
1306 # is closed (for txnclose hooks)
1309 # is closed (for txnclose hooks)
1307 tagsmod.writediff(changesfile, changes)
1310 tagsmod.writediff(changesfile, changes)
1308 def validate(tr2):
1311 def validate(tr2):
1309 """will run pre-closing hooks"""
1312 """will run pre-closing hooks"""
1310 # XXX the transaction API is a bit lacking here so we take a hacky
1313 # XXX the transaction API is a bit lacking here so we take a hacky
1311 # path for now
1314 # path for now
1312 #
1315 #
1313 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1316 # We cannot add this as a "pending" hook since the 'tr.hookargs'
1314 # dict is copied before these run. In addition we need the data
1317 # dict is copied before these run. In addition we need the data
1315 # available to in-memory hooks too.
1318 # available to in-memory hooks too.
1316 #
1319 #
1317 # Moreover, we also need to make sure this runs before txnclose
1320 # Moreover, we also need to make sure this runs before txnclose
1318 # hooks and there is no "pending" mechanism that would execute
1321 # hooks and there is no "pending" mechanism that would execute
1319 # logic only if hooks are about to run.
1322 # logic only if hooks are about to run.
1320 #
1323 #
1321 # Fixing this limitation of the transaction is also needed to track
1324 # Fixing this limitation of the transaction is also needed to track
1322 # other families of changes (bookmarks, phases, obsolescence).
1325 # other families of changes (bookmarks, phases, obsolescence).
1323 #
1326 #
1324 # This will have to be fixed before we remove the experimental
1327 # This will have to be fixed before we remove the experimental
1325 # gating.
1328 # gating.
1326 tracktags(tr2)
1329 tracktags(tr2)
1327 repo = reporef()
1330 repo = reporef()
1328 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1331 if repo.ui.configbool('experimental', 'single-head-per-branch'):
1329 scmutil.enforcesinglehead(repo, tr2, desc)
1332 scmutil.enforcesinglehead(repo, tr2, desc)
1330 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1333 if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
1331 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1334 for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
1332 args = tr.hookargs.copy()
1335 args = tr.hookargs.copy()
1333 args.update(bookmarks.preparehookargs(name, old, new))
1336 args.update(bookmarks.preparehookargs(name, old, new))
1334 repo.hook('pretxnclose-bookmark', throw=True,
1337 repo.hook('pretxnclose-bookmark', throw=True,
1335 txnname=desc,
1338 txnname=desc,
1336 **pycompat.strkwargs(args))
1339 **pycompat.strkwargs(args))
1337 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1340 if hook.hashook(repo.ui, 'pretxnclose-phase'):
1338 cl = repo.unfiltered().changelog
1341 cl = repo.unfiltered().changelog
1339 for rev, (old, new) in tr.changes['phases'].items():
1342 for rev, (old, new) in tr.changes['phases'].items():
1340 args = tr.hookargs.copy()
1343 args = tr.hookargs.copy()
1341 node = hex(cl.node(rev))
1344 node = hex(cl.node(rev))
1342 args.update(phases.preparehookargs(node, old, new))
1345 args.update(phases.preparehookargs(node, old, new))
1343 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1346 repo.hook('pretxnclose-phase', throw=True, txnname=desc,
1344 **pycompat.strkwargs(args))
1347 **pycompat.strkwargs(args))
1345
1348
1346 repo.hook('pretxnclose', throw=True,
1349 repo.hook('pretxnclose', throw=True,
1347 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1350 txnname=desc, **pycompat.strkwargs(tr.hookargs))
1348 def releasefn(tr, success):
1351 def releasefn(tr, success):
1349 repo = reporef()
1352 repo = reporef()
1350 if success:
1353 if success:
1351 # this should be explicitly invoked here, because
1354 # this should be explicitly invoked here, because
1352 # in-memory changes aren't written out when closing
1355 # in-memory changes aren't written out when closing
1353 # the transaction, if tr.addfilegenerator (via
1356 # the transaction, if tr.addfilegenerator (via
1354 # dirstate.write or so) isn't invoked while the
1357 # dirstate.write or so) isn't invoked while the
1355 # transaction is running
1358 # transaction is running
1356 repo.dirstate.write(None)
1359 repo.dirstate.write(None)
1357 else:
1360 else:
1358 # discard all changes (including ones already written
1361 # discard all changes (including ones already written
1359 # out) in this transaction
1362 # out) in this transaction
1360 repo.dirstate.restorebackup(None, 'journal.dirstate')
1363 repo.dirstate.restorebackup(None, 'journal.dirstate')
1361
1364
1362 repo.invalidate(clearfilecache=True)
1365 repo.invalidate(clearfilecache=True)
1363
1366
1364 tr = transaction.transaction(rp, self.svfs, vfsmap,
1367 tr = transaction.transaction(rp, self.svfs, vfsmap,
1365 "journal",
1368 "journal",
1366 "undo",
1369 "undo",
1367 aftertrans(renames),
1370 aftertrans(renames),
1368 self.store.createmode,
1371 self.store.createmode,
1369 validator=validate,
1372 validator=validate,
1370 releasefn=releasefn,
1373 releasefn=releasefn,
1371 checkambigfiles=_cachedfiles,
1374 checkambigfiles=_cachedfiles,
1372 name=desc)
1375 name=desc)
1373 tr.changes['revs'] = xrange(0, 0)
1376 tr.changes['revs'] = xrange(0, 0)
1374 tr.changes['obsmarkers'] = set()
1377 tr.changes['obsmarkers'] = set()
1375 tr.changes['phases'] = {}
1378 tr.changes['phases'] = {}
1376 tr.changes['bookmarks'] = {}
1379 tr.changes['bookmarks'] = {}
1377
1380
1378 tr.hookargs['txnid'] = txnid
1381 tr.hookargs['txnid'] = txnid
1379 # note: writing the fncache only during finalize means that the file is
1382 # note: writing the fncache only during finalize means that the file is
1380 # outdated when running hooks. As fncache is used for streaming clone,
1383 # outdated when running hooks. As fncache is used for streaming clone,
1381 # this is not expected to break anything that happens during the hooks.
1384 # this is not expected to break anything that happens during the hooks.
1382 tr.addfinalize('flush-fncache', self.store.write)
1385 tr.addfinalize('flush-fncache', self.store.write)
1383 def txnclosehook(tr2):
1386 def txnclosehook(tr2):
1384 """To be run if transaction is successful, will schedule a hook run
1387 """To be run if transaction is successful, will schedule a hook run
1385 """
1388 """
1386 # Don't reference tr2 in hook() so we don't hold a reference.
1389 # Don't reference tr2 in hook() so we don't hold a reference.
1387 # This reduces memory consumption when there are multiple
1390 # This reduces memory consumption when there are multiple
1388 # transactions per lock. This can likely go away if issue5045
1391 # transactions per lock. This can likely go away if issue5045
1389 # fixes the function accumulation.
1392 # fixes the function accumulation.
1390 hookargs = tr2.hookargs
1393 hookargs = tr2.hookargs
1391
1394
1392 def hookfunc():
1395 def hookfunc():
1393 repo = reporef()
1396 repo = reporef()
1394 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1397 if hook.hashook(repo.ui, 'txnclose-bookmark'):
1395 bmchanges = sorted(tr.changes['bookmarks'].items())
1398 bmchanges = sorted(tr.changes['bookmarks'].items())
1396 for name, (old, new) in bmchanges:
1399 for name, (old, new) in bmchanges:
1397 args = tr.hookargs.copy()
1400 args = tr.hookargs.copy()
1398 args.update(bookmarks.preparehookargs(name, old, new))
1401 args.update(bookmarks.preparehookargs(name, old, new))
1399 repo.hook('txnclose-bookmark', throw=False,
1402 repo.hook('txnclose-bookmark', throw=False,
1400 txnname=desc, **pycompat.strkwargs(args))
1403 txnname=desc, **pycompat.strkwargs(args))
1401
1404
1402 if hook.hashook(repo.ui, 'txnclose-phase'):
1405 if hook.hashook(repo.ui, 'txnclose-phase'):
1403 cl = repo.unfiltered().changelog
1406 cl = repo.unfiltered().changelog
1404 phasemv = sorted(tr.changes['phases'].items())
1407 phasemv = sorted(tr.changes['phases'].items())
1405 for rev, (old, new) in phasemv:
1408 for rev, (old, new) in phasemv:
1406 args = tr.hookargs.copy()
1409 args = tr.hookargs.copy()
1407 node = hex(cl.node(rev))
1410 node = hex(cl.node(rev))
1408 args.update(phases.preparehookargs(node, old, new))
1411 args.update(phases.preparehookargs(node, old, new))
1409 repo.hook('txnclose-phase', throw=False, txnname=desc,
1412 repo.hook('txnclose-phase', throw=False, txnname=desc,
1410 **pycompat.strkwargs(args))
1413 **pycompat.strkwargs(args))
1411
1414
1412 repo.hook('txnclose', throw=False, txnname=desc,
1415 repo.hook('txnclose', throw=False, txnname=desc,
1413 **pycompat.strkwargs(hookargs))
1416 **pycompat.strkwargs(hookargs))
1414 reporef()._afterlock(hookfunc)
1417 reporef()._afterlock(hookfunc)
1415 tr.addfinalize('txnclose-hook', txnclosehook)
1418 tr.addfinalize('txnclose-hook', txnclosehook)
1416 # Include a leading "-" to make it happen before the transaction summary
1419 # Include a leading "-" to make it happen before the transaction summary
1417 # reports registered via scmutil.registersummarycallback() whose names
1420 # reports registered via scmutil.registersummarycallback() whose names
1418 # are 00-txnreport etc. That way, the caches will be warm when the
1421 # are 00-txnreport etc. That way, the caches will be warm when the
1419 # callbacks run.
1422 # callbacks run.
1420 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1423 tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
1421 def txnaborthook(tr2):
1424 def txnaborthook(tr2):
1422 """To be run if transaction is aborted
1425 """To be run if transaction is aborted
1423 """
1426 """
1424 reporef().hook('txnabort', throw=False, txnname=desc,
1427 reporef().hook('txnabort', throw=False, txnname=desc,
1425 **pycompat.strkwargs(tr2.hookargs))
1428 **pycompat.strkwargs(tr2.hookargs))
1426 tr.addabort('txnabort-hook', txnaborthook)
1429 tr.addabort('txnabort-hook', txnaborthook)
1427 # avoid eager cache invalidation. in-memory data should be identical
1430 # avoid eager cache invalidation. in-memory data should be identical
1428 # to stored data if transaction has no error.
1431 # to stored data if transaction has no error.
1429 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1432 tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
1430 self._transref = weakref.ref(tr)
1433 self._transref = weakref.ref(tr)
1431 scmutil.registersummarycallback(self, tr, desc)
1434 scmutil.registersummarycallback(self, tr, desc)
1432 return tr
1435 return tr
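# Sketch of the expected calling pattern ('my-operation' is a made-up
# name; assumes the lock and transaction objects support the
# context-manager protocol):
#
#     with repo.lock():
#         with repo.transaction('my-operation') as tr:
#             pass  # mutate the store; tr aborts on exception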
1433
1436
1434 def _journalfiles(self):
1437 def _journalfiles(self):
1435 return ((self.svfs, 'journal'),
1438 return ((self.svfs, 'journal'),
1436 (self.vfs, 'journal.dirstate'),
1439 (self.vfs, 'journal.dirstate'),
1437 (self.vfs, 'journal.branch'),
1440 (self.vfs, 'journal.branch'),
1438 (self.vfs, 'journal.desc'),
1441 (self.vfs, 'journal.desc'),
1439 (self.vfs, 'journal.bookmarks'),
1442 (self.vfs, 'journal.bookmarks'),
1440 (self.svfs, 'journal.phaseroots'))
1443 (self.svfs, 'journal.phaseroots'))
1441
1444
1442 def undofiles(self):
1445 def undofiles(self):
1443 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1446 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1444
1447
1445 @unfilteredmethod
1448 @unfilteredmethod
1446 def _writejournal(self, desc):
1449 def _writejournal(self, desc):
1447 self.dirstate.savebackup(None, 'journal.dirstate')
1450 self.dirstate.savebackup(None, 'journal.dirstate')
1448 self.vfs.write("journal.branch",
1451 self.vfs.write("journal.branch",
1449 encoding.fromlocal(self.dirstate.branch()))
1452 encoding.fromlocal(self.dirstate.branch()))
1450 self.vfs.write("journal.desc",
1453 self.vfs.write("journal.desc",
1451 "%d\n%s\n" % (len(self), desc))
1454 "%d\n%s\n" % (len(self), desc))
1452 self.vfs.write("journal.bookmarks",
1455 self.vfs.write("journal.bookmarks",
1453 self.vfs.tryread("bookmarks"))
1456 self.vfs.tryread("bookmarks"))
1454 self.svfs.write("journal.phaseroots",
1457 self.svfs.write("journal.phaseroots",
1455 self.svfs.tryread("phaseroots"))
1458 self.svfs.tryread("phaseroots"))
1456
1459
1457 def recover(self):
1460 def recover(self):
1458 with self.lock():
1461 with self.lock():
1459 if self.svfs.exists("journal"):
1462 if self.svfs.exists("journal"):
1460 self.ui.status(_("rolling back interrupted transaction\n"))
1463 self.ui.status(_("rolling back interrupted transaction\n"))
1461 vfsmap = {'': self.svfs,
1464 vfsmap = {'': self.svfs,
1462 'plain': self.vfs,}
1465 'plain': self.vfs,}
1463 transaction.rollback(self.svfs, vfsmap, "journal",
1466 transaction.rollback(self.svfs, vfsmap, "journal",
1464 self.ui.warn,
1467 self.ui.warn,
1465 checkambigfiles=_cachedfiles)
1468 checkambigfiles=_cachedfiles)
1466 self.invalidate()
1469 self.invalidate()
1467 return True
1470 return True
1468 else:
1471 else:
1469 self.ui.warn(_("no interrupted transaction available\n"))
1472 self.ui.warn(_("no interrupted transaction available\n"))
1470 return False
1473 return False
1471
1474
1472 def rollback(self, dryrun=False, force=False):
1475 def rollback(self, dryrun=False, force=False):
1473 wlock = lock = dsguard = None
1476 wlock = lock = dsguard = None
1474 try:
1477 try:
1475 wlock = self.wlock()
1478 wlock = self.wlock()
1476 lock = self.lock()
1479 lock = self.lock()
1477 if self.svfs.exists("undo"):
1480 if self.svfs.exists("undo"):
1478 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1481 dsguard = dirstateguard.dirstateguard(self, 'rollback')
1479
1482
1480 return self._rollback(dryrun, force, dsguard)
1483 return self._rollback(dryrun, force, dsguard)
1481 else:
1484 else:
1482 self.ui.warn(_("no rollback information available\n"))
1485 self.ui.warn(_("no rollback information available\n"))
1483 return 1
1486 return 1
1484 finally:
1487 finally:
1485 release(dsguard, lock, wlock)
1488 release(dsguard, lock, wlock)
1486
1489
1487 @unfilteredmethod # Until we get smarter cache management
1490 @unfilteredmethod # Until we get smarter cache management
1488 def _rollback(self, dryrun, force, dsguard):
1491 def _rollback(self, dryrun, force, dsguard):
1489 ui = self.ui
1492 ui = self.ui
1490 try:
1493 try:
1491 args = self.vfs.read('undo.desc').splitlines()
1494 args = self.vfs.read('undo.desc').splitlines()
1492 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1495 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1493 if len(args) >= 3:
1496 if len(args) >= 3:
1494 detail = args[2]
1497 detail = args[2]
1495 oldtip = oldlen - 1
1498 oldtip = oldlen - 1
1496
1499
1497 if detail and ui.verbose:
1500 if detail and ui.verbose:
1498 msg = (_('repository tip rolled back to revision %d'
1501 msg = (_('repository tip rolled back to revision %d'
1499 ' (undo %s: %s)\n')
1502 ' (undo %s: %s)\n')
1500 % (oldtip, desc, detail))
1503 % (oldtip, desc, detail))
1501 else:
1504 else:
1502 msg = (_('repository tip rolled back to revision %d'
1505 msg = (_('repository tip rolled back to revision %d'
1503 ' (undo %s)\n')
1506 ' (undo %s)\n')
1504 % (oldtip, desc))
1507 % (oldtip, desc))
1505 except IOError:
1508 except IOError:
1506 msg = _('rolling back unknown transaction\n')
1509 msg = _('rolling back unknown transaction\n')
1507 desc = None
1510 desc = None
1508
1511
1509 if not force and self['.'] != self['tip'] and desc == 'commit':
1512 if not force and self['.'] != self['tip'] and desc == 'commit':
1510 raise error.Abort(
1513 raise error.Abort(
1511 _('rollback of last commit while not checked out '
1514 _('rollback of last commit while not checked out '
1512 'may lose data'), hint=_('use -f to force'))
1515 'may lose data'), hint=_('use -f to force'))
1513
1516
1514 ui.status(msg)
1517 ui.status(msg)
1515 if dryrun:
1518 if dryrun:
1516 return 0
1519 return 0
1517
1520
1518 parents = self.dirstate.parents()
1521 parents = self.dirstate.parents()
1519 self.destroying()
1522 self.destroying()
1520 vfsmap = {'plain': self.vfs, '': self.svfs}
1523 vfsmap = {'plain': self.vfs, '': self.svfs}
1521 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1524 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
1522 checkambigfiles=_cachedfiles)
1525 checkambigfiles=_cachedfiles)
1523 if self.vfs.exists('undo.bookmarks'):
1526 if self.vfs.exists('undo.bookmarks'):
1524 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1527 self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
1525 if self.svfs.exists('undo.phaseroots'):
1528 if self.svfs.exists('undo.phaseroots'):
1526 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1529 self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
1527 self.invalidate()
1530 self.invalidate()
1528
1531
1529 parentgone = (parents[0] not in self.changelog.nodemap or
1532 parentgone = (parents[0] not in self.changelog.nodemap or
1530 parents[1] not in self.changelog.nodemap)
1533 parents[1] not in self.changelog.nodemap)
1531 if parentgone:
1534 if parentgone:
1532 # prevent dirstateguard from overwriting already restored one
1535 # prevent dirstateguard from overwriting already restored one
1533 dsguard.close()
1536 dsguard.close()
1534
1537
1535 self.dirstate.restorebackup(None, 'undo.dirstate')
1538 self.dirstate.restorebackup(None, 'undo.dirstate')
1536 try:
1539 try:
1537 branch = self.vfs.read('undo.branch')
1540 branch = self.vfs.read('undo.branch')
1538 self.dirstate.setbranch(encoding.tolocal(branch))
1541 self.dirstate.setbranch(encoding.tolocal(branch))
1539 except IOError:
1542 except IOError:
1540 ui.warn(_('named branch could not be reset: '
1543 ui.warn(_('named branch could not be reset: '
1541 'current branch is still \'%s\'\n')
1544 'current branch is still \'%s\'\n')
1542 % self.dirstate.branch())
1545 % self.dirstate.branch())
1543
1546
1544 parents = tuple([p.rev() for p in self[None].parents()])
1547 parents = tuple([p.rev() for p in self[None].parents()])
1545 if len(parents) > 1:
1548 if len(parents) > 1:
1546 ui.status(_('working directory now based on '
1549 ui.status(_('working directory now based on '
1547 'revisions %d and %d\n') % parents)
1550 'revisions %d and %d\n') % parents)
1548 else:
1551 else:
1549 ui.status(_('working directory now based on '
1552 ui.status(_('working directory now based on '
1550 'revision %d\n') % parents)
1553 'revision %d\n') % parents)
1551 mergemod.mergestate.clean(self, self['.'].node())
1554 mergemod.mergestate.clean(self, self['.'].node())
1552
1555
1553 # TODO: if we know which new heads may result from this rollback, pass
1556 # TODO: if we know which new heads may result from this rollback, pass
1554 # them to destroy(), which will prevent the branchhead cache from being
1557 # them to destroy(), which will prevent the branchhead cache from being
1555 # invalidated.
1558 # invalidated.
1556 self.destroyed()
1559 self.destroyed()
1557 return 0
1560 return 0
1558
1561
1559 def _buildcacheupdater(self, newtransaction):
1562 def _buildcacheupdater(self, newtransaction):
1560 """called during a transaction to build the cache-updating callback
1563 """called during a transaction to build the cache-updating callback
1561
1564
1562 Lives on the repository to help extensions that might want to augment
1565 Lives on the repository to help extensions that might want to augment
1563 this logic. For this purpose, the created transaction is passed to the
1566 this logic. For this purpose, the created transaction is passed to the
1564 method.
1567 method.
1565 """
1568 """
1566 # we must avoid cyclic reference between repo and transaction.
1569 # we must avoid cyclic reference between repo and transaction.
1567 reporef = weakref.ref(self)
1570 reporef = weakref.ref(self)
1568 def updater(tr):
1571 def updater(tr):
1569 repo = reporef()
1572 repo = reporef()
1570 repo.updatecaches(tr)
1573 repo.updatecaches(tr)
1571 return updater
1574 return updater
1572
1575
1573 @unfilteredmethod
1576 @unfilteredmethod
1574 def updatecaches(self, tr=None, full=False):
1577 def updatecaches(self, tr=None, full=False):
1575 """warm appropriate caches
1578 """warm appropriate caches
1576
1579
1577 If this function is called after a transaction has closed, the transaction
1580 If this function is called after a transaction has closed, the transaction
1578 will be available in the 'tr' argument. This can be used to selectively
1581 will be available in the 'tr' argument. This can be used to selectively
1579 update caches relevant to the changes in that transaction.
1582 update caches relevant to the changes in that transaction.
1580
1583
1581 If 'full' is set, make sure all caches the function knows about have
1584 If 'full' is set, make sure all caches the function knows about have
1582 up-to-date data. Even the ones usually loaded more lazily.
1585 up-to-date data. Even the ones usually loaded more lazily.
1583 """
1586 """
1584 if tr is not None and tr.hookargs.get('source') == 'strip':
1587 if tr is not None and tr.hookargs.get('source') == 'strip':
1585 # During strip, many caches are invalid but
1588 # During strip, many caches are invalid but
1586 # later call to `destroyed` will refresh them.
1589 # later call to `destroyed` will refresh them.
1587 return
1590 return
1588
1591
1589 if tr is None or tr.changes['revs']:
1592 if tr is None or tr.changes['revs']:
1590 # updating the unfiltered branchmap should refresh all the others,
1593 # updating the unfiltered branchmap should refresh all the others,
1591 self.ui.debug('updating the branch cache\n')
1594 self.ui.debug('updating the branch cache\n')
1592 branchmap.updatecache(self.filtered('served'))
1595 branchmap.updatecache(self.filtered('served'))
1593
1596
1594 if full:
1597 if full:
1595 rbc = self.revbranchcache()
1598 rbc = self.revbranchcache()
1596 for r in self.changelog:
1599 for r in self.changelog:
1597 rbc.branchinfo(r)
1600 rbc.branchinfo(r)
1598 rbc.write()
1601 rbc.write()
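# Sketch of a full warm-up, roughly what the debugupdatecaches
# command does (lock usage assumed):
#
#     with repo.wlock(), repo.lock():
#         repo.updatecaches(full=True)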
1599
1602
1600 def invalidatecaches(self):
1603 def invalidatecaches(self):
1601
1604
1602 if '_tagscache' in vars(self):
1605 if '_tagscache' in vars(self):
1603 # can't use delattr on proxy
1606 # can't use delattr on proxy
1604 del self.__dict__['_tagscache']
1607 del self.__dict__['_tagscache']
1605
1608
1606 self.unfiltered()._branchcaches.clear()
1609 self.unfiltered()._branchcaches.clear()
1607 self.invalidatevolatilesets()
1610 self.invalidatevolatilesets()
1608 self._sparsesignaturecache.clear()
1611 self._sparsesignaturecache.clear()
1609
1612
1610 def invalidatevolatilesets(self):
1613 def invalidatevolatilesets(self):
1611 self.filteredrevcache.clear()
1614 self.filteredrevcache.clear()
1612 obsolete.clearobscaches(self)
1615 obsolete.clearobscaches(self)
1613
1616
1614 def invalidatedirstate(self):
1617 def invalidatedirstate(self):
1615 '''Invalidates the dirstate, causing the next call to dirstate
1618 '''Invalidates the dirstate, causing the next call to dirstate
1616 to check if it was modified since the last time it was read,
1619 to check if it was modified since the last time it was read,
1617 rereading it if it has.
1620 rereading it if it has.
1618
1621
1619 This is different from dirstate.invalidate() in that it doesn't always
1622 This is different from dirstate.invalidate() in that it doesn't always
1620 reread the dirstate. Use dirstate.invalidate() if you want to
1623 reread the dirstate. Use dirstate.invalidate() if you want to
1621 explicitly read the dirstate again (i.e. restoring it to a previous
1624 explicitly read the dirstate again (i.e. restoring it to a previous
1622 known good state).'''
1625 known good state).'''
1623 if hasunfilteredcache(self, 'dirstate'):
1626 if hasunfilteredcache(self, 'dirstate'):
1624 for k in self.dirstate._filecache:
1627 for k in self.dirstate._filecache:
1625 try:
1628 try:
1626 delattr(self.dirstate, k)
1629 delattr(self.dirstate, k)
1627 except AttributeError:
1630 except AttributeError:
1628 pass
1631 pass
1629 delattr(self.unfiltered(), 'dirstate')
1632 delattr(self.unfiltered(), 'dirstate')
1630
1633
1631 def invalidate(self, clearfilecache=False):
1634 def invalidate(self, clearfilecache=False):
1632 '''Invalidates both store and non-store parts other than dirstate
1635 '''Invalidates both store and non-store parts other than dirstate
1633
1636
1634 If a transaction is running, invalidation of store is omitted,
1637 If a transaction is running, invalidation of store is omitted,
1635 because discarding in-memory changes might cause inconsistency
1638 because discarding in-memory changes might cause inconsistency
1636 (e.g. incomplete fncache causes unintentional failure, but
1639 (e.g. incomplete fncache causes unintentional failure, but
1637 redundant one doesn't).
1640 redundant one doesn't).
1638 '''
1641 '''
1639 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1642 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1640 for k in list(self._filecache.keys()):
1643 for k in list(self._filecache.keys()):
1641 # dirstate is invalidated separately in invalidatedirstate()
1644 # dirstate is invalidated separately in invalidatedirstate()
1642 if k == 'dirstate':
1645 if k == 'dirstate':
1643 continue
1646 continue
1644 if (k == 'changelog' and
1647 if (k == 'changelog' and
1645 self.currenttransaction() and
1648 self.currenttransaction() and
1646 self.changelog._delayed):
1649 self.changelog._delayed):
1647 # The changelog object may store unwritten revisions. We don't
1650 # The changelog object may store unwritten revisions. We don't
1648 # want to lose them.
1651 # want to lose them.
1649 # TODO: Solve the problem instead of working around it.
1652 # TODO: Solve the problem instead of working around it.
1650 continue
1653 continue
1651
1654
1652 if clearfilecache:
1655 if clearfilecache:
1653 del self._filecache[k]
1656 del self._filecache[k]
1654 try:
1657 try:
1655 delattr(unfiltered, k)
1658 delattr(unfiltered, k)
1656 except AttributeError:
1659 except AttributeError:
1657 pass
1660 pass
1658 self.invalidatecaches()
1661 self.invalidatecaches()
1659 if not self.currenttransaction():
1662 if not self.currenttransaction():
1660 # TODO: Changing contents of store outside transaction
1663 # TODO: Changing contents of store outside transaction
1661 # causes inconsistency. We should make in-memory store
1664 # causes inconsistency. We should make in-memory store
1662 # changes detectable, and abort if changed.
1665 # changes detectable, and abort if changed.
1663 self.store.invalidatecaches()
1666 self.store.invalidatecaches()
1664
1667
1665 def invalidateall(self):
1668 def invalidateall(self):
1666 '''Fully invalidates both store and non-store parts, causing the
1669 '''Fully invalidates both store and non-store parts, causing the
1667 subsequent operation to reread any outside changes.'''
1670 subsequent operation to reread any outside changes.'''
1668 # extension should hook this to invalidate its caches
1671 # extension should hook this to invalidate its caches
1669 self.invalidate()
1672 self.invalidate()
1670 self.invalidatedirstate()
1673 self.invalidatedirstate()
1671
1674
1672 @unfilteredmethod
1675 @unfilteredmethod
1673 def _refreshfilecachestats(self, tr):
1676 def _refreshfilecachestats(self, tr):
1674 """Reload stats of cached files so that they are flagged as valid"""
1677 """Reload stats of cached files so that they are flagged as valid"""
1675 for k, ce in self._filecache.items():
1678 for k, ce in self._filecache.items():
1676 k = pycompat.sysstr(k)
1679 k = pycompat.sysstr(k)
1677 if k == r'dirstate' or k not in self.__dict__:
1680 if k == r'dirstate' or k not in self.__dict__:
1678 continue
1681 continue
1679 ce.refresh()
1682 ce.refresh()
1680
1683
1681 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1684 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
1682 inheritchecker=None, parentenvvar=None):
1685 inheritchecker=None, parentenvvar=None):
1683 parentlock = None
1686 parentlock = None
1684 # the contents of parentenvvar are used by the underlying lock to
1687 # the contents of parentenvvar are used by the underlying lock to
1685 # determine whether it can be inherited
1688 # determine whether it can be inherited
1686 if parentenvvar is not None:
1689 if parentenvvar is not None:
1687 parentlock = encoding.environ.get(parentenvvar)
1690 parentlock = encoding.environ.get(parentenvvar)
1688
1691
1689 timeout = 0
1692 timeout = 0
1690 warntimeout = 0
1693 warntimeout = 0
1691 if wait:
1694 if wait:
1692 timeout = self.ui.configint("ui", "timeout")
1695 timeout = self.ui.configint("ui", "timeout")
1693 warntimeout = self.ui.configint("ui", "timeout.warn")
1696 warntimeout = self.ui.configint("ui", "timeout.warn")
1694
1697
1695 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1698 l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
1696 releasefn=releasefn,
1699 releasefn=releasefn,
1697 acquirefn=acquirefn, desc=desc,
1700 acquirefn=acquirefn, desc=desc,
1698 inheritchecker=inheritchecker,
1701 inheritchecker=inheritchecker,
1699 parentlock=parentlock)
1702 parentlock=parentlock)
1700 return l
1703 return l
1701
1704
1702 def _afterlock(self, callback):
1705 def _afterlock(self, callback):
1703 """add a callback to be run when the repository is fully unlocked
1706 """add a callback to be run when the repository is fully unlocked
1704
1707
1705 The callback will be executed when the outermost lock is released
1708 The callback will be executed when the outermost lock is released
1706 (with wlock being higher level than 'lock')."""
1709 (with wlock being higher level than 'lock')."""
1707 for ref in (self._wlockref, self._lockref):
1710 for ref in (self._wlockref, self._lockref):
1708 l = ref and ref()
1711 l = ref and ref()
1709 if l and l.held:
1712 if l and l.held:
1710 l.postrelease.append(callback)
1713 l.postrelease.append(callback)
1711 break
1714 break
1712 else: # no lock has been found.
1715 else: # no lock has been found.
1713 callback()
1716 callback()
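# Sketch (hypothetical callback): defer work until the outermost lock
# is released:
#
#     repo._afterlock(lambda: ui.status('all locks released\n'))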
1714
1717
1715 def lock(self, wait=True):
1718 def lock(self, wait=True):
1716 '''Lock the repository store (.hg/store) and return a weak reference
1719 '''Lock the repository store (.hg/store) and return a weak reference
1717 to the lock. Use this before modifying the store (e.g. committing or
1720 to the lock. Use this before modifying the store (e.g. committing or
1718 stripping). If you are opening a transaction, get a lock as well.
1721 stripping). If you are opening a transaction, get a lock as well.
1719
1722
1720 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1723 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1721 'wlock' first to avoid a dead-lock hazard.'''
1724 'wlock' first to avoid a dead-lock hazard.'''
1722 l = self._currentlock(self._lockref)
1725 l = self._currentlock(self._lockref)
1723 if l is not None:
1726 if l is not None:
1724 l.lock()
1727 l.lock()
1725 return l
1728 return l
1726
1729
1727 l = self._lock(self.svfs, "lock", wait, None,
1730 l = self._lock(self.svfs, "lock", wait, None,
1728 self.invalidate, _('repository %s') % self.origroot)
1731 self.invalidate, _('repository %s') % self.origroot)
1729 self._lockref = weakref.ref(l)
1732 self._lockref = weakref.ref(l)
1730 return l
1733 return l
1731
1734
1732 def _wlockchecktransaction(self):
1735 def _wlockchecktransaction(self):
1733 if self.currenttransaction() is not None:
1736 if self.currenttransaction() is not None:
1734 raise error.LockInheritanceContractViolation(
1737 raise error.LockInheritanceContractViolation(
1735 'wlock cannot be inherited in the middle of a transaction')
1738 'wlock cannot be inherited in the middle of a transaction')
1736
1739
1737 def wlock(self, wait=True):
1740 def wlock(self, wait=True):
1738 '''Lock the non-store parts of the repository (everything under
1741 '''Lock the non-store parts of the repository (everything under
1739 .hg except .hg/store) and return a weak reference to the lock.
1742 .hg except .hg/store) and return a weak reference to the lock.
1740
1743
1741 Use this before modifying files in .hg.
1744 Use this before modifying files in .hg.
1742
1745
1743 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1746 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1744 'wlock' first to avoid a dead-lock hazard.'''
1747 'wlock' first to avoid a dead-lock hazard.'''
1745 l = self._wlockref and self._wlockref()
1748 l = self._wlockref and self._wlockref()
1746 if l is not None and l.held:
1749 if l is not None and l.held:
1747 l.lock()
1750 l.lock()
1748 return l
1751 return l
1749
1752
1750 # We do not need to check for non-waiting lock acquisition. Such
1753 # We do not need to check for non-waiting lock acquisition. Such
1751 # acquisition would not cause dead-lock as they would just fail.
1754 # acquisition would not cause dead-lock as they would just fail.
1752 if wait and (self.ui.configbool('devel', 'all-warnings')
1755 if wait and (self.ui.configbool('devel', 'all-warnings')
1753 or self.ui.configbool('devel', 'check-locks')):
1756 or self.ui.configbool('devel', 'check-locks')):
1754 if self._currentlock(self._lockref) is not None:
1757 if self._currentlock(self._lockref) is not None:
1755 self.ui.develwarn('"wlock" acquired after "lock"')
1758 self.ui.develwarn('"wlock" acquired after "lock"')
1756
1759
1757 def unlock():
1760 def unlock():
1758 if self.dirstate.pendingparentchange():
1761 if self.dirstate.pendingparentchange():
1759 self.dirstate.invalidate()
1762 self.dirstate.invalidate()
1760 else:
1763 else:
1761 self.dirstate.write(None)
1764 self.dirstate.write(None)
1762
1765
1763 self._filecache['dirstate'].refresh()
1766 self._filecache['dirstate'].refresh()
1764
1767
1765 l = self._lock(self.vfs, "wlock", wait, unlock,
1768 l = self._lock(self.vfs, "wlock", wait, unlock,
1766 self.invalidatedirstate, _('working directory of %s') %
1769 self.invalidatedirstate, _('working directory of %s') %
1767 self.origroot,
1770 self.origroot,
1768 inheritchecker=self._wlockchecktransaction,
1771 inheritchecker=self._wlockchecktransaction,
1769 parentenvvar='HG_WLOCK_LOCKER')
1772 parentenvvar='HG_WLOCK_LOCKER')
1770 self._wlockref = weakref.ref(l)
1773 self._wlockref = weakref.ref(l)
1771 return l
1774 return l
1772
1775
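    # A sketch of the acquisition order required by the docstrings above
    # (hypothetical caller code): take wlock before lock, release in reverse.
    #
    #   wlock = lock = None
    #   try:
    #       wlock = repo.wlock()   # working-directory lock first
    #       lock = repo.lock()     # then the store lock
    #       # ... modify the working directory and the store ...
    #   finally:
    #       lockmod.release(lock, wlock)  # release in reverse order
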
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (e.g. issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

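    # What the copy metadata recorded by _filecommit looks like for a rename
    # of 'foo' to 'bar' (the copyrev hash is invented for illustration):
    #
    #   meta = {"copy": "foo", "copyrev": "1f0dee641bb7258c56bd60e93edfa2405381c41e"}
    #
    # with fparent1 forced to nullid so readers know to look up the copy data.
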
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (e.g. histedit):
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

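    # A caller-side sketch (hypothetical message and user); commit() takes
    # its own locks and returns None when an empty commit is not allowed:
    #
    #   node = repo.commit(text='fix frobnicator',
    #                      user='alice <alice@example.com>')
    #   if node is None:
    #       repo.ui.status('nothing changed\n')
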
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to the current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets:
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

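    # A sketch of driving commitctx directly with an in-memory context.
    # memctx and memfilectx are real APIs, but their exact signatures vary
    # across Mercurial versions; the file name and content are invented.
    #
    #   def getfilectx(repo, memctx, path):
    #       return context.memfilectx(repo, memctx, path, 'new content\n')
    #
    #   mctx = context.memctx(repo, (repo['.'].node(), nullid),
    #                         'example commit', ['a.txt'], getfilectx)
    #   node = repo.commitctx(mctx)
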
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated, causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the wlock
        couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

    def postdsstatus(self):
        """Used by workingctx to get the list of post-dirstate-status hooks."""
        return self._postdsstatus

    def clearpostdsstatus(self):
        """Used by workingctx to clear post-dirstate-status hooks."""
        del self._postdsstatus[:]

    def heads(self, start=None):
        if start is None:
            cl = self.changelog
            headrevs = reversed(cl.headrevs())
            return [cl.node(rev) for rev in headrevs]

        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches.branchheads(branch, closed=closed)))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        return bheads

    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

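    # between() samples the first-parent chain at exponentially growing
    # offsets (1, 2, 4, 8, ...) from 'top'. The same index pattern over a
    # plain list, assuming a linear history:
    #
    #   chain = list(range(15, 0, -1))  # stand-in for nodes, newest first
    #   sampled, f = [], 1
    #   for i, n in enumerate(chain):
    #       if i == f:
    #           sampled.append(n)
    #           f *= 2
    #   # sampled == [14, 13, 11, 7], i.e. offsets 1, 2, 4 and 8
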
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance whose functions are called with a
        pushop (providing repo, remote, and outgoing) before changesets
        are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                      ret=ret)
        self._afterlock(runhook)
        return ret

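    # A usage sketch (hypothetical bookmark name and 'newnode'): moving a
    # bookmark through the pushkey mechanism, as remote peers do during push.
    #
    #   ok = repo.pushkey('bookmarks', 'feature-x', '', hex(newnode))
    #   if not ok:
    #       repo.ui.warn('bookmark update was refused by a hook\n')
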
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
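
# A sketch of how an extension might wrap newreporequirements to add a custom
# requirement (the extension body and the requirement name are invented):
#
#   from mercurial import extensions, localrepo
#
#   def _addmyreq(orig, repo):
#       reqs = orig(repo)
#       reqs.add('exp-my-custom-requirement')
#       return reqs
#
#   def uisetup(ui):
#       extensions.wrapfunction(localrepo, 'newreporequirements', _addmyreq)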
@@ -1,989 +1,995 b''
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    error,
)

class ipeerconnection(zi.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = zi.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(zi.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if capability not supported.

        Returns ``True`` if boolean capability is supported. Returns a string
        if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(zi.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    The methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

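    # This command is the subject of the commit shown here: exchange code now
    # invokes it through the command executor rather than a raw wire call. A
    # sketch, assuming the peer exposes a ``commandexecutor()`` context
    # manager as described by ipeercommandexecutor below:
    #
    #   with remote.commandexecutor() as e:
    #       manifest = e.callcommand('clonebundles', {}).result()
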
    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        A successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

class ipeerlegacycommands(zi.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(zi.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command cannot coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

230 def sendcommands():
236 def sendcommands():
231 """Trigger submission of queued command requests.
237 """Trigger submission of queued command requests.
232
238
233 Not all transports submit commands as soon as they are requested to
239 Not all transports submit commands as soon as they are requested to
234 run. When called, this method forces queued command requests to be
240 run. When called, this method forces queued command requests to be
235 issued. It will no-op if all commands have already been sent.
241 issued. It will no-op if all commands have already been sent.
236
242
237 When called, no more new commands may be issued with this executor.
243 When called, no more new commands may be issued with this executor.
238 """
244 """
239
245
240 def close():
246 def close():
241 """Signal that this command request is finished.
247 """Signal that this command request is finished.
242
248
243 When called, no more new commands may be issued. All outstanding
249 When called, no more new commands may be issued. All outstanding
244 commands that have previously been issued are waited on before
250 commands that have previously been issued are waited on before
245 returning. This not only includes waiting for the futures to resolve,
251 returning. This not only includes waiting for the futures to resolve,
246 but also waiting for all response data to arrive. In other words,
252 but also waiting for all response data to arrive. In other words,
247 calling this waits for all on-wire state for issued command requests
253 calling this waits for all on-wire state for issued command requests
248 to finish.
254 to finish.
249
255
250 When used as a context manager, this method is called when exiting the
256 When used as a context manager, this method is called when exiting the
251 context manager.
257 context manager.
252
258
253 This method may call ``sendcommands()`` if there are buffered commands.
259 This method may call ``sendcommands()`` if there are buffered commands.
254 """
260 """
255
261
256 class ipeerrequests(zi.Interface):
262 class ipeerrequests(zi.Interface):
257 """Interface for executing commands on a peer."""
263 """Interface for executing commands on a peer."""
258
264
259 def commandexecutor():
265 def commandexecutor():
260 """A context manager that resolves to an ipeercommandexecutor.
266 """A context manager that resolves to an ipeercommandexecutor.
261
267
262 The object this resolves to can be used to issue command requests
268 The object this resolves to can be used to issue command requests
263 to the peer.
269 to the peer.
264
270
265 Callers should call its ``callcommand`` method to issue command
271 Callers should call its ``callcommand`` method to issue command
266 requests.
272 requests.
267
273
268 A new executor should be obtained for each distinct set of commands
274 A new executor should be obtained for each distinct set of commands
269 (possibly just a single command) that the consumer wants to execute
275 (possibly just a single command) that the consumer wants to execute
270 as part of a single operation or round trip. This is because some
276 as part of a single operation or round trip. This is because some
271 peers are half-duplex and/or don't support persistent connections.
277 peers are half-duplex and/or don't support persistent connections.
272 e.g. in the case of HTTP peers, commands sent to an executor represent
278 e.g. in the case of HTTP peers, commands sent to an executor represent
273 a single HTTP request. While some peers may support multiple command
279 a single HTTP request. While some peers may support multiple command
274 sends over the wire per executor, consumers need to code to the least
280 sends over the wire per executor, consumers need to code to the least
275 capable peer. So it should be assumed that command executors buffer
281 capable peer. So it should be assumed that command executors buffer
276 called commands until they are told to send them and that each
282 called commands until they are told to send them and that each
277 command executor could result in a new connection or wire-level request
283 command executor could result in a new connection or wire-level request
278 being issued.
284 being issued.
279 """
285 """
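
# An illustrative sketch of the executor contract described above: two
# batchable commands issued in one round trip, with result() only
# called after all commands are issued. ``peer`` is an assumed
# ipeerrequests provider; 'heads' and 'known' are wire protocol
# commands.
def _exampleheadsandknown(peer, nodes):
    with peer.commandexecutor() as e:
        fheads = e.callcommand('heads', {})
        fknown = e.callcommand('known', {'nodes': nodes})

    # Exiting the context manager waited on all outstanding requests,
    # so both futures have resolved by now.
    return fheads.result(), fknown.result()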

class ipeerbase(ipeerconnection, ipeercapabilities, ipeercommands,
                ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

@zi.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        # A capability is either a bare flag or a ``name=value`` pair.
        # Return True for a flag, the value for a pair, and False when
        # the capability is absent.
        caps = self.capabilities()
        if name in caps:
            return True

        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        # Raise a user-facing error when a required capability is missing.
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))
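
# A small sketch of the capable() contract implemented above: a bare
# capability yields True, a 'name=value' capability yields the value
# after '=', and a missing capability yields False. The 'unbundle'
# capability name is a real wire protocol example.
def _examplenegotiate(somepeer):
    unbundle = somepeer.capable('unbundle')
    if unbundle is False:
        raise error.CapabilityError(_('peer cannot receive pushes'))
    # For parameterized capabilities the return value is the accepted
    # value string (e.g. supported bundle format names).
    return unbundle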

class ifilerevisionssequence(zi.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where the first 6 bytes are the offset and the next 2
           bytes are flags. The offset can be 0 if it is not used by the
           store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of the revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of the changelog revision this entry is related
           to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

    def __contains__(rev):
        """Whether a revision number exists."""

    def insert(self, i, entry):
        """Add an item to the index at a specific revision."""
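
# A sketch decoding the 8-tuple documented in
# ifilerevisionssequence.__getitem__ above. The field packing follows
# revlog convention: offset in the upper 48 bits, flags in the lower
# 16 bits.
def _exampleparseindexentry(entry):
    offsetflags, compsize, rawsize, baserev, linkrev, p1rev, p2rev, node = entry
    offset = offsetflags >> 16
    flags = offsetflags & 0xFFFF
    return (offset, flags, compsize, rawsize, baserev, linkrev, p1rev,
            p2rev, node)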

class ifileindex(zi.Interface):
    """Storage interface for index data of a single file.

    File storage data is divided into index metadata and data storage.
    This interface defines the index portion of the interface.

    The index logically consists of:

    * A mapping between revision numbers and nodes.
    * DAG data (storing and querying the relationship between nodes).
    * Metadata to facilitate storage.
    """
    index = zi.Attribute(
        """An ``ifilerevisionssequence`` instance.""")

    def __len__():
        """Obtain the number of revisions stored for this file."""

    def __iter__():
        """Iterate over revision numbers for this file."""

    def revs(start=0, stop=None):
        """Iterate over revision numbers for this file, with control."""

    def parents(node):
        """Returns a 2-tuple of parent nodes for a revision.

        Values will be ``nullid`` if the parent is empty.
        """

    def parentrevs(rev):
        """Like parents() but operates on revision numbers."""

    def rev(node):
        """Obtain the revision number given a node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def node(rev):
        """Obtain the node value given a revision number.

        Raises ``IndexError`` if the node is not known.
        """

    def lookup(node):
        """Attempt to resolve a value to a node.

        Value can be a binary node, hex node, revision number, or a string
        that can be converted to an integer.

        Raises ``error.LookupError`` if a node could not be resolved.
        """

    def linkrev(rev):
        """Obtain the changeset revision number a revision is linked to."""

    def flags(rev):
        """Obtain flags used to affect storage of a revision."""

    def iscensored(rev):
        """Return whether a revision's content has been censored."""

    def commonancestorsheads(node1, node2):
        """Obtain an iterable of nodes containing heads of common ancestors.

        See ``ancestor.commonancestorsheads()``.
        """

    def descendants(revs):
        """Obtain descendant revision numbers for a set of revision numbers.

        If ``nullrev`` is in the set, this is equivalent to ``revs()``.
        """

    def headrevs():
        """Obtain a list of revision numbers that are DAG heads.

        The list is sorted oldest to newest.

        TODO determine if sorting is required.
        """

    def heads(start=None, stop=None):
        """Obtain a list of nodes that are DAG heads, with control.

        The set of revisions examined can be limited by specifying
        ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
        iterable of nodes. DAG traversal starts at earlier revision
        ``start`` and iterates forward until any node in ``stop`` is
        encountered.
        """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""

    def candelta(baserev, rev):
        """Whether a delta can be generated between two revisions."""
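
# A sketch walking the file DAG with the index primitives above.
# ``fl`` is an assumed ifileindex provider; -1 is the null revision.
def _exampleancestorrevs(fl, node):
    seen = set()
    stack = [fl.rev(node)]
    while stack:
        rev = stack.pop()
        for prev in fl.parentrevs(rev):
            if prev != -1 and prev not in seen:
                seen.add(prev)
                stack.append(prev)
    return sorted(seen)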

class ifiledata(zi.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.RevlogError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """
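
# A sketch of consuming the renamed() return value documented above:
# either False or a (path, node) 2-tuple naming the copy source.
# ``fl`` is an assumed ifiledata provider.
def _examplecopysource(fl, node):
    meta = fl.renamed(node)
    if not meta:
        return None
    path, srcnode = meta
    return path, srcnode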

class ifilemutation(zi.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the
        list even if it existed in the store previously.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """
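
# A sketch of the documented relationship between getstrippoint() and
# strip(): storage for every revision at or after the computed strip
# point is removed. ``fl`` and ``tr`` are assumed placeholders.
def _examplestripfromlink(fl, tr, minlink):
    striprev, brokenrevs = fl.getstrippoint(minlink)
    if striprev < len(fl):
        fl.strip(minlink, tr)
    return brokenrevs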

class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    version = zi.Attribute(
        """Version number of storage.

        TODO this feels revlog centric and could likely be removed.
        """)

    storedeltachains = zi.Attribute(
        """Whether the store stores deltas.

        TODO deltachains are revlog centric. This can probably be removed
        once there are better abstractions for obtaining/writing
        data.
        """)

    _generaldelta = zi.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the
        interface.
        """

class completelocalrepository(zi.Interface):
    """Monolithic interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = zi.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    openerreqs = zi.Attribute(
        """Set of requirements that are passed to the opener.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = zi.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = zi.Attribute(
        """Set of requirements this repo uses.""")

    filtername = zi.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = zi.Attribute(
        """VFS used to access the working directory.""")

    vfs = zi.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = zi.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = zi.Attribute(
        """Path to the root of the working directory.""")

    path = zi.Attribute(
        """Path to the .hg directory.""")

    origroot = zi.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = zi.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = zi.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = zi.Attribute(
        """Original ui instance passed into constructor.""")

    ui = zi.Attribute(
        """Main ui instance for this repository.""")

    sharedpath = zi.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = zi.Attribute(
        """A store instance.""")

    spath = zi.Attribute(
        """Path to the store.""")

    sjoin = zi.Attribute(
        """Alias to self.store.join.""")

    cachevfs = zi.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = zi.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = zi.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = zi.Attribute(
        """A store of obsolescence data.""")

    changelog = zi.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = zi.Attribute(
        """A handle on the root manifest revlog.""")

    dirstate = zi.Attribute(
        """Working directory state.""")

    narrowpats = zi.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def file(f):
        """Obtain a filelog for a tracked path."""

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = zi.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass
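
# Two sketches of typical consumer flows against this interface. The
# revset expression is standard revset syntax; acquiring wlock before
# lock follows the Mercurial lock-ordering convention, and using the
# transaction as a context manager is assumed to be supported.
def _exampledraftheads(repo):
    return list(repo.revs('heads(draft())'))

def _examplelockedwrite(repo):
    with repo.wlock(), repo.lock():
        with repo.transaction('example') as tr:
            # Mutate the store under ``tr`` here.
            pass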
@@ -1,615 +1,619 b''
# wireprotov1peer.py - Client-side functionality for wire protocol version 1.
#
# Copyright 2005-2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import hashlib
import sys
import weakref

from .i18n import _
from .node import (
    bin,
)
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    bundle2,
    changegroup as changegroupmod,
    encoding,
    error,
    pushkey as pushkeymod,
    pycompat,
    repository,
    util,
    wireprototypes,
)

urlreq = util.urlreq

def batchable(f):
    '''annotation for batchable methods

    Such methods must implement a coroutine as follows:

    @batchable
    def sample(self, one, two=None):
        # Build list of encoded arguments suitable for your wire protocol:
        encargs = [('one', encode(one),), ('two', encode(two),)]
        # Create future for injection of encoded result:
        encresref = future()
        # Return encoded arguments and future:
        yield encargs, encresref
        # Assuming the future to be filled with the result from the batched
        # request now. Decode it:
        yield decode(encresref.value)

    The decorator returns a function which wraps this coroutine as a plain
    method, but adds the original method as an attribute called "batchable",
    which is used by remotebatch to split the call into separate encoding and
    decoding phases.
    '''
    def plain(*args, **opts):
        batchable = f(*args, **opts)
        encargsorres, encresref = next(batchable)
        if not encresref:
            return encargsorres # a local result in this case
        self = args[0]
        cmd = pycompat.bytesurl(f.__name__) # ensure cmd is ascii bytestr
        encresref.set(self._submitone(cmd, encargsorres))
        return next(batchable)
    setattr(plain, 'batchable', f)
    return plain

class future(object):
    '''placeholder for a value to be set later'''
    def set(self, value):
        if util.safehasattr(self, 'value'):
            raise error.RepoError("future is already set")
        self.value = value
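
# A hedged sketch of a concrete batchable method in the style the
# batchable() docstring describes. The response format shown
# ("1 <hexnode>" on success) is simplified; real peers provide
# ``_submitone()`` and their own wire encoding.
@batchable
def _examplelookup(self, key):
    f = future()
    yield {'key': encoding.fromlocal(key)}, f
    success, data = f.value[:-1].split(' ', 1)
    if int(success):
        yield bin(data)
    raise error.RepoError(data)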

def encodebatchcmds(req):
    """Return a ``cmds`` argument value for the ``batch`` command."""
    escapearg = wireprototypes.escapebatcharg

    cmds = []
    for op, argsdict in req:
        # Old servers didn't properly unescape argument names. So prevent
        # the sending of argument names that may not be decoded properly by
        # servers.
        assert all(escapearg(k) == k for k in argsdict)

        args = ','.join('%s=%s' % (escapearg(k), escapearg(v))
                        for k, v in argsdict.iteritems())
        cmds.append('%s %s' % (op, args))

    return ';'.join(cmds)
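
# A quick illustration of the encoding above: two commands collapse
# into a single ``cmds`` value for the ``batch`` wire command (values
# chosen so that no escaping is needed).
def _exampleencodebatch():
    req = [('heads', {}), ('known', {'nodes': 'abc123'})]
    return encodebatchcmds(req)  # -> 'heads ;known nodes=abc123'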

class unsentfuture(pycompat.futures.Future):
    """A Future variation to represent an unsent command.

    Because we buffer commands and don't submit them immediately, calling
    ``result()`` on an unsent future could deadlock. Futures for buffered
    commands are represented by this type, which wraps ``result()`` to
    call ``sendcommands()``.
    """

    def result(self, timeout=None):
        if self.done():
            return pycompat.futures.Future.result(self, timeout)

        self._peerexecutor.sendcommands()

        # This looks like it will infinitely recurse. However,
        # sendcommands() should modify __class__. This call serves as a
        # check on that.
        return self.result(timeout)
112
112
@zi.implementer(repository.ipeercommandexecutor)
class peerexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False
        self._calls = []
        self._futures = weakref.WeakSet()
        self._responseexecutor = None
        self._responsef = None

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used '
                                         'after commands are sent')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used '
                                         'after close()')

        # Commands are dispatched through methods on the peer.
        fn = getattr(self._peer, pycompat.sysstr(command), None)

        if not fn:
            raise error.ProgrammingError(
                'cannot call command %s: method of same name not available '
                'on peer' % command)

        # Commands are either batchable or they aren't. If a command
        # isn't batchable, we send it immediately because the executor
        # can no longer accept new commands after a non-batchable command.
        # If a command is batchable, we queue it for later. But we have
        # to account for the case of a non-batchable command arriving after
        # a batchable one and refuse to service it.

        def addcall():
            f = pycompat.futures.Future()
            self._futures.add(f)
            self._calls.append((command, args, fn, f))
            return f

        if getattr(fn, 'batchable', False):
            f = addcall()

            # But since we don't issue it immediately, we wrap its result()
            # to trigger sending so we avoid deadlocks.
            f.__class__ = unsentfuture
            f._peerexecutor = self
        else:
            if self._calls:
                raise error.ProgrammingError(
                    '%s is not batchable and cannot be called on a command '
                    'executor along with other commands' % command)

            f = addcall()

            # Non-batchable commands can never coexist with another command
            # in this executor. So send the command immediately.
            self.sendcommands()

        return f

    def sendcommands(self):
        if self._sent:
            return

        if not self._calls:
            return

        self._sent = True

        # Unhack any future types so the caller sees a clean type and to
        # break the cycle between us and the futures.
        for f in self._futures:
            if isinstance(f, unsentfuture):
                f.__class__ = pycompat.futures.Future
                f._peerexecutor = None

        calls = self._calls
        # Mainly to destroy references to futures.
        self._calls = None

        # Simple case of a single command. We call it synchronously.
        if len(calls) == 1:
            command, args, fn, f = calls[0]

            # Future was cancelled. Ignore it.
            if not f.set_running_or_notify_cancel():
                return

            try:
                result = fn(**pycompat.strkwargs(args))
            except Exception:
                f.set_exception_info(*sys.exc_info()[1:])
            else:
                f.set_result(result)

            return

        # Batch commands are a bit harder. First, we have to deal with the
        # @batchable coroutine. That's a bit annoying. Furthermore, we also
        # need to preserve streaming. i.e. it should be possible for the
        # futures to resolve as data is coming in off the wire without having
        # to wait for the final byte of the final response. We do this by
        # spinning up a thread to read the responses.

        requests = []
        states = []

        for command, args, fn, f in calls:
            # Future was cancelled. Ignore it.
            if not f.set_running_or_notify_cancel():
                continue

            try:
                batchable = fn.batchable(fn.__self__,
                                         **pycompat.strkwargs(args))
            except Exception:
                f.set_exception_info(*sys.exc_info()[1:])
                return

            # Encoded arguments and future holding remote result.
            try:
                encodedargs, fremote = next(batchable)
            except Exception:
                f.set_exception_info(*sys.exc_info()[1:])
                return

            requests.append((command, encodedargs))
            states.append((command, f, batchable, fremote))

        if not requests:
            return

        # This will emit responses in the order they were executed.
        wireresults = self._peer._submitbatch(requests)

        # The use of a thread pool executor here is a bit weird for something
        # that only spins up a single thread. However, thread management is
        # hard and it is easy to encounter race conditions, deadlocks, etc.
        # concurrent.futures already solves these problems and its thread pool
        # executor has minimal overhead. So we use it.
        self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
        self._responsef = self._responseexecutor.submit(self._readbatchresponse,
                                                        states, wireresults)

    def close(self):
        self.sendcommands()

        if self._closed:
            return

        self._closed = True

        if not self._responsef:
            return

        # We need to wait on our in-flight response and then shut down the
        # executor once we have a result.
        try:
            self._responsef.result()
        finally:
            self._responseexecutor.shutdown(wait=True)
            self._responsef = None
            self._responseexecutor = None

            # If any of our futures are still in progress, mark them as
            # errored. Otherwise a result() could wait indefinitely.
            for f in self._futures:
                if not f.done():
                    f.set_exception(error.ResponseError(
                        _('unfulfilled batch command response')))

            self._futures = None

    def _readbatchresponse(self, states, wireresults):
        # Executes in a thread to read data off the wire.

        for command, f, batchable, fremote in states:
            # Grab raw result off the wire and teach the internal future
            # about it.
            remoteresult = next(wireresults)
            fremote.set(remoteresult)

            # And ask the coroutine to decode that value.
            try:
                result = next(batchable)
            except Exception:
                f.set_exception_info(*sys.exc_info()[1:])
            else:
                f.set_result(result)

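# A sketch of the @batchable coroutine handshake driven by sendcommands()
# and _readbatchresponse() above (names here are illustrative; the decorated
# methods on wirepeer below are real instances):
#
#   @batchable
#   def examplecommand(self, arg):
#       f = future()
#       yield {'arg': encoding.fromlocal(arg)}, f  # first next(): wire args
#       yield decodevalue(f.value)                 # second next(): result
#
# sendcommands() performs the first next() to collect the encoded arguments,
# then _readbatchresponse() sets f.value from the wire response and performs
# the second next() to obtain the decoded value (decodevalue() is a
# stand-in, not a real helper).
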
@zi.implementer(repository.ipeerlegacycommands)
class wirepeer(repository.peer):
    """Client-side interface for communicating with a peer repository.

    Methods commonly call wire protocol commands of the same name.

    See also httppeer.py and sshpeer.py for protocol-specific
    implementations of this interface.
    """
    def commandexecutor(self):
        return peerexecutor(self)

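    # Minimal usage sketch (illustrative, not part of the original module):
    # issuing two batchable commands over a single round trip via the
    # executor returned above:
    #
    #   with peer.commandexecutor() as e:
    #       fheads = e.callcommand('heads', {})
    #       fknown = e.callcommand('known', {'nodes': [nullid]})
    #
    #   heads = fheads.result()
    #   known = fknown.result()
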
    # Begin of ipeercommands interface.

    def clonebundles(self):
        self.requirecap('clonebundles', _('clone bundles'))
        return self._call('clonebundles')

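    # The raw response above is the clone bundles manifest: one entry per
    # line, a URL optionally followed by space-separated key=value
    # attributes, e.g. (illustrative values):
    #
    #   https://example.com/bundle.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
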
    @batchable
    def lookup(self, key):
        self.requirecap('lookup', _('look up remote revision'))
        f = future()
        yield {'key': encoding.fromlocal(key)}, f
        d = f.value
        success, data = d[:-1].split(" ", 1)
        if int(success):
            yield bin(data)
        else:
            self._abort(error.RepoError(data))

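    # Wire format inferred from the parsing above (illustrative): the server
    # answers "1 <hex node>\n" on success or "0 <error message>\n" on
    # failure.
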
    @batchable
    def heads(self):
        f = future()
        yield {}, f
        d = f.value
        try:
            yield wireprototypes.decodelist(d[:-1])
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def known(self, nodes):
        f = future()
        yield {'nodes': wireprototypes.encodelist(nodes)}, f
        d = f.value
        try:
            yield [bool(int(b)) for b in d]
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def branchmap(self):
        f = future()
        yield {}, f
        d = f.value
        try:
            branchmap = {}
            for branchpart in d.splitlines():
                branchname, branchheads = branchpart.split(' ', 1)
                branchname = encoding.tolocal(urlreq.unquote(branchname))
                branchheads = wireprototypes.decodelist(branchheads)
                branchmap[branchname] = branchheads
            yield branchmap
        except TypeError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    @batchable
    def listkeys(self, namespace):
        if not self.capable('pushkey'):
            yield {}, None
        f = future()
        self.ui.debug('preparing listkeys for "%s"\n' % namespace)
        yield {'namespace': encoding.fromlocal(namespace)}, f
        d = f.value
        self.ui.debug('received listkey for "%s": %i bytes\n'
                      % (namespace, len(d)))
        yield pushkeymod.decodekeys(d)

    @batchable
    def pushkey(self, namespace, key, old, new):
        if not self.capable('pushkey'):
            yield False, None
        f = future()
        self.ui.debug('preparing pushkey for "%s:%s"\n' % (namespace, key))
        yield {'namespace': encoding.fromlocal(namespace),
               'key': encoding.fromlocal(key),
               'old': encoding.fromlocal(old),
               'new': encoding.fromlocal(new)}, f
        d = f.value
        d, output = d.split('\n', 1)
        try:
            d = bool(int(d))
        except ValueError:
            raise error.ResponseError(
                _('push failed (unexpected response):'), d)
        for l in output.splitlines(True):
            self.ui.status(_('remote: '), l)
        yield d

    def stream_out(self):
        return self._callstream('stream_out')

    def getbundle(self, source, **kwargs):
        kwargs = pycompat.byteskwargs(kwargs)
        self.requirecap('getbundle', _('look up remote changes'))
        opts = {}
        bundlecaps = kwargs.get('bundlecaps') or set()
        for key, value in kwargs.iteritems():
            if value is None:
                continue
            keytype = wireprototypes.GETBUNDLE_ARGUMENTS.get(key)
            if keytype is None:
                raise error.ProgrammingError(
                    'Unexpectedly None keytype for key %s' % key)
            elif keytype == 'nodes':
                value = wireprototypes.encodelist(value)
            elif keytype == 'csv':
                value = ','.join(value)
            elif keytype == 'scsv':
                value = ','.join(sorted(value))
            elif keytype == 'boolean':
                value = '%i' % bool(value)
            elif keytype != 'plain':
                raise KeyError('unknown getbundle option type %s'
                               % keytype)
            opts[key] = value
        f = self._callcompressable("getbundle", **pycompat.strkwargs(opts))
        if any((cap.startswith('HG2') for cap in bundlecaps)):
            return bundle2.getunbundler(self.ui, f)
        else:
            return changegroupmod.cg1unpacker(f, 'UN')

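    # Illustrative encoding of the options above (example values assumed):
    # 'nodes' arguments become space-separated hex via encodelist(),
    # 'csv'/'scsv' become comma-joined (sorted for 'scsv'), and 'boolean'
    # becomes '0' or '1'. So heads=[somenode] plus a true boolean flag would
    # be sent roughly as:
    #
    #   heads=<40-char hex>  <flag>=1
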
    def unbundle(self, bundle, heads, url):
        '''Send bundle (a readable file-like object representing the
        changegroup to push, typically a chunkbuffer object) to the
        remote server.

        When pushing a bundle10 stream, return an integer indicating the
        result of the push (see changegroup.apply()).

        When pushing a bundle20 stream, return a bundle20 stream.

        `url` is the url the client thinks it's pushing to, which is
        visible to hooks.
        '''

        if heads != ['force'] and self.capable('unbundlehash'):
            heads = wireprototypes.encodelist(
                ['hashed', hashlib.sha1(''.join(sorted(heads))).digest()])
        else:
            heads = wireprototypes.encodelist(heads)

        if util.safehasattr(bundle, 'deltaheader'):
            # this is a bundle10; do the old style call sequence
            ret, output = self._callpush("unbundle", bundle, heads=heads)
            if ret == "":
                raise error.ResponseError(
                    _('push failed:'), output)
            try:
                ret = int(ret)
            except ValueError:
                raise error.ResponseError(
                    _('push failed (unexpected response):'), ret)

            for l in output.splitlines(True):
                self.ui.status(_('remote: '), l)
        else:
            # bundle2 push. Send a stream, fetch a stream.
            stream = self._calltwowaystream('unbundle', bundle, heads=heads)
            ret = bundle2.getunbundler(self.ui, stream)
        return ret

    # End of ipeercommands interface.

    # Begin of ipeerlegacycommands interface.

    def branches(self, nodes):
        n = wireprototypes.encodelist(nodes)
        d = self._call("branches", nodes=n)
        try:
            br = [tuple(wireprototypes.decodelist(b)) for b in d.splitlines()]
            return br
        except ValueError:
            self._abort(error.ResponseError(_("unexpected response:"), d))

    def between(self, pairs):
        batch = 8 # avoid giant requests
        r = []
        for i in xrange(0, len(pairs), batch):
            n = " ".join([wireprototypes.encodelist(p, '-')
                          for p in pairs[i:i + batch]])
            d = self._call("between", pairs=n)
            try:
                r.extend(l and wireprototypes.decodelist(l) or []
                         for l in d.splitlines())
            except ValueError:
                self._abort(error.ResponseError(_("unexpected response:"), d))
        return r

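    # Illustrative request encoding (inferred from the code above): each
    # pair is encoded as '<hex>-<hex>' and at most 8 space-separated pairs
    # are sent per 'between' call, e.g.:
    #
    #   pairs=<hex1>-<hex2> <hex3>-<hex4>
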
    def changegroup(self, nodes, source):
        n = wireprototypes.encodelist(nodes)
        f = self._callcompressable("changegroup", roots=n)
        return changegroupmod.cg1unpacker(f, 'UN')

    def changegroupsubset(self, bases, heads, source):
        self.requirecap('changegroupsubset', _('look up remote changes'))
        bases = wireprototypes.encodelist(bases)
        heads = wireprototypes.encodelist(heads)
        f = self._callcompressable("changegroupsubset",
                                   bases=bases, heads=heads)
        return changegroupmod.cg1unpacker(f, 'UN')

    # End of ipeerlegacycommands interface.

    def _submitbatch(self, req):
        """run batch request <req> on the server

        Returns an iterator of the raw responses from the server.
        """
        ui = self.ui
        if ui.debugflag and ui.configbool('devel', 'debug.peer-request'):
            ui.debug('devel-peer-request: batched-content\n')
            for op, args in req:
                msg = 'devel-peer-request: - %s (%d arguments)\n'
                ui.debug(msg % (op, len(args)))

        unescapearg = wireprototypes.unescapebatcharg

        rsp = self._callstream("batch", cmds=encodebatchcmds(req))
        chunk = rsp.read(1024)
        work = [chunk]
        while chunk:
            while ';' not in chunk and chunk:
                chunk = rsp.read(1024)
                work.append(chunk)
            merged = ''.join(work)
            while ';' in merged:
                one, merged = merged.split(';', 1)
                yield unescapearg(one)
            chunk = rsp.read(1024)
            work = [merged, chunk]
        yield unescapearg(''.join(work))

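    # Illustrative exchange (example values assumed): encodebatchcmds()
    # encodes requests as 'cmd1 key=value,...;cmd2 ...', and the server
    # replies with the individual command responses joined by ';'. The loop
    # above splits and unescapes those responses incrementally, so callers
    # can consume early results while later ones are still streaming in.
    # For example, a batched heads + known request is sent roughly as:
    #
    #   cmds=heads ;known nodes=<hex nodes>
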
    def _submitone(self, op, args):
        return self._call(op, **pycompat.strkwargs(args))

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        # don't pass optional arguments left at their default value
        opts = {}
        if three is not None:
            opts[r'three'] = three
        if four is not None:
            opts[r'four'] = four
        return self._call('debugwireargs', one=one, two=two, **opts)

    def _call(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a simple string.

        returns the server reply as a string."""
        raise NotImplementedError()

    def _callstream(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream. Note that if the
        command doesn't return a stream, _callstream behaves
        differently for ssh and http peers.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callcompressable(self, cmd, **args):
        """execute <cmd> on the server

        The command is expected to return a stream.

        The stream may have been compressed in some implementations. This
        function takes care of the decompression. This is the only difference
        with _callstream.

        returns the server reply as a file like object.
        """
        raise NotImplementedError()

    def _callpush(self, cmd, fp, **args):
        """execute a <cmd> on server

        The command is expected to be related to a push. Push has a special
        return method.

        returns the server reply as a (ret, output) tuple. ret is either
        empty (error) or a stringified int.
        """
        raise NotImplementedError()

    def _calltwowaystream(self, cmd, fp, **args):
        """execute <cmd> on server

        The command will send a stream to the server and get a stream in reply.
        """
        raise NotImplementedError()

    def _abort(self, exception):
        """clearly abort the wire protocol connection and raise the exception
        """
        raise NotImplementedError()