# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    exchangev2,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    phases,
    pushkey,
    pycompat,
    repository,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)
from .utils import (
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

# Maps bundle version to content options, used to choose which parts to
# bundle.
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1' : {
        'cg.version': 's1'
    }
}
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

@attr.s
class bundlespec(object):
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()
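
# bundlespec instances are created by parsebundlespec() below; the attr.s
# decorator generates __init__ and friends for the six fields, so a parsed
# spec travels around as a plain value object.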

def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, wirecompression, version,
    wireversion, params, contentopts). Compression will be ``None`` if not in
    strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params
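
    # For example, parseparams('v2;level=9;type=x') returns
    # ('v2', {'level': '9', 'type': 'x'}); keys and values are URI-decoded,
    # so a parameter written 'a%3Db=1' yields the literal key 'a=b'.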

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
                _('invalid bundle specification; '
                  'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                    _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                    _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                    _('missing support for repository features: %s') %
                      ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
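
# For instance, parsebundlespec(repo, 'gzip-v2') returns a bundlespec with
# compression='gzip', wirecompression='GZ', version='v2', wireversion='02'
# and the 'v2' content options, while parsebundlespec(repo, 'v2',
# strict=False) falls back to bzip2 compression.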

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
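
# The four-byte headers dispatched on above are 'HG10' (bundle1, followed
# by a two-byte compression identifier such as 'BZ', 'GZ' or 'UN'), 'HG2x'
# (bundle2) and 'HGS1' (stream clone bundles).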

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part is required to be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
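
# The strings produced here mirror the user-facing bundlespec syntax, e.g.
# 'bzip2-v1' or 'zstd-v2' for changegroup bundles; parsebundlespec() above
# accepts these simple forms.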

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)
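
# When no common nodes are supplied the base degenerates to [nullid], i.e.
# everything reachable from heads is considered outgoing; "common" nodes
# unknown locally are filtered out first so discovery.outgoing() never
# sees them.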

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config option is to allow developers to choose the
    # bundle version used during exchange. This is especially handy in
    # tests. The value is a list of bundle versions to pick from; the
    # highest version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')
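
# E.g. setting devel.legacy.exchange to 'bundle1' forces bundle1 even
# against a bundle2-capable remote, while listing 'bundle2' as well (or
# leaving the list empty) lets the capability check decide.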

class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phase changes that must be pushed along with the changesets
        self.outdatedphases = None
        # phase changes that must be pushed if the changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads
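
    # In short: when the changegroup push fails, the remote keeps only the
    # heads it already shares with us, so the fallback heads are derived
    # entirely from the discovery data above, without another round trip.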

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
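
# A minimal calling sketch (assuming a configured repo and a peer obtained
# from hg.peer(); the surrounding names are illustrative):
#
#     pushop = exchange.push(repo, remote, revs=[repo['.'].node()])
#     if pushop.cgresult == 0:
#         ui.warn('changegroup push failed with an HTTP error\n')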

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pushdiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
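
# Every discovery step below registers itself through this decorator: e.g.
# @pushdiscovery('changeset') maps 'changeset' to _pushdiscoverychangeset
# and appends it to pushdiscoveryorder, so _pushdiscovery() runs the steps
# in declaration order.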

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly roots;
    # XXX we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for a publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # very naive computation, which can be quite expensive on a big repo.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)
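
# Note the '::%ln' revset above: every ancestor of the future heads is
# considered, so relevantmarkers() can select the obsolescence history of
# the whole pushed set, not just markers sitting on the new changesets.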

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)
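
# comparebookmarks() buckets every bookmark into one of eight lists (added
# on source, added on destination, advanced on source, advanced on
# destination, diverged, otherwise differing, invalid nodes, identical);
# _processcompared() unpacks them below in that order.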

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """take decisions on which bookmarks to push, based on the comparison
    with the remote ones

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()
|
|
735
|
|
|
736
|
|
|
736
|
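# Illustrative note (not part of the upstream module): the ``comp`` argument
# unpacked above is the 8-tuple produced by bookmod.comparebookmarks(); the
# summary below paraphrases its contract, see bookmarks.py for the
# authoritative description.
#
#     addsrc   - added on the source side (perhaps removed on dst)
#     adddst   - added on the destination side (perhaps removed on src)
#     advsrc   - advanced on the source side
#     advdst   - advanced on the destination side
#     diverge  - diverged on both sides
#     differ   - changed, but the source changeset is unknown to dst
#     invalid  - unknown on both sides
#     same     - identical on both sides
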
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are defined here to respect the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable
            # changeset in missing, at least one of the missing
            # heads will be obsolete or unstable. So checking
            # heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

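# Illustrative sketch (not part of the upstream module): how a hypothetical
# extension could register its own part generator with the decorator above.
# The step name 'myext-data' and the part type 'myext:data' are invented for
# illustration only.
#
#     @b2partsgenerator('myext-data')
#     def _pushb2myextdata(pushop, bundler):
#         if 'myext-data' in pushop.stepsdone:
#             return
#         pushop.stepsdone.add('myext-data')
#         bundler.newpart('myext:data', data=b'payload')
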
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

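# Illustrative sketch (not part of the upstream module): the set arithmetic
# above flags every remote head that this push will discard or rewrite. The
# node names below are invented for illustration only.
#
#     remote = {'a', 'b', 'c'}       # heads the remote had at discovery
#     newheads = {'a', 'd'}          # heads after applying this push
#     discardedheads = {'b'}         # heads discovery decided to drop
#     affected = (discardedheads & remote) | (remote - newheads)
#     # affected == {'b', 'c'}: the server must still have exactly these
#     # heads, otherwise another push raced us and ours is rejected.
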
def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

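# Illustrative note (not part of the upstream module): the binary
# 'phase-heads' part is preferred; the pushkey fallback can be forced, e.g.
# for testing, through the 'devel.legacy.exchange' config read above:
#
#     [devel]
#     legacy.exchange = phases
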
def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

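# Illustrative note (not part of the upstream module): _bmaction maps the
# (old, new) node pair of an outgoing bookmark to the message key used with
# bookmsgmap. The hex values below are invented for illustration only.
#
#     _bmaction('', 'abc123')        -> 'export'  (new bookmark)
#     _bmaction('abc123', '')        -> 'delete'  (bookmark removed)
#     _bmaction('abc123', 'def456')  -> 'update'  (bookmark moved)
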
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for a part we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

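# Illustrative note (not part of the upstream module): pushop.pushvars is
# populated from the command line, so a server-side hook can read the value
# from its environment (as HG_USERVAR_DEBUG in current Mercurial, if memory
# serves), e.g.:
#
#     $ hg push --pushvars "DEBUG=1"
#
# The 'DEBUG' name is invented for illustration only.
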
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        if exc.hint is not None:
            pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
        raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changeset was pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None,
                 includepats=None, excludepats=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False
        # Set of file patterns to include.
        self.includepats = includepats
        # Set of file patterns to exclude.
        self.excludepats = excludepats

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

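    # Illustrative note (not part of the upstream module): with no explicit
    # heads, the synchronisation target is the pre-pull common set plus every
    # remote head we did not already know about, e.g.:
    #
    #     common = [n1, n2]; rheads = [n2, n3]  ->  pulledsubset == [n1, n2, n3]
    #
    # The n1..n3 node names are invented for illustration only.
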
    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

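# Illustrative sketch (not part of the upstream module): because
# transactionmanager subclasses util.transactional, it can be driven as a
# context manager, assuming util.transactional wires __exit__ to call close()
# on success and release() on the way out:
#
#     trmanager = transactionmanager(repo, 'pull', remote.url())
#     with trmanager:
#         ...  # callers obtain the transaction via trmanager.transaction()
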
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

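# Illustrative note (not part of the upstream module): listkeys wraps the
# wire-protocol command of the same name and returns the requested pushkey
# namespace as a plain dict, e.g.:
#
#     books = listkeys(remote, 'bookmarks')
#     # books maps bookmark names to hex changeset ids
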
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsoletion
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as band aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

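# Note on the helpers above (an interpretive aside, not upstream commentary):
# ``h1 % h2`` is the revset "only" operator -- ancestors of h1 that are not
# ancestors of h2 -- so headsofdiff() yields the heads gained since the
# previous iteration, which then widen ``pullop.common`` for the next round.
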
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None, includepats=None, excludepats=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.
    ``includepats`` and ``excludepats`` define explicit file patterns to
    include and exclude in storage, respectively. If not defined, narrow
    patterns from the repo instance are used, if available.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}

    # We allow the narrow patterns to be passed in explicitly to provide more
    # flexibility for API consumers.
    if includepats or excludepats:
        includepats = includepats or set()
        excludepats = excludepats or set()
    else:
        includepats, excludepats = repo.narrowpats

    narrowspec.validatepatterns(includepats)
    narrowspec.validatepatterns(excludepats)

    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           includepats=includepats, excludepats=excludepats,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
-        # This should ideally be in _pullbundle2(). However, it needs to run
-        # before discovery to avoid extra work.
-        _maybeapplyclonebundle(pullop)
-        streamclone.maybeperformlegacystreamclone(pullop)
-        _pulldiscovery(pullop)
-        if pullop.canusebundle2:
-            _fullpullbundle2(repo, pullop)
-        _pullchangeset(pullop)
-        _pullphase(pullop)
-        _pullbookmarks(pullop)
-        _pullobsolete(pullop)
+        # Use the modern wire protocol, if available.
+        if remote.capable('exchangev2'):
+            exchangev2.pull(pullop)
+        else:
+            # This should ideally be in _pullbundle2(). However, it needs to
+            # run before discovery to avoid extra work.
+            _maybeapplyclonebundle(pullop)
+            streamclone.maybeperformlegacystreamclone(pullop)
+            _pulldiscovery(pullop)
+            if pullop.canusebundle2:
+                _fullpullbundle2(repo, pullop)
+            _pullchangeset(pullop)
+            _pullphase(pullop)
+            _pullbookmarks(pullop)
+            _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop

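# Usage sketch (illustrative; the URL is hypothetical): an extension or
# command can drive a pull programmatically, kept as a comment so nothing
# runs at import time:
#
#     from mercurial import exchange, hg
#     peer = hg.peer(repo.ui, {}, 'https://example.com/repo')
#     pullop = exchange.pull(repo, peer)
#     if pullop.cgresult:
#         repo.ui.status('changegroup applied\n')
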
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

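# Registration sketch (illustrative; the step name and function are
# hypothetical, and intentionally left as comments since a real registration
# would run on every pull):
#
#     @pulldiscovery('myext:widgets')
#     def _discoverwidgets(pullop):
#         pullop.repo.ui.debug('widget discovery would run here\n')
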
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # to new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; it will eventually handle
    all discovery."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it would do a pathological number of
        # round trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

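# Data-shape note (an assumption based on discovery.findcommonincoming's
# documented return value, not text from this file): ``tmp`` above unpacks
# into ``common`` (nodes known on both sides), ``fetch`` (roots of the
# missing set; empty means nothing to pull) and ``rheads`` (the remote's
# heads).
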
def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data is the changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record['node']
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

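# Extension sketch (illustrative; the wrapper and extra argument name are
# hypothetical): extensions usually extend the hook above via
# extensions.wrapfunction, e.g.:
#
#     def _myextraprepare(orig, pullop, kwargs):
#         orig(pullop, kwargs)
#         kwargs['myext-widgets'] = True
#     extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                             _myextraprepare)
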
def _pullchangeset(pullop):
    """pull changeset from unbundle into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, which would break a future useful rollback
    # call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get phases data from the remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

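# Data-shape note (an assumption about the 'phases' pushkey namespace, not
# text from this file): listkeys(remote, 'phases') above returns roughly
#
#     {'publishing': 'True'}      # publishing server, no draft roots listed
#     {'<hex-node>': '1', ...}    # draft phase roots on a non-publishing one
#
# which _pullapplyphases() below interprets.
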
def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction,
    creating one if necessary. We return the transaction to inform the
    calling code that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args

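# Configuration sketch (illustrative; user and path names are made up): the
# ``_NARROWACL_SECTION`` read by applynarrowacl() above would be populated in
# the server's hgrc along these lines, with '*' expanding to 'path:.':
#
#     [narrowhgacl]
#     default.includes = common/*
#     alice.includes = common/*, team-a/*
#     alice.excludes = team-a/secret
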
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
        May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
        most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    clrev = cl.rev

    commonrevs = {clrev(n) for n in common} | {nullrev}
    headsrevs = {clrev(n) for n in heads}

    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = [prev for prev in cl.parentrevs(rev) if prev != nullrev]
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

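# Example (illustrative output; the exact capability blob varies by repo and
# version): caps20to10(repo, role='client') returns something like
#
#     {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'}
#
# i.e. the bundle2 capabilities URL-quoted into a legacy bundlecaps entry.
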
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, modify the getbundle2partsmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

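# Registration sketch (illustrative; the part and step names are hypothetical,
# kept in comments so nothing is actually registered at import time):
#
#     @getbundle2partsgenerator('myext-widgets')
#     def _getbundlewidgets(bundler, repo, source, bundlecaps=None,
#                           b2caps=None, **kwargs):
#         if b2caps and 'myext' in b2caps:
#             bundler.newpart('myext:widgets', data='hello')
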
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
|
|
2073
|
def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
|
|
2069
|
**kwargs):
|
|
2074
|
**kwargs):
|
|
2070
|
"""Return chunks constituting a bundle's raw data.
|
|
2075
|
"""Return chunks constituting a bundle's raw data.
|
|
2071
|
|
|
2076
|
|
|
2072
|
Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
|
|
2077
|
Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
|
|
2073
|
passed.
|
|
2078
|
passed.
|
|
2074
|
|
|
2079
|
|
|
2075
|
Returns a 2-tuple of a dict with metadata about the generated bundle
|
|
2080
|
Returns a 2-tuple of a dict with metadata about the generated bundle
|
|
2076
|
and an iterator over raw chunks (of varying sizes).
|
|
2081
|
and an iterator over raw chunks (of varying sizes).
|
|
2077
|
"""
|
|
2082
|
"""
|
|
2078
|
kwargs = pycompat.byteskwargs(kwargs)
|
|
2083
|
kwargs = pycompat.byteskwargs(kwargs)
|
|
2079
|
info = {}
|
|
2084
|
info = {}
|
|
2080
|
usebundle2 = bundle2requested(bundlecaps)
|
|
2085
|
usebundle2 = bundle2requested(bundlecaps)
|
|
2081
|
# bundle10 case
|
|
2086
|
# bundle10 case
|
|
2082
|
if not usebundle2:
|
|
2087
|
if not usebundle2:
|
|
2083
|
if bundlecaps and not kwargs.get('cg', True):
|
|
2088
|
if bundlecaps and not kwargs.get('cg', True):
|
|
2084
|
raise ValueError(_('request for bundle10 must include changegroup'))
|
|
2089
|
raise ValueError(_('request for bundle10 must include changegroup'))
|
|
2085
|
|
|
2090
|
|
|
2086
|
if kwargs:
|
|
2091
|
if kwargs:
|
|
2087
|
raise ValueError(_('unsupported getbundle arguments: %s')
|
|
2092
|
raise ValueError(_('unsupported getbundle arguments: %s')
|
|
2088
|
% ', '.join(sorted(kwargs.keys())))
|
|
2093
|
% ', '.join(sorted(kwargs.keys())))
|
|
2089
|
outgoing = _computeoutgoing(repo, heads, common)
|
|
2094
|
outgoing = _computeoutgoing(repo, heads, common)
|
|
2090
|
info['bundleversion'] = 1
|
|
2095
|
info['bundleversion'] = 1
|
|
2091
|
return info, changegroup.makestream(repo, outgoing, '01', source,
|
|
2096
|
return info, changegroup.makestream(repo, outgoing, '01', source,
|
|
2092
|
bundlecaps=bundlecaps)
|
|
2097
|
bundlecaps=bundlecaps)
|
|
2093
|
|
|
2098
|
|
|
2094
|
# bundle20 case
|
|
2099
|
# bundle20 case
|
|
2095
|
info['bundleversion'] = 2
|
|
2100
|
info['bundleversion'] = 2
|
|
2096
|
b2caps = {}
|
|
2101
|
b2caps = {}
|
|
2097
|
for bcaps in bundlecaps:
|
|
2102
|
for bcaps in bundlecaps:
|
|
2098
|
if bcaps.startswith('bundle2='):
|
|
2103
|
if bcaps.startswith('bundle2='):
|
|
2099
|
blob = urlreq.unquote(bcaps[len('bundle2='):])
|
|
2104
|
blob = urlreq.unquote(bcaps[len('bundle2='):])
|
|
2100
|
b2caps.update(bundle2.decodecaps(blob))
|
|
2105
|
b2caps.update(bundle2.decodecaps(blob))
|
|
2101
|
bundler = bundle2.bundle20(repo.ui, b2caps)
|
|
2106
|
bundler = bundle2.bundle20(repo.ui, b2caps)
|
|
2102
|
|
|
2107
|
|
|
2103
|
kwargs['heads'] = heads
|
|
2108
|
kwargs['heads'] = heads
|
|
2104
|
kwargs['common'] = common
|
|
2109
|
kwargs['common'] = common
|
|
2105
|
|
|
2110
|
|
|
2106
|
for name in getbundle2partsorder:
|
|
2111
|
for name in getbundle2partsorder:
|
|
2107
|
func = getbundle2partsmapping[name]
|
|
2112
|
func = getbundle2partsmapping[name]
|
|
2108
|
func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
|
|
2113
|
func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
|
|
2109
|
**pycompat.strkwargs(kwargs))
|
|
2114
|
**pycompat.strkwargs(kwargs))
|
|
2110
|
|
|
2115
|
|
|
2111
|
info['prefercompressed'] = bundler.prefercompressed
|
|
2116
|
info['prefercompressed'] = bundler.prefercompressed
|
|
2112
|
|
|
2117
|
|
|
2113
|
return info, bundler.getchunks()
|
|
2118
|
return info, bundler.getchunks()
|
|
2114
|
|
|
2119
|
|
|
2115
|
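
# Hedged usage sketch: a server-side caller would typically stream the
# returned chunks straight to its transport; 'write' below stands in for
# any byte sink, and the argument names are illustrative:
#
#     info, gen = getbundlechunks(repo, 'serve', heads=clientheads,
#                                 common=commonnodes, bundlecaps=bundlecaps)
#     for chunk in gen:
#         write(chunk)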

@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        filematcher = narrowspec.match(repo.root, include=include,
                                       exclude=exclude)
    else:
        filematcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps,
                                      filematcher=filematcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if kwargs.get(r'narrow', False) and (include or exclude):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)
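
# The version negotiation above, with illustrative values: if the client
# advertised b2caps = {'changegroup': ['01', '02']} and the repo supports
# both, max() selects '02'; when the client advertises nothing, the code
# stays on the conservative default '01'.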

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases', ()):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)
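
# Worked example of the mapping built above (node values hypothetical): with
# one public head P and one draft head D, headsbyphase is
# {phases.public: {P}, phases.draft: {D}}, so phasemapping, indexed by phase
# number, becomes [[P], [D], []] for public (0), draft (1) and secret (2),
# which phases.binaryencode() then turns into the part payload.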

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20-byte changeset nodes and raw
    .hgtags filenodes values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping.

    The payload is a series of records, one per branch:

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)
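
# Note on the guard above: a narrow pull (kwargs['narrow'] being true) never
# receives this part, even when the client advertised the 'rev-branch-cache'
# capability, since the cache is not currently narrow-compatible.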

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)
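
# Sketch of the 'hashed' form accepted above: instead of sending every head
# it knows, a client may send the literal marker 'hashed' followed by the
# SHA-1 of its concatenated sorted heads, e.g. (observedheads is a
# hypothetical name for the binary head nodes the client saw):
#
#     their_heads = ['hashed',
#                    hashlib.sha1(''.join(sorted(observedheads))).digest()]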

def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r
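
# Two notes on the cleanup above. lockandtr is released in reverse
# acquisition order (transaction, then lock, then wlock), which is exactly
# what lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0]) expresses.
# And lazy locking, read via the configbool call above, would be enabled
# with:
#
#     [experimental]
#     bundle2lazylocking = True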

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the entry's URL; the remaining keys are the entry's attributes.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m
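
# Hedged example of a manifest line and its parsed form (URL and attribute
# values are made up):
#
#     https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# parses to:
#
#     {'URL': 'https://example.com/full.hg',
#      'BUNDLESPEC': 'gzip-v2',
#      'COMPRESSION': 'gzip',
#      'VERSION': 'v2',
#      'REQUIRESNI': 'true'}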

def isstreamclonespec(bundlespec):
    # Stream clone v1
    if bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1':
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and
            bundlespec.wireversion == '02' and
            bundlespec.contentopts.get('streamv2')):
        return True

    return False
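
# For illustration (assuming the usual bundlespec wire mappings): a
# 'none-packed1' spec resolves to wirecompression 'UN' with wireversion 's1'
# and so qualifies as stream clone v1, while a v2 stream bundle is an
# uncompressed '02' bundle whose content options carry 'streamv2'.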

def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries
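
# For illustration: given entries whose specs are 'gzip-v2' and
# 'none-packed1', a call with streamclonerequested=True keeps only the
# 'none-packed1' entry; independently, any entry carrying REQUIRESNI is
# dropped on clients whose ssl module lacks SNI support.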

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]
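
# Usage sketch for the preference sorting above (values illustrative):
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=zstd
#
# configlist() yields ['VERSION=v2', 'COMPRESSION=zstd'], which parses into
# [('VERSION', 'v2'), ('COMPRESSION', 'zstd')]: entries whose VERSION is v2
# sort first, with ties broken by COMPRESSION=zstd.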

def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

    return False