remotenames: rename related file and storage dir to logexchange...
Pulkit Goyal
r35348:a29fe459 default
@@ -1,2214 +1,2214 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import errno
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
+    logexchange,
    obsolete,
    phases,
    pushkey,
    pycompat,
-    remotenames,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)

urlerr = util.urlerr
urlreq = util.urlreq

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', # legacy
                         }

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

      <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        engine = util.compengines.forbundlename(compression)
        compression = engine.bundletype()[1]
        version = _bundlespeccgversions[version]
    return compression, version, params

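For a sense of how the grammar above resolves, a minimal sketch (assuming `repo` is any local repository object; the results follow from `_bundlespeccgversions` and the compression engine registry shown above):

    comp, version, params = parsebundlespec(repo, 'gzip-v2', externalnames=True)
    # comp == 'gzip', version == 'v2', params == {}
    comp, version, params = parsebundlespec(repo, 'gzip-v2')
    # internal names: comp == 'GZ', version == '02'
    comp, version, params = parsebundlespec(repo, 'v2', strict=False)
    # compression defaults to bzip2: comp == 'BZ', version == '02'
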
174 def readbundle(ui, fh, fname, vfs=None):
174 def readbundle(ui, fh, fname, vfs=None):
175 header = changegroup.readexactly(fh, 4)
175 header = changegroup.readexactly(fh, 4)
176
176
177 alg = None
177 alg = None
178 if not fname:
178 if not fname:
179 fname = "stream"
179 fname = "stream"
180 if not header.startswith('HG') and header.startswith('\0'):
180 if not header.startswith('HG') and header.startswith('\0'):
181 fh = changegroup.headerlessfixup(fh, header)
181 fh = changegroup.headerlessfixup(fh, header)
182 header = "HG10"
182 header = "HG10"
183 alg = 'UN'
183 alg = 'UN'
184 elif vfs:
184 elif vfs:
185 fname = vfs.join(fname)
185 fname = vfs.join(fname)
186
186
187 magic, version = header[0:2], header[2:4]
187 magic, version = header[0:2], header[2:4]
188
188
189 if magic != 'HG':
189 if magic != 'HG':
190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
190 raise error.Abort(_('%s: not a Mercurial bundle') % fname)
191 if version == '10':
191 if version == '10':
192 if alg is None:
192 if alg is None:
193 alg = changegroup.readexactly(fh, 2)
193 alg = changegroup.readexactly(fh, 2)
194 return changegroup.cg1unpacker(fh, alg)
194 return changegroup.cg1unpacker(fh, alg)
195 elif version.startswith('2'):
195 elif version.startswith('2'):
196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
196 return bundle2.getunbundler(ui, fh, magicstring=magic + version)
197 elif version == 'S1':
197 elif version == 'S1':
198 return streamclone.streamcloneapplier(fh)
198 return streamclone.streamcloneapplier(fh)
199 else:
199 else:
200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
200 raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))
201
201
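The four-byte header read above fully determines the unpacker; summarizing the dispatch:

    # 'HG10' + 2-byte compression ('BZ', 'GZ', 'UN') -> changegroup.cg1unpacker
    # 'HG2?' (any version starting with '2')         -> bundle2.getunbundler
    # 'HGS1'                                         -> streamclone.streamcloneapplier
    # leading '\0' byte                              -> headerless bundle1, alg 'UN'
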
def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urlreq.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

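A minimal usage sketch (the file name is hypothetical; `ui` is any ui instance):

    with open('backup.hg', 'rb') as fh:
        spec = getbundlespec(ui, fh)
    # e.g. 'bzip2-v1', 'gzip-v2', or 'none-packed1;requirements%3Drevlogv1'
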
def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

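A sketch of the fallback behaviour, assuming `repo` is any local repository: with neither heads nor common given, everything is considered outgoing:

    outgoing = _computeoutgoing(repo, heads=None, common=None)
    # common falls back to [nullid] and heads to repo.changelog.heads(),
    # so outgoing.missing covers every changeset in the repository.
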
def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    forcebundle1 = False
    # The goal of this config is to allow developers to choose the bundle
    # version used during exchange. This is especially handy during tests.
    # The value is a list of bundle versions to pick from; the highest
    # available version is used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')

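For instance, a test can pin the exchange format to bundle1 through the developer config read above:

    [devel]
    legacy.exchange = bundle1
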
class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have already been performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except IOError as err:
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    return pushop

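A minimal calling sketch (the peer URL is hypothetical):

    from mercurial import hg
    other = hg.peer(repo.ui, {}, 'https://example.com/repo')
    pushop = push(repo, other, revs=[repo['.'].node()])
    if pushop.cgresult == 1:
        repo.ui.status('pushed; remote head count unchanged\n')
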
# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order (this
    may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

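For example, an extension could register an extra step through this decorator (the step name and function below are hypothetical):

    @pushdiscovery('extradata')
    def _pushdiscoveryextradata(pushop):
        """hypothetical step recording extra state on the push operation"""
        pushop.ui.debug('running extradata discovery\n')
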
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in the issue 3781 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs draft on remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly
    # XXX roots; we may want to ensure it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation, which can be quite expensive on big repos;
        # however, evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        for b, scid, dcid in bookmarks:
            yield b, safehex(scid), safehex(dcid)

    comp = [hexifycompbookmarks(marks) for marks in comp]
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore is empty --> no obsolete changesets,
        # so we can skip the iteration below
        if unfi.obsstore:
            # these messages are defined here to stay under the 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If there is at least one obsolete or unstable changeset in
            # missing, at least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

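Mirroring the discovery decorator above, a hypothetical part generator would be registered as:

    @b2partsgenerator('extradata')
    def _pushb2extradata(pushop, bundler):
        """hypothetical generator emitting one custom part per push"""
        if 'extradata' in pushop.stepsdone:
            return
        pushop.stepsdone.add('extradata')
        bundler.newpart('extradata', data='payload')
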
720 def _pushb2ctxcheckheads(pushop, bundler):
720 def _pushb2ctxcheckheads(pushop, bundler):
721 """Generate race condition checking parts
721 """Generate race condition checking parts
722
722
723 Exists as an independent function to aid extensions
723 Exists as an independent function to aid extensions
724 """
724 """
725 # * 'force' do not check for push race,
725 # * 'force' do not check for push race,
726 # * if we don't push anything, there are nothing to check.
726 # * if we don't push anything, there are nothing to check.
727 if not pushop.force and pushop.outgoing.missingheads:
727 if not pushop.force and pushop.outgoing.missingheads:
728 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
728 allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
729 emptyremote = pushop.pushbranchmap is None
729 emptyremote = pushop.pushbranchmap is None
730 if not allowunrelated or emptyremote:
730 if not allowunrelated or emptyremote:
731 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
731 bundler.newpart('check:heads', data=iter(pushop.remoteheads))
732 else:
732 else:
733 affected = set()
733 affected = set()
734 for branch, heads in pushop.pushbranchmap.iteritems():
734 for branch, heads in pushop.pushbranchmap.iteritems():
735 remoteheads, newheads, unsyncedheads, discardedheads = heads
735 remoteheads, newheads, unsyncedheads, discardedheads = heads
736 if remoteheads is not None:
736 if remoteheads is not None:
737 remote = set(remoteheads)
737 remote = set(remoteheads)
738 affected |= set(discardedheads) & remote
738 affected |= set(discardedheads) & remote
739 affected |= remote - set(newheads)
739 affected |= remote - set(newheads)
740 if affected:
740 if affected:
741 data = iter(sorted(affected))
741 data = iter(sorted(affected))
742 bundler.newpart('check:updated-heads', data=data)
742 bundler.newpart('check:updated-heads', data=data)
743
743
744 def _pushing(pushop):
744 def _pushing(pushop):
745 """return True if we are pushing anything"""
745 """return True if we are pushing anything"""
746 return bool(pushop.outgoing.missing
746 return bool(pushop.outgoing.missing
747 or pushop.outdatedphases
747 or pushop.outdatedphases
748 or pushop.outobsmarkers
748 or pushop.outobsmarkers
749 or pushop.outbookmarks)
749 or pushop.outbookmarks)
750
750
751 @b2partsgenerator('check-bookmarks')
751 @b2partsgenerator('check-bookmarks')
752 def _pushb2checkbookmarks(pushop, bundler):
752 def _pushb2checkbookmarks(pushop, bundler):
753 """insert bookmark move checking"""
753 """insert bookmark move checking"""
754 if not _pushing(pushop) or pushop.force:
754 if not _pushing(pushop) or pushop.force:
755 return
755 return
756 b2caps = bundle2.bundle2caps(pushop.remote)
756 b2caps = bundle2.bundle2caps(pushop.remote)
757 hasbookmarkcheck = 'bookmarks' in b2caps
757 hasbookmarkcheck = 'bookmarks' in b2caps
758 if not (pushop.outbookmarks and hasbookmarkcheck):
758 if not (pushop.outbookmarks and hasbookmarkcheck):
759 return
759 return
760 data = []
760 data = []
761 for book, old, new in pushop.outbookmarks:
761 for book, old, new in pushop.outbookmarks:
762 old = bin(old)
762 old = bin(old)
763 data.append((book, old))
763 data.append((book, old))
764 checkdata = bookmod.binaryencode(data)
764 checkdata = bookmod.binaryencode(data)
765 bundler.newpart('check:bookmarks', data=checkdata)
765 bundler.newpart('check:bookmarks', data=checkdata)
766
766
767 @b2partsgenerator('check-phases')
767 @b2partsgenerator('check-phases')
768 def _pushb2checkphases(pushop, bundler):
768 def _pushb2checkphases(pushop, bundler):
769 """insert phase move checking"""
769 """insert phase move checking"""
770 if not _pushing(pushop) or pushop.force:
770 if not _pushing(pushop) or pushop.force:
771 return
771 return
772 b2caps = bundle2.bundle2caps(pushop.remote)
772 b2caps = bundle2.bundle2caps(pushop.remote)
773 hasphaseheads = 'heads' in b2caps.get('phases', ())
773 hasphaseheads = 'heads' in b2caps.get('phases', ())
774 if pushop.remotephases is not None and hasphaseheads:
774 if pushop.remotephases is not None and hasphaseheads:
775 # check that the remote phase has not changed
775 # check that the remote phase has not changed
776 checks = [[] for p in phases.allphases]
776 checks = [[] for p in phases.allphases]
777 checks[phases.public].extend(pushop.remotephases.publicheads)
777 checks[phases.public].extend(pushop.remotephases.publicheads)
778 checks[phases.draft].extend(pushop.remotephases.draftroots)
778 checks[phases.draft].extend(pushop.remotephases.draftroots)
779 if any(checks):
779 if any(checks):
780 for nodes in checks:
780 for nodes in checks:
781 nodes.sort()
781 nodes.sort()
782 checkdata = phases.binaryencode(checks)
782 checkdata = phases.binaryencode(checks)
783 bundler.newpart('check:phases', data=checkdata)
783 bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions: # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply
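
# Illustrative sketch (not part of exchange.py): the changegroup version
# negotiation above intersects the versions the server advertises with the
# versions this client can emit, then picks the highest. The helper below is
# a hypothetical, standalone model of that handshake.
def _sketchnegotiatecgversion(servercaps, localversions, fallback='01'):
    """return the best common changegroup version (hypothetical helper)

    ``servercaps`` is the iterable the server advertised (may be empty for
    3.1/3.2 era servers); ``localversions`` are the versions we can produce.
    e.g. _sketchnegotiatecgversion(['01', '02'], {'01', '02', '03'}) == '02'
    """
    if not servercaps:
        return fallback  # old server: fall back to the legacy format
    common = [v for v in servercaps if v in localversions]
    if not common:
        raise ValueError('no common changegroup version')
    return max(common)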

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply
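
# Illustrative sketch (not part of exchange.py): each pushkey part above is
# effectively a compare-and-set request -- the server only applies the phase
# move if the key still has the 'old' value, which protects against
# concurrent pushes. A hypothetical in-memory model of that server-side
# check, mirroring the 0/1 replies handled in ``handlereply`` above:
def _sketchpushkeyserver(store, namespace, key, old, new):
    """apply one pushkey request to ``store`` (hypothetical helper)

    ``store`` maps namespace -> {key: value}. Returns True on success and
    False when the precondition failed.
    """
    ns = store.setdefault(namespace, {})
    if ns.get(key, '') != old:
        return False  # raced with another client: reject the update
    ns[key] = new
    return True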

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'
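
# Illustrative sketch (not part of exchange.py): _bmaction classifies a
# bookmark push from its (old, new) node pair -- an empty old value means
# the bookmark is new to the remote, an empty new value means deletion.
# Hypothetical examples:
#
#   _bmaction('', 'cafe...')        -> 'export'  (create on the remote)
#   _bmaction('cafe...', '')        -> 'delete'
#   _bmaction('cafe...', 'f00d...') -> 'update'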

def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)
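
# Illustrative note (not part of exchange.py): pushvars are supplied on the
# command line and surface in server-side hooks as environment variables.
# Assuming a stock server with a pretxnchangegroup hook, the flow looks
# roughly like:
#
#   $ hg push --pushvars "DEBUG=1" --pushvars "BYPASS_REVIEW="
#
# after which the hook can inspect variables such as $HG_USERVAR_DEBUG
# (the exact variable naming is a hook-side convention, not something this
# module defines).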

def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(
                stream, ['force'], pushop.remote.url())
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
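
# Illustrative sketch (not part of exchange.py): _pushbundle2 drives the
# decorator-based registry filled by @b2partsgenerator above -- each
# generator may add parts to the bundle and may return a callable that is
# later invoked with the server reply. A hypothetical, self-contained
# miniature of that pattern:
def _sketchrunpartsgenerators():
    """build a toy 'bundle' from registered generators (hypothetical)"""
    order, mapping = [], {}

    def register(name):
        def dec(func):
            mapping[name] = func
            order.append(name)
            return func
        return dec

    @register('changeset')
    def genchangeset(parts):
        parts.append('changegroup part')
        def handlereply(reply):
            return ('cgresult', reply)
        return handlereply  # reply handler, collected by the driver

    @register('phase')
    def genphase(parts):
        parts.append('phase-heads part')  # no reply handling needed

    parts, handlers = [], []
    for name in order:
        ret = mapping[name](parts)
        if callable(ret):
            handlers.append(ret)
    return parts, [h('ok') for h in handlers]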

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Informs the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new pulloperation should be created at the beginning of each pull and
    discarded afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()
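
# Illustrative sketch (not part of exchange.py): transactionmanager is a
# lazy handle -- no transaction is opened until something actually calls
# transaction(), and close()/release() are safe no-ops otherwise. A
# hypothetical caller therefore always pairs it with a finally block:
#
#   trmanager = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...steps that may or may not call trmanager.transaction()...
#       trmanager.close()      # commits only if a transaction was opened
#   finally:
#       trmanager.release()    # rolls back if close() was never reached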

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    wlock = lock = None
    try:
        wlock = pullop.repo.wlock()
        lock = pullop.repo.lock()
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        lockmod.release(pullop.trmanager, lock, wlock)

    # storing remotenames
    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pullop
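
# Illustrative sketch (not part of exchange.py): a hypothetical in-process
# caller of pull(). hg.peer/hg.repository are the usual entry points; the
# paths, URL, and bookmark names below are placeholders.
#
#   from mercurial import hg, ui as uimod
#   ui = uimod.ui.load()
#   repo = hg.repository(ui, '/path/to/local/repo')
#   other = hg.peer(ui, {}, 'https://example.com/repo')
#   pullop = pull(repo, other, bookmarks=['@'])
#   if pullop.cgresult == 0:
#       ui.status('no changesets pulled\n')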

# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pulldiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec
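
# Illustrative sketch (not part of exchange.py): as the docstring above
# notes, an extension wraps an existing discovery step by editing
# pulldiscoverymapping directly rather than re-registering the name. A
# hypothetical extension setup hook:
def _sketchwrapdiscoverystep():
    """wrap the 'changegroup' discovery step (hypothetical helper)"""
    orig = pulldiscoverymapping['changegroup']

    def wrapped(pullop):
        pullop.repo.ui.debug('entering wrapped discovery\n')
        return orig(pullop)

    pulldiscoverymapping['changegroup'] = wrapped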

def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = pullop.remote.listkeys('bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)

@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but
        # locally hidden" situations. We do not perform discovery on the
        # unfiltered repository because it ends up doing a pathological
        # amount of round trips for a huge amount of changesets we do not
        # care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = False

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch

    ui = pullop.repo.ui
    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
    if (not legacyphase and hasbinaryphase):
        kwargs['phases'] = True
        pullop.stepsdone.add('phases')

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'phases' not in pullop.stepsdone:
            kwargs['listkeys'] = ['phases']
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **pycompat.strkwargs(kwargs))
    try:
        op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction)
        op.modes['bookmarks'] = 'records'
        bundle2.processbundle(pullop.repo, bundle, op=op)
    except bundle2.AbortFromPart as exc:
        pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
        raise error.Abort(_('pull failed on remote'), hint=exc.hint)
    except error.BundleValueError as exc:
        raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay opening the transaction as long as possible so we don't open
    # a transaction for nothing, and so we don't break future useful
    # rollback calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    ``pullop.gettransaction`` is a function that returns the pull
    transaction, creating one if necessary. We return the transaction to
    inform the calling code that a new transaction has been created (when
    applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, modify the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

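# Illustrative sketch (not part of the original module): an extension would
# normally register an extra part generator through the decorator above.
# The part name 'myextradata' and the _encodemydata() helper below are
# hypothetical.
#
#     @getbundle2partsgenerator('myextradata')
#     def _getbundlemyextrapart(bundler, repo, source, bundlecaps=None,
#                               b2caps=None, **kwargs):
#         """add a hypothetical extra part to the requested bundle"""
#         if kwargs.get('myextradata', False):
#             bundler.newpart('myextradata', data=_encodemydata(repo))
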
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        return changegroup.makestream(repo, outgoing, '01', source,
                                      bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    return bundler.getchunks()

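# Illustrative sketch (not part of the original module): a caller could
# stream a bundle to disk roughly like this; 'somerepo', 'headnodes' and
# 'commonnodes' are assumptions standing in for real values.
#
#     chunks = getbundlechunks(somerepo, 'serve', heads=headnodes,
#                              common=commonnodes, bundlecaps={'HG20'})
#     with open('out.hg', 'wb') as fh:
#         for chunk in chunks:
#             fh.write(chunk)
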
@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cgstream = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = '01'
        cgversions = b2caps.get('changegroup')
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedoutgoingversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = max(cgversions)
        outgoing = _computeoutgoing(repo, heads, common)
        if outgoing.missing:
            cgstream = changegroup.makestream(repo, outgoing, version, source,
                                              bundlecaps=bundlecaps)

    if cgstream:
        part = bundler.newpart('changegroup', data=cgstream)
        if cgversions:
            part.addparam('version', version)
        part.addparam('nbchanges', '%d' % len(outgoing.missing),
                      mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get('bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get('phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform data in a format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

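# The 'hashed' form above lets a bundle1 client send a single sha1 of its
# sorted heads instead of every head node; either representation, or the
# literal 'force' marker, is accepted by check_heads().
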
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    this function makes sure the repo is locked during the application and has
    a mechanism to check that no push race occurred between the creation of
    the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

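# Illustrative sketch (not part of the original module): a manifest line
# such as
#
#     https://example.com/bundle.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would parse to a dict roughly like
#
#     {'URL': 'https://example.com/bundle.hg', 'BUNDLESPEC': 'gzip-v2',
#      'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}
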
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                comp, version, params = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and (comp != 'UN' or version != 's1'):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

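# Illustrative sketch (not part of the original module): with a client
# configuration of
#
#     [ui]
#     clonebundleprefers = COMPRESSION=zstd, COMPRESSION=gzip
#
# entries advertising COMPRESSION=zstd sort first, then COMPRESSION=gzip,
# with remaining ties broken by the server-provided manifest order.
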
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') % e.reason)

        return False
@@ -1,1107 +1,1107
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil

from .i18n import _
from .node import nullid

from . import (
    bookmarks,
    bundlerepo,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logexchange,
    merge as mergemod,
    node,
    phases,
    repoview,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

def addbranchrevs(lrepo, other, branches, revs):
    peer = other.peer() # a courtesy to callers using a localrepo for other
    hashbranch, branches = branches
    if not hashbranch and not branches:
        x = revs or None
        if util.safehasattr(revs, 'first'):
            y = revs.first()
        elif revs:
            y = revs[0]
        else:
            y = None
        return x, y
    if revs:
        revs = list(revs)
    else:
        revs = []

    if not peer.capable('branchmap'):
        if branches:
            raise error.Abort(_("remote branch lookup not supported"))
        revs.append(hashbranch)
        return revs, revs[0]
    branchmap = peer.branchmap()

    def primary(branch):
        if branch == '.':
            if not lrepo:
                raise error.Abort(_("dirstate branch not accessible"))
            branch = lrepo.dirstate.branch()
        if branch in branchmap:
            revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
            return True
        else:
            return False

    for branch in branches:
        if not primary(branch):
            raise error.RepoLookupError(_("unknown branch '%s'") % branch)
    if hashbranch:
        if not primary(hashbranch):
            revs.append(hashbranch)
    return revs, revs[0]

def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

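# Illustrative sketch (not part of the original module): parseurl() splits
# the fragment off a clone URL, e.g.
#
#     >>> parseurl(b'http://example.org/repo#stable')
#     ('http://example.org/repo', ('stable', []))
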
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

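# Illustrative sketch (not part of the original module): an extension can
# route a custom URL scheme to its own module by adding an entry here; the
# 'myscheme' name and 'mymodule' module are hypothetical, and mymodule must
# expose an instance(ui, path, create) factory.
#
#     schemes['myscheme'] = mymodule
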
def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create).peer()

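# Illustrative sketch (not part of the original module): callers typically
# obtain a peer for a remote path like this; 'someui' is an assumption.
#
#     remote = peer(someui, {}, 'https://example.org/repo')
#     if remote.capable('bundle2'):
#         ...
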
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, str):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=str(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

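# Illustrative sketch (not part of the original module): sharing an existing
# local repository into a new working directory; both paths and 'someui'
# are assumptions.
#
#     share(someui, '/path/to/src', dest='/path/to/dest', update=True)
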
263 def unshare(ui, repo):
263 def unshare(ui, repo):
264 """convert a shared repository to a normal one
264 """convert a shared repository to a normal one
265
265
266 Copy the store data to the repo and remove the sharedpath data.
266 Copy the store data to the repo and remove the sharedpath data.
267 """
267 """
268
268
269 destlock = lock = None
269 destlock = lock = None
270 lock = repo.lock()
270 lock = repo.lock()
271 try:
271 try:
272 # we use locks here because if we race with commit, we
272 # we use locks here because if we race with commit, we
273 # can end up with extra data in the cloned revlogs that's
273 # can end up with extra data in the cloned revlogs that's
274 # not pointed to by changesets, thus causing verify to
274 # not pointed to by changesets, thus causing verify to
275 # fail
275 # fail
276
276
277 destlock = copystore(ui, repo, repo.path)
277 destlock = copystore(ui, repo, repo.path)
278
278
279 sharefile = repo.vfs.join('sharedpath')
279 sharefile = repo.vfs.join('sharedpath')
280 util.rename(sharefile, sharefile + '.old')
280 util.rename(sharefile, sharefile + '.old')
281
281
282 repo.requirements.discard('shared')
282 repo.requirements.discard('shared')
283 repo.requirements.discard('relshared')
283 repo.requirements.discard('relshared')
284 repo._writerequirements()
284 repo._writerequirements()
285 finally:
285 finally:
286 destlock and destlock.release()
286 destlock and destlock.release()
287 lock and lock.release()
287 lock and lock.release()
288
288
289 # update store, spath, svfs and sjoin of repo
289 # update store, spath, svfs and sjoin of repo
290 repo.unfiltered().__init__(repo.baseui, repo.root)
290 repo.unfiltered().__init__(repo.baseui, repo.root)
291
291
292 # TODO: figure out how to access subrepos that exist, but were previously
292 # TODO: figure out how to access subrepos that exist, but were previously
293 # removed from .hgsub
293 # removed from .hgsub
294 c = repo['.']
294 c = repo['.']
295 subs = c.substate
295 subs = c.substate
296 for s in sorted(subs):
296 for s in sorted(subs):
297 c.sub(s).unshare()
297 c.sub(s).unshare()
298
298
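The converse operation; a sketch of detaching a shared repository so it owns its store again, under the same mercurial.hg module assumption:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, 'work')  # a repository created via share()
    # copies the store locally, renames .hg/sharedpath to sharedpath.old,
    # and drops the 'shared'/'relshared' requirements, exactly as above
    hg.unshare(ui, repo)
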
299 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
299 def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
300 """Called after a new shared repo is created.
300 """Called after a new shared repo is created.
301
301
302 The new repo only has a requirements file and pointer to the source.
302 The new repo only has a requirements file and pointer to the source.
303 This function configures additional shared data.
303 This function configures additional shared data.
304
304
305 Extensions can wrap this function and write additional entries to
305 Extensions can wrap this function and write additional entries to
306 destrepo/.hg/shared to indicate additional pieces of data to be shared.
306 destrepo/.hg/shared to indicate additional pieces of data to be shared.
307 """
307 """
308 default = defaultpath or sourcerepo.ui.config('paths', 'default')
308 default = defaultpath or sourcerepo.ui.config('paths', 'default')
309 if default:
309 if default:
310 fp = destrepo.vfs("hgrc", "w", text=True)
310 fp = destrepo.vfs("hgrc", "w", text=True)
311 fp.write("[paths]\n")
311 fp.write("[paths]\n")
312 fp.write("default = %s\n" % default)
312 fp.write("default = %s\n" % default)
313 fp.close()
313 fp.close()
314
314
315 with destrepo.wlock():
315 with destrepo.wlock():
316 if bookmarks:
316 if bookmarks:
317 fp = destrepo.vfs('shared', 'w')
317 fp = destrepo.vfs('shared', 'w')
318 fp.write(sharedbookmarks + '\n')
318 fp.write(sharedbookmarks + '\n')
319 fp.close()
319 fp.close()
320
320
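As the docstring says, extensions can wrap postshare() to flag extra data as shared. A hypothetical wrapper; the entry name 'myextradata' is invented for illustration:

    from mercurial import extensions, hg

    def wrappedpostshare(orig, sourcerepo, destrepo, **kwargs):
        orig(sourcerepo, destrepo, **kwargs)
        with destrepo.wlock():
            # append a made-up entry to destrepo/.hg/shared
            fp = destrepo.vfs('shared', 'ab')
            fp.write('myextradata\n')
            fp.close()

    def uisetup(ui):
        extensions.wrapfunction(hg, 'postshare', wrappedpostshare)
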
321 def _postshareupdate(repo, update, checkout=None):
321 def _postshareupdate(repo, update, checkout=None):
322 """Maybe perform a working directory update after a shared repo is created.
322 """Maybe perform a working directory update after a shared repo is created.
323
323
324 ``update`` can be a boolean or a revision to update to.
324 ``update`` can be a boolean or a revision to update to.
325 """
325 """
326 if not update:
326 if not update:
327 return
327 return
328
328
329 repo.ui.status(_("updating working directory\n"))
329 repo.ui.status(_("updating working directory\n"))
330 if update is not True:
330 if update is not True:
331 checkout = update
331 checkout = update
332 for test in (checkout, 'default', 'tip'):
332 for test in (checkout, 'default', 'tip'):
333 if test is None:
333 if test is None:
334 continue
334 continue
335 try:
335 try:
336 uprev = repo.lookup(test)
336 uprev = repo.lookup(test)
337 break
337 break
338 except error.RepoLookupError:
338 except error.RepoLookupError:
339 continue
339 continue
340 _update(repo, uprev)
340 _update(repo, uprev)
341
341
342 def copystore(ui, srcrepo, destpath):
342 def copystore(ui, srcrepo, destpath):
343 '''copy files from the store of srcrepo into destpath
343 '''copy files from the store of srcrepo into destpath
344
344
345 returns destlock
345 returns destlock
346 '''
346 '''
347 destlock = None
347 destlock = None
348 try:
348 try:
349 hardlink = None
349 hardlink = None
350 num = 0
350 num = 0
351 closetopic = [None]
351 closetopic = [None]
352 def prog(topic, pos):
352 def prog(topic, pos):
353 if pos is None:
353 if pos is None:
354 closetopic[0] = topic
354 closetopic[0] = topic
355 else:
355 else:
356 ui.progress(topic, pos + num)
356 ui.progress(topic, pos + num)
357 srcpublishing = srcrepo.publishing()
357 srcpublishing = srcrepo.publishing()
358 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
358 srcvfs = vfsmod.vfs(srcrepo.sharedpath)
359 dstvfs = vfsmod.vfs(destpath)
359 dstvfs = vfsmod.vfs(destpath)
360 for f in srcrepo.store.copylist():
360 for f in srcrepo.store.copylist():
361 if srcpublishing and f.endswith('phaseroots'):
361 if srcpublishing and f.endswith('phaseroots'):
362 continue
362 continue
363 dstbase = os.path.dirname(f)
363 dstbase = os.path.dirname(f)
364 if dstbase and not dstvfs.exists(dstbase):
364 if dstbase and not dstvfs.exists(dstbase):
365 dstvfs.mkdir(dstbase)
365 dstvfs.mkdir(dstbase)
366 if srcvfs.exists(f):
366 if srcvfs.exists(f):
367 if f.endswith('data'):
367 if f.endswith('data'):
368 # 'dstbase' may be empty (e.g. revlog format 0)
368 # 'dstbase' may be empty (e.g. revlog format 0)
369 lockfile = os.path.join(dstbase, "lock")
369 lockfile = os.path.join(dstbase, "lock")
370 # lock to avoid premature writing to the target
370 # lock to avoid premature writing to the target
371 destlock = lock.lock(dstvfs, lockfile)
371 destlock = lock.lock(dstvfs, lockfile)
372 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
372 hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
373 hardlink, progress=prog)
373 hardlink, progress=prog)
374 num += n
374 num += n
375 if hardlink:
375 if hardlink:
376 ui.debug("linked %d files\n" % num)
376 ui.debug("linked %d files\n" % num)
377 if closetopic[0]:
377 if closetopic[0]:
378 ui.progress(closetopic[0], None)
378 ui.progress(closetopic[0], None)
379 else:
379 else:
380 ui.debug("copied %d files\n" % num)
380 ui.debug("copied %d files\n" % num)
381 if closetopic[0]:
381 if closetopic[0]:
382 ui.progress(closetopic[0], None)
382 ui.progress(closetopic[0], None)
383 return destlock
383 return destlock
384 except: # re-raises
384 except: # re-raises
385 release(destlock)
385 release(destlock)
386 raise
386 raise
387
387
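The interesting contract here belongs to util.copyfiles: the first call passes hardlink=None, the function probes whether hard links are possible, and the flag it returns is threaded through the remaining calls so the whole store ends up consistently linked or copied. A sketch of that loop shape (paths and file names illustrative):

    from mercurial import util, vfs as vfsmod

    srcvfs = vfsmod.vfs('/path/to/src/.hg/store')
    dstvfs = vfsmod.vfs('/path/to/dst/.hg/store')
    hardlink = None
    for f in ('00changelog.i', '00manifest.i'):
        hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                     hardlink)
        # hardlink is now True (linked) or False (copied) and is reused
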
388 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
388 def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
389 rev=None, update=True, stream=False):
389 rev=None, update=True, stream=False):
390 """Perform a clone using a shared repo.
390 """Perform a clone using a shared repo.
391
391
392 The store for the repository will be located at <sharepath>/.hg. The
392 The store for the repository will be located at <sharepath>/.hg. The
393 specified revisions will be cloned or pulled from "source". A shared repo
393 specified revisions will be cloned or pulled from "source". A shared repo
394 will be created at "dest" and a working copy will be created if "update" is
394 will be created at "dest" and a working copy will be created if "update" is
395 True.
395 True.
396 """
396 """
397 revs = None
397 revs = None
398 if rev:
398 if rev:
399 if not srcpeer.capable('lookup'):
399 if not srcpeer.capable('lookup'):
400 raise error.Abort(_("src repository does not support "
400 raise error.Abort(_("src repository does not support "
401 "revision lookup and so doesn't "
401 "revision lookup and so doesn't "
402 "support clone by revision"))
402 "support clone by revision"))
403 revs = [srcpeer.lookup(r) for r in rev]
403 revs = [srcpeer.lookup(r) for r in rev]
404
404
405 # Obtain a lock before checking for or cloning the pooled repo otherwise
405 # Obtain a lock before checking for or cloning the pooled repo otherwise
406 # 2 clients may race creating or populating it.
406 # 2 clients may race creating or populating it.
407 pooldir = os.path.dirname(sharepath)
407 pooldir = os.path.dirname(sharepath)
408 # lock class requires the directory to exist.
408 # lock class requires the directory to exist.
409 try:
409 try:
410 util.makedir(pooldir, False)
410 util.makedir(pooldir, False)
411 except OSError as e:
411 except OSError as e:
412 if e.errno != errno.EEXIST:
412 if e.errno != errno.EEXIST:
413 raise
413 raise
414
414
415 poolvfs = vfsmod.vfs(pooldir)
415 poolvfs = vfsmod.vfs(pooldir)
416 basename = os.path.basename(sharepath)
416 basename = os.path.basename(sharepath)
417
417
418 with lock.lock(poolvfs, '%s.lock' % basename):
418 with lock.lock(poolvfs, '%s.lock' % basename):
419 if os.path.exists(sharepath):
419 if os.path.exists(sharepath):
420 ui.status(_('(sharing from existing pooled repository %s)\n') %
420 ui.status(_('(sharing from existing pooled repository %s)\n') %
421 basename)
421 basename)
422 else:
422 else:
423 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
423 ui.status(_('(sharing from new pooled repository %s)\n') % basename)
424 # Always use pull mode because hardlinks in share mode don't work
424 # Always use pull mode because hardlinks in share mode don't work
425 # well. Never update because working copies aren't necessary in
425 # well. Never update because working copies aren't necessary in
426 # share mode.
426 # share mode.
427 clone(ui, peeropts, source, dest=sharepath, pull=True,
427 clone(ui, peeropts, source, dest=sharepath, pull=True,
428 rev=rev, update=False, stream=stream)
428 rev=rev, update=False, stream=stream)
429
429
430 # Resolve the value to put in [paths] section for the source.
430 # Resolve the value to put in [paths] section for the source.
431 if islocal(source):
431 if islocal(source):
432 defaultpath = os.path.abspath(util.urllocalpath(source))
432 defaultpath = os.path.abspath(util.urllocalpath(source))
433 else:
433 else:
434 defaultpath = source
434 defaultpath = source
435
435
436 sharerepo = repository(ui, path=sharepath)
436 sharerepo = repository(ui, path=sharepath)
437 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
437 share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
438 defaultpath=defaultpath)
438 defaultpath=defaultpath)
439
439
440 # We need to perform a pull against the dest repo to fetch bookmarks
440 # We need to perform a pull against the dest repo to fetch bookmarks
441 # and other non-store data that isn't shared by default. In the case of
441 # and other non-store data that isn't shared by default. In the case of
442 # a freshly created shared repo, this means we pull from the remote twice. This
442 # a freshly created shared repo, this means we pull from the remote twice. This
443 # is a bit weird. But at the time it was implemented, there wasn't an easy
443 # is a bit weird. But at the time it was implemented, there wasn't an easy
444 # way to pull just non-changegroup data.
444 # way to pull just non-changegroup data.
445 destrepo = repository(ui, path=dest)
445 destrepo = repository(ui, path=dest)
446 exchange.pull(destrepo, srcpeer, heads=revs)
446 exchange.pull(destrepo, srcpeer, heads=revs)
447
447
448 _postshareupdate(destrepo, update)
448 _postshareupdate(destrepo, update)
449
449
450 return srcpeer, peer(ui, peeropts, dest)
450 return srcpeer, peer(ui, peeropts, dest)
451
451
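clonewithshare() is normally reached through clone() below when shareopts names a pool directory. A hedged sketch of triggering it from Python (URL and paths invented):

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    # clone into ./proj while keeping the store under /srv/hgpool/<rootnode>
    srcpeer, destpeer = hg.clone(ui, {}, 'https://example.com/repo',
                                 dest='proj',
                                 shareopts={'pool': '/srv/hgpool',
                                            'mode': 'identity'})
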
452 # Recomputing branch cache might be slow on big repos,
452 # Recomputing branch cache might be slow on big repos,
453 # so just copy it
453 # so just copy it
454 def _copycache(srcrepo, dstcachedir, fname):
454 def _copycache(srcrepo, dstcachedir, fname):
455 """copy a cache from srcrepo to destcachedir (if it exists)"""
455 """copy a cache from srcrepo to destcachedir (if it exists)"""
456 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
456 srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
457 dstbranchcache = os.path.join(dstcachedir, fname)
457 dstbranchcache = os.path.join(dstcachedir, fname)
458 if os.path.exists(srcbranchcache):
458 if os.path.exists(srcbranchcache):
459 if not os.path.exists(dstcachedir):
459 if not os.path.exists(dstcachedir):
460 os.mkdir(dstcachedir)
460 os.mkdir(dstcachedir)
461 util.copyfile(srcbranchcache, dstbranchcache)
461 util.copyfile(srcbranchcache, dstbranchcache)
462
462
463 def _cachetocopy(srcrepo):
463 def _cachetocopy(srcrepo):
464 """return the list of cache file valuable to copy during a clone"""
464 """return the list of cache file valuable to copy during a clone"""
465 # In local clones we're copying all nodes, not just served
465 # In local clones we're copying all nodes, not just served
466 # ones. Therefore copy all branch caches over.
466 # ones. Therefore copy all branch caches over.
467 cachefiles = ['branch2']
467 cachefiles = ['branch2']
468 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
468 cachefiles += ['branch2-%s' % f for f in repoview.filtertable]
469 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
469 cachefiles += ['rbc-names-v1', 'rbc-revs-v1']
470 cachefiles += ['tags2']
470 cachefiles += ['tags2']
471 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
471 cachefiles += ['tags2-%s' % f for f in repoview.filtertable]
472 cachefiles += ['hgtagsfnodes1']
472 cachefiles += ['hgtagsfnodes1']
473 return cachefiles
473 return cachefiles
474
474
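For a repository with the stock repoview filters this expands to names along the following lines; the exact set depends on repoview.filtertable:

    # illustrative result of _cachetocopy(), assuming filters such as
    # 'visible', 'served', 'immutable' and 'base':
    #   branch2, branch2-visible, branch2-served, ...
    #   rbc-names-v1, rbc-revs-v1
    #   tags2, tags2-visible, tags2-served, ...
    #   hgtagsfnodes1
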
475 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
475 def clone(ui, peeropts, source, dest=None, pull=False, rev=None,
476 update=True, stream=False, branch=None, shareopts=None):
476 update=True, stream=False, branch=None, shareopts=None):
477 """Make a copy of an existing repository.
477 """Make a copy of an existing repository.
478
478
479 Create a copy of an existing repository in a new directory. The
479 Create a copy of an existing repository in a new directory. The
480 source and destination are URLs, as passed to the repository
480 source and destination are URLs, as passed to the repository
481 function. Returns a pair of repository peers, the source and
481 function. Returns a pair of repository peers, the source and
482 newly created destination.
482 newly created destination.
483
483
484 The location of the source is added to the new repository's
484 The location of the source is added to the new repository's
485 .hg/hgrc file, as the default to be used for future pulls and
485 .hg/hgrc file, as the default to be used for future pulls and
486 pushes.
486 pushes.
487
487
488 If an exception is raised, the partly cloned/updated destination
488 If an exception is raised, the partly cloned/updated destination
489 repository will be deleted.
489 repository will be deleted.
490
490
491 Arguments:
491 Arguments:
492
492
493 source: repository object or URL
493 source: repository object or URL
494
494
495 dest: URL of destination repository to create (defaults to base
495 dest: URL of destination repository to create (defaults to base
496 name of source repository)
496 name of source repository)
497
497
498 pull: always pull from source repository, even in local case or if the
498 pull: always pull from source repository, even in local case or if the
499 server prefers streaming
499 server prefers streaming
500
500
501 stream: stream raw data uncompressed from repository (fast over
501 stream: stream raw data uncompressed from repository (fast over
502 LAN, slow over WAN)
502 LAN, slow over WAN)
503
503
504 rev: revision to clone up to (implies pull=True)
504 rev: revision to clone up to (implies pull=True)
505
505
506 update: update working directory after clone completes, if
506 update: update working directory after clone completes, if
507 destination is local repository (True means update to default rev,
507 destination is local repository (True means update to default rev,
508 anything else is treated as a revision)
508 anything else is treated as a revision)
509
509
510 branch: branches to clone
510 branch: branches to clone
511
511
512 shareopts: dict of options to control auto sharing behavior. The "pool" key
512 shareopts: dict of options to control auto sharing behavior. The "pool" key
513 activates auto sharing mode and defines the directory for stores. The
513 activates auto sharing mode and defines the directory for stores. The
514 "mode" key determines how to construct the directory name of the shared
514 "mode" key determines how to construct the directory name of the shared
515 repository. "identity" means the name is derived from the node of the first
515 repository. "identity" means the name is derived from the node of the first
516 changeset in the repository. "remote" means the name is derived from the
516 changeset in the repository. "remote" means the name is derived from the
517 remote's path/URL. Defaults to "identity."
517 remote's path/URL. Defaults to "identity."
518 """
518 """
519
519
520 if isinstance(source, bytes):
520 if isinstance(source, bytes):
521 origsource = ui.expandpath(source)
521 origsource = ui.expandpath(source)
522 source, branch = parseurl(origsource, branch)
522 source, branch = parseurl(origsource, branch)
523 srcpeer = peer(ui, peeropts, source)
523 srcpeer = peer(ui, peeropts, source)
524 else:
524 else:
525 srcpeer = source.peer() # in case we were called with a localrepo
525 srcpeer = source.peer() # in case we were called with a localrepo
526 branch = (None, branch or [])
526 branch = (None, branch or [])
527 origsource = source = srcpeer.url()
527 origsource = source = srcpeer.url()
528 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
528 rev, checkout = addbranchrevs(srcpeer, srcpeer, branch, rev)
529
529
530 if dest is None:
530 if dest is None:
531 dest = defaultdest(source)
531 dest = defaultdest(source)
532 if dest:
532 if dest:
533 ui.status(_("destination directory: %s\n") % dest)
533 ui.status(_("destination directory: %s\n") % dest)
534 else:
534 else:
535 dest = ui.expandpath(dest)
535 dest = ui.expandpath(dest)
536
536
537 dest = util.urllocalpath(dest)
537 dest = util.urllocalpath(dest)
538 source = util.urllocalpath(source)
538 source = util.urllocalpath(source)
539
539
540 if not dest:
540 if not dest:
541 raise error.Abort(_("empty destination path is not valid"))
541 raise error.Abort(_("empty destination path is not valid"))
542
542
543 destvfs = vfsmod.vfs(dest, expandpath=True)
543 destvfs = vfsmod.vfs(dest, expandpath=True)
544 if destvfs.lexists():
544 if destvfs.lexists():
545 if not destvfs.isdir():
545 if not destvfs.isdir():
546 raise error.Abort(_("destination '%s' already exists") % dest)
546 raise error.Abort(_("destination '%s' already exists") % dest)
547 elif destvfs.listdir():
547 elif destvfs.listdir():
548 raise error.Abort(_("destination '%s' is not empty") % dest)
548 raise error.Abort(_("destination '%s' is not empty") % dest)
549
549
550 shareopts = shareopts or {}
550 shareopts = shareopts or {}
551 sharepool = shareopts.get('pool')
551 sharepool = shareopts.get('pool')
552 sharenamemode = shareopts.get('mode')
552 sharenamemode = shareopts.get('mode')
553 if sharepool and islocal(dest):
553 if sharepool and islocal(dest):
554 sharepath = None
554 sharepath = None
555 if sharenamemode == 'identity':
555 if sharenamemode == 'identity':
556 # Resolve the name from the initial changeset in the remote
556 # Resolve the name from the initial changeset in the remote
557 # repository. This returns nullid when the remote is empty. It
557 # repository. This returns nullid when the remote is empty. It
558 # raises RepoLookupError if revision 0 is filtered or otherwise
558 # raises RepoLookupError if revision 0 is filtered or otherwise
559 # not available. If we fail to resolve, sharing is not enabled.
559 # not available. If we fail to resolve, sharing is not enabled.
560 try:
560 try:
561 rootnode = srcpeer.lookup('0')
561 rootnode = srcpeer.lookup('0')
562 if rootnode != node.nullid:
562 if rootnode != node.nullid:
563 sharepath = os.path.join(sharepool, node.hex(rootnode))
563 sharepath = os.path.join(sharepool, node.hex(rootnode))
564 else:
564 else:
565 ui.status(_('(not using pooled storage: '
565 ui.status(_('(not using pooled storage: '
566 'remote appears to be empty)\n'))
566 'remote appears to be empty)\n'))
567 except error.RepoLookupError:
567 except error.RepoLookupError:
568 ui.status(_('(not using pooled storage: '
568 ui.status(_('(not using pooled storage: '
569 'unable to resolve identity of remote)\n'))
569 'unable to resolve identity of remote)\n'))
570 elif sharenamemode == 'remote':
570 elif sharenamemode == 'remote':
571 sharepath = os.path.join(
571 sharepath = os.path.join(
572 sharepool, hashlib.sha1(source).hexdigest())
572 sharepool, hashlib.sha1(source).hexdigest())
573 else:
573 else:
574 raise error.Abort(_('unknown share naming mode: %s') %
574 raise error.Abort(_('unknown share naming mode: %s') %
575 sharenamemode)
575 sharenamemode)
576
576
577 if sharepath:
577 if sharepath:
578 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
578 return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
579 dest, pull=pull, rev=rev, update=update,
579 dest, pull=pull, rev=rev, update=update,
580 stream=stream)
580 stream=stream)
581
581
582 srclock = destlock = cleandir = None
582 srclock = destlock = cleandir = None
583 srcrepo = srcpeer.local()
583 srcrepo = srcpeer.local()
584 try:
584 try:
585 abspath = origsource
585 abspath = origsource
586 if islocal(origsource):
586 if islocal(origsource):
587 abspath = os.path.abspath(util.urllocalpath(origsource))
587 abspath = os.path.abspath(util.urllocalpath(origsource))
588
588
589 if islocal(dest):
589 if islocal(dest):
590 cleandir = dest
590 cleandir = dest
591
591
592 copy = False
592 copy = False
593 if (srcrepo and srcrepo.cancopy() and islocal(dest)
593 if (srcrepo and srcrepo.cancopy() and islocal(dest)
594 and not phases.hassecret(srcrepo)):
594 and not phases.hassecret(srcrepo)):
595 copy = not pull and not rev
595 copy = not pull and not rev
596
596
597 if copy:
597 if copy:
598 try:
598 try:
599 # we use a lock here because if we race with commit, we
599 # we use a lock here because if we race with commit, we
600 # can end up with extra data in the cloned revlogs that's
600 # can end up with extra data in the cloned revlogs that's
601 # not pointed to by changesets, thus causing verify to
601 # not pointed to by changesets, thus causing verify to
602 # fail
602 # fail
603 srclock = srcrepo.lock(wait=False)
603 srclock = srcrepo.lock(wait=False)
604 except error.LockError:
604 except error.LockError:
605 copy = False
605 copy = False
606
606
607 if copy:
607 if copy:
608 srcrepo.hook('preoutgoing', throw=True, source='clone')
608 srcrepo.hook('preoutgoing', throw=True, source='clone')
609 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
609 hgdir = os.path.realpath(os.path.join(dest, ".hg"))
610 if not os.path.exists(dest):
610 if not os.path.exists(dest):
611 os.mkdir(dest)
611 os.mkdir(dest)
612 else:
612 else:
613 # only clean up directories we create ourselves
613 # only clean up directories we create ourselves
614 cleandir = hgdir
614 cleandir = hgdir
615 try:
615 try:
616 destpath = hgdir
616 destpath = hgdir
617 util.makedir(destpath, notindexed=True)
617 util.makedir(destpath, notindexed=True)
618 except OSError as inst:
618 except OSError as inst:
619 if inst.errno == errno.EEXIST:
619 if inst.errno == errno.EEXIST:
620 cleandir = None
620 cleandir = None
621 raise error.Abort(_("destination '%s' already exists")
621 raise error.Abort(_("destination '%s' already exists")
622 % dest)
622 % dest)
623 raise
623 raise
624
624
625 destlock = copystore(ui, srcrepo, destpath)
625 destlock = copystore(ui, srcrepo, destpath)
626 # copy bookmarks over
626 # copy bookmarks over
627 srcbookmarks = srcrepo.vfs.join('bookmarks')
627 srcbookmarks = srcrepo.vfs.join('bookmarks')
628 dstbookmarks = os.path.join(destpath, 'bookmarks')
628 dstbookmarks = os.path.join(destpath, 'bookmarks')
629 if os.path.exists(srcbookmarks):
629 if os.path.exists(srcbookmarks):
630 util.copyfile(srcbookmarks, dstbookmarks)
630 util.copyfile(srcbookmarks, dstbookmarks)
631
631
632 dstcachedir = os.path.join(destpath, 'cache')
632 dstcachedir = os.path.join(destpath, 'cache')
633 for cache in _cachetocopy(srcrepo):
633 for cache in _cachetocopy(srcrepo):
634 _copycache(srcrepo, dstcachedir, cache)
634 _copycache(srcrepo, dstcachedir, cache)
635
635
636 # we need to re-init the repo after manually copying the data
636 # we need to re-init the repo after manually copying the data
637 # into it
637 # into it
638 destpeer = peer(srcrepo, peeropts, dest)
638 destpeer = peer(srcrepo, peeropts, dest)
639 srcrepo.hook('outgoing', source='clone',
639 srcrepo.hook('outgoing', source='clone',
640 node=node.hex(node.nullid))
640 node=node.hex(node.nullid))
641 else:
641 else:
642 try:
642 try:
643 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
643 destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
644 # only pass ui when no srcrepo
644 # only pass ui when no srcrepo
645 except OSError as inst:
645 except OSError as inst:
646 if inst.errno == errno.EEXIST:
646 if inst.errno == errno.EEXIST:
647 cleandir = None
647 cleandir = None
648 raise error.Abort(_("destination '%s' already exists")
648 raise error.Abort(_("destination '%s' already exists")
649 % dest)
649 % dest)
650 raise
650 raise
651
651
652 revs = None
652 revs = None
653 if rev:
653 if rev:
654 if not srcpeer.capable('lookup'):
654 if not srcpeer.capable('lookup'):
655 raise error.Abort(_("src repository does not support "
655 raise error.Abort(_("src repository does not support "
656 "revision lookup and so doesn't "
656 "revision lookup and so doesn't "
657 "support clone by revision"))
657 "support clone by revision"))
658 revs = [srcpeer.lookup(r) for r in rev]
658 revs = [srcpeer.lookup(r) for r in rev]
659 checkout = revs[0]
659 checkout = revs[0]
660 local = destpeer.local()
660 local = destpeer.local()
661 if local:
661 if local:
662 if not stream:
662 if not stream:
663 if pull:
663 if pull:
664 stream = False
664 stream = False
665 else:
665 else:
666 stream = None
666 stream = None
667 # internal config: ui.quietbookmarkmove
667 # internal config: ui.quietbookmarkmove
668 overrides = {('ui', 'quietbookmarkmove'): True}
668 overrides = {('ui', 'quietbookmarkmove'): True}
669 with local.ui.configoverride(overrides, 'clone'):
669 with local.ui.configoverride(overrides, 'clone'):
670 exchange.pull(local, srcpeer, revs,
670 exchange.pull(local, srcpeer, revs,
671 streamclonerequested=stream)
671 streamclonerequested=stream)
672 elif srcrepo:
672 elif srcrepo:
673 exchange.push(srcrepo, destpeer, revs=revs,
673 exchange.push(srcrepo, destpeer, revs=revs,
674 bookmarks=srcrepo._bookmarks.keys())
674 bookmarks=srcrepo._bookmarks.keys())
675 else:
675 else:
676 raise error.Abort(_("clone from remote to remote not supported")
676 raise error.Abort(_("clone from remote to remote not supported")
677 )
677 )
678
678
679 cleandir = None
679 cleandir = None
680
680
681 destrepo = destpeer.local()
681 destrepo = destpeer.local()
682 if destrepo:
682 if destrepo:
683 template = uimod.samplehgrcs['cloned']
683 template = uimod.samplehgrcs['cloned']
684 fp = destrepo.vfs("hgrc", "wb")
684 fp = destrepo.vfs("hgrc", "wb")
685 u = util.url(abspath)
685 u = util.url(abspath)
686 u.passwd = None
686 u.passwd = None
687 defaulturl = bytes(u)
687 defaulturl = bytes(u)
688 fp.write(util.tonativeeol(template % defaulturl))
688 fp.write(util.tonativeeol(template % defaulturl))
689 fp.close()
689 fp.close()
690
690
691 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
691 destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')
692
692
693 if ui.configbool('experimental', 'remotenames'):
693 if ui.configbool('experimental', 'remotenames'):
694 remotenames.pullremotenames(destrepo, srcpeer)
694 logexchange.pullremotenames(destrepo, srcpeer)
695
695
696 if update:
696 if update:
697 if update is not True:
697 if update is not True:
698 checkout = srcpeer.lookup(update)
698 checkout = srcpeer.lookup(update)
699 uprev = None
699 uprev = None
700 status = None
700 status = None
701 if checkout is not None:
701 if checkout is not None:
702 try:
702 try:
703 uprev = destrepo.lookup(checkout)
703 uprev = destrepo.lookup(checkout)
704 except error.RepoLookupError:
704 except error.RepoLookupError:
705 if update is not True:
705 if update is not True:
706 try:
706 try:
707 uprev = destrepo.lookup(update)
707 uprev = destrepo.lookup(update)
708 except error.RepoLookupError:
708 except error.RepoLookupError:
709 pass
709 pass
710 if uprev is None:
710 if uprev is None:
711 try:
711 try:
712 uprev = destrepo._bookmarks['@']
712 uprev = destrepo._bookmarks['@']
713 update = '@'
713 update = '@'
714 bn = destrepo[uprev].branch()
714 bn = destrepo[uprev].branch()
715 if bn == 'default':
715 if bn == 'default':
716 status = _("updating to bookmark @\n")
716 status = _("updating to bookmark @\n")
717 else:
717 else:
718 status = (_("updating to bookmark @ on branch %s\n")
718 status = (_("updating to bookmark @ on branch %s\n")
719 % bn)
719 % bn)
720 except KeyError:
720 except KeyError:
721 try:
721 try:
722 uprev = destrepo.branchtip('default')
722 uprev = destrepo.branchtip('default')
723 except error.RepoLookupError:
723 except error.RepoLookupError:
724 uprev = destrepo.lookup('tip')
724 uprev = destrepo.lookup('tip')
725 if not status:
725 if not status:
726 bn = destrepo[uprev].branch()
726 bn = destrepo[uprev].branch()
727 status = _("updating to branch %s\n") % bn
727 status = _("updating to branch %s\n") % bn
728 destrepo.ui.status(status)
728 destrepo.ui.status(status)
729 _update(destrepo, uprev)
729 _update(destrepo, uprev)
730 if update in destrepo._bookmarks:
730 if update in destrepo._bookmarks:
731 bookmarks.activate(destrepo, update)
731 bookmarks.activate(destrepo, update)
732 finally:
732 finally:
733 release(srclock, destlock)
733 release(srclock, destlock)
734 if cleandir is not None:
734 if cleandir is not None:
735 shutil.rmtree(cleandir, True)
735 shutil.rmtree(cleandir, True)
736 if srcpeer is not None:
736 if srcpeer is not None:
737 srcpeer.close()
737 srcpeer.close()
738 return srcpeer, destpeer
738 return srcpeer, destpeer
739
739
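A minimal end-to-end sketch of the plain (non-pooled) path, using only arguments documented in the docstring above:

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    # local clone: takes the hardlink/copy fast path when it can, else pulls
    srcpeer, destpeer = hg.clone(ui, {}, 'main', dest='copy',
                                 pull=False, update=True)
    repo = destpeer.local()  # localrepository for the new clone
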
740 def _showstats(repo, stats, quietempty=False):
740 def _showstats(repo, stats, quietempty=False):
741 if quietempty and not any(stats):
741 if quietempty and not any(stats):
742 return
742 return
743 repo.ui.status(_("%d files updated, %d files merged, "
743 repo.ui.status(_("%d files updated, %d files merged, "
744 "%d files removed, %d files unresolved\n") % stats)
744 "%d files removed, %d files unresolved\n") % stats)
745
745
746 def updaterepo(repo, node, overwrite, updatecheck=None):
746 def updaterepo(repo, node, overwrite, updatecheck=None):
747 """Update the working directory to node.
747 """Update the working directory to node.
748
748
749 When overwrite is set, working directory changes are clobbered; otherwise they are merged
749 When overwrite is set, working directory changes are clobbered; otherwise they are merged
750
750
751 returns stats (see pydoc mercurial.merge.applyupdates)"""
751 returns stats (see pydoc mercurial.merge.applyupdates)"""
752 return mergemod.update(repo, node, False, overwrite,
752 return mergemod.update(repo, node, False, overwrite,
753 labels=['working copy', 'destination'],
753 labels=['working copy', 'destination'],
754 updatecheck=updatecheck)
754 updatecheck=updatecheck)
755
755
756 def update(repo, node, quietempty=False, updatecheck=None):
756 def update(repo, node, quietempty=False, updatecheck=None):
757 """update the working directory to node"""
757 """update the working directory to node"""
758 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
758 stats = updaterepo(repo, node, False, updatecheck=updatecheck)
759 _showstats(repo, stats, quietempty)
759 _showstats(repo, stats, quietempty)
760 if stats[3]:
760 if stats[3]:
761 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
761 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
762 return stats[3] > 0
762 return stats[3] > 0
763
763
764 # naming conflict in clone()
764 # naming conflict in clone()
765 _update = update
765 _update = update
766
766
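update() (aliased to _update above to dodge the name clash inside clone()) reports whether unresolved files remain; a sketch:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    # returns True when unresolved files remain after the update
    hadconflicts = hg.update(repo, 'default')
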
767 def clean(repo, node, show_stats=True, quietempty=False):
767 def clean(repo, node, show_stats=True, quietempty=False):
768 """forcibly switch the working directory to node, clobbering changes"""
768 """forcibly switch the working directory to node, clobbering changes"""
769 stats = updaterepo(repo, node, True)
769 stats = updaterepo(repo, node, True)
770 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
770 repo.vfs.unlinkpath('graftstate', ignoremissing=True)
771 if show_stats:
771 if show_stats:
772 _showstats(repo, stats, quietempty)
772 _showstats(repo, stats, quietempty)
773 return stats[3] > 0
773 return stats[3] > 0
774
774
775 # naming conflict in updatetotally()
775 # naming conflict in updatetotally()
776 _clean = clean
776 _clean = clean
777
777
778 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
778 def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
779 """Update the working directory with extra care for non-file components
779 """Update the working directory with extra care for non-file components
780
780
781 This takes care of non-file components below:
781 This takes care of non-file components below:
782
782
783 :bookmark: might be advanced or (in)activated
783 :bookmark: might be advanced or (in)activated
784
784
785 This takes arguments below:
785 This takes arguments below:
786
786
787 :checkout: to which revision the working directory is updated
787 :checkout: to which revision the working directory is updated
788 :brev: a name, which might be a bookmark to be activated after updating
788 :brev: a name, which might be a bookmark to be activated after updating
789 :clean: whether changes in the working directory can be discarded
789 :clean: whether changes in the working directory can be discarded
790 :updatecheck: how to deal with a dirty working directory
790 :updatecheck: how to deal with a dirty working directory
791
791
792 Valid values for updatecheck are (None => linear):
792 Valid values for updatecheck are (None => linear):
793
793
794 * abort: abort if the working directory is dirty
794 * abort: abort if the working directory is dirty
795 * none: don't check (merge working directory changes into destination)
795 * none: don't check (merge working directory changes into destination)
796 * linear: check that update is linear before merging working directory
796 * linear: check that update is linear before merging working directory
797 changes into destination
797 changes into destination
798 * noconflict: check that the update does not result in file merges
798 * noconflict: check that the update does not result in file merges
799
799
800 This returns whether a conflict was detected during the update.
800 This returns whether a conflict was detected during the update.
801 """
801 """
802 if updatecheck is None:
802 if updatecheck is None:
803 updatecheck = ui.config('commands', 'update.check')
803 updatecheck = ui.config('commands', 'update.check')
804 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
804 if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
805 # If not configured, or invalid value configured
805 # If not configured, or invalid value configured
806 updatecheck = 'linear'
806 updatecheck = 'linear'
807 with repo.wlock():
807 with repo.wlock():
808 movemarkfrom = None
808 movemarkfrom = None
809 warndest = False
809 warndest = False
810 if checkout is None:
810 if checkout is None:
811 updata = destutil.destupdate(repo, clean=clean)
811 updata = destutil.destupdate(repo, clean=clean)
812 checkout, movemarkfrom, brev = updata
812 checkout, movemarkfrom, brev = updata
813 warndest = True
813 warndest = True
814
814
815 if clean:
815 if clean:
816 ret = _clean(repo, checkout)
816 ret = _clean(repo, checkout)
817 else:
817 else:
818 if updatecheck == 'abort':
818 if updatecheck == 'abort':
819 cmdutil.bailifchanged(repo, merge=False)
819 cmdutil.bailifchanged(repo, merge=False)
820 updatecheck = 'none'
820 updatecheck = 'none'
821 ret = _update(repo, checkout, updatecheck=updatecheck)
821 ret = _update(repo, checkout, updatecheck=updatecheck)
822
822
823 if not ret and movemarkfrom:
823 if not ret and movemarkfrom:
824 if movemarkfrom == repo['.'].node():
824 if movemarkfrom == repo['.'].node():
825 pass # no-op update
825 pass # no-op update
826 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
826 elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
827 b = ui.label(repo._activebookmark, 'bookmarks.active')
827 b = ui.label(repo._activebookmark, 'bookmarks.active')
828 ui.status(_("updating bookmark %s\n") % b)
828 ui.status(_("updating bookmark %s\n") % b)
829 else:
829 else:
830 # this can happen with a non-linear update
830 # this can happen with a non-linear update
831 b = ui.label(repo._activebookmark, 'bookmarks')
831 b = ui.label(repo._activebookmark, 'bookmarks')
832 ui.status(_("(leaving bookmark %s)\n") % b)
832 ui.status(_("(leaving bookmark %s)\n") % b)
833 bookmarks.deactivate(repo)
833 bookmarks.deactivate(repo)
834 elif brev in repo._bookmarks:
834 elif brev in repo._bookmarks:
835 if brev != repo._activebookmark:
835 if brev != repo._activebookmark:
836 b = ui.label(brev, 'bookmarks.active')
836 b = ui.label(brev, 'bookmarks.active')
837 ui.status(_("(activating bookmark %s)\n") % b)
837 ui.status(_("(activating bookmark %s)\n") % b)
838 bookmarks.activate(repo, brev)
838 bookmarks.activate(repo, brev)
839 elif brev:
839 elif brev:
840 if repo._activebookmark:
840 if repo._activebookmark:
841 b = ui.label(repo._activebookmark, 'bookmarks')
841 b = ui.label(repo._activebookmark, 'bookmarks')
842 ui.status(_("(leaving bookmark %s)\n") % b)
842 ui.status(_("(leaving bookmark %s)\n") % b)
843 bookmarks.deactivate(repo)
843 bookmarks.deactivate(repo)
844
844
845 if warndest:
845 if warndest:
846 destutil.statusotherdests(ui, repo)
846 destutil.statusotherdests(ui, repo)
847
847
848 return ret
848 return ret
849
849
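The updatecheck policy normally comes from the commands.update.check config; a sketch of forcing 'noconflict' for a single call, reusing the configoverride mechanism seen in clone() above:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    overrides = {('commands', 'update.check'): 'noconflict'}
    with repo.ui.configoverride(overrides, 'example'):
        hadconflicts = hg.updatetotally(repo.ui, repo, checkout=None,
                                        brev=None)
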
850 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
850 def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None):
851 """Branch merge with node, resolving changes. Return true if any
851 """Branch merge with node, resolving changes. Return true if any
852 unresolved conflicts."""
852 unresolved conflicts."""
853 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
853 stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
854 labels=labels)
854 labels=labels)
855 _showstats(repo, stats)
855 _showstats(repo, stats)
856 if stats[3]:
856 if stats[3]:
857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
857 repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
858 "or 'hg update -C .' to abandon\n"))
858 "or 'hg update -C .' to abandon\n"))
859 elif remind:
859 elif remind:
860 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
860 repo.ui.status(_("(branch merge, don't forget to commit)\n"))
861 return stats[3] > 0
861 return stats[3] > 0
862
862
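merge() follows the same return convention; a sketch:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    # merge the 'stable' head into the working directory; True means
    # conflicts are waiting for 'hg resolve'
    if hg.merge(repo, 'stable'):
        repo.ui.status("merge left unresolved files\n")
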
863 def _incoming(displaychlist, subreporecurse, ui, repo, source,
863 def _incoming(displaychlist, subreporecurse, ui, repo, source,
864 opts, buffered=False):
864 opts, buffered=False):
865 """
865 """
866 Helper for incoming / gincoming.
866 Helper for incoming / gincoming.
867 displaychlist gets called with
867 displaychlist gets called with
868 (remoterepo, incomingchangesetlist, displayer) parameters,
868 (remoterepo, incomingchangesetlist, displayer) parameters,
869 and is supposed to contain only code that can't be unified.
869 and is supposed to contain only code that can't be unified.
870 """
870 """
871 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
871 source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
872 other = peer(repo, opts, source)
872 other = peer(repo, opts, source)
873 ui.status(_('comparing with %s\n') % util.hidepassword(source))
873 ui.status(_('comparing with %s\n') % util.hidepassword(source))
874 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
874 revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))
875
875
876 if revs:
876 if revs:
877 revs = [other.lookup(rev) for rev in revs]
877 revs = [other.lookup(rev) for rev in revs]
878 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
878 other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
879 revs, opts["bundle"], opts["force"])
879 revs, opts["bundle"], opts["force"])
880 try:
880 try:
881 if not chlist:
881 if not chlist:
882 ui.status(_("no changes found\n"))
882 ui.status(_("no changes found\n"))
883 return subreporecurse()
883 return subreporecurse()
884 ui.pager('incoming')
884 ui.pager('incoming')
885 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
885 displayer = cmdutil.show_changeset(ui, other, opts, buffered)
886 displaychlist(other, chlist, displayer)
886 displaychlist(other, chlist, displayer)
887 displayer.close()
887 displayer.close()
888 finally:
888 finally:
889 cleanupfn()
889 cleanupfn()
890 subreporecurse()
890 subreporecurse()
891 return 0 # exit code is zero since we found incoming changes
891 return 0 # exit code is zero since we found incoming changes
892
892
893 def incoming(ui, repo, source, opts):
893 def incoming(ui, repo, source, opts):
894 def subreporecurse():
894 def subreporecurse():
895 ret = 1
895 ret = 1
896 if opts.get('subrepos'):
896 if opts.get('subrepos'):
897 ctx = repo[None]
897 ctx = repo[None]
898 for subpath in sorted(ctx.substate):
898 for subpath in sorted(ctx.substate):
899 sub = ctx.sub(subpath)
899 sub = ctx.sub(subpath)
900 ret = min(ret, sub.incoming(ui, source, opts))
900 ret = min(ret, sub.incoming(ui, source, opts))
901 return ret
901 return ret
902
902
903 def display(other, chlist, displayer):
903 def display(other, chlist, displayer):
904 limit = cmdutil.loglimit(opts)
904 limit = cmdutil.loglimit(opts)
905 if opts.get('newest_first'):
905 if opts.get('newest_first'):
906 chlist.reverse()
906 chlist.reverse()
907 count = 0
907 count = 0
908 for n in chlist:
908 for n in chlist:
909 if limit is not None and count >= limit:
909 if limit is not None and count >= limit:
910 break
910 break
911 parents = [p for p in other.changelog.parents(n) if p != nullid]
911 parents = [p for p in other.changelog.parents(n) if p != nullid]
912 if opts.get('no_merges') and len(parents) == 2:
912 if opts.get('no_merges') and len(parents) == 2:
913 continue
913 continue
914 count += 1
914 count += 1
915 displayer.show(other[n])
915 displayer.show(other[n])
916 return _incoming(display, subreporecurse, ui, repo, source, opts)
916 return _incoming(display, subreporecurse, ui, repo, source, opts)
917
917
918 def _outgoing(ui, repo, dest, opts):
918 def _outgoing(ui, repo, dest, opts):
919 dest = ui.expandpath(dest or 'default-push', dest or 'default')
919 dest = ui.expandpath(dest or 'default-push', dest or 'default')
920 dest, branches = parseurl(dest, opts.get('branch'))
920 dest, branches = parseurl(dest, opts.get('branch'))
921 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
921 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
922 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
922 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
923 if revs:
923 if revs:
924 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
924 revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
925
925
926 other = peer(repo, opts, dest)
926 other = peer(repo, opts, dest)
927 outgoing = discovery.findcommonoutgoing(repo, other, revs,
927 outgoing = discovery.findcommonoutgoing(repo, other, revs,
928 force=opts.get('force'))
928 force=opts.get('force'))
929 o = outgoing.missing
929 o = outgoing.missing
930 if not o:
930 if not o:
931 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
931 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
932 return o, other
932 return o, other
933
933
934 def outgoing(ui, repo, dest, opts):
934 def outgoing(ui, repo, dest, opts):
935 def recurse():
935 def recurse():
936 ret = 1
936 ret = 1
937 if opts.get('subrepos'):
937 if opts.get('subrepos'):
938 ctx = repo[None]
938 ctx = repo[None]
939 for subpath in sorted(ctx.substate):
939 for subpath in sorted(ctx.substate):
940 sub = ctx.sub(subpath)
940 sub = ctx.sub(subpath)
941 ret = min(ret, sub.outgoing(ui, dest, opts))
941 ret = min(ret, sub.outgoing(ui, dest, opts))
942 return ret
942 return ret
943
943
944 limit = cmdutil.loglimit(opts)
944 limit = cmdutil.loglimit(opts)
945 o, other = _outgoing(ui, repo, dest, opts)
945 o, other = _outgoing(ui, repo, dest, opts)
946 if not o:
946 if not o:
947 cmdutil.outgoinghooks(ui, repo, other, opts, o)
947 cmdutil.outgoinghooks(ui, repo, other, opts, o)
948 return recurse()
948 return recurse()
949
949
950 if opts.get('newest_first'):
950 if opts.get('newest_first'):
951 o.reverse()
951 o.reverse()
952 ui.pager('outgoing')
952 ui.pager('outgoing')
953 displayer = cmdutil.show_changeset(ui, repo, opts)
953 displayer = cmdutil.show_changeset(ui, repo, opts)
954 count = 0
954 count = 0
955 for n in o:
955 for n in o:
956 if limit is not None and count >= limit:
956 if limit is not None and count >= limit:
957 break
957 break
958 parents = [p for p in repo.changelog.parents(n) if p != nullid]
958 parents = [p for p in repo.changelog.parents(n) if p != nullid]
959 if opts.get('no_merges') and len(parents) == 2:
959 if opts.get('no_merges') and len(parents) == 2:
960 continue
960 continue
961 count += 1
961 count += 1
962 displayer.show(repo[n])
962 displayer.show(repo[n])
963 displayer.close()
963 displayer.close()
964 cmdutil.outgoinghooks(ui, repo, other, opts, o)
964 cmdutil.outgoinghooks(ui, repo, other, opts, o)
965 recurse()
965 recurse()
966 return 0 # exit code is zero since we found outgoing changes
966 return 0 # exit code is zero since we found outgoing changes
967
967
968 def verify(repo):
968 def verify(repo):
969 """verify the consistency of a repository"""
969 """verify the consistency of a repository"""
970 ret = verifymod.verify(repo)
970 ret = verifymod.verify(repo)
971
971
972 # Broken subrepo references in hidden csets don't seem worth worrying about,
972 # Broken subrepo references in hidden csets don't seem worth worrying about,
973 # since they can't be pushed/pulled, and --hidden can be used if they are a
973 # since they can't be pushed/pulled, and --hidden can be used if they are a
974 # concern.
974 # concern.
975
975
976 # pathto() is needed for -R case
976 # pathto() is needed for -R case
977 revs = repo.revs("filelog(%s)",
977 revs = repo.revs("filelog(%s)",
978 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
978 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
979
979
980 if revs:
980 if revs:
981 repo.ui.status(_('checking subrepo links\n'))
981 repo.ui.status(_('checking subrepo links\n'))
982 for rev in revs:
982 for rev in revs:
983 ctx = repo[rev]
983 ctx = repo[rev]
984 try:
984 try:
985 for subpath in ctx.substate:
985 for subpath in ctx.substate:
986 try:
986 try:
987 ret = (ctx.sub(subpath, allowcreate=False).verify()
987 ret = (ctx.sub(subpath, allowcreate=False).verify()
988 or ret)
988 or ret)
989 except error.RepoError as e:
989 except error.RepoError as e:
990 repo.ui.warn(('%s: %s\n') % (rev, e))
990 repo.ui.warn(('%s: %s\n') % (rev, e))
991 except Exception:
991 except Exception:
992 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
992 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
993 node.short(ctx.node()))
993 node.short(ctx.node()))
994
994
995 return ret
995 return ret
996
996
997 def remoteui(src, opts):
997 def remoteui(src, opts):
998 'build a remote ui from ui or repo and opts'
998 'build a remote ui from ui or repo and opts'
999 if util.safehasattr(src, 'baseui'): # looks like a repository
999 if util.safehasattr(src, 'baseui'): # looks like a repository
1000 dst = src.baseui.copy() # drop repo-specific config
1000 dst = src.baseui.copy() # drop repo-specific config
1001 src = src.ui # copy target options from repo
1001 src = src.ui # copy target options from repo
1002 else: # assume it's a global ui object
1002 else: # assume it's a global ui object
1003 dst = src.copy() # keep all global options
1003 dst = src.copy() # keep all global options
1004
1004
1005 # copy ssh-specific options
1005 # copy ssh-specific options
1006 for o in 'ssh', 'remotecmd':
1006 for o in 'ssh', 'remotecmd':
1007 v = opts.get(o) or src.config('ui', o)
1007 v = opts.get(o) or src.config('ui', o)
1008 if v:
1008 if v:
1009 dst.setconfig("ui", o, v, 'copied')
1009 dst.setconfig("ui", o, v, 'copied')
1010
1010
1011 # copy bundle-specific options
1011 # copy bundle-specific options
1012 r = src.config('bundle', 'mainreporoot')
1012 r = src.config('bundle', 'mainreporoot')
1013 if r:
1013 if r:
1014 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1014 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1015
1015
1016 # copy selected local settings to the remote ui
1016 # copy selected local settings to the remote ui
1017 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1017 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1018 for key, val in src.configitems(sect):
1018 for key, val in src.configitems(sect):
1019 dst.setconfig(sect, key, val, 'copied')
1019 dst.setconfig(sect, key, val, 'copied')
1020 v = src.config('web', 'cacerts')
1020 v = src.config('web', 'cacerts')
1021 if v:
1021 if v:
1022 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1022 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1023
1023
1024 return dst
1024 return dst
1025
1025
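remoteui() is how per-repository configuration is kept out of peers while ssh and security settings still propagate; a sketch:

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui.load(), '.')
    # global options from repo.baseui plus the ssh/auth/proxy bits above
    dst = hg.remoteui(repo, {'ssh': 'ssh -C', 'remotecmd': None})
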
1026 # Files of interest
1026 # Files of interest
1027 # Used to check if the repository has changed looking at mtime and size of
1027 # Used to check if the repository has changed looking at mtime and size of
1028 # these files.
1028 # these files.
1029 foi = [('spath', '00changelog.i'),
1029 foi = [('spath', '00changelog.i'),
1030 ('spath', 'phaseroots'), # ! phase can change content at the same size
1030 ('spath', 'phaseroots'), # ! phase can change content at the same size
1031 ('spath', 'obsstore'),
1031 ('spath', 'obsstore'),
1032 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1032 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1033 ]
1033 ]
1034
1034
1035 class cachedlocalrepo(object):
1035 class cachedlocalrepo(object):
1036 """Holds a localrepository that can be cached and reused."""
1036 """Holds a localrepository that can be cached and reused."""
1037
1037
1038 def __init__(self, repo):
1038 def __init__(self, repo):
1039 """Create a new cached repo from an existing repo.
1039 """Create a new cached repo from an existing repo.
1040
1040
1041 We assume the passed in repo was recently created. If the
1041 We assume the passed in repo was recently created. If the
1042 repo has changed between when it was created and when it was
1042 repo has changed between when it was created and when it was
1043 turned into a cache, it may not refresh properly.
1043 turned into a cache, it may not refresh properly.
1044 """
1044 """
1045 assert isinstance(repo, localrepo.localrepository)
1045 assert isinstance(repo, localrepo.localrepository)
1046 self._repo = repo
1046 self._repo = repo
1047 self._state, self.mtime = self._repostate()
1047 self._state, self.mtime = self._repostate()
1048 self._filtername = repo.filtername
1048 self._filtername = repo.filtername
1049
1049
1050 def fetch(self):
1050 def fetch(self):
1051 """Refresh (if necessary) and return a repository.
1051 """Refresh (if necessary) and return a repository.
1052
1052
1053 If the cached instance is out of date, it will be recreated
1053 If the cached instance is out of date, it will be recreated
1054 automatically and returned.
1054 automatically and returned.
1055
1055
1056 Returns a tuple of the repo and a boolean indicating whether a new
1056 Returns a tuple of the repo and a boolean indicating whether a new
1057 repo instance was created.
1057 repo instance was created.
1058 """
1058 """
1059 # We compare the mtimes and sizes of some well-known files to
1059 # We compare the mtimes and sizes of some well-known files to
1060 # determine if the repo changed. This is not precise, as mtimes
1060 # determine if the repo changed. This is not precise, as mtimes
1061 # are susceptible to clock skew and imprecise filesystems and
1061 # are susceptible to clock skew and imprecise filesystems and
1062 # file content can change while maintaining the same size.
1062 # file content can change while maintaining the same size.
1063
1063
1064 state, mtime = self._repostate()
1064 state, mtime = self._repostate()
1065 if state == self._state:
1065 if state == self._state:
1066 return self._repo, False
1066 return self._repo, False
1067
1067
1068 repo = repository(self._repo.baseui, self._repo.url())
1068 repo = repository(self._repo.baseui, self._repo.url())
1069 if self._filtername:
1069 if self._filtername:
1070 self._repo = repo.filtered(self._filtername)
1070 self._repo = repo.filtered(self._filtername)
1071 else:
1071 else:
1072 self._repo = repo.unfiltered()
1072 self._repo = repo.unfiltered()
1073 self._state = state
1073 self._state = state
1074 self.mtime = mtime
1074 self.mtime = mtime
1075
1075
1076 return self._repo, True
1076 return self._repo, True
1077
1077
1078 def _repostate(self):
1078 def _repostate(self):
1079 state = []
1079 state = []
1080 maxmtime = -1
1080 maxmtime = -1
1081 for attr, fname in foi:
1081 for attr, fname in foi:
1082 prefix = getattr(self._repo, attr)
1082 prefix = getattr(self._repo, attr)
1083 p = os.path.join(prefix, fname)
1083 p = os.path.join(prefix, fname)
1084 try:
1084 try:
1085 st = os.stat(p)
1085 st = os.stat(p)
1086 except OSError:
1086 except OSError:
1087 st = os.stat(prefix)
1087 st = os.stat(prefix)
1088 state.append((st.st_mtime, st.st_size))
1088 state.append((st.st_mtime, st.st_size))
1089 maxmtime = max(maxmtime, st.st_mtime)
1089 maxmtime = max(maxmtime, st.st_mtime)
1090
1090
1091 return tuple(state), maxmtime
1091 return tuple(state), maxmtime
1092
1092
1093 def copy(self):
1093 def copy(self):
1094 """Obtain a copy of this class instance.
1094 """Obtain a copy of this class instance.
1095
1095
1096 A new localrepository instance is obtained. The new instance should be
1096 A new localrepository instance is obtained. The new instance should be
1097 completely independent of the original.
1097 completely independent of the original.
1098 """
1098 """
1099 repo = repository(self._repo.baseui, self._repo.origroot)
1099 repo = repository(self._repo.baseui, self._repo.origroot)
1100 if self._filtername:
1100 if self._filtername:
1101 repo = repo.filtered(self._filtername)
1101 repo = repo.filtered(self._filtername)
1102 else:
1102 else:
1103 repo = repo.unfiltered()
1103 repo = repo.unfiltered()
1104 c = cachedlocalrepo(repo)
1104 c = cachedlocalrepo(repo)
1105 c._state = self._state
1105 c._state = self._state
1106 c.mtime = self.mtime
1106 c.mtime = self.mtime
1107 return c
1107 return c
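cachedlocalrepo targets long-lived processes such as hgweb; a sketch of the intended fetch() loop:

    from mercurial import hg, ui as uimod

    cached = hg.cachedlocalrepo(hg.repository(uimod.ui.load(), '/srv/repo'))
    # later, once per request:
    repo, refreshed = cached.fetch()
    # refreshed is True when one of the foi files changed and a fresh
    # localrepository instance was constructed
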
@@ -1,118 +1,118
1 # remotenames.py
1 # logexchange.py
2 #
2 #
3 # Copyright 2017 Augie Fackler <raf@durin42.com>
3 # Copyright 2017 Augie Fackler <raf@durin42.com>
4 # Copyright 2017 Sean Farley <sean@farley.io>
4 # Copyright 2017 Sean Farley <sean@farley.io>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from .node import hex
11 from .node import hex
12
12
13 from . import (
13 from . import (
14 vfs as vfsmod,
14 vfs as vfsmod,
15 )
15 )
16
16
17 # directory name in .hg/ in which remotenames files will be present
17 # directory name in .hg/ in which remotenames files will be present
18 remotenamedir = 'remotenames'
18 remotenamedir = 'logexchange'
19
19
20 def readremotenamefile(repo, filename):
20 def readremotenamefile(repo, filename):
21 """
21 """
22 reads a file from the .hg/remotenames/ directory and yields its content
22 reads a file from the .hg/logexchange/ directory and yields its content
23 filename: the file to be read
23 filename: the file to be read
24 yields tuples of (node, remotepath, name)
24 yields tuples of (node, remotepath, name)
25 """
25 """
26
26
27 vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
27 vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
28 if not vfs.exists(filename):
28 if not vfs.exists(filename):
29 return
29 return
30 f = vfs(filename)
30 f = vfs(filename)
31 lineno = 0
31 lineno = 0
32 for line in f:
32 for line in f:
33 line = line.strip()
33 line = line.strip()
34 if not line:
34 if not line:
35 continue
35 continue
36 # contains the version number
36 # contains the version number
37 if lineno == 0:
37 if lineno == 0:
38 lineno += 1
38 lineno += 1
39 try:
39 try:
40 node, remote, rname = line.split('\0')
40 node, remote, rname = line.split('\0')
41 yield node, remote, rname
41 yield node, remote, rname
42 except ValueError:
42 except ValueError:
43 pass
43 pass
44
44
45 f.close()
45 f.close()
46
46
47 def readremotenames(repo):
47 def readremotenames(repo):
48 """
48 """
49 read the details about the remotenames stored in .hg/remotenames/ and
49 read the details about the remotenames stored in .hg/logexchange/ and
50 yields a tuple (node, remotepath, name). It does not yields information
50 yields a tuple (node, remotepath, name). It does not yields information
51 about whether an entry yielded is branch or bookmark. To get that
51 about whether an entry yielded is branch or bookmark. To get that
52 information, call the respective functions.
52 information, call the respective functions.
53 """
53 """
54
54
55 for bmentry in readremotenamefile(repo, 'bookmarks'):
55 for bmentry in readremotenamefile(repo, 'bookmarks'):
56 yield bmentry
56 yield bmentry
57 for branchentry in readremotenamefile(repo, 'branches'):
57 for branchentry in readremotenamefile(repo, 'branches'):
58 yield branchentry
58 yield branchentry
59
59
60 def writeremotenamefile(repo, remotepath, names, nametype):
60 def writeremotenamefile(repo, remotepath, names, nametype):
61 vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
61 vfs = vfsmod.vfs(repo.vfs.join(remotenamedir))
62 f = vfs(nametype, 'w', atomictemp=True)
62 f = vfs(nametype, 'w', atomictemp=True)
63 # write the storage version info on top of file
63 # write the storage version info on top of file
64 # version '0' represents the very initial version of the storage format
64 # version '0' represents the very initial version of the storage format
65 f.write('0\n\n')
65 f.write('0\n\n')
66
66
67 olddata = set(readremotenamefile(repo, nametype))
67 olddata = set(readremotenamefile(repo, nametype))
68 # re-save the data from a different remote than this one.
68 # re-save the data from a different remote than this one.
69 for node, oldpath, rname in sorted(olddata):
69 for node, oldpath, rname in sorted(olddata):
70 if oldpath != remotepath:
70 if oldpath != remotepath:
71 f.write('%s\0%s\0%s\n' % (node, oldpath, rname))
71 f.write('%s\0%s\0%s\n' % (node, oldpath, rname))
72
72
73 for name, node in sorted(names.iteritems()):
73 for name, node in sorted(names.iteritems()):
74 if nametype == "branches":
74 if nametype == "branches":
75 for n in node:
75 for n in node:
76 f.write('%s\0%s\0%s\n' % (n, remotepath, name))
76 f.write('%s\0%s\0%s\n' % (n, remotepath, name))
77 elif nametype == "bookmarks":
77 elif nametype == "bookmarks":
78 if node:
78 if node:
79 f.write('%s\0%s\0%s\n' % (node, remotepath, name))
79 f.write('%s\0%s\0%s\n' % (node, remotepath, name))
80
80
81 f.close()
81 f.close()
82
82
83 def saveremotenames(repo, remotepath, branches=None, bookmarks=None):
83 def saveremotenames(repo, remotepath, branches=None, bookmarks=None):
84 """
84 """
85 save remotenames i.e. remotebookmarks and remotebranches in their
85 save remotenames i.e. remotebookmarks and remotebranches in their
86 respective files under ".hg/remotenames/" directory.
86 respective files under ".hg/logexchange/" directory.
87 """
87 """
88 wlock = repo.wlock()
88 wlock = repo.wlock()
89 try:
89 try:
90 if bookmarks:
90 if bookmarks:
91 writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks')
91 writeremotenamefile(repo, remotepath, bookmarks, 'bookmarks')
92 if branches:
92 if branches:
93 writeremotenamefile(repo, remotepath, branches, 'branches')
93 writeremotenamefile(repo, remotepath, branches, 'branches')
94 finally:
94 finally:
95 wlock.release()
95 wlock.release()
96
96
97 def pullremotenames(localrepo, remoterepo):
97 def pullremotenames(localrepo, remoterepo):
98 """
98 """
99 pulls bookmarks and branches information of the remote repo during a
99 pulls bookmarks and branches information of the remote repo during a
100 pull or clone operation.
100 pull or clone operation.
101 localrepo is our local repository
101 localrepo is our local repository
102 remoterepo is the peer instance
102 remoterepo is the peer instance
103 """
103 """
104 remotepath = remoterepo.url()
104 remotepath = remoterepo.url()
105 bookmarks = remoterepo.listkeys('bookmarks')
105 bookmarks = remoterepo.listkeys('bookmarks')
106 # on a push, we don't want to keep obsolete heads since
106 # on a push, we don't want to keep obsolete heads since
107 # they won't show up as heads on the next pull, so we
107 # they won't show up as heads on the next pull, so we
108 # remove them here otherwise we would require the user
108 # remove them here otherwise we would require the user
109 # to issue a pull to refresh the storage
109 # to issue a pull to refresh the storage
110 bmap = {}
110 bmap = {}
111 repo = localrepo.unfiltered()
111 repo = localrepo.unfiltered()
112 for branch, nodes in remoterepo.branchmap().iteritems():
112 for branch, nodes in remoterepo.branchmap().iteritems():
113 bmap[branch] = []
113 bmap[branch] = []
114 for node in nodes:
114 for node in nodes:
115 if node in repo and not repo[node].obsolete():
115 if node in repo and not repo[node].obsolete():
116 bmap[branch].append(hex(node))
116 bmap[branch].append(hex(node))
117
117
118 saveremotenames(localrepo, remotepath, bmap, bookmarks)
118 saveremotenames(localrepo, remotepath, bmap, bookmarks)
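The files written above share one line-based format: a version line ('0' for the initial format), a blank line, then one record per line with three NUL-separated fields, node, remotepath and name. A minimal standalone sketch of a reader and writer for that format follows; parse_entries and dump_entries are illustrative names, not part of this module.

def dump_entries(path, entries):
    # write the same shape as writeremotenamefile: a version line,
    # a blank line, then one node\0remotepath\0name record per line
    with open(path, 'w') as f:
        f.write('0\n\n')
        for node, remotepath, name in sorted(entries):
            f.write('%s\0%s\0%s\n' % (node, remotepath, name))

def parse_entries(path):
    # yield (node, remotepath, name) tuples, skipping the version
    # header and blank lines, mirroring readremotenamefile above
    with open(path) as f:
        for lineno, line in enumerate(f):
            line = line.strip()
            if lineno == 0 or not line:
                continue
            fields = line.split('\0')
            if len(fields) == 3:
                yield tuple(fields)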
@@ -1,108 +1,108
Testing the functionality to pull remotenames
=============================================

  $ cat >> $HGRCPATH << EOF
  > [alias]
  > glog = log -G -T '{rev}:{node|short} {desc}'
  > [experimental]
  > remotenames = True
  > EOF

Making a server repo
--------------------

  $ hg init server
  $ cd server
  $ for ch in a b c d e f g h; do
  > echo "foo" >> $ch
  > hg ci -Aqm "Added "$ch
  > done
  $ hg glog
  @ 7:ec2426147f0e Added h
  |
  o 6:87d6d6676308 Added g
  |
  o 5:825660c69f0c Added f
  |
  o 4:aa98ab95a928 Added e
  |
  o 3:62615734edd5 Added d
  |
  o 2:28ad74487de9 Added c
  |
  o 1:29becc82797a Added b
  |
  o 0:18d04c59bb5d Added a

  $ hg bookmark -r 3 foo
  $ hg bookmark -r 6 bar
  $ hg up 4
  0 files updated, 0 files merged, 3 files removed, 0 files unresolved
  $ hg branch wat
  marked working directory as branch wat
  (branches are permanent and global, did you want a bookmark?)
  $ echo foo >> bar
  $ hg ci -Aqm "added bar"

Making a client repo
--------------------

  $ cd ..

  $ hg clone server client
  updating to branch default
  8 files updated, 0 files merged, 0 files removed, 0 files unresolved

  $ cd client
-  $ cat .hg/remotenames/bookmarks
+  $ cat .hg/logexchange/bookmarks
  0

  87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
  62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)

-  $ cat .hg/remotenames/branches
+  $ cat .hg/logexchange/branches
  0

  ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
  3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)

Making a new server
-------------------

  $ cd ..
  $ hg init server2
  $ cd server2
  $ hg pull ../server/
  pulling from ../server/
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 9 changesets with 9 changes to 9 files (+1 heads)
  adding remote bookmark bar
  adding remote bookmark foo
  new changesets 18d04c59bb5d:3e1487808078
  (run 'hg heads' to see heads)

Pulling from the new server
---------------------------
  $ cd ../client/
  $ hg pull ../server2/
  pulling from ../server2/
  searching for changes
  no changes found
-  $ cat .hg/remotenames/bookmarks
+  $ cat .hg/logexchange/bookmarks
  0

  62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server\x00foo (esc)
  87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server\x00bar (esc)
  87d6d66763085b629e6d7ed56778c79827273022\x00file:$TESTTMP/server2\x00bar (esc)
  62615734edd52f06b6fb9c2beb429e4fe30d57b8\x00file:$TESTTMP/server2\x00foo (esc)

-  $ cat .hg/remotenames/branches
+  $ cat .hg/logexchange/branches
  0

  3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server\x00wat (esc)
  ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server\x00default (esc)
  ec2426147f0e39dbc9cef599b066be6035ce691d\x00file:$TESTTMP/server2\x00default (esc)
  3e1487808078543b0af6d10dadf5d46943578db0\x00file:$TESTTMP/server2\x00wat (esc)
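The final output above interleaves records from server and server2, because writeremotenamefile re-saves entries belonging to other remotes on every rewrite. A consumer can therefore group records by remote path; a small sketch building on the hypothetical parse_entries above:

def bookmarks_by_remote(entries):
    # group (node, remotepath, name) records by remote path
    grouped = {}
    for node, remotepath, name in entries:
        grouped.setdefault(remotepath, {})[name] = node
    return grouped

# e.g. bookmarks_by_remote(parse_entries('.hg/logexchange/bookmarks'))
# would map 'file:$TESTTMP/server' and 'file:$TESTTMP/server2' each to
# a dict like {'foo': '626157...', 'bar': '87d6d6...'}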