exchange: set 'treemanifest' param on pushed changegroups too...
Martin von Zweigbergk
r27938:cabac7df stable
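
In short: when the pushing repository itself uses tree manifests (a 'treemanifest' entry in repo.requirements), the changegroup part built for a bundle2 push is now tagged with a 'treemanifest' part parameter, matching what was already done for changegroups generated on other code paths. Below is a minimal sketch of the producer side using a hypothetical Part stand-in rather than Mercurial's real bundle2 classes; only addparam, the 'treemanifest' key, and the requirements check come from the diff itself.

class Part(object):
    # hypothetical stand-in for a bundle2 part
    def __init__(self, parttype, data=''):
        self.type = parttype
        self.data = data
        self.params = {}

    def addparam(self, name, value='', mandatory=True):
        # bundle2 parameters are (name, value) pairs attached to a part
        self.params[name] = value

def buildpushchangegroup(requirements, cg, version=None):
    # mirrors the logic added in this commit (see the diff below)
    cgpart = Part('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
    if 'treemanifest' in requirements:
        cgpart.addparam('treemanifest', '1')
    return cgpart

part = buildpushchangegroup(set(['store', 'treemanifest']), cg='...', version='02')
assert part.params.get('treemanifest') == '1'

The parameter presumably lets the receiving side switch to tree-manifest-aware handling before applying the changegroup; per the commit title, it was previously set only on changegroups built elsewhere, not on pushed ones.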
mercurial/exchange.py
@@ -1,1933 +1,1935 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import urllib
import urllib2

from .i18n import _
from .node import (
    hex,
    nullid,
)
from . import (
    base85,
    bookmarks as bookmod,
    bundle2,
    changegroup,
    discovery,
    error,
    lock as lockmod,
    obsolete,
    phases,
    pushkey,
    scmutil,
    sslutil,
    streamclone,
    tags,
    url as urlmod,
    util,
)

# Maps bundle compression human names to internal representation.
_bundlespeccompressions = {'none': None,
                           'bzip2': 'BZ',
                           'gzip': 'GZ',
                          }

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', #legacy
                        }

def parsebundlespec(repo, spec, strict=True, externalnames=False):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    If ``externalnames`` is False (the default), the human-centric names will
    be converted to their internal representation.

    Returns a 3-tuple of (compression, version, parameters). Compression will
    be ``None`` if not in strict mode and a compression isn't defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            params[key] = value

        return version, params


    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in _bundlespeccompressions:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in _bundlespeccompressions:
            compression = spec
            version = 'v1'
            if 'generaldelta' in repo.requirements:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    if not externalnames:
        compression = _bundlespeccompressions[compression]
        version = _bundlespeccgversions[version]
    return compression, version, params

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        for k, v in _bundlespeccompressions.items():
            if v == alg:
                return k
        return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        params = 'requirements=%s' % ','.join(sorted(requirements))
        return 'none-packed1;%s' % urllib.quote(params)
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)

def buildobsmarkerspart(bundler, markers):
    """add an obsmarker part to the bundler with <markers>

    No part is created if markers is empty.
    Raises ValueError if the bundler doesn't support any known obsmarker format.
    """
    if markers:
        remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
        version = obsolete.commonversion(remoteversions)
        if version is None:
            raise ValueError('bundler does not support common obsmarker format')
        stream = obsolete.encodemarkers(markers, True, version=version)
        return bundler.newpart('obsmarkers', data=stream)
    return None

def _canusebundle2(op):
    """return true if a pull/push can use bundle2

    Feel free to nuke this function when we drop the experimental option"""
    return (op.repo.ui.configbool('experimental', 'bundle2-exp', True)
            and op.remote.capable('bundle2'))


class pushoperation(object):
    """An object that represents a single push operation

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=()):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmark explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # did a local lock get acquired?
        self.locallocked = None
        # step already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discover.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote heads before the push
        self.remoteheads = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # phases changes that must be pushed along side the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no target to push, all common are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changeset filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads)
        #              )
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of message used when pushing bookmark
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **opargs)
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    # there are two ways to push to remote repo:
    #
    # addchangegroup assumes local user can lock remote
    # repo (local filesystem, old ssh servers).
    #
    # unbundle assumes local user cannot lock remote repo (new ssh
    # servers, http servers).

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))
    # get local lock as we might write phase data
    localwlock = locallock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if _canusebundle2(pushop) and maypushback:
            localwlock = pushop.repo.wlock()
        locallock = pushop.repo.lock()
        pushop.locallocked = True
    except IOError as err:
        pushop.locallocked = False
        if err.errno != errno.EACCES:
            raise
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)
    try:
        if pushop.locallocked:
            pushop.trmanager = transactionmanager(pushop.repo,
                                                  'push-response',
                                                  pushop.remote.url())
        pushop.repo.checkpush(pushop)
        lock = None
        unbundle = pushop.remote.capable('unbundle')
        if not unbundle:
            lock = pushop.remote.lock()
        try:
            _pushdiscovery(pushop)
            if _canusebundle2(pushop):
                _pushbundle2(pushop)
            _pushchangeset(pushop)
            _pushsyncphase(pushop)
            _pushobsolete(pushop)
            _pushbookmark(pushop)
        finally:
            if lock is not None:
                lock.release()
        if pushop.trmanager:
            pushop.trmanager.close()
    finally:
        if pushop.trmanager:
            pushop.trmanager.release()
        if locallock is not None:
            locallock.release()
        if localwlock is not None:
            localwlock.release()

    return pushop

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for function performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pushdiscovery dictionary directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec

def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phase that needs to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = pushop.remote.listkeys('phases')
    publishing = remotephases.get('publishing', False)
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and publishing):
        # When:
        # - this is a subrepo push
        # - and remote supports phases
        # - and no changesets are to be pushed
        # - and remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    ana = phases.analyzeremotephases(pushop.repo,
                                     pushop.fallbackheads,
                                     remotephases)
    pheads, droots = ana
    extracond = ''
    if not publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly a set of
    # XXX roots; we may want to ensure it is, but that is costly
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds changeset we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                                outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if (obsolete.isenabled(pushop.repo, obsolete.exchangeopt)
        and pushop.repo.obsstore
        and 'obsolete' in pushop.remote.listkeys('namespaces')):
        repo = pushop.repo
        # very naive computation that can be quite expensive on big repos.
        # However: evolution is currently slow on them anyway.
        nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
        pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = map(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)
    remotebookmark = remote.listkeys('bookmarks')

    explicit = set(pushop.bookmarks)

    comp = bookmod.compare(repo, repo._bookmarks, remotebookmark, srchex=hex)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp
    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not ancestors or repo[scid].rev() in ancestors:
            pushop.outbookmarks.append((b, dcid, scid))
    # search added bookmark
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmark
    for b, scid, dcid in advdst + diverge + differ:
        if b in explicit:
            explicit.remove(b)
            pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmark to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
            # treat as "deleted locally"
            pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        ui.warn(_('bookmark %s does not exist on the local '
                  'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages are here for the 80-char limit reason
            mso = _("push includes obsolete changeset: %s!")
            mst = {"unstable": _("push includes unstable changeset: %s!"),
                   "bumped": _("push includes bumped changeset: %s!"),
                   "divergent": _("push includes divergent changeset: %s!")}
            # If we are pushing and there is at least one
            # obsolete or unstable changeset in missing, at
            # least one of the missing heads will be obsolete or
            # unstable. So checking heads only is ok
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.troubled():
                    raise error.Abort(mst[ctx.troubles()[0]] % ctx)

        discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for function generating bundle2 part

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the b2partsgenmapping dictionary directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    if not pushop.force:
        bundler.newpart('check:heads', data=iter(pushop.remoteheads))

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = None
    cgversions = b2caps.get('changegroup')
    if not cgversions:  # 3.1 and 3.2 ship with an empty value
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing)
    else:
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedversions(pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
        cg = changegroup.getlocalchangegroupraw(pushop.repo, 'push',
                                                pushop.outgoing,
                                                version=version)
    cgpart = bundler.newpart('changegroup', data=cg)
    if version is not None:
        cgpart.addparam('version', version)
+    if 'treemanifest' in pushop.repo.requirements:
+        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if not 'pushkey' in b2caps:
        return
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc(str(phases.draft)))
        part.addparam('new', enc(str(phases.public)))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    if 'pushkey' not in b2caps:
        return
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
                    if pushop.bkresult is not None:
                        pushop.bkresult = 1
    return handlereply


def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            reply = pushop.remote.unbundle(stream, ['force'], 'push')
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort('missing support for %s' % exc)
    except bundle2.AbortFromPart as exc:
        pushop.ui.status(_('remote: %s\n') % exc)
        raise error.Abort(_('push failed on remote'), hint=exc.hint)
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)
878
880
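# Example (hypothetical sketch, not part of this module): an extension that
# wants to observe or tweak one of the push part generators iterated above
# can swap the entry in ``b2partsgenmapping`` directly, keeping a reference
# to the original function. The 'changeset' step name is assumed here:
#
#   def extsetup(ui):
#       origgen = exchange.b2partsgenmapping['changeset']
#       def mygen(pushop, bundler):
#           pushop.ui.debug('about to generate the changeset part\n')
#           return origgen(pushop, bundler)
#       exchange.b2partsgenmapping['changeset'] = mygen
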
def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop.repo,
                                     pushop.remote,
                                     pushop.outgoing)
    outgoing = pushop.outgoing
    unbundle = pushop.remote.capable('unbundle')
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        bundler = changegroup.cg1packer(pushop.repo, bundlecaps)
        cg = changegroup.getsubset(pushop.repo,
                                   outgoing,
                                   bundler,
                                   'push',
                                   fastpath=True)
    else:
        cg = changegroup.getlocalchangegroup(pushop.repo, 'push', outgoing,
                                             bundlecaps)

    # apply changegroup to remote
    if unbundle:
        # local repo finds heads on server, finds out what
        # revs it must push. once revs transferred, if server
        # finds it has different heads (someone else won
        # commit/push race), server aborts.
        if pushop.force:
            remoteheads = ['force']
        else:
            remoteheads = pushop.remoteheads
        # ssh: return remote's addchangegroup()
        # http: return remote's addchangegroup() or 0 for error
        pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                                 pushop.repo.url())
    else:
        # we return an integer indicating remote head count
        # change
        pushop.cgresult = pushop.remote.addchangegroup(cg, 'push',
                                                       pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = pushop.remote.listkeys('phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo', False)
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the possible phase synchronisation done by
        # courtesy to publish changesets possibly locally draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            r = pushop.remote.pushkey('phases',
                                      newremotehead.hex(),
                                      str(phases.draft),
                                      str(phases.public))
            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        if remote.pushkey('bookmarks', b, old, new):
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery can have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation.

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revision we try to pull (None is "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = bookmarks
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible,
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset,
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return _canusebundle2(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(object):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

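# A minimal usage sketch for transactionmanager, mirroring what pull() does
# below; the transaction is only opened if some step actually asks for one:
#
#   trmgr = transactionmanager(repo, 'pull', remote.url())
#   try:
#       ...                 # steps call trmgr.transaction() on demand
#       trmgr.close()       # commits the transaction, if one was opened
#   finally:
#       trmgr.release()     # aborts it if it is still open
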
def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested, **opargs)
    if pullop.remote.local():
        missing = set(pullop.remote.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    lock = pullop.repo.lock()
    try:
        pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
        streamclone.maybeperformlegacystreamclone(pullop)
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _pullbundle2(pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)
        pullop.trmanager.close()
    finally:
        pullop.trmanager.release()
        lock.release()

    return pullop

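# A minimal calling sketch for pull() (assuming ``repo`` is a local
# repository object and ``other`` a peer, e.g. obtained via hg.peer()):
#
#   pullop = pull(repo, other, heads=None, force=False)
#   if pullop.cgresult == 0:
#       repo.ui.status('no changesets were pulled\n')
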
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for functions performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

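# Example (hypothetical): an extension registering an additional discovery
# step; the step name 'myext:stuff' and the listkeys namespace are made up:
#
#   @exchange.pulldiscovery('myext:stuff')
#   def _pulldiscoverymyext(pullop):
#       pullop.myextdata = pullop.remote.listkeys('myext')
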
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    pullop.remotebookmarks = pullop.remote.listkeys('bookmarks')


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will change to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, let's drop it from the
        # unknown remote heads and put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it would end up doing a pathological amount of
        # round trips for a huge amount of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we'll not be able to
        # detect it.
        scommon = set(common)
        filteredrheads = []
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
            else:
                filteredrheads.append(n)
        if not filteredrheads:
            fetch = []
        rheads = filteredrheads
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroups."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo)}

    streaming, streamreqs = streamclone.canperformstreamclone(pullop)

    # pulling changegroup
    pullop.stepsdone.add('changegroup')

    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads
    kwargs['cg'] = pullop.fetch
    if 'listkeys' in pullop.remotebundle2caps:
        kwargs['listkeys'] = ['phase']
        if pullop.remotebookmarks is None:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            kwargs['listkeys'].append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)
    bundle = pullop.remote.getbundle('pull', **kwargs)
    try:
        op = bundle2.processbundle(pullop.repo, bundle, pullop.gettransaction)
    except error.BundleValueError as exc:
        raise error.Abort('missing support for %s' % exc)

    if pullop.fetch:
        results = [cg['return'] for cg in op.records['changegroup']]
        pullop.cgresult = changegroup.combineresults(results)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    for namespace, value in op.records['listkeys']:
        if namespace == 'bookmarks':
            pullop.remotebookmarks = value

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""
    pass

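# Example (hypothetical sketch): an extension typically extends the
# getbundle call by wrapping the hook above, e.g. with
# extensions.wrapfunction; 'myextarg' and the 'myext' capability are made up:
#
#   def _myextraprepare(orig, pullop, kwargs):
#       if pullop.remote.capable('myext'):
#           kwargs['myextarg'] = True
#       return orig(pullop, kwargs)
#
#   def extsetup(ui):
#       extensions.wrapfunction(exchange, '_pullbundle2extraprepare',
#                               _myextraprepare)
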
def _pullchangeset(pullop):
    """pull changesets from the remote into the local repo"""
    # We delay opening the transaction as late as possible so we don't open
    # a transaction for nothing, and so we don't break a future useful
    # rollback call.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        cg = pullop.remote.changegroup(pullop.fetch, 'pull')
    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        cg = pullop.remote.changegroupsubset(pullop.fetch, pullop.heads, 'pull')
    pullop.cgresult = cg.apply(pullop.repo, 'pull', pullop.remote.url())

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = pullop.remote.listkeys('phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

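# For illustration, the ``remotephases`` listkeys dictionary handled above
# roughly looks like one of the following (all values are strings on the
# wire; the hex node is made up):
#
#   {'publishing': 'True'}              # a publishing server
#   {'deadbeef...': '1'}                # a draft root advertised by a
#                                       # non-publishing server
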
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code
    that a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes."""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = pullop.remote.listkeys('obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = base85.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
        pullop.repo.invalidatevolatilesets()
    return tr

def caps20to10(repo):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = set(['HG20'])
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo))
    caps.add('bundle2=' + urllib.quote(capsblob))
    return caps

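# For illustration, the returned set looks roughly like the following (the
# exact blob depends on the repository's bundle2 capabilities):
#
#   set(['HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02%0A...'])
#
# i.e. the bundle2 capability blob is URL-quoted and carried inside a
# bundle10-era ``bundlecaps`` set.
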
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, alter the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

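# Example (hypothetical): registering a new getbundle part generator from an
# extension; the step name, part type 'myext-data' and the repo method used
# to produce the payload are made up:
#
#   @exchange.getbundle2partsgenerator('myext-data')
#   def _getbundlemyextpart(bundler, repo, source, bundlecaps=None,
#                           b2caps=None, **kwargs):
#       if 'myext' in (b2caps or {}):
#           bundler.newpart('myext-data', data=repo.myextdata())
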
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundle(repo, source, heads=None, common=None, bundlecaps=None,
              **kwargs):
    """return a full bundle (with potentially multiple kinds of parts)

    Could be a bundle HG10 or a bundle HG20 depending on the bundlecaps
    passed. For now, the bundle can only contain a changegroup, but this will
    change when more part types become available for bundle2.

    This is different from changegroup.getchangegroup, which only returns an
    HG10 changegroup bundle. They may eventually get reunited in the future
    when we have a clearer idea of the API we want to use to query different
    data.

    The implementation is at a very early stage and will get massive rework
    when the API of bundle is refined.
    """
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        return changegroup.getchangegroup(repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps)

    # bundle20 case
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urllib.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **kwargs)

    return util.chunkbuffer(bundler.getchunks())

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    cg = None
    if kwargs.get('cg', True):
        # build changegroup bundle here.
        version = None
        cgversions = b2caps.get('changegroup')
        getcgkwargs = {}
        if cgversions:  # 3.1 and 3.2 ship with an empty value
            cgversions = [v for v in cgversions
                          if v in changegroup.supportedversions(repo)]
            if not cgversions:
                raise ValueError(_('no common changegroup version'))
            version = getcgkwargs['version'] = max(cgversions)
        outgoing = changegroup.computeoutgoing(repo, heads, common)
        cg = changegroup.getlocalchangegroupraw(repo, source, outgoing,
                                                bundlecaps=bundlecaps,
                                                **getcgkwargs)

    if cg:
        part = bundler.newpart('changegroup', data=cg)
        if version is not None:
            part.addparam('version', version)
        part.addparam('nbchanges', str(len(outgoing.missing)), mandatory=False)
        if 'treemanifest' in repo.requirements:
            part.addparam('treemanifest', '1')

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get('listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get('obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset nodes and .hgtags
    filenode raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get('cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = changegroup.computeoutgoing(repo, heads, common)

    if not outgoing.missingheads:
        return

    cache = tags.hgtagsfnodescache(repo.unfiltered())
    chunks = []

    # .hgtags fnodes are only relevant for head changesets. While we could
    # transfer values for all known nodes, there will likely be little to
    # no benefit.
    #
    # We don't bother using a generator to produce output data because
    # a) we only have 40 bytes per head and even esoteric numbers of heads
    # consume little memory (1M heads is 40MB) b) we don't want to send the
    # part if we don't have entries and knowing if we have entries requires
    # cache lookups.
    for node in outgoing.missingheads:
        # Don't compute missing, as this may slow down serving.
        fnode = cache.getfnode(node, computemissing=False)
        if fnode is not None:
            chunks.extend([node, fnode])

    if chunks:
        bundler.newpart('hgtagsfnodes', data=''.join(chunks))

1646 def check_heads(repo, their_heads, context):
1648 def check_heads(repo, their_heads, context):
1647 """check if the heads of a repo have been modified
1649 """check if the heads of a repo have been modified
1648
1650
1649 Used by peer for unbundling.
1651 Used by peer for unbundling.
1650 """
1652 """
1651 heads = repo.heads()
1653 heads = repo.heads()
1652 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1654 heads_hash = util.sha1(''.join(sorted(heads))).digest()
1653 if not (their_heads == ['force'] or their_heads == heads or
1655 if not (their_heads == ['force'] or their_heads == heads or
1654 their_heads == ['hashed', heads_hash]):
1656 their_heads == ['hashed', heads_hash]):
1655 # someone else committed/pushed/unbundled while we
1657 # someone else committed/pushed/unbundled while we
1656 # were transferring data
1658 # were transferring data
1657 raise error.PushRaced('repository changed while %s - '
1659 raise error.PushRaced('repository changed while %s - '
1658 'please try again' % context)
1660 'please try again' % context)
1659
1661
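# Editor's illustration (not part of exchange.py): when the server advertises
# the 'unbundlehash' capability, the pushing client sends the ['hashed',
# digest] form that check_heads() compares against, built the same way as
# heads_hash above (util.sha1 is hashlib.sha1):
#
#     import hashlib
#     def hashedheads(remoteheads):
#         # remoteheads: binary 20-byte nodes, sorted then concatenated
#         return ['hashed',
#                 hashlib.sha1(''.join(sorted(remoteheads))).digest()]
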
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture',
                                       False)
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if util.safehasattr(cg, 'params'):
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput)
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
        else:
            lockandtr[1] = repo.lock()
            r = cg.apply(repo, source, url)
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

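# Editor's note: lockandtr is a mutable list rather than three local names
# because this Python 2 code has no 'nonlocal'; the nested gettransaction()
# assigns into the list so the outer 'finally' can release exactly what was
# acquired. A minimal standalone illustration of the pattern:
#
#     def outer():
#         state = [None]
#         def acquire():
#             if state[0] is None:
#                 state[0] = 'lock'  # visible to outer() through the list
#             return state[0]
#         acquire()
#         return state[0]  # -> 'lock'
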
def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles', True):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    res = remote._call('clonebundles')

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(repo, entries)
    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback', False):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urllib.unquote(key)
            value = urllib.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    comp, version, params = parsebundlespec(repo, value,
                                                            externalnames=True)
                    attrs['COMPRESSION'] = comp
                    attrs['VERSION'] = version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

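# Editor's illustration of the manifest format this function consumes (the
# URLs are hypothetical):
#
#     https://hg.example.com/bundle.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#     https://hg.example.com/packed.hg BUNDLESPEC=none-packed1
#
# One dict is returned per line; because parsebundlespec() is called with
# externalnames=True, the human-readable compression and version names are
# copied into COMPRESSION and VERSION:
#
#     [{'URL': 'https://hg.example.com/bundle.hg',
#       'BUNDLESPEC': 'gzip-v2', 'COMPRESSION': 'gzip', 'VERSION': 'v2',
#       'REQUIRESNI': 'true'},
#      {'URL': 'https://hg.example.com/packed.hg',
#       'BUNDLESPEC': 'none-packed1', 'COMPRESSION': 'none',
#       'VERSION': 'packed1'}]
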
def filterclonebundleentries(repo, entries):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                parsebundlespec(repo, spec, strict=True)
            except error.InvalidBundleSpecification as e:
                repo.ui.debug(str(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (entry['URL'], str(e)))
                continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers', default=[])
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    # Our sort function.
    def compareentry(a, b):
        for prefkey, prefvalue in prefers:
            avalue = a.get(prefkey)
            bvalue = b.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    return sorted(entries, cmp=compareentry)

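# Editor's illustration: with the (hypothetical) configuration
#
#     [ui]
#     clonebundleprefers = VERSION=v2, COMPRESSION=gzip
#
# entries carrying VERSION=v2 sort before all others, ties are then broken
# by COMPRESSION=gzip, and entries compareentry() cannot distinguish keep
# their manifest order, since it returns 0 and Python's sort is stable.
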
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    lock = repo.lock()
    try:
        tr = repo.transaction('bundleurl')
        try:
            try:
                fh = urlmod.open(ui, url)
                cg = readbundle(ui, fh, 'stream')

                if isinstance(cg, bundle2.unbundle20):
                    bundle2.processbundle(repo, cg, lambda: tr)
                elif isinstance(cg, streamclone.streamcloneapplier):
                    cg.apply(repo)
                else:
                    cg.apply(repo, 'clonebundles', url)
                tr.close()
                return True
            except urllib2.HTTPError as e:
                ui.warn(_('HTTP error fetching bundle: %s\n') % str(e))
            except urllib2.URLError as e:
                ui.warn(_('error fetching bundle: %s\n') % e.reason[1])

            return False
        finally:
            tr.release()
    finally:
        lock.release()
@@ -1,471 +1,490 @@
  $ cat << EOF >> $HGRCPATH
  > [format]
  > usegeneraldelta=yes
  > EOF

Set up repo

  $ hg --config experimental.treemanifest=True init repo
  $ cd repo

Requirements get set on init

  $ grep treemanifest .hg/requires
  treemanifest

Without directories, looks like any other repo

  $ echo 0 > a
  $ echo 0 > b
  $ hg ci -Aqm initial
  $ hg debugdata -m 0
  a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
  b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)

Submanifest is stored in separate revlog

  $ mkdir dir1
  $ echo 1 > dir1/a
  $ echo 1 > dir1/b
  $ echo 1 > e
  $ hg ci -Aqm 'add dir1'
  $ hg debugdata -m 1
  a\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
  b\x00362fef284ce2ca02aecc8de6d5e8a1c3af0556fe (esc)
  dir1\x008b3ffd73f901e83304c83d33132c8e774ceac44et (esc)
  e\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
  $ hg debugdata --dir dir1 0
  a\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)
  b\x00b8e02f6433738021a065f94175c7cd23db5f05be (esc)

Can add nested directories

  $ mkdir dir1/dir1
  $ echo 2 > dir1/dir1/a
  $ echo 2 > dir1/dir1/b
  $ mkdir dir1/dir2
  $ echo 2 > dir1/dir2/a
  $ echo 2 > dir1/dir2/b
  $ hg ci -Aqm 'add dir1/dir1'
  $ hg files -r .
  a
  b
  dir1/a (glob)
  dir1/b (glob)
  dir1/dir1/a (glob)
  dir1/dir1/b (glob)
  dir1/dir2/a (glob)
  dir1/dir2/b (glob)
  e

Revision is not created for unchanged directory

  $ mkdir dir2
  $ echo 3 > dir2/a
  $ hg add dir2
  adding dir2/a (glob)
  $ hg debugindex --dir dir1 > before
  $ hg ci -qm 'add dir2'
  $ hg debugindex --dir dir1 > after
  $ diff before after
  $ rm before after

Removing directory does not create a revlog entry

  $ hg rm dir1/dir1
  removing dir1/dir1/a (glob)
  removing dir1/dir1/b (glob)
  $ hg debugindex --dir dir1/dir1 > before
  $ hg ci -qm 'remove dir1/dir1'
  $ hg debugindex --dir dir1/dir1 > after
  $ diff before after
  $ rm before after

Check that hg files (calls treemanifest.walk()) works
without loading all directory revlogs

  $ hg co 'desc("add dir2")'
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ mv .hg/store/meta/dir2 .hg/store/meta/dir2-backup
  $ hg files -r . dir1
  dir1/a (glob)
  dir1/b (glob)
  dir1/dir1/a (glob)
  dir1/dir1/b (glob)
  dir1/dir2/a (glob)
  dir1/dir2/b (glob)

Check that status between revisions works (calls treemanifest.matches())
without loading all directory revlogs

  $ hg status --rev 'desc("add dir1")' --rev . dir1
  A dir1/dir1/a
  A dir1/dir1/b
  A dir1/dir2/a
  A dir1/dir2/b
  $ mv .hg/store/meta/dir2-backup .hg/store/meta/dir2

Merge creates 2-parent revision of directory revlog

  $ echo 5 > dir1/a
  $ hg ci -Aqm 'modify dir1/a'
  $ hg co '.^'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 6 > dir1/b
  $ hg ci -Aqm 'modify dir1/b'
  $ hg merge 'desc("modify dir1/a")'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m 'conflict-free merge involving dir1/'
  $ cat dir1/a
  5
  $ cat dir1/b
  6
  $ hg debugindex --dir dir1
  rev offset length delta linkrev nodeid p1 p2
  0 0 54 -1 1 8b3ffd73f901 000000000000 000000000000
  1 54 68 0 2 68e9d057c5a8 8b3ffd73f901 000000000000
  2 122 12 1 4 4698198d2624 68e9d057c5a8 000000000000
  3 134 55 1 5 44844058ccce 68e9d057c5a8 000000000000
  4 189 55 1 6 bf3d9b744927 68e9d057c5a8 000000000000
  5 244 55 4 7 dde7c0af2a03 bf3d9b744927 44844058ccce

Merge keeping directory from parent 1 does not create a revlog entry. (Note
that dir1's manifest does change, but only because dir1/a's filelog changes.)

  $ hg co 'desc("add dir2")'
  2 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 8 > dir2/a
  $ hg ci -m 'modify dir2/a'
  created new head

  $ hg debugindex --dir dir2 > before
  $ hg merge 'desc("modify dir1/a")'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg revert -r 'desc("modify dir2/a")' .
  reverting dir1/a (glob)
  $ hg ci -m 'merge, keeping parent 1'
  $ hg debugindex --dir dir2 > after
  $ diff before after
  $ rm before after

Merge keeping directory from parent 2 does not create a revlog entry. (Note
that dir2's manifest does change, but only because dir2/a's filelog changes.)

  $ hg co 'desc("modify dir2/a")'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg debugindex --dir dir1 > before
  $ hg merge 'desc("modify dir1/a")'
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg revert -r 'desc("modify dir1/a")' .
  reverting dir2/a (glob)
  $ hg ci -m 'merge, keeping parent 2'
  created new head
  $ hg debugindex --dir dir1 > after
  $ diff before after
  $ rm before after

Create flat source repo for tests with mixed flat/tree manifests

  $ cd ..
  $ hg init repo-flat
  $ cd repo-flat

Create a few commits with flat manifest

  $ echo 0 > a
  $ echo 0 > b
  $ echo 0 > e
  $ for d in dir1 dir1/dir1 dir1/dir2 dir2
  > do
  > mkdir $d
  > echo 0 > $d/a
  > echo 0 > $d/b
  > done
  $ hg ci -Aqm initial

  $ echo 1 > a
  $ echo 1 > dir1/a
  $ echo 1 > dir1/dir1/a
  $ hg ci -Aqm 'modify on branch 1'

  $ hg co 0
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 2 > b
  $ echo 2 > dir1/b
  $ echo 2 > dir1/dir1/b
  $ hg ci -Aqm 'modify on branch 2'

  $ hg merge 1
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m 'merge of flat manifests to new flat manifest'

Create clone with tree manifests enabled

  $ cd ..
  $ hg clone --pull --config experimental.treemanifest=1 repo-flat repo-mixed
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 4 changesets with 17 changes to 11 files
  updating to branch default
  11 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ cd repo-mixed
  $ test -f .hg/store/meta
  [1]
  $ grep treemanifest .hg/requires
  treemanifest

Commit should store revlog per directory

  $ hg co 1
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ echo 3 > a
  $ echo 3 > dir1/a
  $ echo 3 > dir1/dir1/a
  $ hg ci -m 'first tree'
  created new head
  $ find .hg/store/meta | sort
  .hg/store/meta
  .hg/store/meta/dir1
  .hg/store/meta/dir1/00manifest.i
  .hg/store/meta/dir1/dir1
  .hg/store/meta/dir1/dir1/00manifest.i
  .hg/store/meta/dir1/dir2
  .hg/store/meta/dir1/dir2/00manifest.i
  .hg/store/meta/dir2
  .hg/store/meta/dir2/00manifest.i

Merge of two trees

  $ hg co 2
  6 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg merge 1
  3 files updated, 0 files merged, 0 files removed, 0 files unresolved
  (branch merge, don't forget to commit)
  $ hg ci -m 'merge of flat manifests to new tree manifest'
  created new head
  $ hg diff -r 3

Parent of tree root manifest should be flat manifest, and two for merge

  $ hg debugindex -m
  rev offset length delta linkrev nodeid p1 p2
  0 0 80 -1 0 40536115ed9e 000000000000 000000000000
  1 80 83 0 1 f3376063c255 40536115ed9e 000000000000
  2 163 89 0 2 5d9b9da231a2 40536115ed9e 000000000000
  3 252 83 2 3 d17d663cbd8a 5d9b9da231a2 f3376063c255
  4 335 124 1 4 51e32a8c60ee f3376063c255 000000000000
  5 459 126 2 5 cc5baa78b230 5d9b9da231a2 f3376063c255


Status across flat/tree boundary should work

  $ hg status --rev '.^' --rev .
  M a
  M dir1/a
  M dir1/dir1/a


Turning off treemanifest config has no effect

  $ hg debugindex .hg/store/meta/dir1/00manifest.i
  rev offset length delta linkrev nodeid p1 p2
  0 0 127 -1 4 064927a0648a 000000000000 000000000000
  1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
  $ echo 2 > dir1/a
  $ hg --config experimental.treemanifest=False ci -qm 'modify dir1/a'
  $ hg debugindex .hg/store/meta/dir1/00manifest.i
  rev offset length delta linkrev nodeid p1 p2
  0 0 127 -1 4 064927a0648a 000000000000 000000000000
  1 127 111 0 5 25ecb8cb8618 000000000000 000000000000
  2 238 55 1 6 5b16163a30c6 25ecb8cb8618 000000000000

Stripping and recovering changes should work

  $ hg st --change tip
  M dir1/a
  $ hg --config extensions.strip= strip tip
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  saved backup bundle to $TESTTMP/repo-mixed/.hg/strip-backup/51cfd7b1e13b-78a2f3ed-backup.hg (glob)
  $ hg unbundle -q .hg/strip-backup/*
  $ hg st --change tip
  M dir1/a

Shelving and unshelving should work

  $ echo foo >> dir1/a
  $ hg --config extensions.shelve= shelve
  shelved as default
  1 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg --config extensions.shelve= unshelve
  unshelving change 'default'
  $ hg diff --nodates
  diff -r 708a273da119 dir1/a
  --- a/dir1/a
  +++ b/dir1/a
  @@ -1,1 +1,2 @@
  1
  +foo

Pushing from treemanifest repo to an empty repo makes that a treemanifest repo

  $ cd ..
  $ hg init empty-repo
  $ cat << EOF >> empty-repo/.hg/hgrc
  > [experimental]
  > changegroup3=yes
  > EOF
  $ grep treemanifest empty-repo/.hg/requires
  [1]
  $ hg push -R repo -r 0 empty-repo
  pushing to empty-repo
  searching for changes
  adding changesets
  adding manifests
  adding file changes
  added 1 changesets with 2 changes to 2 files
  $ grep treemanifest empty-repo/.hg/requires
  treemanifest

Create deeper repo with tree manifests.

  $ hg --config experimental.treemanifest=True init deeprepo
  $ cd deeprepo

  $ mkdir a
  $ mkdir b
  $ mkdir b/bar
  $ mkdir b/bar/orange
  $ mkdir b/bar/orange/fly
  $ mkdir b/foo
  $ mkdir b/foo/apple
  $ mkdir b/foo/apple/bees

  $ touch a/one.txt
  $ touch a/two.txt
  $ touch b/bar/fruits.txt
  $ touch b/bar/orange/fly/gnat.py
  $ touch b/bar/orange/fly/housefly.txt
  $ touch b/foo/apple/bees/flower.py
  $ touch c.txt
  $ touch d.py

  $ hg ci -Aqm 'initial'

We'll see that visitdir works by removing some treemanifest revlogs and running
the files command with various parameters.

Test files from the root.

  $ hg files -r .
  a/one.txt (glob)
  a/two.txt (glob)
  b/bar/fruits.txt (glob)
  b/bar/orange/fly/gnat.py (glob)
  b/bar/orange/fly/housefly.txt (glob)
  b/foo/apple/bees/flower.py (glob)
  c.txt
  d.py

Excludes with a glob should not exclude everything from the glob's root

  $ hg files -r . -X 'b/fo?' b
  b/bar/fruits.txt (glob)
  b/bar/orange/fly/gnat.py (glob)
  b/bar/orange/fly/housefly.txt (glob)

Test files for a subdirectory.

  $ mv .hg/store/meta/a oldmf
  $ hg files -r . b
  b/bar/fruits.txt (glob)
  b/bar/orange/fly/gnat.py (glob)
  b/bar/orange/fly/housefly.txt (glob)
  b/foo/apple/bees/flower.py (glob)
  $ mv oldmf .hg/store/meta/a

Test files with just includes and excludes.

  $ mv .hg/store/meta/a oldmf
  $ mv .hg/store/meta/b/bar/orange/fly oldmf2
  $ mv .hg/store/meta/b/foo/apple/bees oldmf3
  $ hg files -r . -I path:b/bar -X path:b/bar/orange/fly -I path:b/foo -X path:b/foo/apple/bees
  b/bar/fruits.txt (glob)
  $ mv oldmf .hg/store/meta/a
  $ mv oldmf2 .hg/store/meta/b/bar/orange/fly
  $ mv oldmf3 .hg/store/meta/b/foo/apple/bees

Test files for a subdirectory, excluding a directory within it.

  $ mv .hg/store/meta/a oldmf
  $ mv .hg/store/meta/b/foo oldmf2
  $ hg files -r . -X path:b/foo b
  b/bar/fruits.txt (glob)
  b/bar/orange/fly/gnat.py (glob)
  b/bar/orange/fly/housefly.txt (glob)
  $ mv oldmf .hg/store/meta/a
  $ mv oldmf2 .hg/store/meta/b/foo

Test files for a subdirectory, including only a directory within it, and
including an unrelated directory.

  $ mv .hg/store/meta/a oldmf
  $ mv .hg/store/meta/b/foo oldmf2
  $ hg files -r . -I path:b/bar/orange -I path:a b
  b/bar/orange/fly/gnat.py (glob)
  b/bar/orange/fly/housefly.txt (glob)
  $ mv oldmf .hg/store/meta/a
  $ mv oldmf2 .hg/store/meta/b/foo

Test files for a pattern, including a directory, and excluding a directory
within that.

  $ mv .hg/store/meta/a oldmf
  $ mv .hg/store/meta/b/foo oldmf2
  $ mv .hg/store/meta/b/bar/orange oldmf3
  $ hg files -r . glob:**.txt -I path:b/bar -X path:b/bar/orange
  b/bar/fruits.txt (glob)
  $ mv oldmf .hg/store/meta/a
  $ mv oldmf2 .hg/store/meta/b/foo
  $ mv oldmf3 .hg/store/meta/b/bar/orange

Add some more changes to the deep repo
  $ echo narf >> b/bar/fruits.txt
  $ hg ci -m narf
  $ echo troz >> b/bar/orange/fly/gnat.py
  $ hg ci -m troz

Test cloning a treemanifest repo over http.
  $ hg serve -p $HGPORT -d --pid-file=hg.pid --errorlog=errors.log
  $ cat hg.pid >> $DAEMON_PIDS
  $ cd ..
We can clone even with the knob turned off and we'll get a treemanifest repo.
  $ hg clone --config experimental.treemanifest=False \
  > --config experimental.changegroup3=True \
  > http://localhost:$HGPORT deepclone
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 10 changes to 8 files
  updating to branch default
  8 files updated, 0 files merged, 0 files removed, 0 files unresolved
No server errors.
  $ cat deeprepo/errors.log
requires got updated to include treemanifest
  $ cat deepclone/.hg/requires | grep treemanifest
  treemanifest
Tree manifest revlogs exist.
  $ find deepclone/.hg/store/meta | sort
  deepclone/.hg/store/meta
  deepclone/.hg/store/meta/a
  deepclone/.hg/store/meta/a/00manifest.i
  deepclone/.hg/store/meta/b
  deepclone/.hg/store/meta/b/00manifest.i
  deepclone/.hg/store/meta/b/bar
  deepclone/.hg/store/meta/b/bar/00manifest.i
  deepclone/.hg/store/meta/b/bar/orange
  deepclone/.hg/store/meta/b/bar/orange/00manifest.i
  deepclone/.hg/store/meta/b/bar/orange/fly
  deepclone/.hg/store/meta/b/bar/orange/fly/00manifest.i
  deepclone/.hg/store/meta/b/foo
  deepclone/.hg/store/meta/b/foo/00manifest.i
  deepclone/.hg/store/meta/b/foo/apple
  deepclone/.hg/store/meta/b/foo/apple/00manifest.i
  deepclone/.hg/store/meta/b/foo/apple/bees
  deepclone/.hg/store/meta/b/foo/apple/bees/00manifest.i
Verify passes.
  $ cd deepclone
  $ hg verify
  checking changesets
  checking manifests
  crosschecking files in changesets and manifests
  checking files
  8 files, 3 changesets, 10 total revisions
  $ cd ..