narrow: move requirement constant from changegroup to repository...
Martin von Zweigbergk
r38871:a232e674 default
diff --git a/hgext/narrow/__init__.py b/hgext/narrow/__init__.py
--- a/hgext/narrow/__init__.py
+++ b/hgext/narrow/__init__.py
@@ -1,93 +1,93 @@
 # __init__.py - narrowhg extension
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 '''create clones which fetch history data for subset of files (EXPERIMENTAL)'''
 
 from __future__ import absolute_import
 
 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
 # be specifying the version(s) of Mercurial they are tested with, or
 # leave the attribute unspecified.
 testedwith = 'ships-with-hg-core'
 
 from mercurial import (
-    changegroup,
     extensions,
     hg,
     localrepo,
     registrar,
+    repository,
     verify as verifymod,
 )
 
 from . import (
     narrowbundle2,
     narrowchangegroup,
     narrowcommands,
     narrowcopies,
     narrowpatch,
     narrowrepo,
     narrowrevlog,
     narrowtemplates,
     narrowwirepeer,
 )
 
 configtable = {}
 configitem = registrar.configitem(configtable)
 # Narrowhg *has* support for serving ellipsis nodes (which are used at
 # least by Google's internal server), but that support is pretty
 # fragile and has a lot of problems on real-world repositories that
 # have complex graph topologies. This could probably be corrected, but
 # absent someone needing the full support for ellipsis nodes in
 # repositories with merges, it's unlikely this work will get done. As
 # of this writing in late 2017, all repositories large enough for
 # ellipsis nodes to be a hard requirement also enforce strictly linear
 # history for other scaling reasons.
 configitem('experimental', 'narrowservebrokenellipses',
            default=False,
            alias=[('narrow', 'serveellipses')],
 )
 
 # Export the commands table for Mercurial to see.
 cmdtable = narrowcommands.table
 
 def featuresetup(ui, features):
-    features.add(changegroup.NARROW_REQUIREMENT)
+    features.add(repository.NARROW_REQUIREMENT)
 
 def uisetup(ui):
     """Wraps user-facing mercurial commands with narrow-aware versions."""
     localrepo.featuresetupfuncs.add(featuresetup)
     narrowrevlog.setup()
     narrowbundle2.setup()
     narrowcommands.setup()
     narrowchangegroup.setup()
     narrowwirepeer.uisetup()
 
 def reposetup(ui, repo):
     """Wraps local repositories with narrow repo support."""
     if not repo.local():
         return
 
-    if changegroup.NARROW_REQUIREMENT in repo.requirements:
+    if repository.NARROW_REQUIREMENT in repo.requirements:
         narrowrepo.wraprepo(repo)
         narrowcopies.setup(repo)
         narrowpatch.setup(repo)
         narrowwirepeer.reposetup(repo)
 
 def _verifierinit(orig, self, repo, matcher=None):
     # The verifier's matcher argument was designed for narrowhg, so it should
     # be None from core. If another extension passes a matcher (unlikely),
     # we'll have to fail until matchers can be composed more easily.
     assert matcher is None
     orig(self, repo, repo.narrowmatch())
 
 def extsetup(ui):
     extensions.wrapfunction(verifymod.verifier, '__init__', _verifierinit)
     extensions.wrapfunction(hg, 'postshare', narrowrepo.wrappostshare)
     extensions.wrapfunction(hg, 'copystore', narrowrepo.unsharenarrowspec)
 
 templatekeyword = narrowtemplates.templatekeyword
 revsetpredicate = narrowtemplates.revsetpredicate
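The requirement constant itself is unchanged by this commit; only its home module moves from changegroup to repository. A minimal sketch of how downstream code checks it after the move (isnarrowrepo is a hypothetical helper for illustration, not part of this change):

import io  # unused here; see later sketches which share this module style
from mercurial import repository

def isnarrowrepo(repo):
    # Narrow clones record the requirement in .hg/requires, so membership
    # in repo.requirements is how reposetup() above detects them.
    return repository.NARROW_REQUIREMENT in repo.requirements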
diff --git a/hgext/narrow/narrowbundle2.py b/hgext/narrow/narrowbundle2.py
--- a/hgext/narrow/narrowbundle2.py
+++ b/hgext/narrow/narrowbundle2.py
@@ -1,308 +1,309 @@
 # narrowbundle2.py - bundle2 extensions for narrow repository support
 #
 # Copyright 2017 Google, Inc.
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 import struct
 
 from mercurial.i18n import _
 from mercurial.node import (
     bin,
     nullid,
 )
 from mercurial import (
     bundle2,
     changegroup,
     error,
     exchange,
     extensions,
     narrowspec,
     repair,
+    repository,
     util,
     wireprototypes,
 )
 from mercurial.utils import (
     stringutil,
 )
 
 NARROWCAP = 'narrow'
 _NARROWACL_SECTION = 'narrowhgacl'
 _CHANGESPECPART = NARROWCAP + ':changespec'
 _SPECPART = NARROWCAP + ':spec'
 _SPECPART_INCLUDE = 'include'
 _SPECPART_EXCLUDE = 'exclude'
 _KILLNODESIGNAL = 'KILL'
 _DONESIGNAL = 'DONE'
 _ELIDEDCSHEADER = '>20s20s20sl' # cset id, p1, p2, len(text)
 _ELIDEDMFHEADER = '>20s20s20s20sl' # manifest id, p1, p2, link id, len(text)
 _CSHEADERSIZE = struct.calcsize(_ELIDEDCSHEADER)
 _MFHEADERSIZE = struct.calcsize(_ELIDEDMFHEADER)
 
 # When advertising capabilities, always include narrow clone support.
 def getrepocaps_narrow(orig, repo, **kwargs):
     caps = orig(repo, **kwargs)
     caps[NARROWCAP] = ['v0']
     return caps
 
 def _packellipsischangegroup(repo, common, match, relevant_nodes,
                              ellipsisroots, visitnodes, depth, source, version):
     if version in ('01', '02'):
         raise error.Abort(
             'ellipsis nodes require at least cg3 on client and server, '
             'but negotiated version %s' % version)
     # We wrap cg1packer.revchunk, using a side channel to pass
     # relevant_nodes into that area. Then if linknode isn't in the
     # set, we know we have an ellipsis node and we should defer
     # sending that node's data. We override close() to detect
     # pending ellipsis nodes and flush them.
     packer = changegroup.getbundler(version, repo,
                                     filematcher=match)
     # Give the packer the list of nodes which should not be
     # ellipsis nodes. We store this rather than the set of nodes
     # that should be an ellipsis because for very large histories
     # we expect this to be significantly smaller.
     packer.full_nodes = relevant_nodes
     # Maps ellipsis revs to their roots at the changelog level.
     packer.precomputed_ellipsis = ellipsisroots
     # Maps CL revs to per-revlog revisions. Cleared in close() at
     # the end of each group.
     packer.clrev_to_localrev = {}
     packer.next_clrev_to_localrev = {}
     # Maps changelog nodes to changelog revs. Filled in once
     # during changelog stage and then left unmodified.
     packer.clnode_to_rev = {}
     packer.changelog_done = False
     # If true, informs the packer that it is serving shallow content and might
     # need to pack file contents not introduced by the changes being packed.
     packer.is_shallow = depth is not None
 
     return packer.generate(common, visitnodes, False, source)
 
 # Serve a changegroup for a client with a narrow clone.
 def getbundlechangegrouppart_narrow(bundler, repo, source,
                                     bundlecaps=None, b2caps=None, heads=None,
                                     common=None, **kwargs):
     assert repo.ui.configbool('experimental', 'narrowservebrokenellipses')
 
     cgversions = b2caps.get('changegroup')
     if cgversions: # 3.1 and 3.2 ship with an empty value
         cgversions = [v for v in cgversions
                       if v in changegroup.supportedoutgoingversions(repo)]
         if not cgversions:
             raise ValueError(_('no common changegroup version'))
         version = max(cgversions)
     else:
         raise ValueError(_("server does not advertise changegroup version,"
                            " can't negotiate support for ellipsis nodes"))
 
     include = sorted(filter(bool, kwargs.get(r'includepats', [])))
     exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
     newmatch = narrowspec.match(repo.root, include=include, exclude=exclude)
 
     depth = kwargs.get(r'depth', None)
     if depth is not None:
         depth = int(depth)
         if depth < 1:
             raise error.Abort(_('depth must be positive, got %d') % depth)
 
     heads = set(heads or repo.heads())
     common = set(common or [nullid])
     oldinclude = sorted(filter(bool, kwargs.get(r'oldincludepats', [])))
     oldexclude = sorted(filter(bool, kwargs.get(r'oldexcludepats', [])))
     known = {bin(n) for n in kwargs.get(r'known', [])}
     if known and (oldinclude != include or oldexclude != exclude):
         # Steps:
         # 1. Send kill for "$known & ::common"
         #
         # 2. Send changegroup for ::common
         #
         # 3. Proceed.
         #
         # In the future, we can send kills for only the specific
         # nodes we know should go away or change shape, and then
         # send a data stream that tells the client something like this:
         #
         # a) apply this changegroup
         # b) apply nodes XXX, YYY, ZZZ that you already have
         # c) goto a
         #
         # until they've built up the full new state.
         # Convert to revnums and intersect with "common". The client should
         # have made it a subset of "common" already, but let's be safe.
         known = set(repo.revs("%ln & ::%ln", known, common))
         # TODO: we could send only roots() of this set, and the
         # list of nodes in common, and the client could work out
         # what to strip, instead of us explicitly sending every
         # single node.
         deadrevs = known
         def genkills():
             for r in deadrevs:
                 yield _KILLNODESIGNAL
                 yield repo.changelog.node(r)
             yield _DONESIGNAL
         bundler.newpart(_CHANGESPECPART, data=genkills())
         newvisit, newfull, newellipsis = exchange._computeellipsis(
             repo, set(), common, known, newmatch)
         if newvisit:
             cg = _packellipsischangegroup(
                 repo, common, newmatch, newfull, newellipsis,
                 newvisit, depth, source, version)
             part = bundler.newpart('changegroup', data=cg)
             part.addparam('version', version)
             if 'treemanifest' in repo.requirements:
                 part.addparam('treemanifest', '1')
 
     visitnodes, relevant_nodes, ellipsisroots = exchange._computeellipsis(
         repo, common, heads, set(), newmatch, depth=depth)
 
     repo.ui.debug('Found %d relevant revs\n' % len(relevant_nodes))
     if visitnodes:
         cg = _packellipsischangegroup(
             repo, common, newmatch, relevant_nodes, ellipsisroots,
             visitnodes, depth, source, version)
         part = bundler.newpart('changegroup', data=cg)
         part.addparam('version', version)
         if 'treemanifest' in repo.requirements:
             part.addparam('treemanifest', '1')
 
 @bundle2.parthandler(_SPECPART, (_SPECPART_INCLUDE, _SPECPART_EXCLUDE))
 def _handlechangespec_2(op, inpart):
     includepats = set(inpart.params.get(_SPECPART_INCLUDE, '').splitlines())
     excludepats = set(inpart.params.get(_SPECPART_EXCLUDE, '').splitlines())
-    if not changegroup.NARROW_REQUIREMENT in op.repo.requirements:
-        op.repo.requirements.add(changegroup.NARROW_REQUIREMENT)
+    if not repository.NARROW_REQUIREMENT in op.repo.requirements:
+        op.repo.requirements.add(repository.NARROW_REQUIREMENT)
         op.repo._writerequirements()
     op.repo.setnarrowpats(includepats, excludepats)
 
 @bundle2.parthandler(_CHANGESPECPART)
 def _handlechangespec(op, inpart):
     repo = op.repo
     cl = repo.changelog
 
     # changesets which need to be stripped entirely. either they're no longer
     # needed in the new narrow spec, or the server is sending a replacement
     # in the changegroup part.
     clkills = set()
 
     # A changespec part contains all the updates to ellipsis nodes
     # that will happen as a result of widening or narrowing a
     # repo. All the changes that this block encounters are ellipsis
     # nodes or flags to kill an existing ellipsis.
     chunksignal = changegroup.readexactly(inpart, 4)
     while chunksignal != _DONESIGNAL:
         if chunksignal == _KILLNODESIGNAL:
             # a node used to be an ellipsis but isn't anymore
             ck = changegroup.readexactly(inpart, 20)
             if cl.hasnode(ck):
                 clkills.add(ck)
         else:
             raise error.Abort(
                 _('unexpected changespec node chunk type: %s') % chunksignal)
         chunksignal = changegroup.readexactly(inpart, 4)
 
     if clkills:
         # preserve bookmarks that repair.strip() would otherwise strip
         bmstore = repo._bookmarks
         class dummybmstore(dict):
             def applychanges(self, repo, tr, changes):
                 pass
             def recordchange(self, tr): # legacy version
                 pass
         repo._bookmarks = dummybmstore()
         chgrpfile = repair.strip(op.ui, repo, list(clkills), backup=True,
                                  topic='widen')
         repo._bookmarks = bmstore
         if chgrpfile:
             op._widen_uninterr = repo.ui.uninterruptable()
             op._widen_uninterr.__enter__()
             # presence of _widen_bundle attribute activates widen handler later
             op._widen_bundle = chgrpfile
     # Set the new narrowspec if we're widening. The setnewnarrowpats() method
     # will currently always be there when using the core+narrowhg server, but
     # other servers may include a changespec part even when not widening (e.g.
     # because we're deepening a shallow repo).
     if util.safehasattr(repo, 'setnewnarrowpats'):
         repo.setnewnarrowpats()
 
 def handlechangegroup_widen(op, inpart):
     """Changegroup exchange handler which restores temporarily-stripped nodes"""
     # We saved a bundle with stripped node data we must now restore.
     # This approach is based on mercurial/repair.py@6ee26a53c111.
     repo = op.repo
     ui = op.ui
 
     chgrpfile = op._widen_bundle
     del op._widen_bundle
     vfs = repo.vfs
 
     ui.note(_("adding branch\n"))
     f = vfs.open(chgrpfile, "rb")
     try:
         gen = exchange.readbundle(ui, f, chgrpfile, vfs)
         if not ui.verbose:
             # silence internal shuffling chatter
             ui.pushbuffer()
         if isinstance(gen, bundle2.unbundle20):
             with repo.transaction('strip') as tr:
                 bundle2.processbundle(repo, gen, lambda: tr)
         else:
             gen.apply(repo, 'strip', 'bundle:' + vfs.join(chgrpfile), True)
         if not ui.verbose:
             ui.popbuffer()
     finally:
         f.close()
 
     # remove undo files
     for undovfs, undofile in repo.undofiles():
         try:
             undovfs.unlink(undofile)
         except OSError as e:
             if e.errno != errno.ENOENT:
                 ui.warn(_('error removing %s: %s\n') %
                         (undovfs.join(undofile), stringutil.forcebytestr(e)))
 
     # Remove partial backup only if there were no exceptions
     op._widen_uninterr.__exit__(None, None, None)
     vfs.unlink(chgrpfile)
 
 def setup():
     """Enable narrow repo support in bundle2-related extension points."""
     extensions.wrapfunction(bundle2, 'getrepocaps', getrepocaps_narrow)
 
     getbundleargs = wireprototypes.GETBUNDLE_ARGUMENTS
 
     getbundleargs['narrow'] = 'boolean'
     getbundleargs['depth'] = 'plain'
     getbundleargs['oldincludepats'] = 'csv'
     getbundleargs['oldexcludepats'] = 'csv'
     getbundleargs['includepats'] = 'csv'
     getbundleargs['excludepats'] = 'csv'
     getbundleargs['known'] = 'csv'
 
     # Extend changegroup serving to handle requests from narrow clients.
     origcgfn = exchange.getbundle2partsmapping['changegroup']
     def wrappedcgfn(*args, **kwargs):
         repo = args[1]
         if repo.ui.has_section(_NARROWACL_SECTION):
             kwargs = exchange.applynarrowacl(repo, kwargs)
 
         if (kwargs.get(r'narrow', False) and
             repo.ui.configbool('experimental', 'narrowservebrokenellipses')):
             getbundlechangegrouppart_narrow(*args, **kwargs)
         else:
             origcgfn(*args, **kwargs)
     exchange.getbundle2partsmapping['changegroup'] = wrappedcgfn
 
     # Extend changegroup receiver so client can fixup after widen requests.
     origcghandler = bundle2.parthandlermapping['changegroup']
     def wrappedcghandler(op, inpart):
         origcghandler(op, inpart)
         if util.safehasattr(op, '_widen_bundle'):
             handlechangegroup_widen(op, inpart)
     wrappedcghandler.params = origcghandler.params
     bundle2.parthandlermapping['changegroup'] = wrappedcghandler
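For readers following genkills() and _handlechangespec() above: the changespec part is framed as a sequence of 4-byte signals, each _KILLNODESIGNAL followed by a 20-byte binary changelog node, terminated by _DONESIGNAL. A self-contained sketch of a reader for that framing, using io.BytesIO in place of a bundle2 part purely for illustration:

import io

_KILLNODESIGNAL = b'KILL'
_DONESIGNAL = b'DONE'

def readkills(stream):
    """Yield the 20-byte changelog nodes flagged for removal."""
    chunksignal = stream.read(4)
    while chunksignal != _DONESIGNAL:
        if chunksignal != _KILLNODESIGNAL:
            # mirrors the error.Abort in _handlechangespec above
            raise ValueError('unexpected changespec chunk type: %r'
                             % chunksignal)
        yield stream.read(20)
        chunksignal = stream.read(4)

# One killed node followed by the terminator, as genkills() would emit it.
payload = _KILLNODESIGNAL + b'\x11' * 20 + _DONESIGNAL
assert list(readkills(io.BytesIO(payload))) == [b'\x11' * 20]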
@@ -1,403 +1,403 b''
1 # narrowcommands.py - command modifications for narrowhg extension
1 # narrowcommands.py - command modifications for narrowhg extension
2 #
2 #
3 # Copyright 2017 Google, Inc.
3 # Copyright 2017 Google, Inc.
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7 from __future__ import absolute_import
7 from __future__ import absolute_import
8
8
9 import itertools
9 import itertools
10
10
11 from mercurial.i18n import _
11 from mercurial.i18n import _
12 from mercurial import (
12 from mercurial import (
13 changegroup,
14 cmdutil,
13 cmdutil,
15 commands,
14 commands,
16 discovery,
15 discovery,
17 error,
16 error,
18 exchange,
17 exchange,
19 extensions,
18 extensions,
20 hg,
19 hg,
21 merge,
20 merge,
22 narrowspec,
21 narrowspec,
23 node,
22 node,
24 pycompat,
23 pycompat,
25 registrar,
24 registrar,
26 repair,
25 repair,
26 repository,
27 repoview,
27 repoview,
28 util,
28 util,
29 )
29 )
30
30
31 from . import (
31 from . import (
32 narrowbundle2,
32 narrowbundle2,
33 )
33 )
34
34
35 table = {}
35 table = {}
36 command = registrar.command(table)
36 command = registrar.command(table)
37
37
38 def setup():
38 def setup():
39 """Wraps user-facing mercurial commands with narrow-aware versions."""
39 """Wraps user-facing mercurial commands with narrow-aware versions."""
40
40
41 entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
41 entry = extensions.wrapcommand(commands.table, 'clone', clonenarrowcmd)
42 entry[1].append(('', 'narrow', None,
42 entry[1].append(('', 'narrow', None,
43 _("create a narrow clone of select files")))
43 _("create a narrow clone of select files")))
44 entry[1].append(('', 'depth', '',
44 entry[1].append(('', 'depth', '',
45 _("limit the history fetched by distance from heads")))
45 _("limit the history fetched by distance from heads")))
46 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
46 # TODO(durin42): unify sparse/narrow --include/--exclude logic a bit
47 if 'sparse' not in extensions.enabled():
47 if 'sparse' not in extensions.enabled():
48 entry[1].append(('', 'include', [],
48 entry[1].append(('', 'include', [],
49 _("specifically fetch this file/directory")))
49 _("specifically fetch this file/directory")))
50 entry[1].append(
50 entry[1].append(
51 ('', 'exclude', [],
51 ('', 'exclude', [],
52 _("do not fetch this file/directory, even if included")))
52 _("do not fetch this file/directory, even if included")))
53
53
54 entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
54 entry = extensions.wrapcommand(commands.table, 'pull', pullnarrowcmd)
55 entry[1].append(('', 'depth', '',
55 entry[1].append(('', 'depth', '',
56 _("limit the history fetched by distance from heads")))
56 _("limit the history fetched by distance from heads")))
57
57
58 extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
58 extensions.wrapcommand(commands.table, 'archive', archivenarrowcmd)
59
59
60 def expandpull(pullop, includepats, excludepats):
60 def expandpull(pullop, includepats, excludepats):
61 if not narrowspec.needsexpansion(includepats):
61 if not narrowspec.needsexpansion(includepats):
62 return includepats, excludepats
62 return includepats, excludepats
63
63
64 heads = pullop.heads or pullop.rheads
64 heads = pullop.heads or pullop.rheads
65 includepats, excludepats = pullop.remote.expandnarrow(
65 includepats, excludepats = pullop.remote.expandnarrow(
66 includepats, excludepats, heads)
66 includepats, excludepats, heads)
67 pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
67 pullop.repo.ui.debug('Expanded narrowspec to inc=%s, exc=%s\n' % (
68 includepats, excludepats))
68 includepats, excludepats))
69 return set(includepats), set(excludepats)
69 return set(includepats), set(excludepats)
70
70
71 def clonenarrowcmd(orig, ui, repo, *args, **opts):
71 def clonenarrowcmd(orig, ui, repo, *args, **opts):
72 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
72 """Wraps clone command, so 'hg clone' first wraps localrepo.clone()."""
73 opts = pycompat.byteskwargs(opts)
73 opts = pycompat.byteskwargs(opts)
74 wrappedextraprepare = util.nullcontextmanager()
74 wrappedextraprepare = util.nullcontextmanager()
75 opts_narrow = opts['narrow']
75 opts_narrow = opts['narrow']
76 if opts_narrow:
76 if opts_narrow:
77 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
77 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
78 # Create narrow spec patterns from clone flags
78 # Create narrow spec patterns from clone flags
79 includepats = narrowspec.parsepatterns(opts['include'])
79 includepats = narrowspec.parsepatterns(opts['include'])
80 excludepats = narrowspec.parsepatterns(opts['exclude'])
80 excludepats = narrowspec.parsepatterns(opts['exclude'])
81
81
82 # If necessary, ask the server to expand the narrowspec.
82 # If necessary, ask the server to expand the narrowspec.
83 includepats, excludepats = expandpull(
83 includepats, excludepats = expandpull(
84 pullop, includepats, excludepats)
84 pullop, includepats, excludepats)
85
85
86 if not includepats and excludepats:
86 if not includepats and excludepats:
87 # If nothing was included, we assume the user meant to include
87 # If nothing was included, we assume the user meant to include
88 # everything, except what they asked to exclude.
88 # everything, except what they asked to exclude.
89 includepats = {'path:.'}
89 includepats = {'path:.'}
90
90
91 pullop.repo.setnarrowpats(includepats, excludepats)
91 pullop.repo.setnarrowpats(includepats, excludepats)
92
92
93 # This will populate 'includepats' etc with the values from the
93 # This will populate 'includepats' etc with the values from the
94 # narrowspec we just saved.
94 # narrowspec we just saved.
95 orig(pullop, kwargs)
95 orig(pullop, kwargs)
96
96
97 if opts.get('depth'):
97 if opts.get('depth'):
98 kwargs['depth'] = opts['depth']
98 kwargs['depth'] = opts['depth']
99 wrappedextraprepare = extensions.wrappedfunction(exchange,
99 wrappedextraprepare = extensions.wrappedfunction(exchange,
100 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
100 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
101
101
102 def pullnarrow(orig, repo, *args, **kwargs):
102 def pullnarrow(orig, repo, *args, **kwargs):
103 if opts_narrow:
103 if opts_narrow:
104 repo.requirements.add(changegroup.NARROW_REQUIREMENT)
104 repo.requirements.add(repository.NARROW_REQUIREMENT)
105 repo._writerequirements()
105 repo._writerequirements()
106
106
107 return orig(repo, *args, **kwargs)
107 return orig(repo, *args, **kwargs)
108
108
109 wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
109 wrappedpull = extensions.wrappedfunction(exchange, 'pull', pullnarrow)
110
110
111 with wrappedextraprepare, wrappedpull:
111 with wrappedextraprepare, wrappedpull:
112 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
112 return orig(ui, repo, *args, **pycompat.strkwargs(opts))
113
113
114 def pullnarrowcmd(orig, ui, repo, *args, **opts):
114 def pullnarrowcmd(orig, ui, repo, *args, **opts):
115 """Wraps pull command to allow modifying narrow spec."""
115 """Wraps pull command to allow modifying narrow spec."""
116 wrappedextraprepare = util.nullcontextmanager()
116 wrappedextraprepare = util.nullcontextmanager()
117 if changegroup.NARROW_REQUIREMENT in repo.requirements:
117 if repository.NARROW_REQUIREMENT in repo.requirements:
118
118
119 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
119 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
120 orig(pullop, kwargs)
120 orig(pullop, kwargs)
121 if opts.get(r'depth'):
121 if opts.get(r'depth'):
122 kwargs['depth'] = opts[r'depth']
122 kwargs['depth'] = opts[r'depth']
123 wrappedextraprepare = extensions.wrappedfunction(exchange,
123 wrappedextraprepare = extensions.wrappedfunction(exchange,
124 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
124 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
125
125
126 with wrappedextraprepare:
126 with wrappedextraprepare:
127 return orig(ui, repo, *args, **opts)
127 return orig(ui, repo, *args, **opts)
128
128
129 def archivenarrowcmd(orig, ui, repo, *args, **opts):
129 def archivenarrowcmd(orig, ui, repo, *args, **opts):
130 """Wraps archive command to narrow the default includes."""
130 """Wraps archive command to narrow the default includes."""
131 if changegroup.NARROW_REQUIREMENT in repo.requirements:
131 if repository.NARROW_REQUIREMENT in repo.requirements:
132 repo_includes, repo_excludes = repo.narrowpats
132 repo_includes, repo_excludes = repo.narrowpats
133 includes = set(opts.get(r'include', []))
133 includes = set(opts.get(r'include', []))
134 excludes = set(opts.get(r'exclude', []))
134 excludes = set(opts.get(r'exclude', []))
135 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
135 includes, excludes, unused_invalid = narrowspec.restrictpatterns(
136 includes, excludes, repo_includes, repo_excludes)
136 includes, excludes, repo_includes, repo_excludes)
137 if includes:
137 if includes:
138 opts[r'include'] = includes
138 opts[r'include'] = includes
139 if excludes:
139 if excludes:
140 opts[r'exclude'] = excludes
140 opts[r'exclude'] = excludes
141 return orig(ui, repo, *args, **opts)
141 return orig(ui, repo, *args, **opts)
142
142
143 def pullbundle2extraprepare(orig, pullop, kwargs):
143 def pullbundle2extraprepare(orig, pullop, kwargs):
144 repo = pullop.repo
144 repo = pullop.repo
145 if changegroup.NARROW_REQUIREMENT not in repo.requirements:
145 if repository.NARROW_REQUIREMENT not in repo.requirements:
146 return orig(pullop, kwargs)
146 return orig(pullop, kwargs)
147
147
148 if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
148 if narrowbundle2.NARROWCAP not in pullop.remotebundle2caps:
149 raise error.Abort(_("server doesn't support narrow clones"))
149 raise error.Abort(_("server doesn't support narrow clones"))
150 orig(pullop, kwargs)
150 orig(pullop, kwargs)
151 kwargs['narrow'] = True
151 kwargs['narrow'] = True
152 include, exclude = repo.narrowpats
152 include, exclude = repo.narrowpats
153 kwargs['oldincludepats'] = include
153 kwargs['oldincludepats'] = include
154 kwargs['oldexcludepats'] = exclude
154 kwargs['oldexcludepats'] = exclude
155 kwargs['includepats'] = include
155 kwargs['includepats'] = include
156 kwargs['excludepats'] = exclude
156 kwargs['excludepats'] = exclude
157 kwargs['known'] = [node.hex(ctx.node()) for ctx in
157 kwargs['known'] = [node.hex(ctx.node()) for ctx in
158 repo.set('::%ln', pullop.common)
158 repo.set('::%ln', pullop.common)
159 if ctx.node() != node.nullid]
159 if ctx.node() != node.nullid]
160 if not kwargs['known']:
160 if not kwargs['known']:
161 # Mercurial serialized an empty list as '' and deserializes it as
161 # Mercurial serialized an empty list as '' and deserializes it as
162 # [''], so delete it instead to avoid handling the empty string on the
162 # [''], so delete it instead to avoid handling the empty string on the
163 # server.
163 # server.
164 del kwargs['known']
164 del kwargs['known']
165
165
166 extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
166 extensions.wrapfunction(exchange,'_pullbundle2extraprepare',
167 pullbundle2extraprepare)
167 pullbundle2extraprepare)
168
168
169 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
169 def _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
170 newincludes, newexcludes, force):
170 newincludes, newexcludes, force):
171 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
171 oldmatch = narrowspec.match(repo.root, oldincludes, oldexcludes)
172 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
172 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
173
173
174 # This is essentially doing "hg outgoing" to find all local-only
174 # This is essentially doing "hg outgoing" to find all local-only
175 # commits. We will then check that the local-only commits don't
175 # commits. We will then check that the local-only commits don't
176 # have any changes to files that will be untracked.
176 # have any changes to files that will be untracked.
177 unfi = repo.unfiltered()
177 unfi = repo.unfiltered()
178 outgoing = discovery.findcommonoutgoing(unfi, remote,
178 outgoing = discovery.findcommonoutgoing(unfi, remote,
179 commoninc=commoninc)
179 commoninc=commoninc)
180 ui.status(_('looking for local changes to affected paths\n'))
180 ui.status(_('looking for local changes to affected paths\n'))
181 localnodes = []
181 localnodes = []
182 for n in itertools.chain(outgoing.missing, outgoing.excluded):
182 for n in itertools.chain(outgoing.missing, outgoing.excluded):
183 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
183 if any(oldmatch(f) and not newmatch(f) for f in unfi[n].files()):
184 localnodes.append(n)
184 localnodes.append(n)
185 revstostrip = unfi.revs('descendants(%ln)', localnodes)
185 revstostrip = unfi.revs('descendants(%ln)', localnodes)
186 hiddenrevs = repoview.filterrevs(repo, 'visible')
186 hiddenrevs = repoview.filterrevs(repo, 'visible')
187 visibletostrip = list(repo.changelog.node(r)
187 visibletostrip = list(repo.changelog.node(r)
188 for r in (revstostrip - hiddenrevs))
188 for r in (revstostrip - hiddenrevs))
189 if visibletostrip:
189 if visibletostrip:
190 ui.status(_('The following changeset(s) or their ancestors have '
190 ui.status(_('The following changeset(s) or their ancestors have '
191 'local changes not on the remote:\n'))
191 'local changes not on the remote:\n'))
192 maxnodes = 10
192 maxnodes = 10
193 if ui.verbose or len(visibletostrip) <= maxnodes:
193 if ui.verbose or len(visibletostrip) <= maxnodes:
194 for n in visibletostrip:
194 for n in visibletostrip:
195 ui.status('%s\n' % node.short(n))
195 ui.status('%s\n' % node.short(n))
196 else:
196 else:
197 for n in visibletostrip[:maxnodes]:
197 for n in visibletostrip[:maxnodes]:
198 ui.status('%s\n' % node.short(n))
198 ui.status('%s\n' % node.short(n))
199 ui.status(_('...and %d more, use --verbose to list all\n') %
199 ui.status(_('...and %d more, use --verbose to list all\n') %
200 (len(visibletostrip) - maxnodes))
200 (len(visibletostrip) - maxnodes))
201 if not force:
201 if not force:
202 raise error.Abort(_('local changes found'),
202 raise error.Abort(_('local changes found'),
203 hint=_('use --force-delete-local-changes to '
203 hint=_('use --force-delete-local-changes to '
204 'ignore'))
204 'ignore'))
205
205
206 with ui.uninterruptable():
206 with ui.uninterruptable():
207 if revstostrip:
207 if revstostrip:
208 tostrip = [unfi.changelog.node(r) for r in revstostrip]
208 tostrip = [unfi.changelog.node(r) for r in revstostrip]
209 if repo['.'].node() in tostrip:
209 if repo['.'].node() in tostrip:
210 # stripping working copy, so move to a different commit first
210 # stripping working copy, so move to a different commit first
211 urev = max(repo.revs('(::%n) - %ln + null',
211 urev = max(repo.revs('(::%n) - %ln + null',
212 repo['.'].node(), visibletostrip))
212 repo['.'].node(), visibletostrip))
213 hg.clean(repo, urev)
213 hg.clean(repo, urev)
214 repair.strip(ui, unfi, tostrip, topic='narrow')
214 repair.strip(ui, unfi, tostrip, topic='narrow')
215
215
216 todelete = []
216 todelete = []
217 for f, f2, size in repo.store.datafiles():
217 for f, f2, size in repo.store.datafiles():
218 if f.startswith('data/'):
218 if f.startswith('data/'):
219 file = f[5:-2]
219 file = f[5:-2]
220 if not newmatch(file):
220 if not newmatch(file):
221 todelete.append(f)
221 todelete.append(f)
222 elif f.startswith('meta/'):
222 elif f.startswith('meta/'):
223 dir = f[5:-13]
223 dir = f[5:-13]
224 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
224 dirs = ['.'] + sorted(util.dirs({dir})) + [dir]
225 include = True
225 include = True
226 for d in dirs:
226 for d in dirs:
227 visit = newmatch.visitdir(d)
227 visit = newmatch.visitdir(d)
228 if not visit:
228 if not visit:
229 include = False
229 include = False
230 break
230 break
231 if visit == 'all':
231 if visit == 'all':
232 break
232 break
233 if not include:
233 if not include:
234 todelete.append(f)
234 todelete.append(f)
235
235
236 repo.destroying()
236 repo.destroying()
237
237
238 with repo.transaction("narrowing"):
238 with repo.transaction("narrowing"):
239 for f in todelete:
239 for f in todelete:
240 ui.status(_('deleting %s\n') % f)
240 ui.status(_('deleting %s\n') % f)
241 util.unlinkpath(repo.svfs.join(f))
241 util.unlinkpath(repo.svfs.join(f))
242 repo.store.markremoved(f)
242 repo.store.markremoved(f)
243
243
244 for f in repo.dirstate:
244 for f in repo.dirstate:
245 if not newmatch(f):
245 if not newmatch(f):
246 repo.dirstate.drop(f)
246 repo.dirstate.drop(f)
247 repo.wvfs.unlinkpath(f)
247 repo.wvfs.unlinkpath(f)
248 repo.setnarrowpats(newincludes, newexcludes)
248 repo.setnarrowpats(newincludes, newexcludes)
249
249
250 repo.destroyed()
250 repo.destroyed()
251
251
252 def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
252 def _widen(ui, repo, remote, commoninc, newincludes, newexcludes):
253 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
253 newmatch = narrowspec.match(repo.root, newincludes, newexcludes)
254
254
255 # TODO(martinvonz): Get expansion working with widening/narrowing.
255 # TODO(martinvonz): Get expansion working with widening/narrowing.
256 if narrowspec.needsexpansion(newincludes):
256 if narrowspec.needsexpansion(newincludes):
257 raise error.Abort('Expansion not yet supported on pull')
257 raise error.Abort('Expansion not yet supported on pull')
258
258
259 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
259 def pullbundle2extraprepare_widen(orig, pullop, kwargs):
260 orig(pullop, kwargs)
260 orig(pullop, kwargs)
261 # The old{in,ex}cludepats have already been set by orig()
261 # The old{in,ex}cludepats have already been set by orig()
262 kwargs['includepats'] = newincludes
262 kwargs['includepats'] = newincludes
263 kwargs['excludepats'] = newexcludes
263 kwargs['excludepats'] = newexcludes
264 wrappedextraprepare = extensions.wrappedfunction(exchange,
264 wrappedextraprepare = extensions.wrappedfunction(exchange,
265 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
265 '_pullbundle2extraprepare', pullbundle2extraprepare_widen)
266
266
267 # define a function that narrowbundle2 can call after creating the
267 # define a function that narrowbundle2 can call after creating the
268 # backup bundle, but before applying the bundle from the server
268 # backup bundle, but before applying the bundle from the server
269 def setnewnarrowpats():
269 def setnewnarrowpats():
270 repo.setnarrowpats(newincludes, newexcludes)
270 repo.setnarrowpats(newincludes, newexcludes)
271 repo.setnewnarrowpats = setnewnarrowpats
271 repo.setnewnarrowpats = setnewnarrowpats
272
272
273 with ui.uninterruptable():
273 with ui.uninterruptable():
274 ds = repo.dirstate
274 ds = repo.dirstate
275 p1, p2 = ds.p1(), ds.p2()
275 p1, p2 = ds.p1(), ds.p2()
276 with ds.parentchange():
276 with ds.parentchange():
277 ds.setparents(node.nullid, node.nullid)
277 ds.setparents(node.nullid, node.nullid)
278 common = commoninc[0]
278 common = commoninc[0]
279 with wrappedextraprepare:
279 with wrappedextraprepare:
280 exchange.pull(repo, remote, heads=common)
280 exchange.pull(repo, remote, heads=common)
281 with ds.parentchange():
281 with ds.parentchange():
282 ds.setparents(p1, p2)
282 ds.setparents(p1, p2)
283
283
284 actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
284 actions = {k: [] for k in 'a am f g cd dc r dm dg m e k p pr'.split()}
285 addgaction = actions['g'].append
285 addgaction = actions['g'].append
286
286
287 mf = repo['.'].manifest().matches(newmatch)
287 mf = repo['.'].manifest().matches(newmatch)
288 for f, fn in mf.iteritems():
288 for f, fn in mf.iteritems():
289 if f not in repo.dirstate:
289 if f not in repo.dirstate:
290 addgaction((f, (mf.flags(f), False),
290 addgaction((f, (mf.flags(f), False),
291 "add from widened narrow clone"))
291 "add from widened narrow clone"))
292
292
293 merge.applyupdates(repo, actions, wctx=repo[None],
293 merge.applyupdates(repo, actions, wctx=repo[None],
294 mctx=repo['.'], overwrite=False)
294 mctx=repo['.'], overwrite=False)
295 merge.recordupdates(repo, actions, branchmerge=False)
295 merge.recordupdates(repo, actions, branchmerge=False)
296
296
297 # TODO(rdamazio): Make new matcher format and update description
297 # TODO(rdamazio): Make new matcher format and update description
298 @command('tracked',
298 @command('tracked',
299 [('', 'addinclude', [], _('new paths to include')),
299 [('', 'addinclude', [], _('new paths to include')),
300 ('', 'removeinclude', [], _('old paths to no longer include')),
300 ('', 'removeinclude', [], _('old paths to no longer include')),
301 ('', 'addexclude', [], _('new paths to exclude')),
301 ('', 'addexclude', [], _('new paths to exclude')),
302 ('', 'removeexclude', [], _('old paths to no longer exclude')),
302 ('', 'removeexclude', [], _('old paths to no longer exclude')),
303 ('', 'clear', False, _('whether to replace the existing narrowspec')),
303 ('', 'clear', False, _('whether to replace the existing narrowspec')),
304 ('', 'force-delete-local-changes', False,
304 ('', 'force-delete-local-changes', False,
305 _('forces deletion of local changes when narrowing')),
305 _('forces deletion of local changes when narrowing')),
306 ] + commands.remoteopts,
306 ] + commands.remoteopts,
307 _('[OPTIONS]... [REMOTE]'),
307 _('[OPTIONS]... [REMOTE]'),
308 inferrepo=True)
308 inferrepo=True)
309 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
309 def trackedcmd(ui, repo, remotepath=None, *pats, **opts):
310 """show or change the current narrowspec
310 """show or change the current narrowspec
311
311
312 With no argument, shows the current narrowspec entries, one per line. Each
312 With no argument, shows the current narrowspec entries, one per line. Each
313 line will be prefixed with 'I' or 'X' for included or excluded patterns,
313 line will be prefixed with 'I' or 'X' for included or excluded patterns,
314 respectively.
314 respectively.
315
315
316 The narrowspec is comprised of expressions to match remote files and/or
316 The narrowspec is comprised of expressions to match remote files and/or
317 directories that should be pulled into your client.
317 directories that should be pulled into your client.
318 The narrowspec has *include* and *exclude* expressions, with excludes always
318 The narrowspec has *include* and *exclude* expressions, with excludes always
319 trumping includes: that is, if a file matches an exclude expression, it will
319 trumping includes: that is, if a file matches an exclude expression, it will
320 be excluded even if it also matches an include expression.
320 be excluded even if it also matches an include expression.
    Excluding files that were never included has no effect.

    Each included or excluded entry is in the format described by
    'hg help patterns'.

    The options allow you to add or remove included and excluded expressions.

    If --clear is specified, then all previous includes and excludes are DROPPED
    and replaced by the new ones specified to --addinclude and --addexclude.
    If --clear is specified without any further options, the narrowspec will be
    empty and will not match any files.
    """
    opts = pycompat.byteskwargs(opts)
-    if changegroup.NARROW_REQUIREMENT not in repo.requirements:
+    if repository.NARROW_REQUIREMENT not in repo.requirements:
        ui.warn(_('The narrow command is only supported on repositories cloned'
                  ' with --narrow.\n'))
        return 1

    # Before adding support, decide whether "hg tracked --clear" should mean
    # tracking no paths or all paths.
    if opts['clear']:
        ui.warn(_('The --clear option is not yet supported.\n'))
        return 1

    if narrowspec.needsexpansion(opts['addinclude'] + opts['addexclude']):
        raise error.Abort('Expansion not yet supported on widen/narrow')

    addedincludes = narrowspec.parsepatterns(opts['addinclude'])
    removedincludes = narrowspec.parsepatterns(opts['removeinclude'])
    addedexcludes = narrowspec.parsepatterns(opts['addexclude'])
    removedexcludes = narrowspec.parsepatterns(opts['removeexclude'])
    widening = addedincludes or removedexcludes
    narrowing = removedincludes or addedexcludes
    only_show = not widening and not narrowing

    # Only print the current narrowspec.
    if only_show:
        include, exclude = repo.narrowpats

        ui.pager('tracked')
        fm = ui.formatter('narrow', opts)
        for i in sorted(include):
            fm.startitem()
            fm.write('status', '%s ', 'I', label='narrow.included')
            fm.write('pat', '%s\n', i, label='narrow.included')
        for i in sorted(exclude):
            fm.startitem()
            fm.write('status', '%s ', 'X', label='narrow.excluded')
            fm.write('pat', '%s\n', i, label='narrow.excluded')
        fm.end()
        return 0

    with repo.wlock(), repo.lock():
        cmdutil.bailifchanged(repo)

        # Find the revisions we have in common with the remote. These will
        # be used for finding local-only changes for narrowing. They will
        # also define the set of revisions to update for widening.
        remotepath = ui.expandpath(remotepath or 'default')
        url, branches = hg.parseurl(remotepath)
        ui.status(_('comparing with %s\n') % util.hidepassword(url))
        remote = hg.peer(repo, opts, url)
        commoninc = discovery.findcommonincoming(repo, remote)

        oldincludes, oldexcludes = repo.narrowpats
        if narrowing:
            newincludes = oldincludes - removedincludes
            newexcludes = oldexcludes | addedexcludes
            _narrow(ui, repo, remote, commoninc, oldincludes, oldexcludes,
                    newincludes, newexcludes,
                    opts['force_delete_local_changes'])
            # _narrow() updated the narrowspec, and _widen() below needs to
            # use the updated values as its base (otherwise removed includes
            # and added excludes would be lost in the resulting narrowspec)
            oldincludes = newincludes
            oldexcludes = newexcludes

        if widening:
            newincludes = oldincludes | addedincludes
            newexcludes = oldexcludes - removedexcludes
            _widen(ui, repo, remote, commoninc, newincludes, newexcludes)

    return 0
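
The widening/narrowing logic above is plain set algebra over pattern strings. A minimal sketch (illustration only; the patterns are made up, and real narrowspecs go through narrowspec.parsepatterns()):

    oldincludes = {'path:src', 'path:docs'}
    oldexcludes = {'path:src/vendor'}

    # narrowing first: drop includes, add excludes
    newincludes = oldincludes - {'path:docs'}
    newexcludes = oldexcludes | {'path:src/tests'}

    # widening uses the narrowed spec as its base, per the comment above
    newincludes = newincludes | {'path:tools'}
    newexcludes = newexcludes - {'path:src/vendor'}

    assert newincludes == {'path:src', 'path:tools'}
    assert newexcludes == {'path:src/tests'}
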
@@ -1,52 +1,52 @@
# narrowrepo.py - repository which supports narrow revlogs, lazy loading
#
# Copyright 2017 Google, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from mercurial import (
-    changegroup,
    hg,
    narrowspec,
+    repository,
)

from . import (
    narrowdirstate,
    narrowrevlog,
)

def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
    orig(sourcerepo, destrepo, **kwargs)
-    if changegroup.NARROW_REQUIREMENT in sourcerepo.requirements:
+    if repository.NARROW_REQUIREMENT in sourcerepo.requirements:
        with destrepo.wlock():
            with destrepo.vfs('shared', 'a') as fp:
                fp.write(narrowspec.FILENAME + '\n')

def unsharenarrowspec(orig, ui, repo, repopath):
-    if (changegroup.NARROW_REQUIREMENT in repo.requirements
+    if (repository.NARROW_REQUIREMENT in repo.requirements
        and repo.path == repopath and repo.shared()):
        srcrepo = hg.sharedreposource(repo)
        with srcrepo.vfs(narrowspec.FILENAME) as f:
            spec = f.read()
        with repo.vfs(narrowspec.FILENAME, 'w') as f:
            f.write(spec)
    return orig(ui, repo, repopath)

def wraprepo(repo):
    """Enables narrow clone functionality on a single local repository."""

    class narrowrepository(repo.__class__):

        def file(self, f):
            fl = super(narrowrepository, self).file(f)
            narrowrevlog.makenarrowfilelog(fl, self.narrowmatch())
            return fl

        def _makedirstate(self):
            dirstate = super(narrowrepository, self)._makedirstate()
            return narrowdirstate.wrapdirstate(self, dirstate)

    repo.__class__ = narrowrepository
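
wraprepo() uses Python's instance-class swap: assigning to repo.__class__ rebinds the instance to a subclass created on the fly, so existing references to the repo pick up the overridden methods. A standalone sketch of the pattern (toy classes, not Mercurial APIs):

    class base(object):
        def file(self, f):
            return 'filelog:%s' % f

    def wrap(obj):
        class narrowed(obj.__class__):
            def file(self, f):
                # delegate, then post-process the result
                return super(narrowed, self).file(f) + ' (narrowed)'
        obj.__class__ = narrowed

    r = base()
    wrap(r)
    assert r.file('a.txt') == 'filelog:a.txt (narrowed)'
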
@@ -1,1043 +1,1040 @@
# changegroup.py - Mercurial changegroup manipulation functions
#
# Copyright 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import os
import struct
import weakref

from .i18n import _
from .node import (
    hex,
    nullrev,
    short,
)

from . import (
    dagutil,
    error,
    manifest,
    match as matchmod,
    mdiff,
    phases,
    pycompat,
+    repository,
    util,
)

from .utils import (
    stringutil,
)

_CHANGEGROUPV1_DELTA_HEADER = "20s20s20s20s"
_CHANGEGROUPV2_DELTA_HEADER = "20s20s20s20s20s"
_CHANGEGROUPV3_DELTA_HEADER = ">20s20s20s20s20sH"

LFS_REQUIREMENT = 'lfs'

-# When narrowing is finalized and no longer subject to format changes,
-# we should move this to just "narrow" or similar.
-NARROW_REQUIREMENT = 'narrowhg-experimental'
-
readexactly = util.readexactly

def getchunk(stream):
    """return the next chunk from stream as a string"""
    d = readexactly(stream, 4)
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        if l:
            raise error.Abort(_("invalid chunk length %d") % l)
        return ""
    return readexactly(stream, l - 4)

def chunkheader(length):
    """return a changegroup chunk header (string)"""
    return struct.pack(">l", length + 4)

def closechunk():
    """return a changegroup chunk header (string) for a zero-length chunk"""
    return struct.pack(">l", 0)

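# Illustration (not part of changegroup.py): chunkheader()/getchunk() above
# implement a length-prefixed framing where each chunk is a big-endian 4-byte
# length (counting the length field itself) followed by the payload, and a
# zero length terminates a group. A self-contained round trip:
import io

def _frame(payload):
    return struct.pack(">l", len(payload) + 4) + payload

_stream = io.BytesIO(_frame(b'rev1') + _frame(b'rev2') + struct.pack(">l", 0))
_chunks = []
while True:
    _l = struct.unpack(">l", _stream.read(4))[0]
    if _l <= 4:
        break  # empty chunk ends the group
    _chunks.append(_stream.read(_l - 4))
assert _chunks == [b'rev1', b'rev2']
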
def writechunks(ui, chunks, filename, vfs=None):
    """Write chunks to a file and return its filename.

    The stream is assumed to be a bundle file.
    Existing files will not be overwritten.
    If no filename is specified, a temporary file is created.
    """
    fh = None
    cleanup = None
    try:
        if filename:
            if vfs:
                fh = vfs.open(filename, "wb")
            else:
                # Increase default buffer size because default is usually
                # small (4k is common on Linux).
                fh = open(filename, "wb", 131072)
        else:
            fd, filename = pycompat.mkstemp(prefix="hg-bundle-", suffix=".hg")
            fh = os.fdopen(fd, r"wb")
        cleanup = filename
        for c in chunks:
            fh.write(c)
        cleanup = None
        return filename
    finally:
        if fh is not None:
            fh.close()
        if cleanup is not None:
            if filename and vfs:
                vfs.unlink(cleanup)
            else:
                os.unlink(cleanup)

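# Usage sketch for writechunks() (illustration only; _fakevfs is a minimal
# stand-in for a Mercurial vfs, and ui may be None since writechunks() never
# touches it):
import io

class _fakevfs(object):
    def __init__(self):
        self.files = {}
    def open(self, name, mode):
        f = self.files[name] = io.BytesIO()
        f.close = lambda: None  # keep contents inspectable after close
        return f
    def unlink(self, name):
        del self.files[name]

_vfs = _fakevfs()
assert writechunks(None, [b'abc', b'def'], 'bundle.hg', vfs=_vfs) == 'bundle.hg'
assert _vfs.files['bundle.hg'].getvalue() == b'abcdef'
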
class cg1unpacker(object):
    """Unpacker for cg1 changegroup streams.

    A changegroup unpacker handles the framing of the revision data in
    the wire format. Most consumers will want to use the apply()
    method to add the changes from the changegroup to a repository.

    If you're forwarding a changegroup unmodified to another consumer,
    use getchunks(), which returns an iterator of changegroup
    chunks. This is mostly useful for cases where you need to know the
    data stream has ended by observing the end of the changegroup.

    deltachunk() is useful only if you're applying delta data. Most
    consumers should prefer apply() instead.

    A few other public methods exist. Those are used only for
    bundlerepo and some debug commands - their use is discouraged.
    """
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '01'
    _grouplistcount = 1 # One list of files after the manifests

    def __init__(self, fh, alg, extras=None):
        if alg is None:
            alg = 'UN'
        if alg not in util.compengines.supportedbundletypes:
            raise error.Abort(_('unknown stream compression type: %s')
                              % alg)
        if alg == 'BZ':
            alg = '_truncatedBZ'

        compengine = util.compengines.forbundletype(alg)
        self._stream = compengine.decompressorreader(fh)
        self._type = alg
        self.extras = extras or {}
        self.callback = None

    # These methods (compressed, read, seek, tell) all appear to only
    # be used by bundlerepo, but it's a little hard to tell.
    def compressed(self):
        return self._type is not None and self._type != 'UN'
    def read(self, l):
        return self._stream.read(l)
    def seek(self, pos):
        return self._stream.seek(pos)
    def tell(self):
        return self._stream.tell()
    def close(self):
        return self._stream.close()

    def _chunklength(self):
        d = readexactly(self._stream, 4)
        l = struct.unpack(">l", d)[0]
        if l <= 4:
            if l:
                raise error.Abort(_("invalid chunk length %d") % l)
            return 0
        if self.callback:
            self.callback()
        return l - 4

    def changelogheader(self):
        """v10 does not have a changelog header chunk"""
        return {}

    def manifestheader(self):
        """v10 does not have a manifest header chunk"""
        return {}

    def filelogheader(self):
        """return the header of the filelogs chunk, v10 only has the filename"""
        l = self._chunklength()
        if not l:
            return {}
        fname = readexactly(self._stream, l)
        return {'filename': fname}

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, cs = headertuple
        if prevnode is None:
            deltabase = p1
        else:
            deltabase = prevnode
        flags = 0
        return node, p1, p2, deltabase, cs, flags

    def deltachunk(self, prevnode):
        l = self._chunklength()
        if not l:
            return {}
        headerdata = readexactly(self._stream, self.deltaheadersize)
        header = struct.unpack(self.deltaheader, headerdata)
        delta = readexactly(self._stream, l - self.deltaheadersize)
        node, p1, p2, deltabase, cs, flags = self._deltaheader(header, prevnode)
        return (node, p1, p2, cs, deltabase, delta, flags)

    def getchunks(self):
        """returns all the chunks contained in the bundle

        Used when you need to forward the binary stream to a file or another
        network API. To do so, it parses the changegroup data rather than
        forwarding it blindly; with sshrepo it would otherwise block, because
        it doesn't know where the stream ends.
        """
        # For changegroup 1 and 2, we expect 3 parts: changelog, manifestlog,
        # and a list of filelogs. For changegroup 3, we expect 4 parts:
        # changelog, manifestlog, a list of tree manifestlogs, and a list of
        # filelogs.
        #
        # Changelog and manifestlog parts are terminated with empty chunks. The
        # tree and file parts are a list of entry sections. Each entry section
        # is a series of chunks terminating in an empty chunk. The list of these
        # entry sections is terminated in yet another empty chunk, so we know
        # we've reached the end of the tree/file list when we reach an empty
        # chunk that was preceded by no non-empty chunks.

        parts = 0
        while parts < 2 + self._grouplistcount:
            noentries = True
            while True:
                chunk = getchunk(self)
                if not chunk:
                    # The first two empty chunks represent the end of the
                    # changelog and the manifestlog portions. The remaining
                    # empty chunks represent either A) the end of individual
                    # tree or file entries in the file list, or B) the end of
                    # the entire list. It's the end of the entire list if there
                    # were no entries (i.e. noentries is True).
                    if parts < 2:
                        parts += 1
                    elif noentries:
                        parts += 1
                    break
                noentries = False
                yield chunkheader(len(chunk))
                pos = 0
                while pos < len(chunk):
                    next = pos + 2**20
                    yield chunk[pos:next]
                    pos = next
            yield closechunk()

    def _unpackmanifests(self, repo, revmap, trp, prog):
        self.callback = prog.increment
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifestheader()
        deltas = self.deltaiter()
        repo.manifestlog.addgroup(deltas, revmap, trp)
        prog.complete()
        self.callback = None

    def apply(self, repo, tr, srctype, url, targetphase=phases.draft,
              expectedtotal=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        repo = repo.unfiltered()
        def csmap(x):
            repo.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        changesets = files = revisions = 0

        try:
            # The transaction may already carry source information. In this
            # case we use the top level data. We overwrite the argument
            # because we need to use the top level value (if they exist)
            # in this function.
            srctype = tr.hookargs.setdefault('source', srctype)
            url = tr.hookargs.setdefault('url', url)
            repo.hook('prechangegroup',
                      throw=True, **pycompat.strkwargs(tr.hookargs))

            # write changelog data to temp files so concurrent readers
            # will not see an inconsistent view
            cl = repo.changelog
            cl.delayupdate(tr)
            oldheads = set(cl.heads())

            trp = weakref.proxy(tr)
            # pull off the changeset group
            repo.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            progress = repo.ui.makeprogress(_('changesets'), unit=_('chunks'),
                                            total=expectedtotal)
            self.callback = progress.increment

            efiles = set()
            def onchangelog(cl, node):
                efiles.update(cl.readfiles(node))

            self.changelogheader()
            deltas = self.deltaiter()
            cgnodes = cl.addgroup(deltas, csmap, trp, addrevisioncb=onchangelog)
            efiles = len(efiles)

            if not cgnodes:
                repo.ui.develwarn('applied empty changegroup',
                                  config='warn-empty-changegroup')
            clend = len(cl)
            changesets = clend - clstart
            progress.complete()
            self.callback = None

            # pull off the manifest group
            repo.ui.status(_("adding manifests\n"))
            # We know that we'll never have more manifests than we had
            # changesets.
            progress = repo.ui.makeprogress(_('manifests'), unit=_('chunks'),
                                            total=changesets)
            self._unpackmanifests(repo, revmap, trp, progress)

            needfiles = {}
            if repo.ui.configbool('server', 'validate'):
                cl = repo.changelog
                ml = repo.manifestlog
                # validate incoming csets have their manifests
                for cset in pycompat.xrange(clstart, clend):
                    mfnode = cl.changelogrevision(cset).manifest
                    mfest = ml[mfnode].readdelta()
                    # store file cgnodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            repo.ui.status(_("adding file changes\n"))
            newrevs, newfiles = _addchangegroupfiles(
                repo, self, revmap, trp, efiles, needfiles)
            revisions += newrevs
            files += newfiles

            deltaheads = 0
            if oldheads:
                heads = cl.heads()
                deltaheads = len(heads) - len(oldheads)
                for h in heads:
                    if h not in oldheads and repo[h].closesbranch():
                        deltaheads -= 1
            htext = ""
            if deltaheads:
                htext = _(" (%+d heads)") % deltaheads

            repo.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, htext))
            repo.invalidatevolatilesets()

            if changesets > 0:
                if 'node' not in tr.hookargs:
                    tr.hookargs['node'] = hex(cl.node(clstart))
                    tr.hookargs['node_last'] = hex(cl.node(clend - 1))
                    hookargs = dict(tr.hookargs)
                else:
                    hookargs = dict(tr.hookargs)
                    hookargs['node'] = hex(cl.node(clstart))
                    hookargs['node_last'] = hex(cl.node(clend - 1))
                repo.hook('pretxnchangegroup',
                          throw=True, **pycompat.strkwargs(hookargs))

            added = [cl.node(r) for r in pycompat.xrange(clstart, clend)]
            phaseall = None
            if srctype in ('push', 'serve'):
                # Old servers cannot push the boundary themselves.
                # New servers won't push the boundary if changeset already
                # exists locally as secret
                #
                # We should not use added here but the list of all changes in
                # the bundle
                if repo.publishing():
                    targetphase = phaseall = phases.public
                else:
                    # closer target phase computation

                    # Those changesets have been pushed from the
                    # outside, their phases are going to be pushed
                    # alongside. Therefore `targetphase` is
                    # ignored.
                    targetphase = phaseall = phases.draft
                if added:
                    phases.registernew(repo, tr, targetphase, added)
                if phaseall is not None:
                    phases.advanceboundary(repo, tr, phaseall, cgnodes)

            if changesets > 0:

                def runhooks():
                    # These hooks run when the lock releases, not when the
                    # transaction closes. So it's possible for the changelog
                    # to have changed since we last saw it.
                    if clstart >= len(repo):
                        return

                    repo.hook("changegroup", **pycompat.strkwargs(hookargs))

                    for n in added:
                        args = hookargs.copy()
                        args['node'] = hex(n)
                        del args['node_last']
                        repo.hook("incoming", **pycompat.strkwargs(args))

                    newheads = [h for h in repo.heads()
                                if h not in oldheads]
                    repo.ui.log("incoming",
                                "%d incoming changes - new heads: %s\n",
                                len(added),
                                ', '.join([hex(c[:6]) for c in newheads]))

                tr.addpostclose('changegroup-runhooks-%020i' % clstart,
                                lambda tr: repo._afterlock(runhooks))
        finally:
            repo.ui.flush()
        # never return 0 here:
        if deltaheads < 0:
            ret = deltaheads - 1
        else:
            ret = deltaheads + 1
        return ret

    def deltaiter(self):
        """
        returns an iterator of the deltas in this changegroup

        Useful for passing to the underlying storage system to be stored.
        """
        chain = None
        for chunkdata in iter(lambda: self.deltachunk(chain), {}):
            # Chunkdata: (node, p1, p2, cs, deltabase, delta, flags)
            yield chunkdata
            chain = chunkdata[0]

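# Aside on deltaiter() above: it relies on the two-argument form of iter(),
# iter(callable, sentinel), which keeps calling the callable until the return
# value equals the sentinel -- here {}, deltachunk()'s end-of-group marker.
# A standalone illustration:
_items = [1, 2, 3]

def _nextitem():
    return _items.pop(0) if _items else {}

assert list(iter(_nextitem, {})) == [1, 2, 3]
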
class cg2unpacker(cg1unpacker):
    """Unpacker for cg2 streams.

    cg2 streams add support for generaldelta, so the delta header
    format is slightly different. All other features about the data
    remain the same.
    """
    deltaheader = _CHANGEGROUPV2_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '02'

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs = headertuple
        flags = 0
        return node, p1, p2, deltabase, cs, flags

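# Aside: the three delta header formats differ only in their fields. cg1 packs
# (node, p1, p2, linknode) and infers the delta base from prevnode; cg2 adds
# an explicit deltabase; cg3 additionally appends a big-endian unsigned short
# of revlog flags. A quick check with struct (dummy hash values):
_node = b'\x11' * 20
assert struct.calcsize(_CHANGEGROUPV1_DELTA_HEADER) == 80
assert struct.calcsize(_CHANGEGROUPV2_DELTA_HEADER) == 100
assert struct.calcsize(_CHANGEGROUPV3_DELTA_HEADER) == 102

_hdr = struct.pack(_CHANGEGROUPV3_DELTA_HEADER,
                   _node, _node, _node, _node, _node, 1)
_fields = struct.unpack(_CHANGEGROUPV3_DELTA_HEADER, _hdr)
assert _fields[5] == 1  # the flags field
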
class cg3unpacker(cg2unpacker):
    """Unpacker for cg3 streams.

    cg3 streams add support for exchanging treemanifests and revlog
    flags. It adds the revlog flags to the delta header and an empty chunk
    separating manifests and files.
    """
    deltaheader = _CHANGEGROUPV3_DELTA_HEADER
    deltaheadersize = struct.calcsize(deltaheader)
    version = '03'
    _grouplistcount = 2 # One list of manifests and one list of files

    def _deltaheader(self, headertuple, prevnode):
        node, p1, p2, deltabase, cs, flags = headertuple
        return node, p1, p2, deltabase, cs, flags

    def _unpackmanifests(self, repo, revmap, trp, prog):
        super(cg3unpacker, self)._unpackmanifests(repo, revmap, trp, prog)
        for chunkdata in iter(self.filelogheader, {}):
            # If we get here, there are directory manifests in the changegroup
            d = chunkdata["filename"]
            repo.ui.debug("adding %s revisions\n" % d)
            dirlog = repo.manifestlog._revlog.dirlog(d)
            deltas = self.deltaiter()
            if not dirlog.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received dir revlog group is empty"))

class headerlessfixup(object):
    def __init__(self, fh, h):
        self._h = h
        self._fh = fh
    def read(self, n):
        if self._h:
            d, self._h = self._h[:n], self._h[n:]
            if len(d) < n:
                d += readexactly(self._fh, n - len(d))
            return d
        return readexactly(self._fh, n)

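# Aside on headerlessfixup above: it is a small push-back reader. After a
# caller consumes a few bytes to sniff the stream type, it glues those bytes
# back in front of the remaining stream. Sketch (the 6-byte header length is
# arbitrary):
import io

_raw = io.BytesIO(b'HG10UNrest-of-stream')
_header = _raw.read(6)  # sniff the type
_fh = headerlessfixup(_raw, _header)
assert _fh.read(6) == b'HG10UN'  # replayed header
assert _fh.read(4) == b'rest'    # then the underlying stream
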
class cg1packer(object):
    deltaheader = _CHANGEGROUPV1_DELTA_HEADER
    version = '01'
    def __init__(self, repo, filematcher, bundlecaps=None):
        """Given a source repo, construct a bundler.

        filematcher is a matcher that matches on files to include in the
        changegroup. Used to facilitate sparse changegroups.

        bundlecaps is optional and can be used to specify the set of
        capabilities which can be used to build the bundle. While bundlecaps is
        unused in core Mercurial, extensions rely on this feature to communicate
        capabilities to customize the changegroup packer.
        """
        assert filematcher
        self._filematcher = filematcher

        # Set of capabilities we can use to build the bundle.
        if bundlecaps is None:
            bundlecaps = set()
        self._bundlecaps = bundlecaps
        # experimental config: bundle.reorder
        reorder = repo.ui.config('bundle', 'reorder')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = stringutil.parsebool(reorder)
        self._repo = repo
        self._reorder = reorder
        if self._repo.ui.verbose and not self._repo.ui.debugflag:
            self._verbosenote = self._repo.ui.note
        else:
            self._verbosenote = lambda s: None

    def close(self):
        return closechunk()

    def fileheader(self, fname):
        return chunkheader(len(fname)) + fname

    # Extracted both for clarity and for overriding in extensions.
    def _sortgroup(self, revlog, nodelist, lookup):
        """Sort nodes for change group and turn them into revnums."""
        # for generaldelta revlogs, we linearize the revs; this will both be
        # much quicker and generate a much smaller bundle
        if (revlog._generaldelta and self._reorder is None) or self._reorder:
            dag = dagutil.revlogdag(revlog)
            return dag.linearize(set(revlog.rev(n) for n in nodelist))
        else:
            return sorted([revlog.rev(n) for n in nodelist])

    def group(self, nodelist, revlog, lookup, units=None):
        """Calculate a delta group, yielding a sequence of changegroup chunks
        (strings).

        Given a list of changeset revs, return a set of deltas and
        metadata corresponding to nodes. The first delta is
        first parent(nodelist[0]) -> nodelist[0], the receiver is
        guaranteed to have this parent as it has all history before
        these changesets. In the case firstparent is nullrev the
        changegroup starts with a full revision.

        If units is not None, progress detail will be generated; units
        specifies the type of revlog that is touched (changelog, manifest,
        etc.).
        """
        # if we don't have any revisions touched by these changesets, bail
        if len(nodelist) == 0:
            yield self.close()
            return

        revs = self._sortgroup(revlog, nodelist, lookup)

        # add the parent of the first rev
        p = revlog.parentrevs(revs[0])[0]
        revs.insert(0, p)

        # build deltas
        progress = None
        if units is not None:
            progress = self._repo.ui.makeprogress(_('bundling'), unit=units,
                                                  total=(len(revs) - 1))
        for r in pycompat.xrange(len(revs) - 1):
            if progress:
                progress.update(r + 1)
            prev, curr = revs[r], revs[r + 1]
            linknode = lookup(revlog.node(curr))
            for c in self.revchunk(revlog, curr, prev, linknode):
                yield c

        if progress:
            progress.complete()
        yield self.close()

    # filter any nodes that claim to be part of the known set
    def prune(self, revlog, missing, commonrevs):
        # TODO this violates storage abstraction for manifests.
        if isinstance(revlog, manifest.manifestrevlog):
            if not self._filematcher.visitdir(revlog._dir[:-1] or '.'):
                return []

        rr, rl = revlog.rev, revlog.linkrev
        return [n for n in missing if rl(rr(n)) not in commonrevs]

    def _packmanifests(self, dir, mfnodes, lookuplinknode):
        """Pack flat manifests into a changegroup stream."""
        assert not dir
        for chunk in self.group(mfnodes, self._repo.manifestlog._revlog,
                                lookuplinknode, units=_('manifests')):
            yield chunk

    def _manifestsdone(self):
        return ''

    def generate(self, commonrevs, clnodes, fastpathlinkrev, source):
        '''yield a sequence of changegroup chunks (strings)'''
        repo = self._repo
        cl = repo.changelog

        clrevorder = {}
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()

        # Callback for the changelog, used to collect changed files and manifest
        # nodes.
        # Returns the linkrev node (identity in the changelog case).
        def lookupcl(x):
            c = cl.read(x)
            clrevorder[x] = len(clrevorder)
            n = c[0]
            # record the first changeset introducing this manifest version
            mfs.setdefault(n, x)
            # Record a complete list of potentially-changed files in
            # this manifest.
            changedfiles.update(c[3])
            return x

        self._verbosenote(_('uncompressed size of bundle content:\n'))
        size = 0
        for chunk in self.group(clnodes, cl, lookupcl, units=_('changesets')):
            size += len(chunk)
            yield chunk
        self._verbosenote(_('%8.i (changelog)\n') % size)

        # We need to make sure that the linkrev in the changegroup refers to
        # the first changeset that introduced the manifest or file revision.
        # The fastpath is usually safer than the slowpath, because the filelogs
        # are walked in revlog order.
        #
        # When taking the slowpath with reorder=None and the manifest revlog
        # uses generaldelta, the manifest may be walked in the "wrong" order.
        # Without 'clrevorder', we would get an incorrect linkrev (see fix in
        # cc0ff93d0c0c).
        #
        # When taking the fastpath, we are only vulnerable to reordering
        # of the changelog itself. The changelog never uses generaldelta, so
        # it is only reordered when reorder=True. To handle this case, we
        # simply take the slowpath, which already has the 'clrevorder' logic.
        # This was also fixed in cc0ff93d0c0c.
        fastpathlinkrev = fastpathlinkrev and not self._reorder
        # Treemanifests don't work correctly with fastpathlinkrev
        # either, because we don't discover which directory nodes to
        # send along with files. This could probably be fixed.
        fastpathlinkrev = fastpathlinkrev and (
            'treemanifest' not in repo.requirements)

        for chunk in self.generatemanifests(commonrevs, clrevorder,
                fastpathlinkrev, mfs, fnodes, source):
            yield chunk
        mfs.clear()
        clrevs = set(cl.rev(x) for x in clnodes)

        if not fastpathlinkrev:
            def linknodes(unused, fname):
                return fnodes.get(fname, {})
        else:
            cln = cl.node
            def linknodes(filerevlog, fname):
                llr = filerevlog.linkrev
                fln = filerevlog.node
                revs = ((r, llr(r)) for r in filerevlog)
                return dict((fln(r), cln(lr)) for r, lr in revs if lr in clrevs)

        for chunk in self.generatefiles(changedfiles, linknodes, commonrevs,
                                        source):
            yield chunk

        yield self.close()

        if clnodes:
            repo.hook('outgoing', node=hex(clnodes[0]), source=source)

    def generatemanifests(self, commonrevs, clrevorder, fastpathlinkrev, mfs,
                          fnodes, source):
        """Returns an iterator of changegroup chunks containing manifests.

        `source` is unused here, but is used by extensions like remotefilelog to
        change what is sent based on pulls vs. pushes, etc.
        """
        repo = self._repo
        mfl = repo.manifestlog
        dirlog = mfl._revlog.dirlog
        tmfnodes = {'': mfs}

        # Callback for the manifest, used to collect linkrevs for filelog
        # revisions.
        # Returns the linkrev node (collected in lookupcl).
        def makelookupmflinknode(dir, nodes):
            if fastpathlinkrev:
                assert not dir
                return mfs.__getitem__

            def lookupmflinknode(x):
                """Callback for looking up the linknode for manifests.

                Returns the linkrev node for the specified manifest.

                SIDE EFFECT:

                1) fclnodes gets populated with the list of relevant
                   file nodes if we're not using fastpathlinkrev
                2) When treemanifests are in use, collects treemanifest nodes
                   to send

                Note that this means manifests must be completely sent to
                the client before you can trust the list of files and
                treemanifests to send.
                """
                clnode = nodes[x]
                mdata = mfl.get(dir, x).readfast(shallow=True)
                for p, n, fl in mdata.iterentries():
                    if fl == 't': # subdirectory manifest
                        subdir = dir + p + '/'
                        tmfclnodes = tmfnodes.setdefault(subdir, {})
                        tmfclnode = tmfclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[tmfclnode]:
                            tmfclnodes[n] = clnode
                    else:
                        f = dir + p
                        fclnodes = fnodes.setdefault(f, {})
                        fclnode = fclnodes.setdefault(n, clnode)
                        if clrevorder[clnode] < clrevorder[fclnode]:
                            fclnodes[n] = clnode
                return clnode
            return lookupmflinknode

        size = 0
        while tmfnodes:
            dir, nodes = tmfnodes.popitem()
            prunednodes = self.prune(dirlog(dir), nodes, commonrevs)
            if not dir or prunednodes:
                for x in self._packmanifests(dir, prunednodes,
                                             makelookupmflinknode(dir, nodes)):
                    size += len(x)
                    yield x
        self._verbosenote(_('%8.i (manifests)\n') % size)
        yield self._manifestsdone()

    # The 'source' parameter is useful for extensions
    def generatefiles(self, changedfiles, linknodes, commonrevs, source):
        repo = self._repo
        progress = repo.ui.makeprogress(_('bundling'), unit=_('files'),
                                        total=len(changedfiles))
        for i, fname in enumerate(sorted(changedfiles)):
            filerevlog = repo.file(fname)
            if not filerevlog:
                raise error.Abort(_("empty or missing file data for %s") %
                                  fname)

            linkrevnodes = linknodes(filerevlog, fname)
            # Lookup for filenodes; we collected the linkrev nodes above in the
            # fastpath case and with lookupmf in the slowpath case.
            def lookupfilelog(x):
                return linkrevnodes[x]

            filenodes = self.prune(filerevlog, linkrevnodes, commonrevs)
            if filenodes:
                progress.update(i + 1, item=fname)
                h = self.fileheader(fname)
                size = len(h)
                yield h
                for chunk in self.group(filenodes, filerevlog, lookupfilelog):
                    size += len(chunk)
                    yield chunk
                self._verbosenote(_('%8.i %s\n') % (size, fname))
        progress.complete()

    def deltaparent(self, revlog, rev, p1, p2, prev):
        if not revlog.candelta(prev, rev):
            raise error.ProgrammingError('cg1 should not be used in this case')
        return prev

    def revchunk(self, revlog, rev, prev, linknode):
        node = revlog.node(rev)
        p1, p2 = revlog.parentrevs(rev)
        base = self.deltaparent(revlog, rev, p1, p2, prev)

        prefix = ''
        if revlog.iscensored(base) or revlog.iscensored(rev):
            try:
                delta = revlog.revision(node, raw=True)
            except error.CensoredNodeError as e:
                delta = e.tombstone
            if base == nullrev:
                prefix = mdiff.trivialdiffheader(len(delta))
            else:
                baselen = revlog.rawsize(base)
                prefix = mdiff.replacediffheader(baselen, len(delta))
        elif base == nullrev:
            delta = revlog.revision(node, raw=True)
            prefix = mdiff.trivialdiffheader(len(delta))
        else:
            delta = revlog.revdiff(base, rev)
        p1n, p2n = revlog.parents(node)
        basenode = revlog.node(base)
        flags = revlog.flags(rev)
        meta = self.builddeltaheader(node, p1n, p2n, basenode, linknode, flags)
        meta += prefix
        l = len(meta) + len(delta)
        yield chunkheader(l)
        yield meta
819 yield delta
816 yield delta
820 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
817 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
821 # do nothing with basenode, it is implicitly the previous one in HG10
818 # do nothing with basenode, it is implicitly the previous one in HG10
822 # do nothing with flags, it is implicitly 0 for cg1 and cg2
819 # do nothing with flags, it is implicitly 0 for cg1 and cg2
823 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
820 return struct.pack(self.deltaheader, node, p1n, p2n, linknode)
824
821
825 class cg2packer(cg1packer):
822 class cg2packer(cg1packer):
826 version = '02'
823 version = '02'
827 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
824 deltaheader = _CHANGEGROUPV2_DELTA_HEADER
828
825
829 def __init__(self, repo, filematcher, bundlecaps=None):
826 def __init__(self, repo, filematcher, bundlecaps=None):
830 super(cg2packer, self).__init__(repo, filematcher,
827 super(cg2packer, self).__init__(repo, filematcher,
831 bundlecaps=bundlecaps)
828 bundlecaps=bundlecaps)
832
829
833 if self._reorder is None:
830 if self._reorder is None:
834 # Since generaldelta is directly supported by cg2, reordering
831 # Since generaldelta is directly supported by cg2, reordering
835 # generally doesn't help, so we disable it by default (treating
832 # generally doesn't help, so we disable it by default (treating
836 # bundle.reorder=auto just like bundle.reorder=False).
833 # bundle.reorder=auto just like bundle.reorder=False).
837 self._reorder = False
834 self._reorder = False
838
835
839 def deltaparent(self, revlog, rev, p1, p2, prev):
836 def deltaparent(self, revlog, rev, p1, p2, prev):
840 dp = revlog.deltaparent(rev)
837 dp = revlog.deltaparent(rev)
841 if dp == nullrev and revlog.storedeltachains:
838 if dp == nullrev and revlog.storedeltachains:
842 # Avoid sending full revisions when delta parent is null. Pick prev
839 # Avoid sending full revisions when delta parent is null. Pick prev
843 # in that case. It's tempting to pick p1 in this case, as p1 will
840 # in that case. It's tempting to pick p1 in this case, as p1 will
844 # be smaller in the common case. However, computing a delta against
841 # be smaller in the common case. However, computing a delta against
845 # p1 may require resolving the raw text of p1, which could be
842 # p1 may require resolving the raw text of p1, which could be
846 # expensive. The revlog caches should have prev cached, meaning
843 # expensive. The revlog caches should have prev cached, meaning
847 # less CPU for changegroup generation. There is likely room to add
844 # less CPU for changegroup generation. There is likely room to add
848 # a flag and/or config option to control this behavior.
845 # a flag and/or config option to control this behavior.
849 base = prev
846 base = prev
850 elif dp == nullrev:
847 elif dp == nullrev:
851 # revlog is configured to use full snapshot for a reason,
848 # revlog is configured to use full snapshot for a reason,
852 # stick to full snapshot.
849 # stick to full snapshot.
853 base = nullrev
850 base = nullrev
854 elif dp not in (p1, p2, prev):
851 elif dp not in (p1, p2, prev):
855 # Pick prev when we can't be sure remote has the base revision.
852 # Pick prev when we can't be sure remote has the base revision.
856 return prev
853 return prev
857 else:
854 else:
858 base = dp
855 base = dp
859 if base != nullrev and not revlog.candelta(base, rev):
856 if base != nullrev and not revlog.candelta(base, rev):
860 base = nullrev
857 base = nullrev
861 return base
858 return base
862
859
863 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
860 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
864 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
861 # Do nothing with flags, it is implicitly 0 in cg1 and cg2
865 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
862 return struct.pack(self.deltaheader, node, p1n, p2n, basenode, linknode)
866
863
867 class cg3packer(cg2packer):
864 class cg3packer(cg2packer):
868 version = '03'
865 version = '03'
869 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
866 deltaheader = _CHANGEGROUPV3_DELTA_HEADER
870
867
871 def _packmanifests(self, dir, mfnodes, lookuplinknode):
868 def _packmanifests(self, dir, mfnodes, lookuplinknode):
872 if dir:
869 if dir:
873 yield self.fileheader(dir)
870 yield self.fileheader(dir)
874
871
875 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
872 dirlog = self._repo.manifestlog._revlog.dirlog(dir)
876 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
873 for chunk in self.group(mfnodes, dirlog, lookuplinknode,
877 units=_('manifests')):
874 units=_('manifests')):
878 yield chunk
875 yield chunk
879
876
880 def _manifestsdone(self):
877 def _manifestsdone(self):
881 return self.close()
878 return self.close()
882
879
883 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
880 def builddeltaheader(self, node, p1n, p2n, basenode, linknode, flags):
884 return struct.pack(
881 return struct.pack(
885 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
882 self.deltaheader, node, p1n, p2n, basenode, linknode, flags)
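
# For reference (an editor's sketch, relying on the _CHANGEGROUPV*_DELTA_HEADER
# struct constants defined near the top of this module): each delta chunk
# starts with a fixed-size header of 20-byte nodes,
#
#   cg1: node, p1, p2, linknode                      (4 x 20s =  80 bytes)
#   cg2: node, p1, p2, basenode, linknode            (5 x 20s = 100 bytes)
#   cg3: cg2 fields plus a big-endian 16-bit flags   (102 bytes)
#
#   >>> import struct
#   >>> struct.calcsize(">20s20s20s20s20sH")
#   102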

_packermap = {'01': (cg1packer, cg1unpacker),
             # cg2 adds support for exchanging generaldelta
             '02': (cg2packer, cg2unpacker),
             # cg3 adds support for exchanging revlog flags and treemanifests
             '03': (cg3packer, cg3unpacker),
}

def allsupportedversions(repo):
    versions = set(_packermap.keys())
    if not (repo.ui.configbool('experimental', 'changegroup3') or
            repo.ui.configbool('experimental', 'treemanifest') or
            'treemanifest' in repo.requirements):
        versions.discard('03')
    return versions

# Changegroup versions that can be applied to the repo
def supportedincomingversions(repo):
    return allsupportedversions(repo)

# Changegroup versions that can be created from the repo
def supportedoutgoingversions(repo):
    versions = allsupportedversions(repo)
    if 'treemanifest' in repo.requirements:
        # Versions 01 and 02 support only flat manifests and it's just too
        # expensive to convert between the flat manifest and tree manifest on
        # the fly. Since tree manifests are hashed differently, all of history
        # would have to be converted. Instead, we simply don't even pretend to
        # support versions 01 and 02.
        versions.discard('01')
        versions.discard('02')
    if repository.NARROW_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # support that for stripping and unbundling to work.
        versions.discard('01')
        versions.discard('02')
    if LFS_REQUIREMENT in repo.requirements:
        # Versions 01 and 02 don't support revlog flags, and we need to
        # mark LFS entries with REVIDX_EXTSTORED.
        versions.discard('01')
        versions.discard('02')

    return versions
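
# For example (illustrative sketch, not part of the original module): on a
# repository whose requirements include 'treemanifest', the filtering above
# leaves only the tree-capable version:
#
#   supportedoutgoingversions(treerepo)   # hypothetical repo object
#       -> {'03'}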

def localversion(repo):
    # Finds the best version to use for bundles that are meant to be used
    # locally, such as those from strip and shelve, and temporary bundles.
    return max(supportedoutgoingversions(repo))

def safeversion(repo):
    # Finds the smallest version that it's safe to assume clients of the repo
    # will support. For example, all hg versions that support generaldelta also
    # support changegroup 02.
    versions = supportedoutgoingversions(repo)
    if 'generaldelta' in repo.requirements:
        versions.discard('01')
    assert versions
    return min(versions)
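
# For instance (editor's sketch with hypothetical repo objects):
#
#   safeversion(gdrepo)       # 'generaldelta' in requirements
#       -> '02'
#   safeversion(legacyrepo)   # no 'generaldelta'
#       -> '01'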

def getbundler(version, repo, bundlecaps=None, filematcher=None):
    assert version in supportedoutgoingversions(repo)

    if filematcher is None:
        filematcher = matchmod.alwaysmatcher(repo.root, '')

    if version == '01' and not filematcher.always():
        raise error.ProgrammingError('version 01 changegroups do not support '
                                     'sparse file matchers')

    # Requested files could include files not in the local store. So
    # filter those out.
    filematcher = matchmod.intersectmatchers(repo.narrowmatch(),
                                             filematcher)

    return _packermap[version][0](repo, filematcher=filematcher,
                                  bundlecaps=bundlecaps)

def getunbundler(version, fh, alg, extras=None):
    return _packermap[version][1](fh, alg, extras=extras)

def _changegroupinfo(repo, nodes, source):
    if repo.ui.verbose or source == 'bundle':
        repo.ui.status(_("%d changesets found\n") % len(nodes))
    if repo.ui.debugflag:
        repo.ui.debug("list of changesets:\n")
        for node in nodes:
            repo.ui.debug("%s\n" % hex(node))

def makechangegroup(repo, outgoing, version, source, fastpath=False,
                    bundlecaps=None):
    cgstream = makestream(repo, outgoing, version, source,
                          fastpath=fastpath, bundlecaps=bundlecaps)
    return getunbundler(version, util.chunkbuffer(cgstream), None,
                        {'clcount': len(outgoing.missing)})
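
# Typical use (editor's sketch; 'commonheads' and 'missingheads' are
# hypothetical node lists computed elsewhere, assuming the discovery module):
#
#   out = discovery.outgoing(repo, commonheads, missingheads)
#   cg = makechangegroup(repo, out, '02', 'push')
#   # 'cg' is an unpacker wrapping the generated stream, ready to be
#   # applied or written out by the caller.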

def makestream(repo, outgoing, version, source, fastpath=False,
               bundlecaps=None, filematcher=None):
    bundler = getbundler(version, repo, bundlecaps=bundlecaps,
                         filematcher=filematcher)

    repo = repo.unfiltered()
    commonrevs = outgoing.common
    csets = outgoing.missing
    heads = outgoing.missingheads
    # We go through the fast path if we get told to, or if all (unfiltered)
    # heads have been requested (since we then know all linkrevs will be
    # pulled by the client).
    heads.sort()
    fastpathlinkrev = fastpath or (
        repo.filtername is None and heads == sorted(repo.heads()))

    repo.hook('preoutgoing', throw=True, source=source)
    _changegroupinfo(repo, csets, source)
    return bundler.generate(commonrevs, csets, fastpathlinkrev, source)

def _addchangegroupfiles(repo, source, revmap, trp, expectedfiles, needfiles):
    revisions = 0
    files = 0
    progress = repo.ui.makeprogress(_('files'), unit=_('files'),
                                    total=expectedfiles)
    for chunkdata in iter(source.filelogheader, {}):
        files += 1
        f = chunkdata["filename"]
        repo.ui.debug("adding %s revisions\n" % f)
        progress.increment()
        fl = repo.file(f)
        o = len(fl)
        try:
            deltas = source.deltaiter()
            if not fl.addgroup(deltas, revmap, trp):
                raise error.Abort(_("received file revlog group is empty"))
        except error.CensoredBaseError as e:
            raise error.Abort(_("received delta base is censored: %s") % e)
        revisions += len(fl) - o
        if f in needfiles:
            needs = needfiles[f]
            for new in pycompat.xrange(o, len(fl)):
                n = fl.node(new)
                if n in needs:
                    needs.remove(n)
                else:
                    raise error.Abort(
                        _("received spurious file revlog entry"))
            if not needs:
                del needfiles[f]
    progress.complete()

    for f, needs in needfiles.iteritems():
        fl = repo.file(f)
        for n in needs:
            try:
                fl.rev(n)
            except error.LookupError:
                raise error.Abort(
                    _('missing file data for %s:%s - run hg verify') %
                    (f, hex(n)))

    return revisions, files
@@ -1,2622 +1,2623 @@
# exchange.py - utility to exchange data between repos.
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import collections
import hashlib

from .i18n import _
from .node import (
    bin,
    hex,
    nullid,
    nullrev,
)
from .thirdparty import (
    attr,
)
from . import (
    bookmarks as bookmod,
    bundle2,
    changegroup,
    dagutil,
    discovery,
    error,
    lock as lockmod,
    logexchange,
    narrowspec,
    obsolete,
    phases,
    pushkey,
    pycompat,
    repository,
    scmutil,
    sslutil,
    streamclone,
    url as urlmod,
    util,
)
from .utils import (
    stringutil,
)

urlerr = util.urlerr
urlreq = util.urlreq

_NARROWACL_SECTION = 'narrowhgacl'

# Maps bundle version human names to changegroup versions.
_bundlespeccgversions = {'v1': '01',
                         'v2': '02',
                         'packed1': 's1',
                         'bundle2': '02', # legacy
                        }

# Maps each bundle version to the content opts that choose which parts to
# bundle
_bundlespeccontentopts = {
    'v1': {
        'changegroup': True,
        'cg.version': '01',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': False,
        'revbranchcache': False
    },
    'v2': {
        'changegroup': True,
        'cg.version': '02',
        'obsolescence': False,
        'phases': False,
        'tagsfnodescache': True,
        'revbranchcache': True
    },
    'packed1': {
        'cg.version': 's1'
    }
}
_bundlespeccontentopts['bundle2'] = _bundlespeccontentopts['v2']

_bundlespecvariants = {"streamv2": {"changegroup": False, "streamv2": True,
                                    "tagsfnodescache": False,
                                    "revbranchcache": False}}

# Compression engines allowed in version 1. THIS SHOULD NEVER CHANGE.
_bundlespecv1compengines = {'gzip', 'bzip2', 'none'}

@attr.s
class bundlespec(object):
    compression = attr.ib()
    wirecompression = attr.ib()
    version = attr.ib()
    wireversion = attr.ib()
    params = attr.ib()
    contentopts = attr.ib()

def parsebundlespec(repo, spec, strict=True):
    """Parse a bundle string specification into parts.

    Bundle specifications denote a well-defined bundle/exchange format.
    The content of a given specification should not change over time in
    order to ensure that bundles produced by a newer version of Mercurial are
    readable from an older version.

    The string currently has the form:

       <compression>-<type>[;<parameter0>[;<parameter1>]]

    Where <compression> is one of the supported compression formats
    and <type> is (currently) a version string. A ";" can follow the type and
    all text afterwards is interpreted as URI encoded, ";" delimited key=value
    pairs.

    If ``strict`` is True (the default) <compression> is required. Otherwise,
    it is optional.

    Returns a bundlespec object of (compression, version, parameters).
    Compression will be ``None`` if not in strict mode and a compression isn't
    defined.

    An ``InvalidBundleSpecification`` is raised when the specification is
    not syntactically well formed.

    An ``UnsupportedBundleSpecification`` is raised when the compression or
    bundle type/version is not recognized.

    Note: this function will likely eventually return a more complex data
    structure, including bundle2 part information.
    """
    def parseparams(s):
        if ';' not in s:
            return s, {}

        params = {}
        version, paramstr = s.split(';', 1)

        for p in paramstr.split(';'):
            if '=' not in p:
                raise error.InvalidBundleSpecification(
                    _('invalid bundle specification: '
                      'missing "=" in parameter: %s') % p)

            key, value = p.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            params[key] = value

        return version, params

    if strict and '-' not in spec:
        raise error.InvalidBundleSpecification(
            _('invalid bundle specification; '
              'must be prefixed with compression: %s') % spec)

    if '-' in spec:
        compression, version = spec.split('-', 1)

        if compression not in util.compengines.supportedbundlenames:
            raise error.UnsupportedBundleSpecification(
                _('%s compression is not supported') % compression)

        version, params = parseparams(version)

        if version not in _bundlespeccgversions:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle version') % version)
    else:
        # Value could be just the compression or just the version, in which
        # case some defaults are assumed (but only when not in strict mode).
        assert not strict

        spec, params = parseparams(spec)

        if spec in util.compengines.supportedbundlenames:
            compression = spec
            version = 'v1'
            # Generaldelta repos require v2.
            if 'generaldelta' in repo.requirements:
                version = 'v2'
            # Modern compression engines require v2.
            if compression not in _bundlespecv1compengines:
                version = 'v2'
        elif spec in _bundlespeccgversions:
            if spec == 'packed1':
                compression = 'none'
            else:
                compression = 'bzip2'
            version = spec
        else:
            raise error.UnsupportedBundleSpecification(
                _('%s is not a recognized bundle specification') % spec)

    # Bundle version 1 only supports a known set of compression engines.
    if version == 'v1' and compression not in _bundlespecv1compengines:
        raise error.UnsupportedBundleSpecification(
            _('compression engine %s is not supported on v1 bundles') %
            compression)

    # The specification for packed1 can optionally declare the data formats
    # required to apply it. If we see this metadata, compare against what the
    # repo supports and error if the bundle isn't compatible.
    if version == 'packed1' and 'requirements' in params:
        requirements = set(params['requirements'].split(','))
        missingreqs = requirements - repo.supportedformats
        if missingreqs:
            raise error.UnsupportedBundleSpecification(
                _('missing support for repository features: %s') %
                ', '.join(sorted(missingreqs)))

    # Compute contentopts based on the version
    contentopts = _bundlespeccontentopts.get(version, {}).copy()

    # Process the variants
    if "stream" in params and params["stream"] == "v2":
        variant = _bundlespecvariants["streamv2"]
        contentopts.update(variant)

    engine = util.compengines.forbundlename(compression)
    compression, wirecompression = engine.bundletype()
    wireversion = _bundlespeccgversions[version]

    return bundlespec(compression, wirecompression, version, wireversion,
                      params, contentopts)
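
# Examples (editor's sketch, assuming a repo object):
#
#   parsebundlespec(repo, 'gzip-v2')
#       -> bundlespec(compression='gzip', wirecompression='GZ',
#                     version='v2', wireversion='02', params={}, ...)
#   parsebundlespec(repo, 'none-packed1;requirements=generaldelta')
#       -> version 'packed1', wire version 's1', with a 'requirements'
#          param that is checked against repo.supportedformats above.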

def readbundle(ui, fh, fname, vfs=None):
    header = changegroup.readexactly(fh, 4)

    alg = None
    if not fname:
        fname = "stream"
        if not header.startswith('HG') and header.startswith('\0'):
            fh = changegroup.headerlessfixup(fh, header)
            header = "HG10"
            alg = 'UN'
    elif vfs:
        fname = vfs.join(fname)

    magic, version = header[0:2], header[2:4]

    if magic != 'HG':
        raise error.Abort(_('%s: not a Mercurial bundle') % fname)
    if version == '10':
        if alg is None:
            alg = changegroup.readexactly(fh, 2)
        return changegroup.cg1unpacker(fh, alg)
    elif version.startswith('2'):
        return bundle2.getunbundler(ui, fh, magicstring=magic + version)
    elif version == 'S1':
        return streamclone.streamcloneapplier(fh)
    else:
        raise error.Abort(_('%s: unknown bundle version %s') % (fname, version))

def getbundlespec(ui, fh):
    """Infer the bundlespec from a bundle file handle.

    The input file handle is seeked and the original seek position is not
    restored.
    """
    def speccompression(alg):
        try:
            return util.compengines.forbundletype(alg).bundletype()[0]
        except KeyError:
            return None

    b = readbundle(ui, fh, None)
    if isinstance(b, changegroup.cg1unpacker):
        alg = b._type
        if alg == '_truncatedBZ':
            alg = 'BZ'
        comp = speccompression(alg)
        if not comp:
            raise error.Abort(_('unknown compression algorithm: %s') % alg)
        return '%s-v1' % comp
    elif isinstance(b, bundle2.unbundle20):
        if 'Compression' in b.params:
            comp = speccompression(b.params['Compression'])
            if not comp:
                raise error.Abort(_('unknown compression algorithm: %s') % comp)
        else:
            comp = 'none'

        version = None
        for part in b.iterparts():
            if part.type == 'changegroup':
                version = part.params['version']
                if version in ('01', '02'):
                    version = 'v2'
                else:
                    raise error.Abort(_('changegroup version %s does not have '
                                        'a known bundlespec') % version,
                                      hint=_('try upgrading your Mercurial '
                                             'client'))
            elif part.type == 'stream2' and version is None:
                # A stream2 part must be part of a v2 bundle
                version = "v2"
                requirements = urlreq.unquote(part.params['requirements'])
                splitted = requirements.split()
                params = bundle2._formatrequirementsparams(splitted)
                return 'none-v2;stream=v2;%s' % params

        if not version:
            raise error.Abort(_('could not identify changegroup version in '
                                'bundle'))

        return '%s-%s' % (comp, version)
    elif isinstance(b, streamclone.streamcloneapplier):
        requirements = streamclone.readbundle1header(fh)[2]
        formatted = bundle2._formatrequirementsparams(requirements)
        return 'none-packed1;%s' % formatted
    else:
        raise error.Abort(_('unknown bundle type: %s') % b)
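
# Usage sketch (hypothetical path; not part of the original module):
#
#   with open(bundlepath, 'rb') as fh:
#       spec = getbundlespec(ui, fh)
#   # e.g. 'bzip2-v1', 'gzip-v2' or 'none-packed1;requirements=...'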

def _computeoutgoing(repo, heads, common):
    """Computes which revs are outgoing given a set of common
    and a set of heads.

    This is a separate function so extensions can have access to
    the logic.

    Returns a discovery.outgoing object.
    """
    cl = repo.changelog
    if common:
        hasnode = cl.hasnode
        common = [n for n in common if hasnode(n)]
    else:
        common = [nullid]
    if not heads:
        heads = cl.heads()
    return discovery.outgoing(repo, common, heads)

def _forcebundle1(op):
    """return true if a pull/push must use bundle1

    This function is used to allow testing of the older bundle version"""
    ui = op.repo.ui
    # The goal of this config option is to let developers choose the bundle
    # version used during exchange. This is especially handy during tests.
    # The value is a list of bundle versions to pick from; the highest
    # supported version should be used.
    #
    # developer config: devel.legacy.exchange
    exchange = ui.configlist('devel', 'legacy.exchange')
    forcebundle1 = 'bundle2' not in exchange and 'bundle1' in exchange
    return forcebundle1 or not op.remote.capable('bundle2')
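
# In tests this is typically driven through the developer config, e.g.
# (sketch):
#
#   [devel]
#   legacy.exchange = bundle1
#
# which forces the bundle1 code path even against bundle2-capable peers.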

class pushoperation(object):
    """An object that represents a single push operation.

    Its purpose is to carry push-related state and very common operations.

    A new pushoperation should be created at the beginning of each push and
    discarded afterward.
    """

    def __init__(self, repo, remote, force=False, revs=None, newbranch=False,
                 bookmarks=(), pushvars=None):
        # repo we push from
        self.repo = repo
        self.ui = repo.ui
        # repo we push to
        self.remote = remote
        # force option provided
        self.force = force
        # revs to be pushed (None is "all")
        self.revs = revs
        # bookmarks explicitly pushed
        self.bookmarks = bookmarks
        # allow push of new branch
        self.newbranch = newbranch
        # steps already performed
        # (used to check what steps have been already performed through bundle2)
        self.stepsdone = set()
        # Integer version of the changegroup push result
        # - None means nothing to push
        # - 0 means HTTP error
        # - 1 means we pushed and remote head count is unchanged *or*
        #   we have outgoing changesets but refused to push
        # - other values as described by addchangegroup()
        self.cgresult = None
        # Boolean value for the bookmark push
        self.bkresult = None
        # discovery.outgoing object (contains common and outgoing data)
        self.outgoing = None
        # all remote topological heads before the push
        self.remoteheads = None
        # Details of the remote branch pre and post push
        #
        # mapping: {'branch': ([remoteheads],
        #                      [newheads],
        #                      [unsyncedheads],
        #                      [discardedheads])}
        # - branch: the branch name
        # - remoteheads: the list of remote heads known locally
        #                None if the branch is new
        # - newheads: the new remote heads (known locally) with outgoing pushed
        # - unsyncedheads: the list of remote heads unknown locally.
        # - discardedheads: the list of remote heads made obsolete by the push
        self.pushbranchmap = None
        # testable as a boolean indicating if any nodes are missing locally.
        self.incoming = None
        # summary of the remote phase situation
        self.remotephases = None
        # phases changes that must be pushed alongside the changesets
        self.outdatedphases = None
        # phases changes that must be pushed if changeset push fails
        self.fallbackoutdatedphases = None
        # outgoing obsmarkers
        self.outobsmarkers = set()
        # outgoing bookmarks
        self.outbookmarks = []
        # transaction manager
        self.trmanager = None
        # map { pushkey partid -> callback handling failure}
        # used to handle exception from mandatory pushkey part failure
        self.pkfailcb = {}
        # an iterable of pushvars or None
        self.pushvars = pushvars

    @util.propertycache
    def futureheads(self):
        """future remote heads if the changeset push succeeds"""
        return self.outgoing.missingheads

    @util.propertycache
    def fallbackheads(self):
        """future remote heads if the changeset push fails"""
        if self.revs is None:
            # no revs targeted, all common heads are relevant
            return self.outgoing.commonheads
        unfi = self.repo.unfiltered()
        # I want cheads = heads(::missingheads and ::commonheads)
        # (missingheads is revs with secret changesets filtered out)
        #
        # This can be expressed as:
        #     cheads = ( (missingheads and ::commonheads)
        #              + (commonheads and ::missingheads))
        #
        # while trying to push we already computed the following:
        #     common = (::commonheads)
        #     missing = ((commonheads::missingheads) - commonheads)
        #
        # We can pick:
        # * missingheads part of common (::commonheads)
        common = self.outgoing.common
        nm = self.repo.changelog.nodemap
        cheads = [node for node in self.revs if nm[node] in common]
        # and
        # * commonheads parents on missing
        revset = unfi.set('%ln and parents(roots(%ln))',
                          self.outgoing.commonheads,
                          self.outgoing.missing)
        cheads.extend(c.node() for c in revset)
        return cheads

    @property
    def commonheads(self):
        """set of all common heads after changeset bundle push"""
        if self.cgresult:
            return self.futureheads
        else:
            return self.fallbackheads

# mapping of messages used when pushing bookmarks
bookmsgmap = {'update': (_("updating bookmark %s\n"),
                         _('updating bookmark %s failed!\n')),
              'export': (_("exporting bookmark %s\n"),
                         _('exporting bookmark %s failed!\n')),
              'delete': (_("deleting remote bookmark %s\n"),
                         _('deleting remote bookmark %s failed!\n')),
              }


def push(repo, remote, force=False, revs=None, newbranch=False, bookmarks=(),
         opargs=None):
    '''Push outgoing changesets (limited by revs) from a local
    repository to remote. Return an integer:
      - None means nothing to push
      - 0 means HTTP error
      - 1 means we pushed and remote head count is unchanged *or*
        we have outgoing changesets but refused to push
      - other values as described by addchangegroup()
    '''
    if opargs is None:
        opargs = {}
    pushop = pushoperation(repo, remote, force, revs, newbranch, bookmarks,
                           **pycompat.strkwargs(opargs))
    if pushop.remote.local():
        missing = (set(pushop.repo.requirements)
                   - pushop.remote.local().supported)
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    if not pushop.remote.canpush():
        raise error.Abort(_("destination does not support push"))

    if not pushop.remote.capable('unbundle'):
        raise error.Abort(_('cannot push: destination does not support the '
                            'unbundle wire protocol command'))

    # get lock as we might write phase data
    wlock = lock = None
    try:
        # bundle2 push may receive a reply bundle touching bookmarks or other
        # things requiring the wlock. Take it now to ensure proper ordering.
        maypushback = pushop.ui.configbool('experimental', 'bundle2.pushback')
        if (not _forcebundle1(pushop)) and maypushback:
            wlock = pushop.repo.wlock()
        lock = pushop.repo.lock()
        pushop.trmanager = transactionmanager(pushop.repo,
                                              'push-response',
                                              pushop.remote.url())
    except error.LockUnavailable as err:
        # source repo cannot be locked.
        # We do not abort the push, but just disable the local phase
        # synchronisation.
        msg = 'cannot lock source repository: %s\n' % err
        pushop.ui.debug(msg)

    with wlock or util.nullcontextmanager(), \
            lock or util.nullcontextmanager(), \
            pushop.trmanager or util.nullcontextmanager():
        pushop.repo.checkpush(pushop)
        _pushdiscovery(pushop)
        if not _forcebundle1(pushop):
            _pushbundle2(pushop)
        _pushchangeset(pushop)
        _pushsyncphase(pushop)
        _pushobsolete(pushop)
        _pushbookmark(pushop)

    if repo.ui.configbool('experimental', 'remotenames'):
        logexchange.pullremotenames(repo, remote)

    return pushop
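
# Caller-side sketch (editor's example with hypothetical objects; hg.peer is
# the usual way to obtain `remote`, and is not imported by this module):
#
#   remote = hg.peer(repo, {}, 'https://example.com/repo')
#   pushop = push(repo, remote, revs=[repo['.'].node()])
#   if pushop.cgresult == 0:
#       ui.warn('push failed with an HTTP error\n')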

# list of steps to perform discovery before push
pushdiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pushdiscoverymapping = {}

def pushdiscovery(stepname):
    """decorator for functions performing discovery before push

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for a new step; if you want to wrap a
    step from an extension, change the pushdiscoverymapping dictionary
    directly."""
    def dec(func):
        assert stepname not in pushdiscoverymapping
        pushdiscoverymapping[stepname] = func
        pushdiscoveryorder.append(stepname)
        return func
    return dec
567
568
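# A minimal sketch of how an extension could register an extra discovery
# step; the step name 'example' and the function are hypothetical:
#
#     @pushdiscovery('example')
#     def _pushdiscoveryexample(pushop):
#         pushop.ui.debug('example discovery step ran\n')
#
# Wrapping an *existing* step instead means replacing its entry in
# pushdiscoverymapping, as the docstring above notes.
#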
def _pushdiscovery(pushop):
    """Run all discovery steps"""
    for stepname in pushdiscoveryorder:
        step = pushdiscoverymapping[stepname]
        step(pushop)

@pushdiscovery('changeset')
def _pushdiscoverychangeset(pushop):
    """discover the changesets that need to be pushed"""
    fci = discovery.findcommonincoming
    if pushop.revs:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force,
                        ancestorsof=pushop.revs)
    else:
        commoninc = fci(pushop.repo, pushop.remote, force=pushop.force)
    common, inc, remoteheads = commoninc
    fco = discovery.findcommonoutgoing
    outgoing = fco(pushop.repo, pushop.remote, onlyheads=pushop.revs,
                   commoninc=commoninc, force=pushop.force)
    pushop.outgoing = outgoing
    pushop.remoteheads = remoteheads
    pushop.incoming = inc

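# For orientation (inferred from the calls above, not new behavior): `common`
# holds nodes known to both sides, `inc` signals that the remote has changes
# we lack, `remoteheads` are the heads the remote advertised, and `outgoing`
# wraps the changesets this push would send.
#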
@pushdiscovery('phase')
def _pushdiscoveryphase(pushop):
    """discover the phases that need to be pushed

    (computed for both success and failure case for changesets push)"""
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    remotephases = listkeys(pushop.remote, 'phases')

    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and not pushop.outgoing.missing # no changesets to be pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changesets are to be pushed
        # - and the remote is publishing
        # We may be in issue 3781 case!
        # We drop the phase synchronisation that would otherwise, as a
        # courtesy, publish changesets that are possibly still draft
        # on the remote.
        pushop.outdatedphases = []
        pushop.fallbackoutdatedphases = []
        return

    pushop.remotephases = phases.remotephasessummary(pushop.repo,
                                                     pushop.fallbackheads,
                                                     remotephases)
    droots = pushop.remotephases.draftroots

    extracond = ''
    if not pushop.remotephases.publishing:
        extracond = ' and public()'
    revset = 'heads((%%ln::%%ln) %s)' % extracond
    # Get the list of all revs that are draft on the remote but public here.
    # XXX Beware that the revset breaks if droots is not strictly a set of
    # XXX roots; we may want to ensure that it is, but that is costly.
    fallback = list(unfi.set(revset, droots, pushop.fallbackheads))
    if not outgoing.missing:
        future = fallback
    else:
        # adds the changesets we are going to push as draft
        #
        # should not be necessary for publishing server, but because of an
        # issue fixed in xxxxx we have to do it anyway.
        fdroots = list(unfi.set('roots(%ln + %ln::)',
                       outgoing.missing, droots))
        fdroots = [f.node() for f in fdroots]
        future = list(unfi.set(revset, fdroots, pushop.futureheads))
    pushop.outdatedphases = future
    pushop.fallbackoutdatedphases = fallback

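# Note on the revset built above (explanatory only): the '%%ln' placeholders
# survive the '%' formatting as '%ln', so for a non-publishing remote the
# final query is effectively
#
#     heads((<droots>::<heads>) and public())
#
# i.e. the heads among the remote's draft descendants that are already
# public locally and therefore need a phase update on the remote.
#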
@pushdiscovery('obsmarker')
def _pushdiscoveryobsmarkers(pushop):
    if not obsolete.isenabled(pushop.repo, obsolete.exchangeopt):
        return

    if not pushop.repo.obsstore:
        return

    if 'obsolete' not in listkeys(pushop.remote, 'namespaces'):
        return

    repo = pushop.repo
    # very naive computation that can be quite expensive on big repos.
    # However: evolution is currently slow on them anyway.
    nodes = (c.node() for c in repo.set('::%ln', pushop.futureheads))
    pushop.outobsmarkers = pushop.repo.obsstore.relevantmarkers(nodes)

@pushdiscovery('bookmarks')
def _pushdiscoverybookmarks(pushop):
    ui = pushop.ui
    repo = pushop.repo.unfiltered()
    remote = pushop.remote
    ui.debug("checking for updated bookmarks\n")
    ancestors = ()
    if pushop.revs:
        revnums = pycompat.maplist(repo.changelog.rev, pushop.revs)
        ancestors = repo.changelog.ancestors(revnums, inclusive=True)

    remotebookmark = listkeys(remote, 'bookmarks')

    explicit = set([repo._bookmarks.expandname(bookmark)
                    for bookmark in pushop.bookmarks])

    remotebookmark = bookmod.unhexlifybookmarks(remotebookmark)
    comp = bookmod.comparebookmarks(repo, repo._bookmarks, remotebookmark)

    def safehex(x):
        if x is None:
            return x
        return hex(x)

    def hexifycompbookmarks(bookmarks):
        return [(b, safehex(scid), safehex(dcid))
                for (b, scid, dcid) in bookmarks]

    comp = [hexifycompbookmarks(marks) for marks in comp]
    return _processcompared(pushop, ancestors, explicit, remotebookmark, comp)

def _processcompared(pushop, pushed, explicit, remotebms, comp):
    """decide which bookmark changes to send from the comparison with the
    remote bookmarks

    Exists to help extensions that want to alter this behavior.
    """
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = comp

    repo = pushop.repo

    for b, scid, dcid in advsrc:
        if b in explicit:
            explicit.remove(b)
        if not pushed or repo[scid].rev() in pushed:
            pushop.outbookmarks.append((b, dcid, scid))
    # search for added bookmarks
    for b, scid, dcid in addsrc:
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, '', scid))
    # search for overwritten bookmarks
    for b, scid, dcid in list(advdst) + list(diverge) + list(differ):
        if b in explicit:
            explicit.remove(b)
        pushop.outbookmarks.append((b, dcid, scid))
    # search for bookmarks to delete
    for b, scid, dcid in adddst:
        if b in explicit:
            explicit.remove(b)
        # treat as "deleted locally"
        pushop.outbookmarks.append((b, dcid, ''))
    # identical bookmarks shouldn't get reported
    for b, scid, dcid in same:
        if b in explicit:
            explicit.remove(b)

    if explicit:
        explicit = sorted(explicit)
        # we should probably list all of them
        pushop.ui.warn(_('bookmark %s does not exist on the local '
                         'or remote repository!\n') % explicit[0])
        pushop.bkresult = 2

    pushop.outbookmarks.sort()

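# Each pushop.outbookmarks entry built above is a (name, old, new) triple of
# hex node ids: old is what the remote currently has ('' for an addition)
# and new is what we want it to become ('' for a deletion). This matches how
# _pushbookmark and the bundle2 bookmark part generators below unpack it.
#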
def _pushcheckoutgoing(pushop):
    outgoing = pushop.outgoing
    unfi = pushop.repo.unfiltered()
    if not outgoing.missing:
        # nothing to push
        scmutil.nochangesfound(unfi.ui, unfi, outgoing.excluded)
        return False
    # something to push
    if not pushop.force:
        # if repo.obsstore == False --> no obsolete
        # then, save the iteration
        if unfi.obsstore:
            # these messages live in locals to keep lines under the
            # 80-char limit
            mso = _("push includes obsolete changeset: %s!")
            mspd = _("push includes phase-divergent changeset: %s!")
            mscd = _("push includes content-divergent changeset: %s!")
            mst = {"orphan": _("push includes orphan changeset: %s!"),
                   "phase-divergent": mspd,
                   "content-divergent": mscd}
            # If we are going to push and there is at least one obsolete or
            # unstable changeset in missing, then at least one of the
            # missing heads will be obsolete or unstable. So checking heads
            # only is ok.
            for node in outgoing.missingheads:
                ctx = unfi[node]
                if ctx.obsolete():
                    raise error.Abort(mso % ctx)
                elif ctx.isunstable():
                    # TODO print more than one instability in the abort
                    # message
                    raise error.Abort(mst[ctx.instabilities()[0]] % ctx)

    discovery.checkheads(pushop)
    return True

# List of names of steps to perform for an outgoing bundle2, order matters.
b2partsgenorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
b2partsgenmapping = {}

def b2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a
    step from an extension, change the b2partsgenmapping dictionary
    directly."""
    def dec(func):
        assert stepname not in b2partsgenmapping
        b2partsgenmapping[stepname] = func
        if idx is None:
            b2partsgenorder.append(stepname)
        else:
            b2partsgenorder.insert(idx, stepname)
        return func
    return dec

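# The optional `idx` argument controls where a step lands in b2partsgenorder:
# None appends, a number inserts at that position. The in-tree 'pushvars'
# part below is registered with idx=0 so its part is generated first. A
# hedged sketch of an out-of-tree registration (names hypothetical):
#
#     @b2partsgenerator('my-part', idx=1)
#     def _pushb2mypart(pushop, bundler):
#         bundler.newpart('my-part', data=b'payload')
#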
def _pushb2ctxcheckheads(pushop, bundler):
    """Generate race condition checking parts

    Exists as an independent function to aid extensions
    """
    # * 'force' does not check for push races,
    # * if we don't push anything, there is nothing to check.
    if not pushop.force and pushop.outgoing.missingheads:
        allowunrelated = 'related' in bundler.capabilities.get('checkheads', ())
        emptyremote = pushop.pushbranchmap is None
        if not allowunrelated or emptyremote:
            bundler.newpart('check:heads', data=iter(pushop.remoteheads))
        else:
            affected = set()
            for branch, heads in pushop.pushbranchmap.iteritems():
                remoteheads, newheads, unsyncedheads, discardedheads = heads
                if remoteheads is not None:
                    remote = set(remoteheads)
                    affected |= set(discardedheads) & remote
                    affected |= remote - set(newheads)
            if affected:
                data = iter(sorted(affected))
                bundler.newpart('check:updated-heads', data=data)

def _pushing(pushop):
    """return True if we are pushing anything"""
    return bool(pushop.outgoing.missing
                or pushop.outdatedphases
                or pushop.outobsmarkers
                or pushop.outbookmarks)

@b2partsgenerator('check-bookmarks')
def _pushb2checkbookmarks(pushop, bundler):
    """insert bookmark move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasbookmarkcheck = 'bookmarks' in b2caps
    if not (pushop.outbookmarks and hasbookmarkcheck):
        return
    data = []
    for book, old, new in pushop.outbookmarks:
        old = bin(old)
        data.append((book, old))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('check:bookmarks', data=checkdata)

@b2partsgenerator('check-phases')
def _pushb2checkphases(pushop, bundler):
    """insert phase move checking"""
    if not _pushing(pushop) or pushop.force:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    hasphaseheads = 'heads' in b2caps.get('phases', ())
    if pushop.remotephases is not None and hasphaseheads:
        # check that the remote phase has not changed
        checks = [[] for p in phases.allphases]
        checks[phases.public].extend(pushop.remotephases.publicheads)
        checks[phases.draft].extend(pushop.remotephases.draftroots)
        if any(checks):
            for nodes in checks:
                nodes.sort()
            checkdata = phases.binaryencode(checks)
            bundler.newpart('check:phases', data=checkdata)

@b2partsgenerator('changeset')
def _pushb2ctx(pushop, bundler):
    """handle changegroup push through bundle2

    addchangegroup result is stored in the ``pushop.cgresult`` attribute.
    """
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    # Send known heads to the server for race detection.
    if not _pushcheckoutgoing(pushop):
        return
    pushop.repo.prepushoutgoinghooks(pushop)

    _pushb2ctxcheckheads(pushop, bundler)

    b2caps = bundle2.bundle2caps(pushop.remote)
    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(
                          pushop.repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)
    cgstream = changegroup.makestream(pushop.repo, pushop.outgoing, version,
                                      'push')
    cgpart = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        cgpart.addparam('version', version)
    if 'treemanifest' in pushop.repo.requirements:
        cgpart.addparam('treemanifest', '1')
    def handlereply(op):
        """extract addchangegroup returns from server reply"""
        cgreplies = op.records.getreplies(cgpart.id)
        assert len(cgreplies['changegroup']) == 1
        pushop.cgresult = cgreplies['changegroup'][0]['return']
    return handlereply

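# Part generators such as _pushb2ctx may return a callable: _pushbundle2
# below collects those into `replyhandlers` and invokes each one with the
# processed bundle2 reply, which is how per-part results (cgresult here)
# are extracted after the round-trip.
#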
@b2partsgenerator('phase')
def _pushb2phases(pushop, bundler):
    """handle phase push through bundle2"""
    if 'phases' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)
    ui = pushop.repo.ui

    legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
    haspushkey = 'pushkey' in b2caps
    hasphaseheads = 'heads' in b2caps.get('phases', ())

    if hasphaseheads and not legacyphase:
        return _pushb2phaseheads(pushop, bundler)
    elif haspushkey:
        return _pushb2phasespushkey(pushop, bundler)

def _pushb2phaseheads(pushop, bundler):
    """push phase information through a bundle2 - binary part"""
    pushop.stepsdone.add('phases')
    if pushop.outdatedphases:
        updates = [[] for p in phases.allphases]
        updates[0].extend(h.node() for h in pushop.outdatedphases)
        phasedata = phases.binaryencode(updates)
        bundler.newpart('phase-heads', data=phasedata)

def _pushb2phasespushkey(pushop, bundler):
    """push phase information through a bundle2 - pushkey part"""
    pushop.stepsdone.add('phases')
    part2node = []

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, node in part2node:
            if partid == targetid:
                raise error.Abort(_('updating %s to public failed') % node)

    enc = pushkey.encode
    for newremotehead in pushop.outdatedphases:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('phases'))
        part.addparam('key', enc(newremotehead.hex()))
        part.addparam('old', enc('%d' % phases.draft))
        part.addparam('new', enc('%d' % phases.public))
        part2node.append((part.id, newremotehead))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        for partid, node in part2node:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            msg = None
            if not results:
                msg = _('server ignored update of %s to public!\n') % node
            elif not int(results[0]['return']):
                msg = _('updating %s to public failed!\n') % node
            if msg is not None:
                pushop.ui.warn(msg)
    return handlereply

@b2partsgenerator('obsmarkers')
def _pushb2obsmarkers(pushop, bundler):
    if 'obsmarkers' in pushop.stepsdone:
        return
    remoteversions = bundle2.obsmarkersversion(bundler.capabilities)
    if obsolete.commonversion(remoteversions) is None:
        return
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        markers = sorted(pushop.outobsmarkers)
        bundle2.buildobsmarkerspart(bundler, markers)

@b2partsgenerator('bookmarks')
def _pushb2bookmarks(pushop, bundler):
    """handle bookmark push through bundle2"""
    if 'bookmarks' in pushop.stepsdone:
        return
    b2caps = bundle2.bundle2caps(pushop.remote)

    legacy = pushop.repo.ui.configlist('devel', 'legacy.exchange')
    legacybooks = 'bookmarks' in legacy

    if not legacybooks and 'bookmarks' in b2caps:
        return _pushb2bookmarkspart(pushop, bundler)
    elif 'pushkey' in b2caps:
        return _pushb2bookmarkspushkey(pushop, bundler)

def _bmaction(old, new):
    """small utility for bookmark pushing"""
    if not old:
        return 'export'
    elif not new:
        return 'delete'
    return 'update'

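# For reference, the mapping is: no old node -> 'export' (create the
# bookmark on the remote), no new node -> 'delete', both present ->
# 'update'. The returned strings index into bookmsgmap (defined earlier in
# this module) when reporting results.
#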
def _pushb2bookmarkspart(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    if not pushop.outbookmarks:
        return

    allactions = []
    data = []
    for book, old, new in pushop.outbookmarks:
        new = bin(new)
        data.append((book, new))
        allactions.append((book, _bmaction(old, new)))
    checkdata = bookmod.binaryencode(data)
    bundler.newpart('bookmarks', data=checkdata)

    def handlereply(op):
        ui = pushop.ui
        # if success
        for book, action in allactions:
            ui.status(bookmsgmap[action][0] % book)

    return handlereply

def _pushb2bookmarkspushkey(pushop, bundler):
    pushop.stepsdone.add('bookmarks')
    part2book = []
    enc = pushkey.encode

    def handlefailure(pushop, exc):
        targetid = int(exc.partid)
        for partid, book, action in part2book:
            if partid == targetid:
                raise error.Abort(bookmsgmap[action][1].rstrip() % book)
        # we should not be called for parts we did not generate
        assert False

    for book, old, new in pushop.outbookmarks:
        part = bundler.newpart('pushkey')
        part.addparam('namespace', enc('bookmarks'))
        part.addparam('key', enc(book))
        part.addparam('old', enc(old))
        part.addparam('new', enc(new))
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'
        part2book.append((part.id, book, action))
        pushop.pkfailcb[part.id] = handlefailure

    def handlereply(op):
        ui = pushop.ui
        for partid, book, action in part2book:
            partrep = op.records.getreplies(partid)
            results = partrep['pushkey']
            assert len(results) <= 1
            if not results:
                pushop.ui.warn(_('server ignored bookmark %s update\n') % book)
            else:
                ret = int(results[0]['return'])
                if ret:
                    ui.status(bookmsgmap[action][0] % book)
                else:
                    ui.warn(bookmsgmap[action][1] % book)
        if pushop.bkresult is not None:
            pushop.bkresult = 1
    return handlereply

@b2partsgenerator('pushvars', idx=0)
def _getbundlesendvars(pushop, bundler):
    '''send shellvars via bundle2'''
    pushvars = pushop.pushvars
    if pushvars:
        shellvars = {}
        for raw in pushvars:
            if '=' not in raw:
                msg = ("unable to parse variable '%s', should follow "
                       "'KEY=VALUE' or 'KEY=' format")
                raise error.Abort(msg % raw)
            k, v = raw.split('=', 1)
            shellvars[k] = v

        part = bundler.newpart('pushvars')

        for key, value in shellvars.iteritems():
            part.addparam(key, value, mandatory=False)

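# Client-side, these variables come from `hg push --pushvars KEY=VALUE`
# (an empty value via 'KEY=' is allowed, as parsed above); server-side
# hooks typically see them as HG_USERVAR_<KEY> in their environment. The
# exact hook-side spelling is recalled from Mercurial's pushvars feature
# documentation, not from this file.
#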
def _pushbundle2(pushop):
    """push data to the remote using bundle2

    The only currently supported type of data is changegroup but this will
    evolve in the future."""
    bundler = bundle2.bundle20(pushop.ui, bundle2.bundle2caps(pushop.remote))
    pushback = (pushop.trmanager
                and pushop.ui.configbool('experimental', 'bundle2.pushback'))

    # create reply capability
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(pushop.repo,
                                                      allowpushback=pushback,
                                                      role='client'))
    bundler.newpart('replycaps', data=capsblob)
    replyhandlers = []
    for partgenname in b2partsgenorder:
        partgen = b2partsgenmapping[partgenname]
        ret = partgen(pushop, bundler)
        if callable(ret):
            replyhandlers.append(ret)
    # do not push if nothing to push
    if bundler.nbparts <= 1:
        return
    stream = util.chunkbuffer(bundler.getchunks())
    try:
        try:
            with pushop.remote.commandexecutor() as e:
                reply = e.callcommand('unbundle', {
                    'bundle': stream,
                    'heads': ['force'],
                    'url': pushop.remote.url(),
                }).result()
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        try:
            trgetter = None
            if pushback:
                trgetter = pushop.trmanager.transaction
            op = bundle2.processbundle(pushop.repo, reply, trgetter)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)
        except bundle2.AbortFromPart as exc:
            pushop.ui.status(_('remote: %s\n') % exc)
            if exc.hint is not None:
                pushop.ui.status(_('remote: %s\n') % ('(%s)' % exc.hint))
            raise error.Abort(_('push failed on remote'))
    except error.PushkeyFailed as exc:
        partid = int(exc.partid)
        if partid not in pushop.pkfailcb:
            raise
        pushop.pkfailcb[partid](pushop, exc)
    for rephand in replyhandlers:
        rephand(op)

def _pushchangeset(pushop):
    """Make the actual push of changeset bundle to remote repo"""
    if 'changesets' in pushop.stepsdone:
        return
    pushop.stepsdone.add('changesets')
    if not _pushcheckoutgoing(pushop):
        return

    # Should have verified this in push().
    assert pushop.remote.capable('unbundle')

    pushop.repo.prepushoutgoinghooks(pushop)
    outgoing = pushop.outgoing
    # TODO: get bundlecaps from remote
    bundlecaps = None
    # create a changegroup from local
    if pushop.revs is None and not (outgoing.excluded
                                    or pushop.repo.changelog.filteredrevs):
        # push everything,
        # use the fast path, no race possible on push
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01', 'push',
                                         fastpath=True, bundlecaps=bundlecaps)
    else:
        cg = changegroup.makechangegroup(pushop.repo, outgoing, '01',
                                         'push', bundlecaps=bundlecaps)

    # apply changegroup to remote
    # local repo finds heads on server, finds out what
    # revs it must push. once revs transferred, if server
    # finds it has different heads (someone else won
    # commit/push race), server aborts.
    if pushop.force:
        remoteheads = ['force']
    else:
        remoteheads = pushop.remoteheads
    # ssh: return remote's addchangegroup()
    # http: return remote's addchangegroup() or 0 for error
    pushop.cgresult = pushop.remote.unbundle(cg, remoteheads,
                                             pushop.repo.url())

def _pushsyncphase(pushop):
    """synchronise phase information locally and remotely"""
    cheads = pushop.commonheads
    # even when we don't push, exchanging phase data is useful
    remotephases = listkeys(pushop.remote, 'phases')
    if (pushop.ui.configbool('ui', '_usedassubrepo')
        and remotephases    # server supports phases
        and pushop.cgresult is None # nothing was pushed
        and remotephases.get('publishing', False)):
        # When:
        # - this is a subrepo push
        # - and the remote supports phases
        # - and no changeset was pushed
        # - and the remote is publishing
        # We may be in issue 3871 case!
        # We drop the phase synchronisation that would otherwise, as a
        # courtesy, publish changesets that are possibly still draft
        # on the remote.
        remotephases = {'publishing': 'True'}
    if not remotephases: # old server or public-only reply from non-publishing
        _localphasemove(pushop, cheads)
        # don't push any phase data as there is nothing to push
    else:
        ana = phases.analyzeremotephases(pushop.repo, cheads,
                                         remotephases)
        pheads, droots = ana
        ### Apply remote phase on local
        if remotephases.get('publishing', False):
            _localphasemove(pushop, cheads)
        else: # publish = False
            _localphasemove(pushop, pheads)
            _localphasemove(pushop, cheads, phases.draft)
        ### Apply local phase on remote

        if pushop.cgresult:
            if 'phases' in pushop.stepsdone:
                # phases already pushed through bundle2
                return
            outdated = pushop.outdatedphases
        else:
            outdated = pushop.fallbackoutdatedphases

        pushop.stepsdone.add('phases')

        # filter heads already turned public by the push
        outdated = [c for c in outdated if c.node() not in pheads]
        # fallback to independent pushkey command
        for newremotehead in outdated:
            with pushop.remote.commandexecutor() as e:
                r = e.callcommand('pushkey', {
                    'namespace': 'phases',
                    'key': newremotehead.hex(),
                    'old': '%d' % phases.draft,
                    'new': '%d' % phases.public
                }).result()

            if not r:
                pushop.ui.warn(_('updating %s to public failed!\n')
                               % newremotehead)

def _localphasemove(pushop, nodes, phase=phases.public):
    """move <nodes> to <phase> in the local source repo"""
    if pushop.trmanager:
        phases.advanceboundary(pushop.repo,
                               pushop.trmanager.transaction(),
                               phase,
                               nodes)
    else:
        # repo is not locked, do not change any phases!
        # Inform the user that phases should have been moved when
        # applicable.
        actualmoves = [n for n in nodes if phase < pushop.repo[n].phase()]
        phasestr = phases.phasenames[phase]
        if actualmoves:
            pushop.ui.status(_('cannot lock source repo, skipping '
                               'local %s phase update\n') % phasestr)

def _pushobsolete(pushop):
    """utility function to push obsolete markers to a remote"""
    if 'obsmarkers' in pushop.stepsdone:
        return
    repo = pushop.repo
    remote = pushop.remote
    pushop.stepsdone.add('obsmarkers')
    if pushop.outobsmarkers:
        pushop.ui.debug('try to push obsolete markers to remote\n')
        rslts = []
        remotedata = obsolete._pushkeyescape(sorted(pushop.outobsmarkers))
        for key in sorted(remotedata, reverse=True):
            # reverse sort to ensure we end with dump0
            data = remotedata[key]
            rslts.append(remote.pushkey('obsolete', key, '', data))
        if [r for r in rslts if not r]:
            msg = _('failed to push some obsolete markers!\n')
            repo.ui.warn(msg)

def _pushbookmark(pushop):
    """Update bookmark position on remote"""
    if pushop.cgresult == 0 or 'bookmarks' in pushop.stepsdone:
        return
    pushop.stepsdone.add('bookmarks')
    ui = pushop.ui
    remote = pushop.remote

    for b, old, new in pushop.outbookmarks:
        action = 'update'
        if not old:
            action = 'export'
        elif not new:
            action = 'delete'

        with remote.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': 'bookmarks',
                'key': b,
                'old': old,
                'new': new,
            }).result()

        if r:
            ui.status(bookmsgmap[action][0] % b)
        else:
            ui.warn(bookmsgmap[action][1] % b)
            # discovery may have set the value from an invalid entry
            if pushop.bkresult is not None:
                pushop.bkresult = 1

class pulloperation(object):
    """An object that represents a single pull operation

    Its purpose is to carry pull-related state and very common operations.

    A new one should be created at the beginning of each pull and discarded
    afterward.
    """

    def __init__(self, repo, remote, heads=None, force=False, bookmarks=(),
                 remotebookmarks=None, streamclonerequested=None):
        # repo we pull into
        self.repo = repo
        # repo we pull from
        self.remote = remote
        # revisions we try to pull (None means "all")
        self.heads = heads
        # bookmarks pulled explicitly
        self.explicitbookmarks = [repo._bookmarks.expandname(bookmark)
                                  for bookmark in bookmarks]
        # do we force pull?
        self.force = force
        # whether a streaming clone was requested
        self.streamclonerequested = streamclonerequested
        # transaction manager
        self.trmanager = None
        # set of common changesets between local and remote before pull
        self.common = None
        # set of pulled heads
        self.rheads = None
        # list of missing changesets to fetch remotely
        self.fetch = None
        # remote bookmarks data
        self.remotebookmarks = remotebookmarks
        # result of changegroup pulling (used as return code by pull)
        self.cgresult = None
        # list of steps already done
        self.stepsdone = set()
        # Whether we attempted a clone from pre-generated bundles.
        self.clonebundleattempted = False

    @util.propertycache
    def pulledsubset(self):
        """heads of the set of changesets targeted by the pull"""
        # compute target subset
        if self.heads is None:
            # We pulled everything possible
            # sync on everything common
            c = set(self.common)
            ret = list(self.common)
            for n in self.rheads:
                if n not in c:
                    ret.append(n)
            return ret
        else:
            # We pulled a specific subset
            # sync on this subset
            return self.heads

    @util.propertycache
    def canusebundle2(self):
        return not _forcebundle1(self)

    @util.propertycache
    def remotebundle2caps(self):
        return bundle2.bundle2caps(self.remote)

    def gettransaction(self):
        # deprecated; talk to trmanager directly
        return self.trmanager.transaction()

class transactionmanager(util.transactional):
    """An object to manage the life cycle of a transaction

    It creates the transaction on demand and calls the appropriate hooks when
    closing the transaction."""
    def __init__(self, repo, source, url):
        self.repo = repo
        self.source = source
        self.url = url
        self._tr = None

    def transaction(self):
        """Return an open transaction object, constructing if necessary"""
        if not self._tr:
            trname = '%s\n%s' % (self.source, util.hidepassword(self.url))
            self._tr = self.repo.transaction(trname)
            self._tr.hookargs['source'] = self.source
            self._tr.hookargs['url'] = self.url
        return self._tr

    def close(self):
        """close transaction if created"""
        if self._tr is not None:
            self._tr.close()

    def release(self):
        """release transaction if created"""
        if self._tr is not None:
            self._tr.release()

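# Because transactionmanager derives from util.transactional, it works as a
# context manager: the transaction is closed on success and released on the
# way out (rolling back if it was never closed), which is how pull() below
# drives it. A minimal sketch (the repo and URL are illustrative):
#
#     trmanager = transactionmanager(repo, 'pull', 'https://example.com/repo')
#     with trmanager:
#         tr = trmanager.transaction()  # opened lazily, on first use
#         ...  # apply changegroups, phases, bookmarks under this transaction
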
def listkeys(remote, namespace):
    with remote.commandexecutor() as e:
        return e.callcommand('listkeys', {'namespace': namespace}).result()

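# The wire protocol answers with a plain dict of strings which callers then
# decode. A sketch for the bookmarks namespace (name and node illustrative):
#
#     books = listkeys(remote, 'bookmarks')
#     # -> {'@': '0f1c2d...'}  (bookmark name -> hex node)
#     marks = bookmod.unhexlifybookmarks(books)
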
def _fullpullbundle2(repo, pullop):
    # The server may send a partial reply, i.e. when inlining
    # pre-computed bundles. In that case, update the common
    # set based on the results and pull another bundle.
    #
    # There are two indicators that the process is finished:
    # - no changeset has been added, or
    # - all remote heads are known locally.
    # The head check must use the unfiltered view as obsolescence
    # markers can hide heads.
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    def headsofdiff(h1, h2):
        """Returns heads(h1 % h2)"""
        res = unfi.set('heads(%ln %% %ln)', h1, h2)
        return set(ctx.node() for ctx in res)
    def headsofunion(h1, h2):
        """Returns heads((h1 + h2) - null)"""
        res = unfi.set('heads((%ln + %ln - null))', h1, h2)
        return set(ctx.node() for ctx in res)
    while True:
        old_heads = unficl.heads()
        clstart = len(unficl)
        _pullbundle2(pullop)
        if repository.NARROW_REQUIREMENT in repo.requirements:
            # XXX narrow clones filter the heads on the server side during
            # XXX getbundle and result in partial replies as well.
            # XXX Disable pull bundles in this case as a band-aid to avoid
            # XXX extra round trips.
            break
        if clstart == len(unficl):
            break
        if all(unficl.hasnode(n) for n in pullop.rheads):
            break
        new_heads = headsofdiff(unficl.heads(), old_heads)
        pullop.common = headsofunion(new_heads, pullop.common)
        pullop.rheads = set(pullop.rheads) - pullop.common

def pull(repo, remote, heads=None, force=False, bookmarks=(), opargs=None,
         streamclonerequested=None):
    """Fetch repository data from a remote.

    This is the main function used to retrieve data from a remote repository.

    ``repo`` is the local repository to clone into.
    ``remote`` is a peer instance.
    ``heads`` is an iterable of revisions we want to pull. ``None`` (the
    default) means to pull everything from the remote.
    ``bookmarks`` is an iterable of bookmarks requested to be pulled. By
    default, all remote bookmarks are pulled.
    ``opargs`` are additional keyword arguments to pass to ``pulloperation``
    initialization.
    ``streamclonerequested`` is a boolean indicating whether a "streaming
    clone" is requested. A "streaming clone" is essentially a raw file copy
    of revlogs from the server. This only works when the local repository is
    empty. The default value of ``None`` means to respect the server
    configuration for preferring stream clones.

    Returns the ``pulloperation`` created for this pull.
    """
    if opargs is None:
        opargs = {}
    pullop = pulloperation(repo, remote, heads, force, bookmarks=bookmarks,
                           streamclonerequested=streamclonerequested,
                           **pycompat.strkwargs(opargs))

    peerlocal = pullop.remote.local()
    if peerlocal:
        missing = set(peerlocal.requirements) - pullop.repo.supported
        if missing:
            msg = _("required features are not"
                    " supported in the destination:"
                    " %s") % (', '.join(sorted(missing)))
            raise error.Abort(msg)

    pullop.trmanager = transactionmanager(repo, 'pull', remote.url())
    with repo.wlock(), repo.lock(), pullop.trmanager:
        # This should ideally be in _pullbundle2(). However, it needs to run
        # before discovery to avoid extra work.
        _maybeapplyclonebundle(pullop)
        streamclone.maybeperformlegacystreamclone(pullop)
        _pulldiscovery(pullop)
        if pullop.canusebundle2:
            _fullpullbundle2(repo, pullop)
        _pullchangeset(pullop)
        _pullphase(pullop)
        _pullbookmarks(pullop)
        _pullobsolete(pullop)

        # storing remotenames
        if repo.ui.configbool('experimental', 'remotenames'):
            logexchange.pullremotenames(repo, remote)

    return pullop

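# A minimal sketch of driving a pull programmatically (paths and URL are
# illustrative, not part of this module):
#
#     from mercurial import exchange, hg, ui as uimod
#
#     repo = hg.repository(uimod.ui.load(), '/path/to/local/repo')
#     peer = hg.peer(repo.ui, {}, 'https://example.com/remote/repo')
#     pullop = exchange.pull(repo, peer, heads=None)  # None pulls everything
#     if pullop.cgresult:
#         repo.ui.status('changegroup applied\n')
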
# list of steps to perform discovery before pull
pulldiscoveryorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
pulldiscoverymapping = {}

def pulldiscovery(stepname):
    """decorator for a function performing discovery before pull

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order (this
    may matter).

    You can only use this decorator for a new step; if you want to wrap a step
    from an extension, change the pulldiscoverymapping dictionary directly."""
    def dec(func):
        assert stepname not in pulldiscoverymapping
        pulldiscoverymapping[stepname] = func
        pulldiscoveryorder.append(stepname)
        return func
    return dec

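# An extension could register an extra discovery step like this (the step
# name and function are hypothetical):
#
#     @pulldiscovery('mydata')
#     def _pulldiscoverymydata(pullop):
#         pullop.repo.ui.debug('discovering my extra data\n')
#
# Wrapping an existing step instead means editing the mapping directly:
#
#     origstep = pulldiscoverymapping['changegroup']
#     def wrapped(pullop):
#         origstep(pullop)  # plus extension-specific work
#     pulldiscoverymapping['changegroup'] = wrapped
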
def _pulldiscovery(pullop):
    """Run all discovery steps"""
    for stepname in pulldiscoveryorder:
        step = pulldiscoverymapping[stepname]
        step(pullop)

@pulldiscovery('b1:bookmarks')
def _pullbookmarkbundle1(pullop):
    """fetch bookmark data in the bundle1 case

    If not using bundle2, we have to fetch bookmarks before changeset
    discovery to reduce the chance and impact of race conditions."""
    if pullop.remotebookmarks is not None:
        return
    if pullop.canusebundle2 and 'listkeys' in pullop.remotebundle2caps:
        # all known bundle2 servers now support listkeys, but let's be nice
        # with new implementations.
        return
    books = listkeys(pullop.remote, 'bookmarks')
    pullop.remotebookmarks = bookmod.unhexlifybookmarks(books)


@pulldiscovery('changegroup')
def _pulldiscoverychangegroup(pullop):
    """discovery phase for the pull

    Currently handles changeset discovery only; will be changed to handle all
    discovery at some point."""
    tmp = discovery.findcommonincoming(pullop.repo,
                                       pullop.remote,
                                       heads=pullop.heads,
                                       force=pullop.force)
    common, fetch, rheads = tmp
    nm = pullop.repo.unfiltered().changelog.nodemap
    if fetch and rheads:
        # If a remote head is filtered locally, put it back in common.
        #
        # This is a hackish solution to catch most of the "common but locally
        # hidden" situations. We do not perform discovery on the unfiltered
        # repository because it would end up doing a pathological number of
        # round trips for a huge number of changesets we do not care about.
        #
        # If a set of such "common but filtered" changesets exists on the
        # server but does not include a remote head, we will not be able to
        # detect it.
        scommon = set(common)
        for n in rheads:
            if n in nm:
                if n not in scommon:
                    common.append(n)
        if set(rheads).issubset(set(common)):
            fetch = []
    pullop.common = common
    pullop.fetch = fetch
    pullop.rheads = rheads

def _pullbundle2(pullop):
    """pull data using bundle2

    For now, the only supported data are changegroup."""
    kwargs = {'bundlecaps': caps20to10(pullop.repo, role='client')}

    # make ui easier to access
    ui = pullop.repo.ui

    # At the moment we don't do stream clones over bundle2. If that is
    # implemented then here's where the check for that will go.
    streaming = streamclone.canperformstreamclone(pullop, bundle2=True)[0]

    # declare pull perimeters
    kwargs['common'] = pullop.common
    kwargs['heads'] = pullop.heads or pullop.rheads

    if streaming:
        kwargs['cg'] = False
        kwargs['stream'] = True
        pullop.stepsdone.add('changegroup')
        pullop.stepsdone.add('phases')

    else:
        # pulling changegroup
        pullop.stepsdone.add('changegroup')

        kwargs['cg'] = pullop.fetch

        legacyphase = 'phases' in ui.configlist('devel', 'legacy.exchange')
        hasbinaryphase = 'heads' in pullop.remotebundle2caps.get('phases', ())
        if (not legacyphase and hasbinaryphase):
            kwargs['phases'] = True
            pullop.stepsdone.add('phases')

        if 'listkeys' in pullop.remotebundle2caps:
            if 'phases' not in pullop.stepsdone:
                kwargs['listkeys'] = ['phases']

    bookmarksrequested = False
    legacybookmark = 'bookmarks' in ui.configlist('devel', 'legacy.exchange')
    hasbinarybook = 'bookmarks' in pullop.remotebundle2caps

    if pullop.remotebookmarks is not None:
        pullop.stepsdone.add('request-bookmarks')

    if ('request-bookmarks' not in pullop.stepsdone
        and pullop.remotebookmarks is None
        and not legacybookmark and hasbinarybook):
        kwargs['bookmarks'] = True
        bookmarksrequested = True

    if 'listkeys' in pullop.remotebundle2caps:
        if 'request-bookmarks' not in pullop.stepsdone:
            # make sure to always include bookmark data when migrating
            # `hg incoming --bundle` to using this function.
            pullop.stepsdone.add('request-bookmarks')
            kwargs.setdefault('listkeys', []).append('bookmarks')

    # If this is a full pull / clone and the server supports the clone bundles
    # feature, tell the server whether we attempted a clone bundle. The
    # presence of this flag indicates the client supports clone bundles. This
    # will enable the server to treat clients that support clone bundles
    # differently from those that don't.
    if (pullop.remote.capable('clonebundles')
        and pullop.heads is None and list(pullop.common) == [nullid]):
        kwargs['cbattempted'] = pullop.clonebundleattempted

    if streaming:
        pullop.repo.ui.status(_('streaming all changes\n'))
    elif not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
    else:
        if pullop.heads is None and list(pullop.common) == [nullid]:
            pullop.repo.ui.status(_("requesting all changes\n"))
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        remoteversions = bundle2.obsmarkersversion(pullop.remotebundle2caps)
        if obsolete.commonversion(remoteversions) is not None:
            kwargs['obsmarkers'] = True
            pullop.stepsdone.add('obsmarkers')
    _pullbundle2extraprepare(pullop, kwargs)

    with pullop.remote.commandexecutor() as e:
        args = dict(kwargs)
        args['source'] = 'pull'
        bundle = e.callcommand('getbundle', args).result()

        try:
            op = bundle2.bundleoperation(pullop.repo, pullop.gettransaction,
                                         source='pull')
            op.modes['bookmarks'] = 'records'
            bundle2.processbundle(pullop.repo, bundle, op=op)
        except bundle2.AbortFromPart as exc:
            pullop.repo.ui.status(_('remote: abort: %s\n') % exc)
            raise error.Abort(_('pull failed on remote'), hint=exc.hint)
        except error.BundleValueError as exc:
            raise error.Abort(_('missing support for %s') % exc)

    if pullop.fetch:
        pullop.cgresult = bundle2.combinechangegroupresults(op)

    # processing phases change
    for namespace, value in op.records['listkeys']:
        if namespace == 'phases':
            _pullapplyphases(pullop, value)

    # processing bookmark update
    if bookmarksrequested:
        books = {}
        for record in op.records['bookmarks']:
            books[record['bookmark']] = record["node"]
        pullop.remotebookmarks = books
    else:
        for namespace, value in op.records['listkeys']:
            if namespace == 'bookmarks':
                pullop.remotebookmarks = bookmod.unhexlifybookmarks(value)

    # bookmark data were either already there or pulled in the bundle
    if pullop.remotebookmarks is not None:
        _pullbookmarks(pullop)

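# For a plain `hg pull` from a bundle2-capable server, the getbundle arguments
# assembled above end up looking roughly like this (values illustrative):
#
#     {'bundlecaps': {'HG20', 'bundle2=<urlquoted caps blob>'},
#      'common': [<node>, ...], 'heads': [<node>, ...],
#      'cg': True, 'phases': True, 'listkeys': ['bookmarks'],
#      'source': 'pull'}
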
def _pullbundle2extraprepare(pullop, kwargs):
    """hook function so that extensions can extend the getbundle call"""

def _pullchangeset(pullop):
    """pull changesets from unbundle into the local repo"""
    # We delay opening the transaction as long as possible so we don't open
    # a transaction for nothing and don't break future useful rollback calls.
    if 'changegroup' in pullop.stepsdone:
        return
    pullop.stepsdone.add('changegroup')
    if not pullop.fetch:
        pullop.repo.ui.status(_("no changes found\n"))
        pullop.cgresult = 0
        return
    tr = pullop.gettransaction()
    if pullop.heads is None and list(pullop.common) == [nullid]:
        pullop.repo.ui.status(_("requesting all changes\n"))
    elif pullop.heads is None and pullop.remote.capable('changegroupsubset'):
        # issue1320, avoid a race if remote changed after discovery
        pullop.heads = pullop.rheads

    if pullop.remote.capable('getbundle'):
        # TODO: get bundlecaps from remote
        cg = pullop.remote.getbundle('pull', common=pullop.common,
                                     heads=pullop.heads or pullop.rheads)
    elif pullop.heads is None:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroup', {
                'nodes': pullop.fetch,
                'source': 'pull',
            }).result()

    elif not pullop.remote.capable('changegroupsubset'):
        raise error.Abort(_("partial pull cannot be done because "
                            "other repository doesn't support "
                            "changegroupsubset."))
    else:
        with pullop.remote.commandexecutor() as e:
            cg = e.callcommand('changegroupsubset', {
                'bases': pullop.fetch,
                'heads': pullop.heads,
                'source': 'pull',
            }).result()

    bundleop = bundle2.applybundle(pullop.repo, cg, tr, 'pull',
                                   pullop.remote.url())
    pullop.cgresult = bundle2.combinechangegroupresults(bundleop)

def _pullphase(pullop):
    # Get remote phases data from remote
    if 'phases' in pullop.stepsdone:
        return
    remotephases = listkeys(pullop.remote, 'phases')
    _pullapplyphases(pullop, remotephases)

def _pullapplyphases(pullop, remotephases):
    """apply phase movement from observed remote state"""
    if 'phases' in pullop.stepsdone:
        return
    pullop.stepsdone.add('phases')
    publishing = bool(remotephases.get('publishing', False))
    if remotephases and not publishing:
        # remote is new and non-publishing
        pheads, _dr = phases.analyzeremotephases(pullop.repo,
                                                 pullop.pulledsubset,
                                                 remotephases)
        dheads = pullop.pulledsubset
    else:
        # Remote is old or publishing; all common changesets
        # should be seen as public
        pheads = pullop.pulledsubset
        dheads = []
    unfi = pullop.repo.unfiltered()
    phase = unfi._phasecache.phase
    rev = unfi.changelog.nodemap.get
    public = phases.public
    draft = phases.draft

    # exclude changesets already public locally and update the others
    pheads = [pn for pn in pheads if phase(unfi, rev(pn)) > public]
    if pheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, public, pheads)

    # exclude changesets already draft locally and update the others
    dheads = [pn for pn in dheads if phase(unfi, rev(pn)) > draft]
    if dheads:
        tr = pullop.gettransaction()
        phases.advanceboundary(pullop.repo, tr, draft, dheads)

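# The ``remotephases`` dict comes from the phases pushkey namespace. A sketch
# of the two shapes handled above (hex node illustrative): a publishing
# server typically answers
#
#     {'publishing': 'True'}
#
# so every pulled head is advanced to public, while a non-publishing server
# lists its draft roots instead, e.g.
#
#     {'d0c79e1d3309...': '1'}
#
# which analyzeremotephases() turns into public and draft boundary heads.
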
def _pullbookmarks(pullop):
    """process the remote bookmark information to update the local one"""
    if 'bookmarks' in pullop.stepsdone:
        return
    pullop.stepsdone.add('bookmarks')
    repo = pullop.repo
    remotebookmarks = pullop.remotebookmarks
    bookmod.updatefromremote(repo.ui, repo, remotebookmarks,
                             pullop.remote.url(),
                             pullop.gettransaction,
                             explicit=pullop.explicitbookmarks)

def _pullobsolete(pullop):
    """utility function to pull obsolete markers from a remote

    `gettransaction` is a function that returns the pull transaction, creating
    one if necessary. We return the transaction to inform the calling code that
    a new transaction has been created (when applicable).

    Exists mostly to allow overriding for experimentation purposes"""
    if 'obsmarkers' in pullop.stepsdone:
        return
    pullop.stepsdone.add('obsmarkers')
    tr = None
    if obsolete.isenabled(pullop.repo, obsolete.exchangeopt):
        pullop.repo.ui.debug('fetching remote obsolete markers\n')
        remoteobs = listkeys(pullop.remote, 'obsolete')
        if 'dump0' in remoteobs:
            tr = pullop.gettransaction()
            markers = []
            for key in sorted(remoteobs, reverse=True):
                if key.startswith('dump'):
                    data = util.b85decode(remoteobs[key])
                    version, newmarks = obsolete._readmarkers(data)
                    markers += newmarks
            if markers:
                pullop.repo.obsstore.add(tr, markers)
            pullop.repo.invalidatevolatilesets()
    return tr

def applynarrowacl(repo, kwargs):
    """Apply narrow fetch access control.

    This massages the named arguments for getbundle wire protocol commands
    so requested data is filtered through access control rules.
    """
    ui = repo.ui
    # TODO this assumes existence of HTTP and is a layering violation.
    username = ui.shortuser(ui.environ.get('REMOTE_USER') or ui.username())
    user_includes = ui.configlist(
        _NARROWACL_SECTION, username + '.includes',
        ui.configlist(_NARROWACL_SECTION, 'default.includes'))
    user_excludes = ui.configlist(
        _NARROWACL_SECTION, username + '.excludes',
        ui.configlist(_NARROWACL_SECTION, 'default.excludes'))
    if not user_includes:
        raise error.Abort(_("{} configuration for user {} is empty")
                          .format(_NARROWACL_SECTION, username))

    user_includes = [
        'path:.' if p == '*' else 'path:' + p for p in user_includes]
    user_excludes = [
        'path:.' if p == '*' else 'path:' + p for p in user_excludes]

    req_includes = set(kwargs.get(r'includepats', []))
    req_excludes = set(kwargs.get(r'excludepats', []))

    req_includes, req_excludes, invalid_includes = narrowspec.restrictpatterns(
        req_includes, req_excludes, user_includes, user_excludes)

    if invalid_includes:
        raise error.Abort(
            _("The following includes are not accessible for {}: {}")
            .format(username, invalid_includes))

    new_args = {}
    new_args.update(kwargs)
    new_args[r'narrow'] = True
    new_args[r'includepats'] = req_includes
    if req_excludes:
        new_args[r'excludepats'] = req_excludes

    return new_args

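# The ACL above is driven by per-user hgrc entries in the section named by
# _NARROWACL_SECTION. A sketch of a server-side configuration (the section
# header, user name, and paths are illustrative):
#
#     [narrowacl]
#     default.includes = *
#     alice.includes = src/lib, docs
#     alice.excludes = src/lib/private
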
def _computeellipsis(repo, common, heads, known, match, depth=None):
    """Compute the shape of a narrowed DAG.

    Args:
      repo: The repository we're transferring.
      common: The roots of the DAG range we're transferring.
          May be just [nullid], which means all ancestors of heads.
      heads: The heads of the DAG range we're transferring.
      match: The narrowmatcher that allows us to identify relevant changes.
      depth: If not None, only consider nodes to be full nodes if they are at
          most depth changesets away from one of heads.

    Returns:
      A tuple of (visitnodes, relevant_nodes, ellipsisroots) where:

        visitnodes: The list of nodes (either full or ellipsis) which
            need to be sent to the client.
        relevant_nodes: The set of changelog nodes which change a file inside
            the narrowspec. The client needs these as non-ellipsis nodes.
        ellipsisroots: A dict of {rev: parents} that is used in
            narrowchangegroup to produce ellipsis nodes with the
            correct parents.
    """
    cl = repo.changelog
    mfl = repo.manifestlog

    cldag = dagutil.revlogdag(cl)
    # dagutil does not like nullid/nullrev
    commonrevs = cldag.internalizeall(common - set([nullid])) | set([nullrev])
    headsrevs = cldag.internalizeall(heads)
    if depth:
        revdepth = {h: 0 for h in headsrevs}

    ellipsisheads = collections.defaultdict(set)
    ellipsisroots = collections.defaultdict(set)

    def addroot(head, curchange):
        """Add a root to an ellipsis head, splitting heads with 3 roots."""
        ellipsisroots[head].add(curchange)
        # Recursively split ellipsis heads with 3 roots by finding the
        # roots' youngest common descendant which is an elided merge commit.
        # That descendant takes 2 of the 3 roots as its own, and becomes a
        # root of the head.
        while len(ellipsisroots[head]) > 2:
            child, roots = splithead(head)
            splitroots(head, child, roots)
            head = child  # Recurse in case we just added a 3rd root

    def splitroots(head, child, roots):
        ellipsisroots[head].difference_update(roots)
        ellipsisroots[head].add(child)
        ellipsisroots[child].update(roots)
        ellipsisroots[child].discard(child)

    def splithead(head):
        r1, r2, r3 = sorted(ellipsisroots[head])
        for nr1, nr2 in ((r2, r3), (r1, r3), (r1, r2)):
            mid = repo.revs('sort(merge() & %d::%d & %d::%d, -rev)',
                            nr1, head, nr2, head)
            for j in mid:
                if j == nr2:
                    return nr2, (nr1, nr2)
                if j not in ellipsisroots or len(ellipsisroots[j]) < 2:
                    return j, (nr1, nr2)
        raise error.Abort(_('Failed to split up ellipsis node! head: %d, '
                            'roots: %d %d %d') % (head, r1, r2, r3))

    missing = list(cl.findmissingrevs(common=commonrevs, heads=headsrevs))
    visit = reversed(missing)
    relevant_nodes = set()
    visitnodes = [cl.node(m) for m in missing]
    required = set(headsrevs) | known
    for rev in visit:
        clrev = cl.changelogrevision(rev)
        ps = cldag.parents(rev)
        if depth is not None:
            curdepth = revdepth[rev]
            for p in ps:
                revdepth[p] = min(curdepth + 1, revdepth.get(p, depth + 1))
        needed = False
        shallow_enough = depth is None or revdepth[rev] <= depth
        if shallow_enough:
            curmf = mfl[clrev.manifest].read()
            if ps:
                # We choose to not trust the changed files list in
                # changesets because it's not always correct. TODO: could
                # we trust it for the non-merge case?
                p1mf = mfl[cl.changelogrevision(ps[0]).manifest].read()
                needed = bool(curmf.diff(p1mf, match))
                if not needed and len(ps) > 1:
                    # For merge changes, the list of changed files is not
                    # helpful, since we need to emit the merge if a file
                    # in the narrow spec has changed on either side of the
                    # merge. As a result, we do a manifest diff to check.
                    p2mf = mfl[cl.changelogrevision(ps[1]).manifest].read()
                    needed = bool(curmf.diff(p2mf, match))
            else:
                # For a root node, we need to include the node if any
                # files in the node match the narrowspec.
                needed = any(curmf.walk(match))

        if needed:
            for head in ellipsisheads[rev]:
                addroot(head, rev)
            for p in ps:
                required.add(p)
            relevant_nodes.add(cl.node(rev))
        else:
            if not ps:
                ps = [nullrev]
            if rev in required:
                for head in ellipsisheads[rev]:
                    addroot(head, rev)
                for p in ps:
                    ellipsisheads[p].add(rev)
            else:
                for p in ps:
                    ellipsisheads[p] |= ellipsisheads[rev]

    # add common changesets as roots of their reachable ellipsis heads
    for c in commonrevs:
        for head in ellipsisheads[c]:
            addroot(head, c)
    return visitnodes, relevant_nodes, ellipsisroots

def caps20to10(repo, role):
    """return a set with appropriate options to use bundle20 during getbundle"""
    caps = {'HG20'}
    capsblob = bundle2.encodecaps(bundle2.getrepocaps(repo, role=role))
    caps.add('bundle2=' + urlreq.quote(capsblob))
    return caps

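# The resulting set is used as the legacy ``bundlecaps`` argument, e.g.:
#
#     caps20to10(repo, role='client')
#     # -> {'HG20', 'bundle2=HG20%0Achangegroup%3D01%2C02...'}
#
# where the url-quoted blob varies with the repository's bundle2 capabilities.
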
# List of names of steps to perform for a bundle2 for getbundle, order matters.
getbundle2partsorder = []

# Mapping between step name and function
#
# This exists to help extensions wrap steps if necessary
getbundle2partsmapping = {}

def getbundle2partsgenerator(stepname, idx=None):
    """decorator for functions generating bundle2 parts for getbundle

    The function is added to the step -> function mapping and appended to the
    list of steps. Beware that decorated functions will be added in order
    (this may matter).

    You can only use this decorator for new steps; if you want to wrap a step
    from an extension, change the getbundle2partsmapping dictionary directly."""
    def dec(func):
        assert stepname not in getbundle2partsmapping
        getbundle2partsmapping[stepname] = func
        if idx is None:
            getbundle2partsorder.append(stepname)
        else:
            getbundle2partsorder.insert(idx, stepname)
        return func
    return dec

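# Registering a new part generator from an extension is a one-liner (the part
# name and payload are hypothetical):
#
#     @getbundle2partsgenerator('myextension:extradata')
#     def _getbundleextradata(bundler, repo, source, **kwargs):
#         bundler.newpart('myextension:extradata', data=b'payload')
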
def bundle2requested(bundlecaps):
    if bundlecaps is not None:
        return any(cap.startswith('HG2') for cap in bundlecaps)
    return False

def getbundlechunks(repo, source, heads=None, common=None, bundlecaps=None,
                    **kwargs):
    """Return chunks constituting a bundle's raw data.

    Could be a bundle HG10 or a bundle HG20 depending on bundlecaps
    passed.

    Returns a 2-tuple of a dict with metadata about the generated bundle
    and an iterator over raw chunks (of varying sizes).
    """
    kwargs = pycompat.byteskwargs(kwargs)
    info = {}
    usebundle2 = bundle2requested(bundlecaps)
    # bundle10 case
    if not usebundle2:
        if bundlecaps and not kwargs.get('cg', True):
            raise ValueError(_('request for bundle10 must include changegroup'))

        if kwargs:
            raise ValueError(_('unsupported getbundle arguments: %s')
                             % ', '.join(sorted(kwargs.keys())))
        outgoing = _computeoutgoing(repo, heads, common)
        info['bundleversion'] = 1
        return info, changegroup.makestream(repo, outgoing, '01', source,
                                            bundlecaps=bundlecaps)

    # bundle20 case
    info['bundleversion'] = 2
    b2caps = {}
    for bcaps in bundlecaps:
        if bcaps.startswith('bundle2='):
            blob = urlreq.unquote(bcaps[len('bundle2='):])
            b2caps.update(bundle2.decodecaps(blob))
    bundler = bundle2.bundle20(repo.ui, b2caps)

    kwargs['heads'] = heads
    kwargs['common'] = common

    for name in getbundle2partsorder:
        func = getbundle2partsmapping[name]
        func(bundler, repo, source, bundlecaps=bundlecaps, b2caps=b2caps,
             **pycompat.strkwargs(kwargs))

    info['prefercompressed'] = bundler.prefercompressed

    return info, bundler.getchunks()

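# A server-side sketch of producing bundle data for a client (arguments are
# illustrative):
#
#     info, gen = getbundlechunks(repo, 'serve', heads=None, common=None,
#                                 bundlecaps=None)
#     raw = b''.join(gen)  # a bundle10 stream, since no bundle2 caps were sent
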
@getbundle2partsgenerator('stream2')
def _getbundlestream2(bundler, repo, *args, **kwargs):
    return bundle2.addpartbundlestream2(bundler, repo, **kwargs)

@getbundle2partsgenerator('changegroup')
def _getbundlechangegrouppart(bundler, repo, source, bundlecaps=None,
                              b2caps=None, heads=None, common=None, **kwargs):
    """add a changegroup part to the requested bundle"""
    if not kwargs.get(r'cg', True):
        return

    version = '01'
    cgversions = b2caps.get('changegroup')
    if cgversions:  # 3.1 and 3.2 ship with an empty value
        cgversions = [v for v in cgversions
                      if v in changegroup.supportedoutgoingversions(repo)]
        if not cgversions:
            raise ValueError(_('no common changegroup version'))
        version = max(cgversions)

    outgoing = _computeoutgoing(repo, heads, common)
    if not outgoing.missing:
        return

    if kwargs.get(r'narrow', False):
        include = sorted(filter(bool, kwargs.get(r'includepats', [])))
        exclude = sorted(filter(bool, kwargs.get(r'excludepats', [])))
        filematcher = narrowspec.match(repo.root, include=include,
                                       exclude=exclude)
    else:
        filematcher = None

    cgstream = changegroup.makestream(repo, outgoing, version, source,
                                      bundlecaps=bundlecaps,
                                      filematcher=filematcher)

    part = bundler.newpart('changegroup', data=cgstream)
    if cgversions:
        part.addparam('version', version)

    part.addparam('nbchanges', '%d' % len(outgoing.missing),
                  mandatory=False)

    if 'treemanifest' in repo.requirements:
        part.addparam('treemanifest', '1')

    if kwargs.get(r'narrow', False) and (include or exclude):
        narrowspecpart = bundler.newpart('narrow:spec')
        if include:
            narrowspecpart.addparam(
                'include', '\n'.join(include), mandatory=True)
        if exclude:
            narrowspecpart.addparam(
                'exclude', '\n'.join(exclude), mandatory=True)

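# For illustration (hypothetical kwargs, not taken from a real request): a
# narrow client could ask for
#
#   {'narrow': True,
#    'includepats': ['path:src'],
#    'excludepats': ['path:src/vendor']}
#
# and the filematcher built above would then restrict the changegroup to
# files under src/ except those under src/vendor/.
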
@getbundle2partsgenerator('bookmarks')
def _getbundlebookmarkpart(bundler, repo, source, bundlecaps=None,
                           b2caps=None, **kwargs):
    """add a bookmark part to the requested bundle"""
    if not kwargs.get(r'bookmarks', False):
        return
    if 'bookmarks' not in b2caps:
        raise ValueError(_('no common bookmarks exchange method'))
    books = bookmod.listbinbookmarks(repo)
    data = bookmod.binaryencode(books)
    if data:
        bundler.newpart('bookmarks', data=data)

@getbundle2partsgenerator('listkeys')
def _getbundlelistkeysparts(bundler, repo, source, bundlecaps=None,
                            b2caps=None, **kwargs):
    """add parts containing listkeys namespaces to the requested bundle"""
    listkeys = kwargs.get(r'listkeys', ())
    for namespace in listkeys:
        part = bundler.newpart('listkeys')
        part.addparam('namespace', namespace)
        keys = repo.listkeys(namespace).items()
        part.data = pushkey.encodekeys(keys)

@getbundle2partsgenerator('obsmarkers')
def _getbundleobsmarkerpart(bundler, repo, source, bundlecaps=None,
                            b2caps=None, heads=None, **kwargs):
    """add an obsolescence markers part to the requested bundle"""
    if kwargs.get(r'obsmarkers', False):
        if heads is None:
            heads = repo.heads()
        subset = [c.node() for c in repo.set('::%ln', heads)]
        markers = repo.obsstore.relevantmarkers(subset)
        markers = sorted(markers)
        bundle2.buildobsmarkerspart(bundler, markers)

@getbundle2partsgenerator('phases')
def _getbundlephasespart(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, **kwargs):
    """add phase heads part to the requested bundle"""
    if kwargs.get(r'phases', False):
        if 'heads' not in b2caps.get('phases'):
            raise ValueError(_('no common phases exchange method'))
        if heads is None:
            heads = repo.heads()

        headsbyphase = collections.defaultdict(set)
        if repo.publishing():
            headsbyphase[phases.public] = heads
        else:
            # find the appropriate heads to move

            phase = repo._phasecache.phase
            node = repo.changelog.node
            rev = repo.changelog.rev
            for h in heads:
                headsbyphase[phase(repo, rev(h))].add(h)
            seenphases = list(headsbyphase.keys())

            # We do not handle anything but public and draft phases for now
            if seenphases:
                assert max(seenphases) <= phases.draft

            # if the client is pulling non-public changesets, we need to find
            # intermediate public heads.
            draftheads = headsbyphase.get(phases.draft, set())
            if draftheads:
                publicheads = headsbyphase.get(phases.public, set())

                revset = 'heads(only(%ln, %ln) and public())'
                extraheads = repo.revs(revset, draftheads, publicheads)
                for r in extraheads:
                    headsbyphase[phases.public].add(node(r))

        # transform the data into the format used by the encoding function
        phasemapping = []
        for phase in phases.allphases:
            phasemapping.append(sorted(headsbyphase[phase]))

        # generate the actual part
        phasedata = phases.binaryencode(phasemapping)
        bundler.newpart('phase-heads', data=phasedata)

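# As an illustrative sketch (the node values are hypothetical), the
# phasemapping list built above is indexed by phase number:
#
#   [[public_head_a, public_head_b],   # phases.public
#    [draft_head],                     # phases.draft
#    []]                               # phases.secret
#
# phases.binaryencode() then serializes this into the 'phase-heads' part.
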
@getbundle2partsgenerator('hgtagsfnodes')
def _getbundletagsfnodes(bundler, repo, source, bundlecaps=None,
                         b2caps=None, heads=None, common=None,
                         **kwargs):
    """Transfer the .hgtags filenodes mapping.

    Only values for heads in this bundle will be transferred.

    The part data consists of pairs of 20 byte changeset node and .hgtags
    filenodes raw values.
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it.
    if not (kwargs.get(r'cg', True) and 'hgtagsfnodes' in b2caps):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addparttagsfnodescache(repo, bundler, outgoing)

@getbundle2partsgenerator('cache:rev-branch-cache')
def _getbundlerevbranchcache(bundler, repo, source, bundlecaps=None,
                             b2caps=None, heads=None, common=None,
                             **kwargs):
    """Transfer the rev-branch-cache mapping

    The payload is a series of data related to each branch

    1) branch name length
    2) number of open heads
    3) number of closed heads
    4) open heads nodes
    5) closed heads nodes
    """
    # Don't send unless:
    # - changesets are being exchanged,
    # - the client supports it,
    # - narrow bundle isn't in play (not currently compatible).
    if (not kwargs.get(r'cg', True)
        or 'rev-branch-cache' not in b2caps
        or kwargs.get(r'narrow', False)
        or repo.ui.has_section(_NARROWACL_SECTION)):
        return

    outgoing = _computeoutgoing(repo, heads, common)
    bundle2.addpartrevbranchcache(repo, bundler, outgoing)

def check_heads(repo, their_heads, context):
    """check if the heads of a repo have been modified

    Used by peer for unbundling.
    """
    heads = repo.heads()
    heads_hash = hashlib.sha1(''.join(sorted(heads))).digest()
    if not (their_heads == ['force'] or their_heads == heads or
            their_heads == ['hashed', heads_hash]):
        # someone else committed/pushed/unbundled while we
        # were transferring data
        raise error.PushRaced('repository changed while %s - '
                              'please try again' % context)

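# For illustration, the 'hashed' form accepted above is what a client can
# send instead of the raw head list: the SHA-1 of the sorted, concatenated
# head nodes (sketch; 'localheads' is a hypothetical variable):
#
#   heads_hash = hashlib.sha1(''.join(sorted(localheads))).digest()
#   remote.unbundle(bundle, ['hashed', heads_hash], url)
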
def unbundle(repo, cg, heads, source, url):
    """Apply a bundle to a repo.

    This function makes sure the repo is locked during the application and
    has a mechanism to check that no push race occurred between the creation
    of the bundle and its application.

    If the push was raced, a PushRaced exception is raised."""
    r = 0
    # need a transaction when processing a bundle2 stream
    # [wlock, lock, tr] - needs to be an array so nested functions can modify it
    lockandtr = [None, None, None]
    recordout = None
    # quick fix for output mismatch with bundle2 in 3.4
    captureoutput = repo.ui.configbool('experimental', 'bundle2-output-capture')
    if url.startswith('remote:http:') or url.startswith('remote:https:'):
        captureoutput = True
    try:
        # note: outside bundle1, 'heads' is expected to be empty and this
        # 'check_heads' call will be a no-op
        check_heads(repo, heads, 'uploading changes')
        # push can proceed
        if not isinstance(cg, bundle2.unbundle20):
            # legacy case: bundle1 (changegroup 01)
            txnname = "\n".join([source, util.hidepassword(url)])
            with repo.lock(), repo.transaction(txnname) as tr:
                op = bundle2.applybundle(repo, cg, tr, source, url)
                r = bundle2.combinechangegroupresults(op)
        else:
            r = None
            try:
                def gettransaction():
                    if not lockandtr[2]:
                        lockandtr[0] = repo.wlock()
                        lockandtr[1] = repo.lock()
                        lockandtr[2] = repo.transaction(source)
                        lockandtr[2].hookargs['source'] = source
                        lockandtr[2].hookargs['url'] = url
                        lockandtr[2].hookargs['bundle2'] = '1'
                    return lockandtr[2]

                # Do greedy locking by default until we're satisfied with lazy
                # locking.
                if not repo.ui.configbool('experimental', 'bundle2lazylocking'):
                    gettransaction()

                op = bundle2.bundleoperation(repo, gettransaction,
                                             captureoutput=captureoutput,
                                             source='push')
                try:
                    op = bundle2.processbundle(repo, cg, op=op)
                finally:
                    r = op.reply
                    if captureoutput and r is not None:
                        repo.ui.pushbuffer(error=True, subproc=True)
                        def recordout(output):
                            r.newpart('output', data=output, mandatory=False)
                if lockandtr[2] is not None:
                    lockandtr[2].close()
            except BaseException as exc:
                exc.duringunbundle2 = True
                if captureoutput and r is not None:
                    parts = exc._bundle2salvagedoutput = r.salvageoutput()
                    def recordout(output):
                        part = bundle2.bundlepart('output', data=output,
                                                  mandatory=False)
                        parts.append(part)
                raise
    finally:
        lockmod.release(lockandtr[2], lockandtr[1], lockandtr[0])
        if recordout is not None:
            recordout(repo.ui.popbuffer())
    return r

def _maybeapplyclonebundle(pullop):
    """Apply a clone bundle from a remote, if possible."""

    repo = pullop.repo
    remote = pullop.remote

    if not repo.ui.configbool('ui', 'clonebundles'):
        return

    # Only run if local repo is empty.
    if len(repo):
        return

    if pullop.heads:
        return

    if not remote.capable('clonebundles'):
        return

    with remote.commandexecutor() as e:
        res = e.callcommand('clonebundles', {}).result()

    # If we call the wire protocol command, that's good enough to record the
    # attempt.
    pullop.clonebundleattempted = True

    entries = parseclonebundlesmanifest(repo, res)
    if not entries:
        repo.ui.note(_('no clone bundles available on remote; '
                       'falling back to regular clone\n'))
        return

    entries = filterclonebundleentries(
        repo, entries, streamclonerequested=pullop.streamclonerequested)

    if not entries:
        # There is a thundering herd concern here. However, if a server
        # operator doesn't advertise bundles appropriate for its clients,
        # they deserve what's coming. Furthermore, from a client's
        # perspective, no automatic fallback would mean not being able to
        # clone!
        repo.ui.warn(_('no compatible clone bundles available on server; '
                       'falling back to regular clone\n'))
        repo.ui.warn(_('(you may want to report this to the server '
                       'operator)\n'))
        return

    entries = sortclonebundleentries(repo.ui, entries)

    url = entries[0]['URL']
    repo.ui.status(_('applying clone bundle from %s\n') % url)
    if trypullbundlefromurl(repo.ui, repo, url):
        repo.ui.status(_('finished applying clone bundle\n'))
    # Bundle failed.
    #
    # We abort by default to avoid the thundering herd of
    # clients flooding a server that was expecting expensive
    # clone load to be offloaded.
    elif repo.ui.configbool('ui', 'clonebundlefallback'):
        repo.ui.warn(_('falling back to normal clone\n'))
    else:
        raise error.Abort(_('error applying bundle'),
                          hint=_('if this error persists, consider contacting '
                                 'the server operator or disable clone '
                                 'bundles via '
                                 '"--config ui.clonebundles=false"'))

def parseclonebundlesmanifest(repo, s):
    """Parses the raw text of a clone bundles manifest.

    Returns a list of dicts. The dicts have a ``URL`` key corresponding
    to the URL and other keys are the attributes for the entry.
    """
    m = []
    for line in s.splitlines():
        fields = line.split()
        if not fields:
            continue
        attrs = {'URL': fields[0]}
        for rawattr in fields[1:]:
            key, value = rawattr.split('=', 1)
            key = urlreq.unquote(key)
            value = urlreq.unquote(value)
            attrs[key] = value

            # Parse BUNDLESPEC into components. This makes client-side
            # preferences easier to specify since you can prefer a single
            # component of the BUNDLESPEC.
            if key == 'BUNDLESPEC':
                try:
                    bundlespec = parsebundlespec(repo, value)
                    attrs['COMPRESSION'] = bundlespec.compression
                    attrs['VERSION'] = bundlespec.version
                except error.InvalidBundleSpecification:
                    pass
                except error.UnsupportedBundleSpecification:
                    pass

        m.append(attrs)

    return m

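# For example, one manifest line of the form a (hypothetical) server could
# advertise:
#
#   https://example.com/full.hg BUNDLESPEC=gzip-v2 REQUIRESNI=true
#
# would be parsed into the entry:
#
#   {'URL': 'https://example.com/full.hg', 'BUNDLESPEC': 'gzip-v2',
#    'COMPRESSION': 'gzip', 'VERSION': 'v2', 'REQUIRESNI': 'true'}
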
def isstreamclonespec(bundlespec):
    # Stream clone v1
    if (bundlespec.wirecompression == 'UN' and bundlespec.wireversion == 's1'):
        return True

    # Stream clone v2
    if (bundlespec.wirecompression == 'UN' and
        bundlespec.wireversion == '02' and
        bundlespec.contentopts.get('streamv2')):
        return True

    return False

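# For instance, an uncompressed 'packed1' bundlespec (e.g. 'none-packed1')
# parses to wirecompression 'UN' and wireversion 's1' and so counts as a
# stream clone v1 spec; a v2 spec must additionally carry the 'streamv2'
# content option checked above.
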
def filterclonebundleentries(repo, entries, streamclonerequested=False):
    """Remove incompatible clone bundle manifest entries.

    Accepts a list of entries parsed with ``parseclonebundlesmanifest``
    and returns a new list consisting of only the entries that this client
    should be able to apply.

    There is no guarantee we'll be able to apply all returned entries because
    the metadata we use to filter on may be missing or wrong.
    """
    newentries = []
    for entry in entries:
        spec = entry.get('BUNDLESPEC')
        if spec:
            try:
                bundlespec = parsebundlespec(repo, spec, strict=True)

                # If a stream clone was requested, filter out non-streamclone
                # entries.
                if streamclonerequested and not isstreamclonespec(bundlespec):
                    repo.ui.debug('filtering %s because not a stream clone\n' %
                                  entry['URL'])
                    continue

            except error.InvalidBundleSpecification as e:
                repo.ui.debug(stringutil.forcebytestr(e) + '\n')
                continue
            except error.UnsupportedBundleSpecification as e:
                repo.ui.debug('filtering %s because unsupported bundle '
                              'spec: %s\n' % (
                                  entry['URL'], stringutil.forcebytestr(e)))
                continue
        # If we don't have a spec and requested a stream clone, we don't know
        # what the entry is so don't attempt to apply it.
        elif streamclonerequested:
            repo.ui.debug('filtering %s because cannot determine if a stream '
                          'clone bundle\n' % entry['URL'])
            continue

        if 'REQUIRESNI' in entry and not sslutil.hassni:
            repo.ui.debug('filtering %s because SNI not supported\n' %
                          entry['URL'])
            continue

        newentries.append(entry)

    return newentries

class clonebundleentry(object):
    """Represents an item in a clone bundles manifest.

    This rich class is needed to support sorting since sorted() in Python 3
    doesn't support ``cmp`` and our comparison is complex enough that ``key=``
    won't work.
    """

    def __init__(self, value, prefers):
        self.value = value
        self.prefers = prefers

    def _cmp(self, other):
        for prefkey, prefvalue in self.prefers:
            avalue = self.value.get(prefkey)
            bvalue = other.value.get(prefkey)

            # Special case for b missing attribute and a matches exactly.
            if avalue is not None and bvalue is None and avalue == prefvalue:
                return -1

            # Special case for a missing attribute and b matches exactly.
            if bvalue is not None and avalue is None and bvalue == prefvalue:
                return 1

            # We can't compare unless attribute present on both.
            if avalue is None or bvalue is None:
                continue

            # Same values should fall back to next attribute.
            if avalue == bvalue:
                continue

            # Exact matches come first.
            if avalue == prefvalue:
                return -1
            if bvalue == prefvalue:
                return 1

            # Fall back to next attribute.
            continue

        # If we got here we couldn't sort by attributes and prefers. Fall
        # back to index order.
        return 0

    def __lt__(self, other):
        return self._cmp(other) < 0

    def __gt__(self, other):
        return self._cmp(other) > 0

    def __eq__(self, other):
        return self._cmp(other) == 0

    def __le__(self, other):
        return self._cmp(other) <= 0

    def __ge__(self, other):
        return self._cmp(other) >= 0

    def __ne__(self, other):
        return self._cmp(other) != 0

def sortclonebundleentries(ui, entries):
    prefers = ui.configlist('ui', 'clonebundleprefers')
    if not prefers:
        return list(entries)

    prefers = [p.split('=', 1) for p in prefers]

    items = sorted(clonebundleentry(v, prefers) for v in entries)
    return [i.value for i in items]

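# As an illustration (hypothetical configuration), with
#
#   [ui]
#   clonebundleprefers = COMPRESSION=zstd, VERSION=v2
#
# entries advertising COMPRESSION=zstd sort ahead of the rest, remaining
# ties are broken by VERSION=v2, and anything still equal keeps its
# original manifest order (clonebundleentry._cmp returns 0).
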
def trypullbundlefromurl(ui, repo, url):
    """Attempt to apply a bundle from a URL."""
    with repo.lock(), repo.transaction('bundleurl') as tr:
        try:
            fh = urlmod.open(ui, url)
            cg = readbundle(ui, fh, 'stream')

            if isinstance(cg, streamclone.streamcloneapplier):
                cg.apply(repo)
            else:
                bundle2.applybundle(repo, cg, tr, 'clonebundles', url)
            return True
        except urlerr.httperror as e:
            ui.warn(_('HTTP error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e))
        except urlerr.urlerror as e:
            ui.warn(_('error fetching bundle: %s\n') %
                    stringutil.forcebytestr(e.reason))

    return False
@@ -1,2401 +1,2401 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    interfaceutil,
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on repo is done for logic that should be unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

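# Both classes are used as property decorators on localrepository; a sketch
# of typical usage (the property body here is hypothetical):
#
#   @repofilecache('bookmarks')
#   def _bookmarks(self):
#       return bookmarks.bmstore(self)
#
# The cached value is recomputed when the stat info of the watched file
# changes.
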
def isfilecached(repo, name):
    """check if a repo has already cached "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

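# Example probe (sketch; 'changelog' names the filecache-ed property, not
# the file it watches):
#
#   cl, cached = isfilecached(repo, 'changelog')
#   if cached:
#       ...  # reuse cl without forcing a fresh read
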
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to unfiltered repos only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

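# Typical decorator usage on localrepository (sketch; 'destroyed' stands in
# for any method that must see all revisions, filtered or not):
#
#   @unfilteredmethod
#   def destroyed(self):
#       ...
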
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@interfaceutil.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

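# Callers drive this executor through the context-manager protocol, the same
# way exchange._maybeapplyclonebundle() above drives a remote peer; e.g.:
#
#   with peer.commandexecutor() as e:
#       f = e.callcommand('lookup', {'key': 'tip'})
#   node = f.result()
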
@interfaceutil.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@interfaceutil.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# A repository with the sparserevlog feature will have delta chains that
# can spread over a larger span. Sparse reading cuts these large spans into
# pieces, so that each piece isn't too big.
# Without the sparserevlog capability, reading from the repository could use
# huge amounts of memory, because the whole span would be read at once,
# including all the intermediate revisions that aren't pertinent for the chain.
# This is why once a repository has enabled sparse-read, it becomes required.
SPARSEREVLOG_REQUIREMENT = 'sparserevlog'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

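# A registration sketch for a third-party extension ('exp-myfeature' is a
# made-up requirement string):
#
#   def featuresetup(ui, features):
#       features.add('exp-myfeature')
#
#   def extsetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
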
375 @interfaceutil.implementer(repository.completelocalrepository)
375 @interfaceutil.implementer(repository.completelocalrepository)
376 class localrepository(object):
376 class localrepository(object):
377
377
378 # obsolete experimental requirements:
378 # obsolete experimental requirements:
379 # - manifestv2: An experimental new manifest format that allowed
379 # - manifestv2: An experimental new manifest format that allowed
380 # for stem compression of long paths. Experiment ended up not
380 # for stem compression of long paths. Experiment ended up not
381 # being successful (repository sizes went up due to worse delta
381 # being successful (repository sizes went up due to worse delta
382 # chains), and the code was deleted in 4.6.
382 # chains), and the code was deleted in 4.6.
383 supportedformats = {
383 supportedformats = {
384 'revlogv1',
384 'revlogv1',
385 'generaldelta',
385 'generaldelta',
386 'treemanifest',
386 'treemanifest',
387 REVLOGV2_REQUIREMENT,
387 REVLOGV2_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
388 SPARSEREVLOG_REQUIREMENT,
389 }
389 }
390 _basesupported = supportedformats | {
390 _basesupported = supportedformats | {
391 'store',
391 'store',
392 'fncache',
392 'fncache',
393 'shared',
393 'shared',
394 'relshared',
394 'relshared',
395 'dotencode',
395 'dotencode',
396 'exp-sparse',
396 'exp-sparse',
397 }
397 }
    openerreqs = {
        'revlogv1',
        'generaldelta',
        'treemanifest',
    }

    # list of prefixes for files which can be written without 'wlock'
    # Extensions should extend this list when needed
    _wlockfreeprefix = {
        # We might consider requiring 'wlock' for the next
        # two, but pretty much all the existing code assumes
        # wlock is not needed so we keep them excluded for
        # now.
        'hgrc',
        'requires',
        # XXX cache is a complicated business; someone
        # should investigate this in depth at some point
        'cache/',
        # XXX shouldn't dirstate be covered by the wlock?
        'dirstate',
        # XXX bisect was still a bit too messy at the time
        # this changeset was introduced. Someone should fix
        # the remaining bit and drop this line
        'bisect.state',
    }

    def __init__(self, baseui, path, create=False, intents=None):
        self.requirements = set()
        self.filtername = None
        # wvfs: rooted at the repository root, used to access the working copy
        self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
        # vfs: rooted at .hg, used to access repo files outside of .hg/store
        self.vfs = None
        # svfs: usually rooted at .hg/store, used to access repository history
        # If this is a shared repository, this vfs may point to another
        # repository's .hg/store directory.
        self.svfs = None
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        # This is only used by context.workingctx.match in order to
        # detect files in subrepos.
        self.auditor = pathutil.pathauditor(
            self.root, callback=self._checknested)
        # This is only used by context.basectx.match in order to detect
        # files in subrepos.
        self.nofsauditor = pathutil.pathauditor(
            self.root, callback=self._checknested, realfs=False, cached=True)
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            self.vfs.audit = self._getvfsward(self.vfs.audit)
        # A list of callbacks to shape the phase if no data were found.
        # Callbacks are in the form: func(repo, roots) --> processed root.
        # This list is to be filled by extensions during repo setup.
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.vfs.join("hgrc"), self.root)
            self._loadextensions()
        except IOError:
            pass

        if featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported
        color.setup(self.ui)

        # Add compression engines.
        for name in util.compengines:
            engine = util.compengines[name]
            if engine.revlogheader():
                self.supported.add('exp-compression-%s' % name)

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise

        cachepath = self.vfs.join('cache')
        self.sharedpath = self.path
        try:
            sharedpath = self.vfs.read("sharedpath").rstrip('\n')
            if 'relshared' in self.requirements:
                sharedpath = self.vfs.join(sharedpath)
            vfs = vfsmod.vfs(sharedpath, realpath=True)
            cachepath = vfs.join('cache')
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        if 'exp-sparse' in self.requirements and not sparse.enabled:
            raise error.RepoError(_('repository is using sparse feature but '
                                    'sparse is not enabled; enable the '
                                    '"sparse" extension to access'))

        self.store = store.store(
            self.requirements, self.sharedpath,
            lambda base: vfsmod.vfs(base, cacheaudited=True))
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
        self.cachevfs.createmode = self.store.createmode
        if (self.ui.configbool('devel', 'all-warnings') or
            self.ui.configbool('devel', 'check-locks')):
            if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
                self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
            else: # standard vfs
                self.svfs.audit = self._getsvfsward(self.svfs.audit)
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self._filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revisions to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # post-dirstate-status hooks
        self._postdsstatus = []

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()

        # Key to signature value.
        self._sparsesignaturecache = {}
        # Signature to cached matcher instance.
        self._sparsematchercache = {}

    def _getvfsward(self, origfunc):
        """build a ward for self.vfs"""
        rref = weakref.ref(self)
        def checkvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if (repo is None
                or not util.safehasattr(repo, '_wlockref')
                or not util.safehasattr(repo, '_lockref')):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.path):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.path) + 1:]
            if path.startswith('cache/'):
                msg = 'accessing cache with vfs instead of cachevfs: "%s"'
                repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
            if path.startswith('journal.'):
                # journal is covered by 'lock'
                if repo._currentlock(repo._lockref) is None:
                    repo.ui.develwarn('write with no lock: "%s"' % path,
                                      stacklevel=2, config='check-locks')
            elif repo._currentlock(repo._wlockref) is None:
                # rest of vfs files are covered by 'wlock'
                #
                # exclude special files
                for prefix in self._wlockfreeprefix:
                    if path.startswith(prefix):
                        return
                repo.ui.develwarn('write with no wlock: "%s"' % path,
                                  stacklevel=2, config='check-locks')
            return ret
        return checkvfs

    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        deltabothparents = self.ui.configbool('storage',
            'revlog.optimize-delta-parent-choice')
        self.svfs.options['deltabothparents'] = deltabothparents
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize
        sparserevlog = SPARSEREVLOG_REQUIREMENT in self.requirements
        self.svfs.options['sparse-revlog'] = sparserevlog
        if sparserevlog:
            self.svfs.options['generaldelta'] = True

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)
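
    # Illustrative usage: repo.filtered('visible') returns a view hiding
    # hidden changesets; cancopy() below relies on exactly this call.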

    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        return self._makedirstate()

    def _makedirstate(self):
        """Extension point for wrapping the dirstate per-repo."""
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if repository.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)
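
    # An illustrative sketch of the narrowspec data exposed here (the pattern
    # values are hypothetical; only the attribute/method names are real):
    #
    #   includes, excludes = repo.narrowpats
    #   # e.g. includes == {'path:src'}, excludes == {'path:src/tests'}
    #   m = repo.narrowmatch()
    #   m('src/main.py')    # True for files covered by the narrow clone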

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in pycompat.xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)
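
    # Illustrative lookups, all served by __getitem__ above:
    #
    #   repo[None]    # working directory context
    #   repo[0]       # changectx for revision 0
    #   repo[0:3]     # list of changectx, skipping filtered revisions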

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
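
    # A small usage sketch (the revision values are hypothetical):
    #
    #   repo.revs('heads(%d::)', rev)        # %d escapes an integer revision
    #   repo.revs('%ld and merge()', revs)   # %ld escapes a list of revisions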

    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default. To
        expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)
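
    # For example (the alias name and definition are hypothetical):
    #
    #   repo.anyrevs(['mine()'], user=True,
    #                localalias={'mine': 'author(alice)'})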

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        return self._bookmarks.names(node)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
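
    # Illustrative usage: repo.branchmap().branchtip('default') resolves a
    # branch's tip node; branchtip() below wraps exactly this call.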

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records, the dirstate cannot do it, it
                # requires access to parents manifests. Preserve them
                # only for entries added to first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter
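
    # A sketch of how the pieces fit together (the filter name and hgrc line
    # are hypothetical; win32text-style extensions follow this pattern):
    #
    #   def upper(s, cmd, **kwargs):
    #       return s.upper()
    #   repo.adddatafilter('upper:', upper)
    #
    # after which an hgrc entry like "[encode] **.txt = upper:" makes
    # _loadfilter('encode') dispatch matching files through upper().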
1201
1201
1202 def wread(self, filename):
1202 def wread(self, filename):
1203 if self.wvfs.islink(filename):
1203 if self.wvfs.islink(filename):
1204 data = self.wvfs.readlink(filename)
1204 data = self.wvfs.readlink(filename)
1205 else:
1205 else:
1206 data = self.wvfs.read(filename)
1206 data = self.wvfs.read(filename)
1207 return self._filter(self._encodefilterpats, filename, data)
1207 return self._filter(self._encodefilterpats, filename, data)
1208
1208
1209 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1209 def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
1210 """write ``data`` into ``filename`` in the working directory
1210 """write ``data`` into ``filename`` in the working directory
1211
1211
1212 This returns length of written (maybe decoded) data.
1212 This returns length of written (maybe decoded) data.
1213 """
1213 """
1214 data = self._filter(self._decodefilterpats, filename, data)
1214 data = self._filter(self._decodefilterpats, filename, data)
1215 if 'l' in flags:
1215 if 'l' in flags:
1216 self.wvfs.symlink(data, filename)
1216 self.wvfs.symlink(data, filename)
1217 else:
1217 else:
1218 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1218 self.wvfs.write(filename, data, backgroundclose=backgroundclose,
1219 **kwargs)
1219 **kwargs)
1220 if 'x' in flags:
1220 if 'x' in flags:
1221 self.wvfs.setflags(filename, False, True)
1221 self.wvfs.setflags(filename, False, True)
1222 else:
1222 else:
1223 self.wvfs.setflags(filename, False, False)
1223 self.wvfs.setflags(filename, False, False)
1224 return len(data)
1224 return len(data)
1225
1225
1226 def wwritedata(self, filename, data):
1226 def wwritedata(self, filename, data):
1227 return self._filter(self._decodefilterpats, filename, data)
1227 return self._filter(self._decodefilterpats, filename, data)
1228
1228
1229 def currenttransaction(self):
1229 def currenttransaction(self):
1230 """return the current transaction or None if non exists"""
1230 """return the current transaction or None if non exists"""
1231 if self._transref:
1231 if self._transref:
1232 tr = self._transref()
1232 tr = self._transref()
1233 else:
1233 else:
1234 tr = None
1234 tr = None
1235
1235
1236 if tr and tr.running():
1236 if tr and tr.running():
1237 return tr
1237 return tr
1238 return None
1238 return None
1239
1239
    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
                or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from a code perspective. So we fall
        # back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup apply,
        # but that fails to cope with cases where a transaction exposes new
        # heads without a changegroup being involved (eg: phase movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction touched
        # tags (new, changed or deleted tags). In addition, the details of
        # these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist when
                        # the transaction is closed (for txnclose hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a hacky
            # path for now
            #
            # We cannot add this as a "pending" hook since the 'tr.hookargs'
            # dict is copied before these run. In addition, we need the data
            # available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to track
            # other families of changes (bookmarks, phases, obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
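        # Editor's sketch (not part of the original source): these hooks can
        # be wired up from an hgrc, e.g.
        #
        #   [hooks]
        #   pretxnclose-bookmark.check = /path/to/check-bookmark.sh
        #
        # The script receives the bookmark name and old/new nodes through
        # HG_* environment variables built from bookmarks.preparehookargs();
        # because throw=True above, a failing hook aborts the transaction.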
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when the transaction closes if
                # tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = pycompat.xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the file
        # is outdated when running hooks. As fncache is used for streaming
        # clones, this is not expected to break anything that happens during
        # the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction summary
        # reports registered via scmutil.registersummarycallback() whose names
        # are 00-txnreport etc. That way, the caches will be warm when the
        # callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

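    # Editor's sketch (not part of the original source): a typical caller
    # pairs the store lock with a transaction context manager, e.g.
    #
    #   with repo.lock():
    #       with repo.transaction('my-operation') as tr:
    #           ...  # writes registered with tr become atomic
    #
    # A nested transaction() call returns tr.nest(), so only the outermost
    # scope writes and finalizes the journal.
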
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

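    # Editor's note (illustration, not part of the original source): on a
    # successful transaction close, aftertrans(renames) turns each journal
    # file into its undo counterpart via undoname(), e.g. 'journal.dirstate'
    # becomes 'undo.dirstate', which is what _rollback() below consumes.
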
    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1
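            # Editor's note (illustration, not part of the original source):
            # 'undo.desc' is the renamed 'journal.desc' written by
            # _writejournal() above ("%d\n%s\n"), so rolling back a commit in
            # a repository that had 42 changesets would parse, e.g.:
            #
            #   42
            #   commit
            #
            # giving oldlen=42, desc='commit', detail=None.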

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to
        the method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the
        transaction will be available in the 'tr' argument. This can be used
        to selectively update caches relevant to the changes in that
        transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data. Even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others,
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

            # ensure the working copy parents are in the manifestfulltextcache
            for ctx in self['.'].parents():
                ctx.manifest() # accessing the manifest is enough

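    # Editor's note (assumption, not part of the original source): the
    # full=True path is what 'hg debugupdatecaches' requests; it walks every
    # revision to warm the rev-branch cache and touches the working copy
    # parents' manifests so they land in the manifest cache.
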
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This differs from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")
        # internal config: ui.signal-safe-lock
        signalsafe = self.ui.configbool('ui', 'signal-safe-lock')

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock,
                            signalsafe=signalsafe)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

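    # Editor's note (illustration, not part of the original source): the
    # 'commit' hook scheduled via _afterlock() in commit() below only fires
    # once both wlock and lock are released; when no lock is held at all,
    # the callback runs immediately instead.
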
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always
        acquire 'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

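    # Editor's sketch (not part of the original source): callers follow the
    # documented ordering, wlock before lock, e.g.
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('example') as tr:
    #               ...
    #
    # acquiring them the other way round triggers the devel warning above.
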
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

2029 @unfilteredmethod
2029 @unfilteredmethod
2030 def commitctx(self, ctx, error=False):
2030 def commitctx(self, ctx, error=False):
2031 """Add a new revision to current repository.
2031 """Add a new revision to current repository.
2032 Revision information is passed via the context argument.
2032 Revision information is passed via the context argument.
2033 """
2033 """
2034
2034
2035 tr = None
2035 tr = None
2036 p1, p2 = ctx.p1(), ctx.p2()
2036 p1, p2 = ctx.p1(), ctx.p2()
2037 user = ctx.user()
2037 user = ctx.user()
2038
2038
2039 lock = self.lock()
2039 lock = self.lock()
2040 try:
2040 try:
2041 tr = self.transaction("commit")
2041 tr = self.transaction("commit")
2042 trp = weakref.proxy(tr)
2042 trp = weakref.proxy(tr)
2043
2043
2044 if ctx.manifestnode():
2044 if ctx.manifestnode():
2045 # reuse an existing manifest revision
2045 # reuse an existing manifest revision
2046 mn = ctx.manifestnode()
2046 mn = ctx.manifestnode()
2047 files = ctx.files()
2047 files = ctx.files()
2048 elif ctx.files():
2048 elif ctx.files():
2049 m1ctx = p1.manifestctx()
2049 m1ctx = p1.manifestctx()
2050 m2ctx = p2.manifestctx()
2050 m2ctx = p2.manifestctx()
2051 mctx = m1ctx.copy()
2051 mctx = m1ctx.copy()
2052
2052
2053 m = mctx.read()
2053 m = mctx.read()
2054 m1 = m1ctx.read()
2054 m1 = m1ctx.read()
2055 m2 = m2ctx.read()
2055 m2 = m2ctx.read()
2056
2056
2057 # check in files
2057 # check in files
2058 added = []
2058 added = []
2059 changed = []
2059 changed = []
2060 removed = list(ctx.removed())
2060 removed = list(ctx.removed())
2061 linkrev = len(self)
2061 linkrev = len(self)
2062 self.ui.note(_("committing files:\n"))
2062 self.ui.note(_("committing files:\n"))
2063 for f in sorted(ctx.modified() + ctx.added()):
2063 for f in sorted(ctx.modified() + ctx.added()):
2064 self.ui.note(f + "\n")
2064 self.ui.note(f + "\n")
2065 try:
2065 try:
2066 fctx = ctx[f]
2066 fctx = ctx[f]
2067 if fctx is None:
2067 if fctx is None:
2068 removed.append(f)
2068 removed.append(f)
2069 else:
2069 else:
2070 added.append(f)
2070 added.append(f)
2071 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2071 m[f] = self._filecommit(fctx, m1, m2, linkrev,
2072 trp, changed)
2072 trp, changed)
2073 m.setflag(f, fctx.flags())
2073 m.setflag(f, fctx.flags())
2074 except OSError as inst:
2074 except OSError as inst:
2075 self.ui.warn(_("trouble committing %s!\n") % f)
2075 self.ui.warn(_("trouble committing %s!\n") % f)
2076 raise
2076 raise
2077 except IOError as inst:
2077 except IOError as inst:
2078 errcode = getattr(inst, 'errno', errno.ENOENT)
2078 errcode = getattr(inst, 'errno', errno.ENOENT)
2079 if error or errcode and errcode != errno.ENOENT:
2079 if error or errcode and errcode != errno.ENOENT:
2080 self.ui.warn(_("trouble committing %s!\n") % f)
2080 self.ui.warn(_("trouble committing %s!\n") % f)
2081 raise
2081 raise
2082
2082
2083 # update manifest
2083 # update manifest
2084 self.ui.note(_("committing manifest\n"))
2084 self.ui.note(_("committing manifest\n"))
2085 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2085 removed = [f for f in sorted(removed) if f in m1 or f in m2]
2086 drop = [f for f in removed if f in m]
2086 drop = [f for f in removed if f in m]
2087 for f in drop:
2087 for f in drop:
2088 del m[f]
2088 del m[f]
2089 mn = mctx.write(trp, linkrev,
2089 mn = mctx.write(trp, linkrev,
2090 p1.manifestnode(), p2.manifestnode(),
2090 p1.manifestnode(), p2.manifestnode(),
2091 added, drop)
2091 added, drop)
2092 files = changed + removed
2092 files = changed + removed
2093 else:
2093 else:
2094 mn = p1.manifestnode()
2094 mn = p1.manifestnode()
2095 files = []
2095 files = []
2096
2096
2097 # update changelog
2097 # update changelog
2098 self.ui.note(_("committing changelog\n"))
2098 self.ui.note(_("committing changelog\n"))
2099 self.changelog.delayupdate(tr)
2099 self.changelog.delayupdate(tr)
2100 n = self.changelog.add(mn, files, ctx.description(),
2100 n = self.changelog.add(mn, files, ctx.description(),
2101 trp, p1.node(), p2.node(),
2101 trp, p1.node(), p2.node(),
2102 user, ctx.date(), ctx.extra().copy())
2102 user, ctx.date(), ctx.extra().copy())
2103 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2103 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
2104 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2104 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
2105 parent2=xp2)
2105 parent2=xp2)
2106 # set the new commit is proper phase
2106 # set the new commit is proper phase
2107 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2107 targetphase = subrepoutil.newcommitphase(self.ui, ctx)
2108 if targetphase:
2108 if targetphase:
2109 # retract boundary do not alter parent changeset.
2109 # retract boundary do not alter parent changeset.
2110 # if a parent have higher the resulting phase will
2110 # if a parent have higher the resulting phase will
2111 # be compliant anyway
2111 # be compliant anyway
2112 #
2112 #
2113 # if minimal phase was 0 we don't need to retract anything
2113 # if minimal phase was 0 we don't need to retract anything
2114 phases.registernew(self, tr, targetphase, [n])
2114 phases.registernew(self, tr, targetphase, [n])
2115 tr.close()
2115 tr.close()
2116 return n
2116 return n
2117 finally:
2117 finally:
2118 if tr:
2118 if tr:
2119 tr.release()
2119 tr.release()
2120 lock.release()
2120 lock.release()
2121
2121
2122 @unfilteredmethod
2122 @unfilteredmethod
2123 def destroying(self):
2123 def destroying(self):
2124 '''Inform the repository that nodes are about to be destroyed.
2124 '''Inform the repository that nodes are about to be destroyed.
2125 Intended for use by strip and rollback, so there's a common
2125 Intended for use by strip and rollback, so there's a common
2126 place for anything that has to be done before destroying history.
2126 place for anything that has to be done before destroying history.
2127
2127
2128 This is mostly useful for saving state that is in memory and waiting
2128 This is mostly useful for saving state that is in memory and waiting
2129 to be flushed when the current lock is released. Because a call to
2129 to be flushed when the current lock is released. Because a call to
2130 destroyed is imminent, the repo will be invalidated causing those
2130 destroyed is imminent, the repo will be invalidated causing those
2131 changes to stay in memory (waiting for the next unlock), or vanish
2131 changes to stay in memory (waiting for the next unlock), or vanish
2132 completely.
2132 completely.
2133 '''
2133 '''
2134 # When using the same lock to commit and strip, the phasecache is left
2134 # When using the same lock to commit and strip, the phasecache is left
2135 # dirty after committing. Then when we strip, the repo is invalidated,
2135 # dirty after committing. Then when we strip, the repo is invalidated,
2136 # causing those changes to disappear.
2136 # causing those changes to disappear.
2137 if '_phasecache' in vars(self):
2137 if '_phasecache' in vars(self):
2138 self._phasecache.write()
2138 self._phasecache.write()
2139
2139
2140 @unfilteredmethod
2140 @unfilteredmethod
2141 def destroyed(self):
2141 def destroyed(self):
2142 '''Inform the repository that nodes have been destroyed.
2142 '''Inform the repository that nodes have been destroyed.
2143 Intended for use by strip and rollback, so there's a common
2143 Intended for use by strip and rollback, so there's a common
2144 place for anything that has to be done after destroying history.
2144 place for anything that has to be done after destroying history.
2145 '''
2145 '''
2146 # When one tries to:
2146 # When one tries to:
2147 # 1) destroy nodes thus calling this method (e.g. strip)
2147 # 1) destroy nodes thus calling this method (e.g. strip)
2148 # 2) use phasecache somewhere (e.g. commit)
2148 # 2) use phasecache somewhere (e.g. commit)
2149 #
2149 #
2150 # then 2) will fail because the phasecache contains nodes that were
2150 # then 2) will fail because the phasecache contains nodes that were
2151 # removed. We can either remove phasecache from the filecache,
2151 # removed. We can either remove phasecache from the filecache,
2152 # causing it to reload next time it is accessed, or simply filter
2152 # causing it to reload next time it is accessed, or simply filter
2153 # the removed nodes now and write the updated cache.
2153 # the removed nodes now and write the updated cache.
2154 self._phasecache.filterunknown(self)
2154 self._phasecache.filterunknown(self)
2155 self._phasecache.write()
2155 self._phasecache.write()
2156
2156
2157 # refresh all repository caches
2157 # refresh all repository caches
2158 self.updatecaches()
2158 self.updatecaches()
2159
2159
2160 # Ensure the persistent tag cache is updated. Doing it now
2160 # Ensure the persistent tag cache is updated. Doing it now
2161 # means that the tag cache only has to worry about destroyed
2161 # means that the tag cache only has to worry about destroyed
2162 # heads immediately after a strip/rollback. That in turn
2162 # heads immediately after a strip/rollback. That in turn
2163 # guarantees that "cachetip == currenttip" (comparing both rev
2163 # guarantees that "cachetip == currenttip" (comparing both rev
2164 # and node) always means no nodes have been added or destroyed.
2164 # and node) always means no nodes have been added or destroyed.
2165
2165
2166 # XXX this is suboptimal when qrefresh'ing: we strip the current
2166 # XXX this is suboptimal when qrefresh'ing: we strip the current
2167 # head, refresh the tag cache, then immediately add a new head.
2167 # head, refresh the tag cache, then immediately add a new head.
2168 # But I think doing it this way is necessary for the "instant
2168 # But I think doing it this way is necessary for the "instant
2169 # tag cache retrieval" case to work.
2169 # tag cache retrieval" case to work.
2170 self.invalidate()
2170 self.invalidate()
2171
2171
2172 def status(self, node1='.', node2=None, match=None,
2172 def status(self, node1='.', node2=None, match=None,
2173 ignored=False, clean=False, unknown=False,
2173 ignored=False, clean=False, unknown=False,
2174 listsubrepos=False):
2174 listsubrepos=False):
2175 '''a convenience method that calls node1.status(node2)'''
2175 '''a convenience method that calls node1.status(node2)'''
2176 return self[node1].status(node2, match, ignored, clean, unknown,
2176 return self[node1].status(node2, match, ignored, clean, unknown,
2177 listsubrepos)
2177 listsubrepos)
2178
2178
2179 def addpostdsstatus(self, ps):
2179 def addpostdsstatus(self, ps):
2180 """Add a callback to run within the wlock, at the point at which status
2180 """Add a callback to run within the wlock, at the point at which status
2181 fixups happen.
2181 fixups happen.
2182
2182
2183 On status completion, callback(wctx, status) will be called with the
2183 On status completion, callback(wctx, status) will be called with the
2184 wlock held, unless the dirstate has changed from underneath or the wlock
2184 wlock held, unless the dirstate has changed from underneath or the wlock
2185 couldn't be grabbed.
2185 couldn't be grabbed.
2186
2186
2187 Callbacks should not capture and use a cached copy of the dirstate --
2187 Callbacks should not capture and use a cached copy of the dirstate --
2188 it might change in the meanwhile. Instead, they should access the
2188 it might change in the meanwhile. Instead, they should access the
2189 dirstate via wctx.repo().dirstate.
2189 dirstate via wctx.repo().dirstate.
2190
2190
2191 This list is emptied out after each status run -- extensions should
2191 This list is emptied out after each status run -- extensions should
2192 make sure it adds to this list each time dirstate.status is called.
2192 make sure it adds to this list each time dirstate.status is called.
2193 Extensions should also make sure they don't call this for statuses
2193 Extensions should also make sure they don't call this for statuses
2194 that don't involve the dirstate.
2194 that don't involve the dirstate.
2195 """
2195 """
2196
2196
2197 # The list is located here for uniqueness reasons -- it is actually
2197 # The list is located here for uniqueness reasons -- it is actually
2198 # managed by the workingctx, but that isn't unique per-repo.
2198 # managed by the workingctx, but that isn't unique per-repo.
2199 self._postdsstatus.append(ps)
2199 self._postdsstatus.append(ps)
2200
2200
2201 def postdsstatus(self):
2201 def postdsstatus(self):
2202 """Used by workingctx to get the list of post-dirstate-status hooks."""
2202 """Used by workingctx to get the list of post-dirstate-status hooks."""
2203 return self._postdsstatus
2203 return self._postdsstatus
2204
2204
2205 def clearpostdsstatus(self):
2205 def clearpostdsstatus(self):
2206 """Used by workingctx to clear post-dirstate-status hooks."""
2206 """Used by workingctx to clear post-dirstate-status hooks."""
2207 del self._postdsstatus[:]
2207 del self._postdsstatus[:]
2208
2208
2209 def heads(self, start=None):
2209 def heads(self, start=None):
2210 if start is None:
2210 if start is None:
2211 cl = self.changelog
2211 cl = self.changelog
2212 headrevs = reversed(cl.headrevs())
2212 headrevs = reversed(cl.headrevs())
2213 return [cl.node(rev) for rev in headrevs]
2213 return [cl.node(rev) for rev in headrevs]
2214
2214
2215 heads = self.changelog.heads(start)
2215 heads = self.changelog.heads(start)
2216 # sort the output in rev descending order
2216 # sort the output in rev descending order
2217 return sorted(heads, key=self.changelog.rev, reverse=True)
2217 return sorted(heads, key=self.changelog.rev, reverse=True)
2218
2218
2219 def branchheads(self, branch=None, start=None, closed=False):
2219 def branchheads(self, branch=None, start=None, closed=False):
2220 '''return a (possibly filtered) list of heads for the given branch
2220 '''return a (possibly filtered) list of heads for the given branch
2221
2221
2222 Heads are returned in topological order, from newest to oldest.
2222 Heads are returned in topological order, from newest to oldest.
2223 If branch is None, use the dirstate branch.
2223 If branch is None, use the dirstate branch.
2224 If start is not None, return only heads reachable from start.
2224 If start is not None, return only heads reachable from start.
2225 If closed is True, return heads that are marked as closed as well.
2225 If closed is True, return heads that are marked as closed as well.
2226 '''
2226 '''
2227 if branch is None:
2227 if branch is None:
2228 branch = self[None].branch()
2228 branch = self[None].branch()
2229 branches = self.branchmap()
2229 branches = self.branchmap()
2230 if branch not in branches:
2230 if branch not in branches:
2231 return []
2231 return []
2232 # the cache returns heads ordered lowest to highest
2232 # the cache returns heads ordered lowest to highest
2233 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2233 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2234 if start is not None:
2234 if start is not None:
2235 # filter out the heads that cannot be reached from startrev
2235 # filter out the heads that cannot be reached from startrev
2236 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2236 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2237 bheads = [h for h in bheads if h in fbheads]
2237 bheads = [h for h in bheads if h in fbheads]
2238 return bheads
2238 return bheads
2239
2239
2240 def branches(self, nodes):
2240 def branches(self, nodes):
2241 if not nodes:
2241 if not nodes:
2242 nodes = [self.changelog.tip()]
2242 nodes = [self.changelog.tip()]
2243 b = []
2243 b = []
2244 for n in nodes:
2244 for n in nodes:
2245 t = n
2245 t = n
2246 while True:
2246 while True:
2247 p = self.changelog.parents(n)
2247 p = self.changelog.parents(n)
2248 if p[1] != nullid or p[0] == nullid:
2248 if p[1] != nullid or p[0] == nullid:
2249 b.append((t, n, p[0], p[1]))
2249 b.append((t, n, p[0], p[1]))
2250 break
2250 break
2251 n = p[0]
2251 n = p[0]
2252 return b
2252 return b
2253
2253
2254 def between(self, pairs):
2254 def between(self, pairs):
2255 r = []
2255 r = []
2256
2256
2257 for top, bottom in pairs:
2257 for top, bottom in pairs:
2258 n, l, i = top, [], 0
2258 n, l, i = top, [], 0
2259 f = 1
2259 f = 1
2260
2260
2261 while n != bottom and n != nullid:
2261 while n != bottom and n != nullid:
2262 p = self.changelog.parents(n)[0]
2262 p = self.changelog.parents(n)[0]
2263 if i == f:
2263 if i == f:
2264 l.append(n)
2264 l.append(n)
2265 f = f * 2
2265 f = f * 2
2266 n = p
2266 n = p
2267 i += 1
2267 i += 1
2268
2268
2269 r.append(l)
2269 r.append(l)
2270
2270
2271 return r
2271 return r
2272
2272
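    # Illustrative sketch (not from the original source): ``between`` samples
    # the first-parent chain from ``top`` toward ``bottom`` at exponentially
    # growing distances (1, 2, 4, 8, ...), which keeps replies to the old
    # discovery protocol small on long histories. The same sampling over a
    # plain list, as a standalone model:
    #
    #     def sample_between(chain):
    #         # chain[0] is "top"; pick elements at distances 1, 2, 4, ...
    #         picked, f = [], 1
    #         for i, node in enumerate(chain):
    #             if i == f:
    #                 picked.append(node)
    #                 f *= 2
    #         return picked
    #
    #     sample_between(['t', 'a', 'b', 'c', 'd', 'e'])  # -> ['a', 'b', 'd']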
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the push
        command.
        """

    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return a util.hooks instance of hooks that are called with a
        pushop (which has repo, remote and outgoing attributes) before
        changesets are pushed.
        """
        return util.hooks()

    def pushkey(self, namespace, key, old, new):
        try:
            tr = self.currenttransaction()
            hookargs = {}
            if tr is not None:
                hookargs.update(tr.hookargs)
            hookargs = pycompat.strkwargs(hookargs)
            hookargs[r'namespace'] = namespace
            hookargs[r'key'] = key
            hookargs[r'old'] = old
            hookargs[r'new'] = new
            self.hook('prepushkey', throw=True, **hookargs)
        except error.HookAbort as exc:
            self.ui.write_err(_("pushkey-abort: %s\n") % exc)
            if exc.hint:
                self.ui.write_err(_("(%s)\n") % exc.hint)
            return False
        self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
        ret = pushkey.push(self, namespace, key, old, new)
        def runhook():
            self.hook('pushkey', namespace=namespace, key=key, old=old,
                      new=new, ret=ret)
        self._afterlock(runhook)
        return ret

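    # Illustrative sketch (assumptions flagged, not from the original
    # source): core pushkey namespaces include 'bookmarks' and 'phases'.
    # Moving a bookmark through this method would look roughly like:
    #
    #     old = hex(repo._bookmarks.get('mybook', nullid))  # 'mybook' is a
    #     ok = repo.pushkey('bookmarks', 'mybook', old,     # made-up name
    #                       hex(newnode))
    #
    # ``ok`` is False when the prepushkey hook aborts; otherwise it reflects
    # whether pushkey.push() accepted the update.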
    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def savecommitmessage(self, text):
        fp = self.vfs('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root) + 1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for vfs, src, dest in renamefiles:
            # if src and dest refer to the same file, vfs.rename is a no-op,
            # leaving both src and dest on disk. delete dest to make sure
            # the rename couldn't be such a no-op.
            vfs.tryunlink(dest)
            try:
                vfs.rename(src, dest)
            except OSError: # journal file does not yet exist
                pass
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

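# Illustrative example (not from the original source): undoname() maps a
# transaction journal file to its post-transaction "undo" counterpart:
#
#     undoname('.hg/store/journal')             # -> '.hg/store/undo'
#     undoname('.hg/store/journal.phaseroots')  # -> '.hg/store/undo.phaseroots'
#
# Only the first 'journal' in the basename is replaced, so directory
# components of the path are left intact.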
def instance(ui, path, create, intents=None):
    return localrepository(ui, util.urllocalpath(path), create,
                           intents=intents)

def islocal(path):
    return True

def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = {'revlogv1'}
    if ui.configbool('format', 'usestore'):
        requirements.add('store')
        if ui.configbool('format', 'usefncache'):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode'):
                requirements.add('dotencode')

    compengine = ui.config('experimental', 'format.compression')
    if compengine not in util.compengines:
        raise error.Abort(_('compression engine %s defined by '
                            'experimental.format.compression not available') %
                          compengine,
                          hint=_('run "hg debuginstall" to list available '
                                 'compression engines'))

    # zlib is the historical default and doesn't need an explicit requirement.
    if compengine != 'zlib':
        requirements.add('exp-compression-%s' % compengine)

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    if ui.configbool('experimental', 'treemanifest'):
        requirements.add('treemanifest')
    # experimental config: format.sparse-revlog
    if ui.configbool('format', 'sparse-revlog'):
        requirements.add(SPARSEREVLOG_REQUIREMENT)

    revlogv2 = ui.config('experimental', 'revlogv2')
    if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
        requirements.remove('revlogv1')
        # generaldelta is implied by revlogv2.
        requirements.discard('generaldelta')
        requirements.add(REVLOGV2_REQUIREMENT)

    return requirements
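
# Illustrative sketch (hypothetical requirement name, not from the original
# source): an extension that wants new repositories to carry an extra
# requirement can wrap this function with extensions.wrapfunction():
#
#     from mercurial import extensions, localrepo
#
#     def _newreporequirements(orig, repo):
#         requirements = orig(repo)
#         requirements.add('exp-my-feature')  # made-up requirement string
#         return requirements
#
#     def extsetup(ui):
#         extensions.wrapfunction(localrepo, 'newreporequirements',
#                                 _newreporequirements)
#
# Repositories created while such an extension is loaded then refuse to open
# under clients that don't recognize the requirement, which is the point of
# the requirements mechanism.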
@@ -1,1302 +1,1306 @@
# repository.py - Interfaces and base classes for repositories and peers.
#
# Copyright 2017 Gregory Szorc <gregory.szorc@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

from .i18n import _
from . import (
    error,
)
from .utils import (
    interfaceutil,
)

# When narrowing is finalized and no longer subject to format changes,
# we should move this to just "narrow" or similar.
NARROW_REQUIREMENT = 'narrowhg-experimental'

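# Illustrative sketch (helper name is made up, not from the original
# source): consumers test for a narrow clone by checking the repository's
# requirements set, roughly:
#
#     from mercurial import repository
#
#     def isnarrowrepo(repo):  # hypothetical helper
#         return repository.NARROW_REQUIREMENT in repo.requirements
#
# Centralizing the constant here lets changegroup code and the narrow
# extension share a single spelling of the requirement string.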
class ipeerconnection(interfaceutil.Interface):
    """Represents a "connection" to a repository.

    This is the base interface for representing a connection to a repository.
    It holds basic properties and methods applicable to all peer types.

    This is not a complete interface definition and should not be used
    outside of this module.
    """
    ui = interfaceutil.Attribute("""ui.ui instance""")

    def url():
        """Returns a URL string representing this peer.

        Currently, implementations expose the raw URL used to construct the
        instance. It may contain credentials as part of the URL. The
        expectations of the value aren't well-defined and this could lead to
        data leakage.

        TODO audit/clean consumers and more clearly define the contents of this
        value.
        """

    def local():
        """Returns a local repository instance.

        If the peer represents a local repository, returns an object that
        can be used to interface with it. Otherwise returns ``None``.
        """

    def peer():
        """Returns an object conforming to this interface.

        Most implementations will ``return self``.
        """

    def canpush():
        """Returns a boolean indicating if this peer can be pushed to."""

    def close():
        """Close the connection to this peer.

        This is called when the peer will no longer be used. Resources
        associated with the peer should be cleaned up.
        """

class ipeercapabilities(interfaceutil.Interface):
    """Peer sub-interface related to capabilities."""

    def capable(name):
        """Determine support for a named capability.

        Returns ``False`` if the capability is not supported.

        Returns ``True`` if a boolean capability is supported. Returns a
        string if capability support is non-boolean.

        Capability strings may or may not map to wire protocol capabilities.
        """

    def requirecap(name, purpose):
        """Require a capability to be present.

        Raises a ``CapabilityError`` if the capability isn't present.
        """

class ipeercommands(interfaceutil.Interface):
    """Client-side interface for communicating over the wire protocol.

    This interface is used as a gateway to the Mercurial wire protocol.
    Methods commonly call wire protocol commands of the same name.
    """

    def branchmap():
        """Obtain heads in named branches.

        Returns a dict mapping branch name to an iterable of nodes that are
        heads on that branch.
        """

    def capabilities():
        """Obtain capabilities of the peer.

        Returns a set of string capabilities.
        """

    def clonebundles():
        """Obtains the clone bundles manifest for the repo.

        Returns the manifest as unparsed bytes.
        """

    def debugwireargs(one, two, three=None, four=None, five=None):
        """Used to facilitate debugging of arguments passed over the wire."""

    def getbundle(source, **kwargs):
        """Obtain remote repository data as a bundle.

        This command is how the bulk of repository data is transferred from
        the peer to the local repository.

        Returns a generator of bundle data.
        """

    def heads():
        """Determine all known head revisions in the peer.

        Returns an iterable of binary nodes.
        """

    def known(nodes):
        """Determine whether multiple nodes are known.

        Accepts an iterable of nodes whose presence to check for.

        Returns an iterable of booleans indicating whether the corresponding
        node at that index is known to the peer.
        """

    def listkeys(namespace):
        """Obtain all keys in a pushkey namespace.

        Returns an iterable of key names.
        """

    def lookup(key):
        """Resolve a value to a known revision.

        Returns a binary node of the resolved revision on success.
        """

    def pushkey(namespace, key, old, new):
        """Set a value using the ``pushkey`` protocol.

        Arguments correspond to the pushkey namespace and key to operate on and
        the old and new values for that key.

        Returns a string with the peer result. The value inside varies by the
        namespace.
        """

    def stream_out():
        """Obtain streaming clone data.

        Successful result should be a generator of data chunks.
        """

    def unbundle(bundle, heads, url):
        """Transfer repository data to the peer.

        This is how the bulk of data during a push is transferred.

        Returns the integer number of heads added to the peer.
        """

class ipeerlegacycommands(interfaceutil.Interface):
    """Interface for implementing support for legacy wire protocol commands.

    Wire protocol commands transition to legacy status when they are no longer
    used by modern clients. To facilitate identifying which commands are
    legacy, the interfaces are split.
    """

    def between(pairs):
        """Obtain nodes between pairs of nodes.

        ``pairs`` is an iterable of node pairs.

        Returns an iterable of iterables of nodes corresponding to each
        requested pair.
        """

    def branches(nodes):
        """Obtain ancestor changesets of specific nodes back to a branch point.

        For each requested node, the peer finds the first ancestor node that is
        a DAG root or is a merge.

        Returns an iterable of iterables with the resolved values for each
        node.
        """

    def changegroup(nodes, source):
        """Obtain a changegroup with data for descendants of specified nodes."""

    def changegroupsubset(bases, heads, source):
        pass

class ipeercommandexecutor(interfaceutil.Interface):
    """Represents a mechanism to execute remote commands.

    This is the primary interface for requesting that wire protocol commands
    be executed. Instances of this interface are active in a context manager
    and have a well-defined lifetime. When the context manager exits, all
    outstanding requests are waited on.
    """

    def callcommand(name, args):
        """Request that a named command be executed.

        Receives the command name and a dictionary of command arguments.

        Returns a ``concurrent.futures.Future`` that will resolve to the
        result of that command request. That exact value is left up to
        the implementation and possibly varies by command.

        Not all commands can coexist with other commands in an executor
        instance: it depends on the underlying wire protocol transport being
        used and the command itself.

        Implementations MAY call ``sendcommands()`` automatically if the
        requested command can not coexist with other commands in this executor.

        Implementations MAY call ``sendcommands()`` automatically when the
        future's ``result()`` is called. So, consumers using multiple
        commands with an executor MUST ensure that ``result()`` is not called
        until all command requests have been issued.
        """

    def sendcommands():
        """Trigger submission of queued command requests.

        Not all transports submit commands as soon as they are requested to
        run. When called, this method forces queued command requests to be
        issued. It will no-op if all commands have already been sent.

        When called, no more new commands may be issued with this executor.
        """

    def close():
        """Signal that this command request is finished.

        When called, no more new commands may be issued. All outstanding
        commands that have previously been issued are waited on before
        returning. This not only includes waiting for the futures to resolve,
        but also waiting for all response data to arrive. In other words,
        calling this waits for all on-wire state for issued command requests
        to finish.

        When used as a context manager, this method is called when exiting the
        context manager.

        This method may call ``sendcommands()`` if there are buffered commands.
        """

class ipeerrequests(interfaceutil.Interface):
    """Interface for executing commands on a peer."""

    def commandexecutor():
        """A context manager that resolves to an ipeercommandexecutor.

        The object this resolves to can be used to issue command requests
        to the peer.

        Callers should call its ``callcommand`` method to issue command
        requests.

        A new executor should be obtained for each distinct set of commands
        (possibly just a single command) that the consumer wants to execute
        as part of a single operation or round trip. This is because some
        peers are half-duplex and/or don't support persistent connections.
        e.g. in the case of HTTP peers, commands sent to an executor represent
        a single HTTP request. While some peers may support multiple command
        sends over the wire per executor, consumers need to code to the least
        capable peer. So it should be assumed that command executors buffer
        called commands until they are told to send them and that each
        command executor could result in a new connection or wire-level request
        being issued.
        """

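# Illustrative sketch (not from the original source): a consumer batches
# commands through an executor and resolves the futures only after all
# requests have been issued, e.g.:
#
#     with peer.commandexecutor() as e:
#         fheads = e.callcommand(b'heads', {})
#         fbooks = e.callcommand(b'listkeys', {b'namespace': b'bookmarks'})
#     heads = fheads.result()
#     books = fbooks.result()
#
# Deferring result() until both callcommand() calls have been made honors
# the MUST in ipeercommandexecutor.callcommand() for multi-command executors.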
class ipeerbase(ipeerconnection, ipeercapabilities, ipeerrequests):
    """Unified interface for peer repositories.

    All peer instances must conform to this interface.
    """

@interfaceutil.implementer(ipeerbase)
class peer(object):
    """Base class for peer repositories."""

    def capable(self, name):
        caps = self.capabilities()
        if name in caps:
            return True

        name = '%s=' % name
        for cap in caps:
            if cap.startswith(name):
                return cap[len(name):]

        return False

    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; remote repository does not support the %r '
              'capability') % (purpose, name))

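# Illustrative example (the capability strings are assumptions and vary by
# server; not from the original source): given
# capabilities() == {'branchmap', 'known', 'compression=zstd,zlib'}:
#
#     peer.capable('branchmap')    # -> True, boolean capability
#     peer.capable('compression')  # -> 'zstd,zlib', value after '='
#     peer.capable('shelve')       # -> False, not advertised
#
# requirecap() converts the False case into error.CapabilityError, using
# ``purpose`` to build the message.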
class ifilerevisionssequence(interfaceutil.Interface):
    """Contains index data for all revisions of a file.

    Types implementing this behave like lists of tuples. The index
    in the list corresponds to the revision number. The values contain
    index metadata.

    The *null* revision (revision number -1) is always the last item
    in the index.
    """

    def __len__():
        """The total number of revisions."""

    def __getitem__(rev):
        """Returns the object having a specific revision number.

        Returns an 8-tuple with the following fields:

        offset+flags
           Contains the offset and flags for the revision. 64-bit unsigned
           integer where first 6 bytes are the offset and the next 2 bytes
           are flags. The offset can be 0 if it is not used by the store.
        compressed size
           Size of the revision data in the store. It can be 0 if it isn't
           needed by the store.
        uncompressed size
           Fulltext size. It can be 0 if it isn't needed by the store.
        base revision
           Revision number of revision the delta for storage is encoded
           against. -1 indicates not encoded against a base revision.
        link revision
           Revision number of changelog revision this entry is related to.
        p1 revision
           Revision number of 1st parent. -1 if no 1st parent.
        p2 revision
           Revision number of 2nd parent. -1 if no 2nd parent.
        node
           Binary node value for this revision number.

        Negative values should index off the end of the sequence. ``-1``
        should return the null revision. ``-2`` should return the most
        recent revision.
        """

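    # Illustrative example (all values are made up, not from the original
    # source): an entry for a root revision stored as a full text might
    # unpack as
    #
    #     (offset_flags, comp, uncomp, base, link, p1rev, p2rev, node)
    #  ~= (0, 120, 250, -1, 0, -1, -1, b'\x8b\xfa...')
    #
    # with -1 for base (not encoded against a base revision) and -1 for
    # both parents (a DAG root).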
362 def __contains__(rev):
366 def __contains__(rev):
363 """Whether a revision number exists."""
367 """Whether a revision number exists."""
364
368
365 def insert(self, i, entry):
369 def insert(self, i, entry):
366 """Add an item to the index at specific revision."""
370 """Add an item to the index at specific revision."""
367
371
368 class ifileindex(interfaceutil.Interface):
372 class ifileindex(interfaceutil.Interface):
369 """Storage interface for index data of a single file.
373 """Storage interface for index data of a single file.
370
374
371 File storage data is divided into index metadata and data storage.
375 File storage data is divided into index metadata and data storage.
372 This interface defines the index portion of the interface.
376 This interface defines the index portion of the interface.
373
377
374 The index logically consists of:
378 The index logically consists of:
375
379
376 * A mapping between revision numbers and nodes.
380 * A mapping between revision numbers and nodes.
377 * DAG data (storing and querying the relationship between nodes).
381 * DAG data (storing and querying the relationship between nodes).
378 * Metadata to facilitate storage.
382 * Metadata to facilitate storage.
379 """
383 """
380 index = interfaceutil.Attribute(
384 index = interfaceutil.Attribute(
381 """An ``ifilerevisionssequence`` instance.""")
385 """An ``ifilerevisionssequence`` instance.""")
382
386
383 def __len__():
387 def __len__():
384 """Obtain the number of revisions stored for this file."""
388 """Obtain the number of revisions stored for this file."""
385
389
386 def __iter__():
390 def __iter__():
387 """Iterate over revision numbers for this file."""
391 """Iterate over revision numbers for this file."""
388
392
389 def revs(start=0, stop=None):
393 def revs(start=0, stop=None):
390 """Iterate over revision numbers for this file, with control."""
394 """Iterate over revision numbers for this file, with control."""
391
395
392 def parents(node):
396 def parents(node):
393 """Returns a 2-tuple of parent nodes for a revision.
397 """Returns a 2-tuple of parent nodes for a revision.
394
398
395 Values will be ``nullid`` if the parent is empty.
399 Values will be ``nullid`` if the parent is empty.
396 """
400 """
397
401
398 def parentrevs(rev):
402 def parentrevs(rev):
399 """Like parents() but operates on revision numbers."""
403 """Like parents() but operates on revision numbers."""
400
404
401 def rev(node):
405 def rev(node):
402 """Obtain the revision number given a node.
406 """Obtain the revision number given a node.
403
407
404 Raises ``error.LookupError`` if the node is not known.
408 Raises ``error.LookupError`` if the node is not known.
405 """
409 """
406
410
407 def node(rev):
411 def node(rev):
408 """Obtain the node value given a revision number.
412 """Obtain the node value given a revision number.
409
413
410 Raises ``IndexError`` if the node is not known.
414 Raises ``IndexError`` if the node is not known.
411 """
415 """
412
416
413 def lookup(node):
417 def lookup(node):
414 """Attempt to resolve a value to a node.
418 """Attempt to resolve a value to a node.
415
419
416 Value can be a binary node, hex node, revision number, or a string
420 Value can be a binary node, hex node, revision number, or a string
417 that can be converted to an integer.
421 that can be converted to an integer.
418
422
419 Raises ``error.LookupError`` if a node could not be resolved.
423 Raises ``error.LookupError`` if a node could not be resolved.
420 """
424 """
421
425
422 def linkrev(rev):
426 def linkrev(rev):
423 """Obtain the changeset revision number a revision is linked to."""
427 """Obtain the changeset revision number a revision is linked to."""
424
428
425 def flags(rev):
429 def flags(rev):
426 """Obtain flags used to affect storage of a revision."""
430 """Obtain flags used to affect storage of a revision."""
427
431
428 def iscensored(rev):
432 def iscensored(rev):
429 """Return whether a revision's content has been censored."""
433 """Return whether a revision's content has been censored."""
430
434
431 def commonancestorsheads(node1, node2):
435 def commonancestorsheads(node1, node2):
432 """Obtain an iterable of nodes containing heads of common ancestors.
436 """Obtain an iterable of nodes containing heads of common ancestors.
433
437
434 See ``ancestor.commonancestorsheads()``.
438 See ``ancestor.commonancestorsheads()``.
435 """
439 """
436
440
437 def descendants(revs):
441 def descendants(revs):
438 """Obtain descendant revision numbers for a set of revision numbers.
442 """Obtain descendant revision numbers for a set of revision numbers.
439
443
440 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
444 If ``nullrev`` is in the set, this is equivalent to ``revs()``.
441 """
445 """
442
446
443 def headrevs():
447 def headrevs():
444 """Obtain a list of revision numbers that are DAG heads.
448 """Obtain a list of revision numbers that are DAG heads.
445
449
446 The list is sorted oldest to newest.
450 The list is sorted oldest to newest.
447
451
448 TODO determine if sorting is required.
452 TODO determine if sorting is required.
449 """
453 """
450
454
451 def heads(start=None, stop=None):
455 def heads(start=None, stop=None):
452 """Obtain a list of nodes that are DAG heads, with control.
456 """Obtain a list of nodes that are DAG heads, with control.
453
457
454 The set of revisions examined can be limited by specifying
458 The set of revisions examined can be limited by specifying
455 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
459 ``start`` and ``stop``. ``start`` is a node. ``stop`` is an
456 iterable of nodes. DAG traversal starts at earlier revision
460 iterable of nodes. DAG traversal starts at earlier revision
457 ``start`` and iterates forward until any node in ``stop`` is
461 ``start`` and iterates forward until any node in ``stop`` is
458 encountered.
462 encountered.
459 """
463 """

    def children(node):
        """Obtain nodes that are children of a node.

        Returns a list of nodes.
        """

    def deltaparent(rev):
        """Return the revision that is a suitable parent to delta against."""

    def candelta(baserev, rev):
        """Whether a delta can be generated between two revisions."""

class ifiledata(interfaceutil.Interface):
    """Storage interface for data storage of a specific file.

    This complements ``ifileindex`` and provides an interface for accessing
    data for a tracked file.
    """
    def rawsize(rev):
        """The size of the fulltext data for a revision as stored."""

    def size(rev):
        """Obtain the fulltext size of file data.

        Any metadata is excluded from size measurements. Use ``rawsize()`` if
        metadata size is important.
        """

    def checkhash(fulltext, node, p1=None, p2=None, rev=None):
        """Validate the stored hash of a given fulltext and node.

        Raises ``error.RevlogError`` if hash validation fails.
        """

    def revision(node, raw=False):
        """Obtain fulltext data for a node.

        By default, any storage transformations are applied before the data
        is returned. If ``raw`` is True, non-raw storage transformations
        are not applied.

        The fulltext data may contain a header containing metadata. Most
        consumers should use ``read()`` to obtain the actual file data.
        """

    def read(node):
        """Resolve file fulltext data.

        This is similar to ``revision()`` except any metadata in the data
        headers is stripped.
        """

    def renamed(node):
        """Obtain copy metadata for a node.

        Returns ``False`` if no copy metadata is stored or a 2-tuple of
        (path, node) from which this revision was copied.
        """

    def cmp(node, fulltext):
        """Compare fulltext to another revision.

        Returns True if the fulltext is different from what is stored.

        This takes copy metadata into account.

        TODO better document the copy metadata and censoring logic.
        """

    def revdiff(rev1, rev2):
        """Obtain a delta between two revision numbers.

        Operates on raw data in the store (``revision(node, raw=True)``).

        The returned data is the result of ``bdiff.bdiff`` on the raw
        revision data.
        """

class ifilemutation(interfaceutil.Interface):
    """Storage interface for mutation events of a tracked file."""

    def add(filedata, meta, transaction, linkrev, p1, p2):
        """Add a new revision to the store.

        Takes file data, dictionary of metadata, a transaction, linkrev,
        and parent nodes.

        Returns the node that was added.

        May no-op if a revision matching the supplied data is already stored.
        """

    def addrevision(revisiondata, transaction, linkrev, p1, p2, node=None,
                    flags=0, cachedelta=None):
        """Add a new revision to the store.

        This is similar to ``add()`` except it operates at a lower level.

        The data passed in already contains a metadata header, if any.

        ``node`` and ``flags`` can be used to define the expected node and
        the flags to use with storage.

        ``add()`` is usually called when adding files from e.g. the working
        directory. ``addrevision()`` is often called by ``add()`` and for
        scenarios where revision data has already been computed, such as when
        applying raw data from a peer repo.
        """

    def addgroup(deltas, linkmapper, transaction, addrevisioncb=None):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        ``addrevisioncb`` should be called for each node as it is committed.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

    def getstrippoint(minlink):
        """Find the minimum revision that must be stripped to strip a linkrev.

        Returns a 2-tuple containing the minimum revision number and a set
        of all revision numbers that would be broken by this strip.

        TODO this is highly revlog centric and should be abstracted into
        a higher-level deletion API. ``repair.strip()`` relies on this.
        """

    def strip(minlink, transaction):
        """Remove storage of items starting at a linkrev.

        This uses ``getstrippoint()`` to determine the first node to remove.
        Then it effectively truncates storage for all revisions after that.

        TODO this is highly revlog centric and should be abstracted into a
        higher-level deletion API.
        """

class ifilestorage(ifileindex, ifiledata, ifilemutation):
    """Complete storage interface for a single tracked file."""

    version = interfaceutil.Attribute(
        """Version number of storage.

        TODO this feels revlog centric and could likely be removed.
        """)

    storedeltachains = interfaceutil.Attribute(
        """Whether the store stores deltas.

        TODO deltachains are revlog centric. This can probably be removed
        once there are better abstractions for obtaining/writing
        data.
        """)

    _generaldelta = interfaceutil.Attribute(
        """Whether deltas can be against any parent revision.

        TODO this is used by changegroup code and it could probably be
        folded into another API.
        """)

    def files():
        """Obtain paths that are backing storage for this file.

        TODO this is used heavily by verify code and there should probably
        be a better API for that.
        """

    def checksize():
        """Obtain the expected sizes of backing files.

        TODO this is used by verify and it should not be part of the interface.
        """

class idirs(interfaceutil.Interface):
    """Interface representing a collection of directories from paths.

    This interface is essentially a derived data structure representing
    directories from a collection of paths.
    """

    def addpath(path):
        """Add a path to the collection.

        All directories in the path will be added to the collection.
        """

    def delpath(path):
        """Remove a path from the collection.

        If the removal was the last path in a particular directory, the
        directory is removed from the collection.
        """

    def __iter__():
        """Iterate over the directories in this collection of paths."""

    def __contains__(path):
        """Whether a specific directory is in this collection."""

class imanifestdict(interfaceutil.Interface):
    """Interface representing a manifest data structure.

    A manifest is effectively a dict mapping paths to entries. Each entry
    consists of a binary node and extra flags affecting that entry.
    """

    def __getitem__(path):
        """Returns the binary node value for a path in the manifest.

        Raises ``KeyError`` if the path does not exist in the manifest.

        Equivalent to ``self.find(path)[0]``.
        """

    def find(path):
        """Returns the entry for a path in the manifest.

        Returns a 2-tuple of (node, flags).

        Raises ``KeyError`` if the path does not exist in the manifest.
        """

    def __len__():
        """Return the number of entries in the manifest."""

    def __nonzero__():
        """Returns True if the manifest has entries, False otherwise."""

    __bool__ = __nonzero__

    def __setitem__(path, node):
        """Define the node value for a path in the manifest.

        If the path is already in the manifest, its flags will be copied to
        the new entry.
        """

    def __contains__(path):
        """Whether a path exists in the manifest."""

    def __delitem__(path):
        """Remove a path from the manifest.

        Raises ``KeyError`` if the path is not in the manifest.
        """

    def __iter__():
        """Iterate over paths in the manifest."""

    def iterkeys():
        """Iterate over paths in the manifest."""

    def keys():
        """Obtain a list of paths in the manifest."""

    def filesnotin(other, match=None):
        """Obtain the set of paths in this manifest but not in another.

        ``match`` is an optional matcher function to be applied to both
        manifests.

        Returns a set of paths.
        """

    def dirs():
        """Returns an object implementing the ``idirs`` interface."""

    def hasdir(dir):
        """Returns a bool indicating if a directory is in this manifest."""

    def matches(match):
        """Generate a new manifest filtered through a matcher.

        Returns an object conforming to the ``imanifestdict`` interface.
        """

    def walk(match):
        """Generator of paths in manifest satisfying a matcher.

        This is equivalent to ``self.matches(match).iterkeys()`` except a new
        manifest object is not created.

        If the matcher has explicit files listed and they don't exist in
        the manifest, ``match.bad()`` is called for each missing file.
        """

    def diff(other, match=None, clean=False):
        """Find differences between this manifest and another.

        This manifest is compared to ``other``.

        If ``match`` is provided, the two manifests are filtered against this
        matcher and only entries satisfying the matcher are compared.

        If ``clean`` is True, unchanged files are included in the returned
        object.

        Returns a dict with paths as keys and values of 2-tuples of 2-tuples of
        the form ``((node1, flag1), (node2, flag2))`` where ``(node1, flag1)``
        represents the node and flags for this manifest and ``(node2, flag2)``
        are the same for the other manifest.
        """

    def setflag(path, flag):
        """Set the flag value for a given path.

        Raises ``KeyError`` if the path is not already in the manifest.
        """

    def get(path, default=None):
        """Obtain the node value for a path or a default value if missing."""

    def flags(path, default=''):
        """Return the flags value for a path or a default value if missing."""

    def copy():
        """Return a copy of this manifest."""

    def items():
        """Returns an iterable of (path, node) for items in this manifest."""

    def iteritems():
        """Identical to items()."""

    def iterentries():
        """Returns an iterable of (path, node, flags) for this manifest.

        Similar to ``iteritems()`` except items are a 3-tuple and include
        flags.
        """

    def text():
        """Obtain the raw data representation for this manifest.

        Result is used to create a manifest revision.
        """

    def fastdelta(base, changes):
        """Obtain a delta between this manifest and another given changes.

        ``base`` is the raw data representation for another manifest.

        ``changes`` is an iterable of ``(path, to_delete)``.

        Returns a 2-tuple containing ``bytearray(self.text())`` and the
        delta between ``base`` and this manifest.
        """

class imanifestrevisionbase(interfaceutil.Interface):
    """Base interface representing a single revision of a manifest.

    Should not be used as a primary interface: should always be inherited
    as part of a larger interface.
    """

    def new():
        """Obtain a new manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def copy():
        """Obtain a copy of this manifest instance.

        Returns an object conforming to the ``imanifestrevisionwritable``
        interface. The instance will be associated with the same
        ``imanifestlog`` collection as this instance.
        """

    def read():
        """Obtain the parsed manifest data structure.

        The returned object conforms to the ``imanifestdict`` interface.
        """

class imanifestrevisionstored(imanifestrevisionbase):
    """Interface representing a manifest revision committed to storage."""

    def node():
        """The binary node for this manifest."""

    parents = interfaceutil.Attribute(
        """List of binary nodes that are parents for this manifest revision."""
    )

    def readdelta(shallow=False):
        """Obtain the manifest data structure representing changes from parent.

        This manifest is compared to its 1st parent. A new manifest representing
        those differences is constructed.

        The returned object conforms to the ``imanifestdict`` interface.
        """

    def readfast(shallow=False):
        """Calls either ``read()`` or ``readdelta()``.

        The faster of the two options is called.
        """

    def find(key):
        """Calls ``self.read().find(key)``.

        Returns a 2-tuple of ``(node, flags)`` or raises ``KeyError``.
        """

class imanifestrevisionwritable(imanifestrevisionbase):
    """Interface representing a manifest revision that can be committed."""

    def write(transaction, linkrev, p1node, p2node, added, removed):
        """Add this revision to storage.

        Takes a transaction object, the changeset revision number it will
        be associated with, its parent nodes, and lists of added and
        removed paths.

        Returns the binary node of the created revision.
        """

class imanifestlog(interfaceutil.Interface):
    """Interface representing a collection of manifest snapshots."""

    def __getitem__(node):
        """Obtain a manifest instance for a given binary node.

        Equivalent to calling ``self.get('', node)``.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def get(dir, node, verify=True):
        """Retrieve the manifest instance for a given directory and binary node.

        ``node`` always refers to the node of the root manifest (which will be
        the only manifest if flat manifests are being used).

        If ``dir`` is the empty string, the root manifest is returned. Otherwise
        the manifest for the specified directory will be returned (requires
        tree manifests).

        If ``verify`` is True, ``LookupError`` is raised if the node is not
        known.

        The returned object conforms to the ``imanifestrevisionstored``
        interface.
        """

    def clearcaches():
        """Clear caches associated with this collection."""

    def rev(node):
        """Obtain the revision number for a binary node.

        Raises ``error.LookupError`` if the node is not known.
        """

    def addgroup(deltas, linkmapper, transaction):
        """Process a series of deltas for storage.

        ``deltas`` is an iterable of 7-tuples of
        (node, p1, p2, linknode, deltabase, delta, flags) defining revisions
        to add.

        The ``delta`` field contains ``mpatch`` data to apply to a base
        revision, identified by ``deltabase``. The base node can be
        ``nullid``, in which case the header from the delta can be ignored
        and the delta used as the fulltext.

        Returns a list of nodes that were processed. A node will be in the list
        even if it existed in the store previously.
        """

class completelocalrepository(interfaceutil.Interface):
    """Monolithic interface for local repositories.

    This currently captures the reality of things - not how things should be.
    """

    supportedformats = interfaceutil.Attribute(
        """Set of requirements that apply to stream clone.

        This is actually a class attribute and is shared among all instances.
        """)

    openerreqs = interfaceutil.Attribute(
        """Set of requirements that are passed to the opener.

        This is actually a class attribute and is shared among all instances.
        """)

    supported = interfaceutil.Attribute(
        """Set of requirements that this repo is capable of opening.""")

    requirements = interfaceutil.Attribute(
        """Set of requirements this repo uses.""")

    filtername = interfaceutil.Attribute(
        """Name of the repoview that is active on this repo.""")

    wvfs = interfaceutil.Attribute(
        """VFS used to access the working directory.""")

    vfs = interfaceutil.Attribute(
        """VFS rooted at the .hg directory.

        Used to access repository data not in the store.
        """)

    svfs = interfaceutil.Attribute(
        """VFS rooted at the store.

        Used to access repository data in the store. Typically .hg/store.
        But can point elsewhere if the store is shared.
        """)

    root = interfaceutil.Attribute(
        """Path to the root of the working directory.""")

    path = interfaceutil.Attribute(
        """Path to the .hg directory.""")

    origroot = interfaceutil.Attribute(
        """The filesystem path that was used to construct the repo.""")

    auditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This checks if a path refers to a nested repository.

        Operates on the filesystem.
        """)

    nofsauditor = interfaceutil.Attribute(
        """A pathauditor for the working directory.

        This is like ``auditor`` except it doesn't do filesystem checks.
        """)

    baseui = interfaceutil.Attribute(
        """Original ui instance passed into constructor.""")

    ui = interfaceutil.Attribute(
        """Main ui instance for this repository.""")

    sharedpath = interfaceutil.Attribute(
        """Path to the .hg directory of the repo this repo was shared from.""")

    store = interfaceutil.Attribute(
        """A store instance.""")

    spath = interfaceutil.Attribute(
        """Path to the store.""")

    sjoin = interfaceutil.Attribute(
        """Alias to self.store.join.""")

    cachevfs = interfaceutil.Attribute(
        """A VFS used to access the cache directory.

        Typically .hg/cache.
        """)

    filteredrevcache = interfaceutil.Attribute(
        """Holds sets of revisions to be filtered.""")

    names = interfaceutil.Attribute(
        """A ``namespaces`` instance.""")

    def close():
        """Close the handle on this repository."""

    def peer():
        """Obtain an object conforming to the ``peer`` interface."""

    def unfiltered():
        """Obtain an unfiltered/raw view of this repo."""

    def filtered(name, visibilityexceptions=None):
        """Obtain a named view of this repository."""

    obsstore = interfaceutil.Attribute(
        """A store of obsolescence data.""")

    changelog = interfaceutil.Attribute(
        """A handle on the changelog revlog.""")

    manifestlog = interfaceutil.Attribute(
        """An instance conforming to the ``imanifestlog`` interface.

        Provides access to manifests for the repository.
        """)

    dirstate = interfaceutil.Attribute(
        """Working directory state.""")

    narrowpats = interfaceutil.Attribute(
        """Matcher patterns for this repository's narrowspec.""")

    def narrowmatch():
        """Obtain a matcher for the narrowspec."""

    def setnarrowpats(newincludes, newexcludes):
        """Define the narrowspec for this repository."""

    def __getitem__(changeid):
        """Try to resolve a changectx."""

    def __contains__(changeid):
        """Whether a changeset exists."""

    def __nonzero__():
        """Always returns True."""
        return True

    __bool__ = __nonzero__

    def __len__():
        """Returns the number of changesets in the repo."""

    def __iter__():
        """Iterate over revisions in the changelog."""

    def revs(expr, *args):
        """Evaluate a revset.

        Emits revisions.
        """

    def set(expr, *args):
        """Evaluate a revset.

        Emits changectx instances.
        """

    def anyrevs(specs, user=False, localalias=None):
        """Find revisions matching one of the given revsets."""

    def url():
        """Returns a string representing the location of this repo."""

    def hook(name, throw=False, **args):
        """Call a hook."""

    def tags():
        """Return a mapping of tag to node."""

    def tagtype(tagname):
        """Return the type of a given tag."""

    def tagslist():
        """Return a list of tags ordered by revision."""

    def nodetags(node):
        """Return the tags associated with a node."""

    def nodebookmarks(node):
        """Return the list of bookmarks pointing to the specified node."""

    def branchmap():
        """Return a mapping of branch to heads in that branch."""

    def revbranchcache():
        pass

    def branchtip(branchtip, ignoremissing=False):
        """Return the tip node for a given branch."""

    def lookup(key):
        """Resolve the node for a revision."""

    def lookupbranch(key):
        """Look up the branch name of the given revision or branch name."""

    def known(nodes):
        """Determine whether a series of nodes is known.

        Returns a list of bools.
        """

    def local():
        """Whether the repository is local."""
        return True

    def publishing():
        """Whether the repository is a publishing repository."""

    def cancopy():
        pass

    def shared():
        """The type of shared repository or None."""

    def wjoin(f, *insidef):
        """Calls self.vfs.reljoin(self.root, f, *insidef)"""

    def file(f):
        """Obtain a filelog for a tracked path.

        The returned type conforms to the ``ifilestorage`` interface.
        """

    def setparents(p1, p2):
        """Set the parent nodes of the working directory."""

    def filectx(path, changeid=None, fileid=None):
        """Obtain a filectx for the given file revision."""

    def getcwd():
        """Obtain the current working directory from the dirstate."""

    def pathto(f, cwd=None):
        """Obtain the relative path to a file."""

    def adddatafilter(name, fltr):
        pass

    def wread(filename):
        """Read a file from wvfs, using data filters."""

    def wwrite(filename, data, flags, backgroundclose=False, **kwargs):
        """Write data to a file in the wvfs, using data filters."""

    def wwritedata(filename, data):
        """Resolve data for writing to the wvfs, using data filters."""

    def currenttransaction():
        """Obtain the current transaction instance or None."""

    def transaction(desc, report=None):
        """Open a new transaction to write to the repository."""

    def undofiles():
        """Returns a list of (vfs, path) for files to undo transactions."""

    def recover():
        """Roll back an interrupted transaction."""

    def rollback(dryrun=False, force=False):
        """Undo the last transaction.

        DANGEROUS.
        """

    def updatecaches(tr=None, full=False):
        """Warm repo caches."""

    def invalidatecaches():
        """Invalidate cached data due to the repository mutating."""

    def invalidatevolatilesets():
        pass

    def invalidatedirstate():
        """Invalidate the dirstate."""

    def invalidate(clearfilecache=False):
        pass

    def invalidateall():
        pass

    def lock(wait=True):
        """Lock the repository store and return a lock instance."""

    def wlock(wait=True):
        """Lock the non-store parts of the repository."""

    def currentwlock():
        """Return the wlock if it's held or None."""

    def checkcommitpatterns(wctx, vdirs, match, status, fail):
        pass

    def commit(text='', user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to the repository."""

    def commitctx(ctx, error=False):
        """Commit a commitctx instance to the repository."""

    def destroying():
        """Inform the repository that nodes are about to be destroyed."""

    def destroyed():
        """Inform the repository that nodes have been destroyed."""

    def status(node1='.', node2=None, match=None, ignored=False,
               clean=False, unknown=False, listsubrepos=False):
        """Convenience method to call repo[x].status()."""

    def addpostdsstatus(ps):
        pass

    def postdsstatus():
        pass

    def clearpostdsstatus():
        pass

    def heads(start=None):
        """Obtain list of nodes that are DAG heads."""

    def branchheads(branch=None, start=None, closed=False):
        pass

    def branches(nodes):
        pass

    def between(pairs):
        pass

    def checkpush(pushop):
        pass

    prepushoutgoinghooks = interfaceutil.Attribute(
        """util.hooks instance.""")

    def pushkey(namespace, key, old, new):
        pass

    def listkeys(namespace):
        pass

    def debugwireargs(one, two, three=None, four=None, five=None):
        pass

    def savecommitmessage(text):
        pass