hg: pass command intents to repo/peer creation (API)...
Gregory Szorc
r37735:0664be4f default
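
The change below threads an optional ``intents`` keyword through the
``instance()`` entry points used for repo/peer creation and forwards it
unchanged. A minimal sketch of the idea, assuming a hypothetical caller
(``open_for_command`` and its readonly remark are illustrative, not part
of this commit):

    # Hypothetical caller: intents declared by the command being
    # dispatched flow through to whichever repo/peer class ends up
    # being instantiated, which may later use them (e.g. to pick a
    # cheaper backend for read-only operations).
    from mercurial import hg

    def open_for_command(ui, url, intents=None):
        # intents is an opaque, optional collection; None means unknown
        return hg._peerlookup(url).instance(ui, url, False,
                                            intents=intents)
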
@@ -1,134 +1,134 @@
# Copyright 2009, Alexander Solovyov <piranha@piranha.org.ua>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""extend schemes with shortcuts to repository swarms

This extension allows you to specify shortcuts for parent URLs with a
lot of repositories to act like a scheme, for example::

  [schemes]
  py = http://code.python.org/hg/

After that you can use it like::

  hg clone py://trunk/

Additionally there is support for some more complex schemes, for
example used by Google Code::

  [schemes]
  gcode = http://{1}.googlecode.com/hg/

The syntax is taken from Mercurial templates, and you can use an
unlimited number of variables, starting with ``{1}`` and continuing
with ``{2}``, ``{3}`` and so on. These variables receive parts of the
URL supplied, split by ``/``. Anything not specified as ``{part}`` is
simply appended to the URL.

For convenience, the extension adds these schemes by default::

  [schemes]
  py = http://hg.python.org/
  bb = https://bitbucket.org/
  bb+ssh = ssh://hg@bitbucket.org/
  gcode = https://{1}.googlecode.com/hg/
  kiln = https://{1}.kilnhg.com/Repo/

You can override a predefined scheme by defining a new scheme with the
same name.
"""
from __future__ import absolute_import

import os
import re

from mercurial.i18n import _
from mercurial import (
    error,
    extensions,
    hg,
    pycompat,
    registrar,
    templater,
    util,
)

cmdtable = {}
command = registrar.command(cmdtable)
# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = 'ships-with-hg-core'

_partre = re.compile(br'\{(\d+)\}')

class ShortRepository(object):
    def __init__(self, url, scheme, templater):
        self.scheme = scheme
        self.templater = templater
        self.url = url
        try:
            self.parts = max(map(int, _partre.findall(self.url)))
        except ValueError:
            self.parts = 0

    def __repr__(self):
        return '<ShortRepository: %s>' % self.scheme

-    def instance(self, ui, url, create):
+    def instance(self, ui, url, create, intents=None):
        url = self.resolve(url)
-        return hg._peerlookup(url).instance(ui, url, create)
+        return hg._peerlookup(url).instance(ui, url, create, intents=intents)

    def resolve(self, url):
        # Should this use the util.url class, or is manual parsing better?
        try:
            url = url.split('://', 1)[1]
        except IndexError:
            raise error.Abort(_("no '://' in scheme url '%s'") % url)
        parts = url.split('/', self.parts)
        if len(parts) > self.parts:
            tail = parts[-1]
            parts = parts[:-1]
        else:
            tail = ''
        context = dict(('%d' % (i + 1), v) for i, v in enumerate(parts))
        return ''.join(self.templater.process(self.url, context)) + tail

def hasdriveletter(orig, path):
    if path:
        for scheme in schemes:
            if path.startswith(scheme + ':'):
                return False
    return orig(path)

schemes = {
    'py': 'http://hg.python.org/',
    'bb': 'https://bitbucket.org/',
    'bb+ssh': 'ssh://hg@bitbucket.org/',
    'gcode': 'https://{1}.googlecode.com/hg/',
    'kiln': 'https://{1}.kilnhg.com/Repo/'
}

def extsetup(ui):
    schemes.update(dict(ui.configitems('schemes')))
    t = templater.engine(lambda x: x)
    for scheme, url in schemes.items():
        if (pycompat.iswindows and len(scheme) == 1 and scheme.isalpha()
            and os.path.exists('%s:\\' % scheme)):
            raise error.Abort(_('custom scheme %s:// conflicts with drive '
                                'letter %s:\\\n') % (scheme, scheme.upper()))
        hg.schemes[scheme] = ShortRepository(url, scheme, t)

    extensions.wrapfunction(util, 'hasdriveletter', hasdriveletter)

@command('debugexpandscheme', norepo=True)
def expandscheme(ui, url, **opts):
    """given a repo path, provide the scheme-expanded path
    """
    repo = hg._peerlookup(url)
    if isinstance(repo, ShortRepository):
        url = repo.resolve(url)
    ui.write(url + '\n')
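
The expansion rule implemented by ShortRepository.resolve() above can be
seen in isolation with a small standalone sketch (plain Python, no
Mercurial imports; the template string is one of the extension's
defaults):

    import re

    def expand(template, url):
        # highest placeholder number, e.g. 'https://{1}.kilnhg.com/Repo/' -> 1
        nparts = max([int(m) for m in re.findall(r'\{(\d+)\}', template)]
                     or [0])
        rest = url.split('://', 1)[1]
        parts = rest.split('/', nparts)
        # anything beyond the numbered parts is appended verbatim
        tail = parts.pop() if len(parts) > nparts else ''
        for i, value in enumerate(parts, 1):
            template = template.replace('{%d}' % i, value)
        return template + tail

    print(expand('https://{1}.kilnhg.com/Repo/', 'kiln://myteam/myrepo'))
    # -> https://myteam.kilnhg.com/Repo/myrepo
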
@@ -1,623 +1,623 @@
# bundlerepo.py - repository class for viewing uncompressed bundles
#
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for viewing uncompressed bundles.

This provides a read-only repository interface to bundles as if they
were part of the actual repository.
"""

from __future__ import absolute_import

import os
import shutil
import tempfile

from .i18n import _
from .node import nullid

from . import (
    bundle2,
    changegroup,
    changelog,
    cmdutil,
    discovery,
    error,
    exchange,
    filelog,
    localrepo,
    manifest,
    mdiff,
    node as nodemod,
    pathutil,
    phases,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class bundlerevlog(revlog.revlog):
    def __init__(self, opener, indexfile, cgunpacker, linkmapper):
        # How it works:
        # To retrieve a revision, we need to know the offset of the revision in
        # the bundle (an unbundle object). We store this offset in the index
        # (start). The base of the delta is stored in the base field.
        #
        # To differentiate a rev in the bundle from a rev in the revlog, we
        # check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.bundle = cgunpacker
        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for deltadata in cgunpacker.deltaiter():
            node, p1, p2, cs, deltabase, delta, flags = deltadata

            size = len(delta)
            start = cgunpacker.tell() - size

            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                self.bundlerevs.add(self.nodemap[node])
                continue

            for p in (p1, p2):
                if p not in self.nodemap:
                    raise error.LookupError(p, self.indexfile,
                                            _("unknown parent"))

            if deltabase not in self.nodemap:
                raise LookupError(deltabase, self.indexfile,
                                  _('unknown delta base'))

            baserev = self.rev(deltabase)
            # start, size, full unc. size, base (unused), link, p1, p2, node
            e = (revlog.offset_type(start, flags), size, -1, baserev, link,
                 self.rev(p1), self.rev(p2), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev, df=None):
        # Warning: in case of bundle, the diff is against what we stored as
        # delta base, not against rev - 1
        # XXX: could use some caching
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        self.bundle.seek(self.start(rev))
        return self.bundle.read(self.length(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            # hot path for bundle
            revb = self.index[rev2][3]
            if revb == rev1:
                return self._chunk(rev2)
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return revlog.revlog.revdiff(self, rev1, rev2)

        return mdiff.textdiff(self.revision(rev1, raw=True),
                              self.revision(rev2, raw=True))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        rawtext = None
        chain = []
        iterrev = rev
        # reconstruct the revision if it is from a changegroup
        while iterrev > self.repotiprev:
            if self._cache and self._cache[1] == iterrev:
                rawtext = self._cache[2]
                break
            chain.append(iterrev)
            iterrev = self.index[iterrev][3]
        if rawtext is None:
            rawtext = self.baserevision(iterrev)

        while chain:
            delta = self._chunk(chain.pop())
            rawtext = mdiff.patches(rawtext, [delta])

        text, validatehash = self._processflags(rawtext, self.flags(rev),
                                                'read', raw=raw)
        if validatehash:
            self.checkhash(text, node, rev=rev)
        self._cache = (node, rev, rawtext)
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use bundlerevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev, raw=True)

    def addrevision(self, *args, **kwargs):
        raise NotImplementedError

    def addgroup(self, *args, **kwargs):
        raise NotImplementedError

    def strip(self, *args, **kwargs):
        raise NotImplementedError

    def checksize(self):
        raise NotImplementedError

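The delta-chain walk in bundlerevlog.revision() above reduces to a small
pattern: follow index[rev][3] (the stored delta base) back until reaching
a revision that lives in the on-disk revlog, then replay the deltas
forward. A schematic sketch with the collaborators passed in as plain
callables (all names here are illustrative, not Mercurial API):

    def reconstruct(read_base, read_delta, base_of, repotiprev, rev, patch):
        chain = []
        while rev > repotiprev:       # rev exists only in the bundle
            chain.append(rev)
            rev = base_of(rev)        # the stored delta base
        text = read_base(rev)         # full text from the on-disk revlog
        while chain:                  # replay deltas, oldest first
            text = patch(text, read_delta(chain.pop()))
        return text
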
class bundlechangelog(bundlerevlog, changelog.changelog):
    def __init__(self, opener, cgunpacker):
        changelog.changelog.__init__(self, opener)
        linkmapper = lambda x: x
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.

        # This bypasses filtering on changelog.node() and rev() because we need
        # revision text of the bundle base even if it is hidden.
        oldfilter = self.filteredrevs
        try:
            self.filteredrevs = ()
            return changelog.changelog.revision(self, nodeorrev, raw=True)
        finally:
            self.filteredrevs = oldfilter

class bundlemanifest(bundlerevlog, manifest.manifestrevlog):
    def __init__(self, opener, cgunpacker, linkmapper, dirlogstarts=None,
                 dir=''):
        manifest.manifestrevlog.__init__(self, opener, dir=dir)
        bundlerevlog.__init__(self, opener, self.indexfile, cgunpacker,
                              linkmapper)
        if dirlogstarts is None:
            dirlogstarts = {}
            if self.bundle.version == "03":
                dirlogstarts = _getfilestarts(self.bundle)
        self._dirlogstarts = dirlogstarts
        self._linkmapper = linkmapper

    def baserevision(self, nodeorrev):
        node = nodeorrev
        if isinstance(node, int):
            node = self.node(node)

        if node in self.fulltextcache:
            result = '%s' % self.fulltextcache[node]
        else:
            result = manifest.manifestrevlog.revision(self, nodeorrev, raw=True)
        return result

    def dirlog(self, d):
        if d in self._dirlogstarts:
            self.bundle.seek(self._dirlogstarts[d])
            return bundlemanifest(
                self.opener, self.bundle, self._linkmapper,
                self._dirlogstarts, dir=d)
        return super(bundlemanifest, self).dirlog(d)

class bundlefilelog(filelog.filelog):
    def __init__(self, opener, path, cgunpacker, linkmapper):
        filelog.filelog.__init__(self, opener, path)
        self._revlog = bundlerevlog(opener, self.indexfile,
                                    cgunpacker, linkmapper)

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev, raw=True)

class bundlepeer(localrepo.localpeer):
    def canpush(self):
        return False

class bundlephasecache(phases.phasecache):
    def __init__(self, *args, **kwargs):
        super(bundlephasecache, self).__init__(*args, **kwargs)
        if util.safehasattr(self, 'opener'):
            self.opener = vfsmod.readonlyvfs(self.opener)

    def write(self):
        raise NotImplementedError

    def _write(self, fp):
        raise NotImplementedError

    def _updateroots(self, phase, newroots, tr):
        self.phaseroots[phase] = newroots
        self.invalidate()
        self.dirty = True

def _getfilestarts(cgunpacker):
    filespos = {}
    for chunkdata in iter(cgunpacker.filelogheader, {}):
        fname = chunkdata['filename']
        filespos[fname] = cgunpacker.tell()
        for chunk in iter(lambda: cgunpacker.deltachunk(None), {}):
            pass
    return filespos

class bundlerepository(localrepo.localrepository):
    """A repository instance that is a union of a local repo and a bundle.

    Instances represent a read-only repository composed of a local repository
    with the contents of a bundle file applied. The repository instance is
    conceptually similar to the state of a repository after an
    ``hg unbundle`` operation. However, the contents of the bundle are never
    applied to the actual base repository.
    """
    def __init__(self, ui, repopath, bundlepath):
        self._tempparent = None
        try:
            localrepo.localrepository.__init__(self, ui, repopath)
        except error.RepoError:
            self._tempparent = tempfile.mkdtemp()
            localrepo.instance(ui, self._tempparent, 1)
            localrepo.localrepository.__init__(self, ui, self._tempparent)
        self.ui.setconfig('phases', 'publish', False, 'bundlerepo')

        if repopath:
            self._url = 'bundle:' + util.expandpath(repopath) + '+' + bundlepath
        else:
            self._url = 'bundle:' + bundlepath

        self.tempfile = None
        f = util.posixfile(bundlepath, "rb")
        bundle = exchange.readbundle(ui, f, bundlepath)

        if isinstance(bundle, bundle2.unbundle20):
            self._bundlefile = bundle
            self._cgunpacker = None

            cgpart = None
            for part in bundle.iterparts(seekable=True):
                if part.type == 'changegroup':
                    if cgpart:
                        raise NotImplementedError("can't process "
                                                  "multiple changegroups")
                    cgpart = part

                self._handlebundle2part(bundle, part)

            if not cgpart:
                raise error.Abort(_("No changegroups found"))

            # This is required to placate a later consumer, which expects
            # the payload offset to be at the beginning of the changegroup.
            # We need to do this after the iterparts() generator advances
            # because iterparts() will seek to end of payload after the
            # generator returns control to iterparts().
            cgpart.seek(0, os.SEEK_SET)

        elif isinstance(bundle, changegroup.cg1unpacker):
            if bundle.compressed():
                f = self._writetempbundle(bundle.read, '.hg10un',
                                          header='HG10UN')
                bundle = exchange.readbundle(ui, f, bundlepath, self.vfs)

            self._bundlefile = bundle
            self._cgunpacker = bundle
        else:
            raise error.Abort(_('bundle type %s cannot be read') %
                              type(bundle))

        # dict with the mapping 'filename' -> position in the changegroup.
        self._cgfilespos = {}

        self.firstnewrev = self.changelog.repotiprev + 1
        phases.retractboundary(self, None, phases.draft,
                               [ctx.node() for ctx in self[self.firstnewrev:]])

    def _handlebundle2part(self, bundle, part):
        if part.type != 'changegroup':
            return

        cgstream = part
        version = part.params.get('version', '01')
        legalcgvers = changegroup.supportedincomingversions(self)
        if version not in legalcgvers:
            msg = _('Unsupported changegroup version: %s')
            raise error.Abort(msg % version)
        if bundle.compressed():
            cgstream = self._writetempbundle(part.read, '.cg%sun' % version)

        self._cgunpacker = changegroup.getunbundler(version, cgstream, 'UN')

    def _writetempbundle(self, readfn, suffix, header=''):
        """Write a temporary file to disk
        """
        fdtemp, temp = self.vfs.mkstemp(prefix="hg-bundle-",
                                        suffix=suffix)
        self.tempfile = temp

        with os.fdopen(fdtemp, r'wb') as fptemp:
            fptemp.write(header)
            while True:
                chunk = readfn(2**18)
                if not chunk:
                    break
                fptemp.write(chunk)

        return self.vfs.open(self.tempfile, mode="rb")

    @localrepo.unfilteredpropertycache
    def _phasecache(self):
        return bundlephasecache(self, self._phasedefaults)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        # consume the header if it exists
        self._cgunpacker.changelogheader()
        c = bundlechangelog(self.svfs, self._cgunpacker)
        self.manstart = self._cgunpacker.tell()
        return c

    def _constructmanifest(self):
        self._cgunpacker.seek(self.manstart)
        # consume the header if it exists
        self._cgunpacker.manifestheader()
        linkmapper = self.unfiltered().changelog.rev
        m = bundlemanifest(self.svfs, self._cgunpacker, linkmapper)
        self.filestart = self._cgunpacker.tell()
        return m

    def _consumemanifest(self):
        """Consumes the manifest portion of the bundle, setting filestart so the
        file portion can be read."""
        self._cgunpacker.seek(self.manstart)
        self._cgunpacker.manifestheader()
        for delta in self._cgunpacker.deltaiter():
            pass
        self.filestart = self._cgunpacker.tell()

    @localrepo.unfilteredpropertycache
    def manstart(self):
        self.changelog
        return self.manstart

    @localrepo.unfilteredpropertycache
    def filestart(self):
        self.manifestlog

        # If filestart was not set by self.manifestlog, that means the
        # manifestlog implementation did not consume the manifests from the
        # changegroup (ex: it might be consuming trees from a separate bundle2
        # part instead). So we need to manually consume it.
        if r'filestart' not in self.__dict__:
            self._consumemanifest()

        return self.filestart

    def url(self):
        return self._url

    def file(self, f):
        if not self._cgfilespos:
            self._cgunpacker.seek(self.filestart)
            self._cgfilespos = _getfilestarts(self._cgunpacker)

        if f in self._cgfilespos:
            self._cgunpacker.seek(self._cgfilespos[f])
            linkmapper = self.unfiltered().changelog.rev
            return bundlefilelog(self.svfs, f, self._cgunpacker, linkmapper)
        else:
            return super(bundlerepository, self).file(f)

    def close(self):
        """Close assigned bundle file immediately."""
        self._bundlefile.close()
        if self.tempfile is not None:
            self.vfs.unlink(self.tempfile)
        if self._tempparent:
            shutil.rmtree(self._tempparent, True)

    def cancopy(self):
        return False

    def peer(self):
        return bundlepeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

    # Check if parents exist in localrepo before setting
    def setparents(self, p1, p2=nullid):
        p1rev = self.changelog.rev(p1)
        p2rev = self.changelog.rev(p2)
        msg = _("setting parent to node %s that only exists in the bundle\n")
        if self.changelog.repotiprev < p1rev:
            self.ui.warn(msg % nodemod.hex(p1))
        if self.changelog.repotiprev < p2rev:
            self.ui.warn(msg % nodemod.hex(p2))
        return super(bundlerepository, self).setparents(p1, p2)

-def instance(ui, path, create):
+def instance(ui, path, create, intents=None):
    if create:
        raise error.Abort(_('cannot create new bundle repository'))
    # internal config: bundle.mainreporoot
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    u = util.url(path)
    path = u.localpath()
    if u.scheme == 'bundle':
        s = path.split("+", 1)
        if len(s) == 1:
            repopath, bundlename = parentpath, s[0]
        else:
            repopath, bundlename = s
    else:
        repopath, bundlename = parentpath, path
    return bundlerepository(ui, repopath, bundlename)

class bundletransactionmanager(object):
    def transaction(self):
        return None

    def close(self):
        raise NotImplementedError

    def release(self):
        raise NotImplementedError

def getremotechanges(ui, repo, peer, onlyheads=None, bundlename=None,
                     force=False):
    '''obtains a bundle of changes incoming from peer

    "onlyheads" restricts the returned changes to those reachable from the
    specified heads.
    "bundlename", if given, stores the bundle to this file path permanently;
    otherwise it's stored to a temp file and gets deleted again when you call
    the returned "cleanupfn".
    "force" indicates whether to proceed on unrelated repos.

    Returns a tuple (local, csets, cleanupfn):

    "local" is a local repo from which to obtain the actual incoming
    changesets; it is a bundlerepo for the obtained bundle when the
    original "peer" is remote.
    "csets" lists the incoming changeset node ids.
    "cleanupfn" must be called without arguments when you're done processing
    the changes; it closes both the original "peer" and the one returned
    here.
    '''
    tmp = discovery.findcommonincoming(repo, peer, heads=onlyheads,
                                       force=force)
    common, incoming, rheads = tmp
    if not incoming:
        try:
            if bundlename:
                os.unlink(bundlename)
        except OSError:
            pass
        return repo, [], peer.close

    commonset = set(common)
    rheads = [x for x in rheads if x not in commonset]

    bundle = None
    bundlerepo = None
    localrepo = peer.local()
    if bundlename or not localrepo:
        # create a bundle (uncompressed if peer repo is not local)

        # developer config: devel.legacy.exchange
        legexc = ui.configlist('devel', 'legacy.exchange')
        forcebundle1 = 'bundle2' not in legexc and 'bundle1' in legexc
        canbundle2 = (not forcebundle1
                      and peer.capable('getbundle')
                      and peer.capable('bundle2'))
        if canbundle2:
            with peer.commandexecutor() as e:
                b2 = e.callcommand('getbundle', {
                    'source': 'incoming',
                    'common': common,
                    'heads': rheads,
                    'bundlecaps': exchange.caps20to10(repo, role='client'),
                    'cg': True,
                }).result()

                fname = bundle = changegroup.writechunks(ui,
                                                         b2._forwardchunks(),
                                                         bundlename)
        else:
            if peer.capable('getbundle'):
                with peer.commandexecutor() as e:
                    cg = e.callcommand('getbundle', {
                        'source': 'incoming',
                        'common': common,
                        'heads': rheads,
                    }).result()
            elif onlyheads is None and not peer.capable('changegroupsubset'):
                # compat with older servers when pulling all remote heads

                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroup', {
                        'nodes': incoming,
                        'source': 'incoming',
                    }).result()

                rheads = None
            else:
                with peer.commandexecutor() as e:
                    cg = e.callcommand('changegroupsubset', {
                        'bases': incoming,
                        'heads': rheads,
                        'source': 'incoming',
                    }).result()

            if localrepo:
                bundletype = "HG10BZ"
            else:
                bundletype = "HG10UN"
            fname = bundle = bundle2.writebundle(ui, cg, bundlename,
                                                 bundletype)
        # keep written bundle?
        if bundlename:
            bundle = None
        if not localrepo:
            # use the created uncompressed bundlerepo
            localrepo = bundlerepo = bundlerepository(repo.baseui, repo.root,
                                                      fname)
            # this repo contains local and peer now, so filter out local again
            common = repo.heads()
    if localrepo:
        # Part of common may be remotely filtered
        # So use an unfiltered version
        # The discovery process probably needs cleanup to avoid that
        localrepo = localrepo.unfiltered()

    csets = localrepo.changelog.findmissing(common, rheads)

    if bundlerepo:
        reponodes = [ctx.node() for ctx in bundlerepo[bundlerepo.firstnewrev:]]

        with peer.commandexecutor() as e:
            remotephases = e.callcommand('listkeys', {
                'namespace': 'phases',
            }).result()

        pullop = exchange.pulloperation(bundlerepo, peer, heads=reponodes)
        pullop.trmanager = bundletransactionmanager()
        exchange._pullapplyphases(pullop, remotephases)

    def cleanup():
        if bundlerepo:
            bundlerepo.close()
        if bundle:
            os.unlink(bundle)
        peer.close()

    return (localrepo, csets, cleanup)
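
A sketch of how a caller might drive getremotechanges() above; this is
roughly what incoming-style commands do, but 'show_incoming' and
'remoteurl' are assumptions for illustration, not part of this diff:

    from mercurial import hg, bundlerepo

    def show_incoming(ui, repo, remoteurl):
        peer = hg.peer(ui, {}, remoteurl)
        local, csets, cleanupfn = bundlerepo.getremotechanges(ui, repo, peer)
        try:
            for node in csets:
                ui.write('%s\n' % local[node].hex())
        finally:
            # closes the bundle repo/temp bundle and the peer
            cleanupfn()
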
@@ -1,1052 +1,1053 @@
# dispatch.py - command dispatching for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import, print_function

import difflib
import errno
import getopt
import os
import pdb
import re
import signal
import sys
import time
import traceback


from .i18n import _

from . import (
    cmdutil,
    color,
    commands,
    demandimport,
    encoding,
    error,
    extensions,
    fancyopts,
    help,
    hg,
    hook,
    profiling,
    pycompat,
    scmutil,
    ui as uimod,
    util,
)

from .utils import (
    procutil,
    stringutil,
)

class request(object):
    def __init__(self, args, ui=None, repo=None, fin=None, fout=None,
                 ferr=None, prereposetups=None):
        self.args = args
        self.ui = ui
        self.repo = repo

        # input/output/error streams
        self.fin = fin
        self.fout = fout
        self.ferr = ferr

        # remember options pre-parsed by _earlyparseopts()
        self.earlyoptions = {}

        # reposetups which run before extensions, useful for chg to pre-fill
        # low-level repo state (for example, changelog) before extensions.
        self.prereposetups = prereposetups or []

    def _runexithandlers(self):
        exc = None
        handlers = self.ui._exithandlers
        try:
            while handlers:
                func, args, kwargs = handlers.pop()
                try:
                    func(*args, **kwargs)
                except: # re-raises below
                    if exc is None:
                        exc = sys.exc_info()[1]
                    self.ui.warn(('error in exit handlers:\n'))
                    self.ui.traceback(force=True)
        finally:
            if exc is not None:
                raise exc

def run():
    "run the command in sys.argv"
    _initstdio()
    req = request(pycompat.sysargv[1:])
    err = None
    try:
        status = (dispatch(req) or 0)
    except error.StdioError as e:
        err = e
        status = -1
    if util.safehasattr(req.ui, 'fout'):
        try:
            req.ui.fout.flush()
        except IOError as e:
            err = e
            status = -1
    if util.safehasattr(req.ui, 'ferr'):
        try:
            if err is not None and err.errno != errno.EPIPE:
                req.ui.ferr.write('abort: %s\n' %
                                  encoding.strtolocal(err.strerror))
            req.ui.ferr.flush()
        # There's not much we can do about an I/O error here. So (possibly)
        # change the status code and move on.
        except IOError:
            status = -1

    _silencestdio()
    sys.exit(status & 255)

if pycompat.ispy3:
    def _initstdio():
        pass

    def _silencestdio():
        for fp in (sys.stdout, sys.stderr):
            # Check if the file is okay
            try:
                fp.flush()
                continue
            except IOError:
                pass
            # Otherwise mark it as closed to silence "Exception ignored in"
            # message emitted by the interpreter finalizer. Be careful to
            # not close procutil.stdout, which may be a fdopen-ed file object
            # and its close() actually closes the underlying file descriptor.
            try:
                fp.close()
            except IOError:
                pass
else:
    def _initstdio():
        for fp in (sys.stdin, sys.stdout, sys.stderr):
            procutil.setbinary(fp)

    def _silencestdio():
        pass

def _getsimilar(symbols, value):
    sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
    # The cutoff for similarity here is pretty arbitrary. It should
    # probably be investigated and tweaked.
    return [s for s in symbols if sim(s) > 0.6]

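A toy run of the suggestion heuristic in _getsimilar() above, using the
same difflib ratio and 0.6 cutoff (the symbol list is made up):

    import difflib

    symbols = ['heads', 'head', 'help', 'log']
    value = 'haeds'
    sim = lambda x: difflib.SequenceMatcher(None, value, x).ratio()
    print([s for s in symbols if sim(s) > 0.6])
    # expected: ['heads', 'head']
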
148 def _reportsimilar(write, similar):
148 def _reportsimilar(write, similar):
149 if len(similar) == 1:
149 if len(similar) == 1:
150 write(_("(did you mean %s?)\n") % similar[0])
150 write(_("(did you mean %s?)\n") % similar[0])
151 elif similar:
151 elif similar:
152 ss = ", ".join(sorted(similar))
152 ss = ", ".join(sorted(similar))
153 write(_("(did you mean one of %s?)\n") % ss)
153 write(_("(did you mean one of %s?)\n") % ss)
154
154
155 def _formatparse(write, inst):
155 def _formatparse(write, inst):
156 similar = []
156 similar = []
157 if isinstance(inst, error.UnknownIdentifier):
157 if isinstance(inst, error.UnknownIdentifier):
158 # make sure to check fileset first, as revset can invoke fileset
158 # make sure to check fileset first, as revset can invoke fileset
159 similar = _getsimilar(inst.symbols, inst.function)
159 similar = _getsimilar(inst.symbols, inst.function)
160 if len(inst.args) > 1:
160 if len(inst.args) > 1:
161 write(_("hg: parse error at %s: %s\n") %
161 write(_("hg: parse error at %s: %s\n") %
162 (pycompat.bytestr(inst.args[1]), inst.args[0]))
162 (pycompat.bytestr(inst.args[1]), inst.args[0]))
163 if inst.args[0].startswith(' '):
163 if inst.args[0].startswith(' '):
164 write(_("unexpected leading whitespace\n"))
164 write(_("unexpected leading whitespace\n"))
165 else:
165 else:
166 write(_("hg: parse error: %s\n") % inst.args[0])
166 write(_("hg: parse error: %s\n") % inst.args[0])
167 _reportsimilar(write, similar)
167 _reportsimilar(write, similar)
168 if inst.hint:
168 if inst.hint:
169 write(_("(%s)\n") % inst.hint)
169 write(_("(%s)\n") % inst.hint)
170
170
171 def _formatargs(args):
171 def _formatargs(args):
172 return ' '.join(procutil.shellquote(a) for a in args)
172 return ' '.join(procutil.shellquote(a) for a in args)
173
173
def dispatch(req):
    "run the command specified in req.args"
    if req.ferr:
        ferr = req.ferr
    elif req.ui:
        ferr = req.ui.ferr
    else:
        ferr = procutil.stderr

    try:
        if not req.ui:
            req.ui = uimod.ui.load()
        req.earlyoptions.update(_earlyparseopts(req.ui, req.args))
        if req.earlyoptions['traceback']:
            req.ui.setconfig('ui', 'traceback', 'on', '--traceback')

        # set ui streams from the request
        if req.fin:
            req.ui.fin = req.fin
        if req.fout:
            req.ui.fout = req.fout
        if req.ferr:
            req.ui.ferr = req.ferr
    except error.Abort as inst:
        ferr.write(_("abort: %s\n") % inst)
        if inst.hint:
            ferr.write(_("(%s)\n") % inst.hint)
        return -1
    except error.ParseError as inst:
        _formatparse(ferr.write, inst)
        return -1

    msg = _formatargs(req.args)
    starttime = util.timer()
    ret = None
    try:
        ret = _runcatch(req)
    except error.ProgrammingError as inst:
        req.ui.warn(_('** ProgrammingError: %s\n') % inst)
        if inst.hint:
            req.ui.warn(_('** (%s)\n') % inst.hint)
        raise
    except KeyboardInterrupt as inst:
        try:
            if isinstance(inst, error.SignalInterrupt):
                msg = _("killed!\n")
            else:
                msg = _("interrupted!\n")
            req.ui.warn(msg)
        except error.SignalInterrupt:
            # maybe pager would quit without consuming all the output, and
            # SIGPIPE was raised. we cannot print anything in this case.
            pass
        except IOError as inst:
            if inst.errno != errno.EPIPE:
                raise
        ret = -1
    finally:
        duration = util.timer() - starttime
        req.ui.flush()
        if req.ui.logblockedtimes:
            req.ui._blockedtimes['command_duration'] = duration * 1000
            req.ui.log('uiblocked', 'ui blocked ms',
                       **pycompat.strkwargs(req.ui._blockedtimes))
        req.ui.log("commandfinish", "%s exited %d after %0.2f seconds\n",
                   msg, ret or 0, duration)
        try:
            req._runexithandlers()
        except: # exiting, so no re-raises
            ret = ret or -1
    return ret

def _runcatch(req):
    def catchterm(*args):
        raise error.SignalInterrupt

    ui = req.ui
    try:
        for name in 'SIGBREAK', 'SIGHUP', 'SIGTERM':
            num = getattr(signal, name, None)
            if num:
                signal.signal(num, catchterm)
    except ValueError:
        pass # happens if called in a thread

    def _runcatchfunc():
        realcmd = None
        try:
            cmdargs = fancyopts.fancyopts(req.args[:], commands.globalopts, {})
            cmd = cmdargs[0]
            aliases, entry = cmdutil.findcmd(cmd, commands.table, False)
            realcmd = aliases[0]
        except (error.UnknownCommand, error.AmbiguousCommand,
                IndexError, getopt.GetoptError):
            # Don't handle this here. We know the command is
            # invalid, but all we're worried about for now is that
            # it's not a command that server operators expect to
            # be safe to offer to users in a sandbox.
            pass
        if realcmd == 'serve' and '--stdio' in cmdargs:
            # We want to constrain 'hg serve --stdio' instances pretty
            # closely, as many shared-ssh access tools want to grant
            # access to run *only* 'hg -R $repo serve --stdio'. We
            # restrict to exactly that set of arguments, and prohibit
            # any repo name that starts with '--' to prevent
            # shenanigans wherein a user does something like pass
            # --debugger or --config=ui.debugger=1 as a repo
            # name. This used to actually run the debugger.
            if (len(req.args) != 4 or
                req.args[0] != '-R' or
                req.args[1].startswith('--') or
                req.args[2] != 'serve' or
                req.args[3] != '--stdio'):
                raise error.Abort(
                    _('potentially unsafe serve --stdio invocation: %r') %
                    (req.args,))

        try:
            debugger = 'pdb'
            debugtrace = {
                'pdb': pdb.set_trace
            }
            debugmortem = {
                'pdb': pdb.post_mortem
            }

            # read --config before doing anything else
            # (e.g. to change trust settings for reading .hg/hgrc)
            cfgs = _parseconfig(req.ui, req.earlyoptions['config'])

            if req.repo:
                # copy configs that were passed on the cmdline (--config) to
                # the repo ui
                for sec, name, val in cfgs:
                    req.repo.ui.setconfig(sec, name, val, source='--config')

            # developer config: ui.debugger
            debugger = ui.config("ui", "debugger")
            debugmod = pdb
            if not debugger or ui.plain():
                # if we are in HGPLAIN mode, then disable custom debugging
                debugger = 'pdb'
            elif req.earlyoptions['debugger']:
                # This import can be slow for fancy debuggers, so only
                # do it when absolutely necessary, i.e. when actual
                # debugging has been requested
                with demandimport.deactivated():
                    try:
                        debugmod = __import__(debugger)
                    except ImportError:
                        pass # Leave debugmod = pdb

            debugtrace[debugger] = debugmod.set_trace
            debugmortem[debugger] = debugmod.post_mortem

            # enter the debugger before command execution
            if req.earlyoptions['debugger']:
                ui.warn(_("entering debugger - "
                          "type c to continue starting hg or h for help\n"))

                if (debugger != 'pdb' and
                    debugtrace[debugger] == debugtrace['pdb']):
                    ui.warn(_("%s debugger specified "
                              "but its module was not found\n") % debugger)
                with demandimport.deactivated():
                    debugtrace[debugger]()
            try:
                return _dispatch(req)
            finally:
                ui.flush()
        except: # re-raises
            # enter the debugger when we hit an exception
            if req.earlyoptions['debugger']:
                traceback.print_exc()
                debugmortem[debugger](sys.exc_info()[2])
            raise

    return _callcatch(ui, _runcatchfunc)

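# An illustration, not part of the original file: the sandbox guard in
# _runcatchfunc above accepts exactly one invocation shape,
#
#   hg -R <repo> serve --stdio
#
# i.e. req.args == ['-R', '<repo>', 'serve', '--stdio'] with a repo name not
# starting with '--'; anything extra (say --debugger, or smuggling
# --config=ui.debugger=1 as the repo name) triggers the "potentially unsafe"
# abort instead.
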
def _callcatch(ui, func):
    """like scmutil.callcatch but handles more high-level exceptions about
    config parsing and commands. besides, use handlecommandexception to handle
    uncaught exceptions.
    """
    try:
        return scmutil.callcatch(ui, func)
    except error.AmbiguousCommand as inst:
        ui.warn(_("hg: command '%s' is ambiguous:\n    %s\n") %
                (inst.args[0], " ".join(inst.args[1])))
    except error.CommandError as inst:
        if inst.args[0]:
            ui.pager('help')
            msgbytes = pycompat.bytestr(inst.args[1])
            ui.warn(_("hg %s: %s\n") % (inst.args[0], msgbytes))
            commands.help_(ui, inst.args[0], full=False, command=True)
        else:
            ui.pager('help')
            ui.warn(_("hg: %s\n") % inst.args[1])
            commands.help_(ui, 'shortlist')
    except error.ParseError as inst:
        _formatparse(ui.warn, inst)
        return -1
    except error.UnknownCommand as inst:
        nocmdmsg = _("hg: unknown command '%s'\n") % inst.args[0]
        try:
            # check if the command is in a disabled extension
            # (but don't check for extensions themselves)
            formatted = help.formattedhelp(ui, commands, inst.args[0],
                                           unknowncmd=True)
            ui.warn(nocmdmsg)
            ui.write(formatted)
        except (error.UnknownCommand, error.Abort):
            suggested = False
            if len(inst.args) == 2:
                sim = _getsimilar(inst.args[1], inst.args[0])
                if sim:
                    ui.warn(nocmdmsg)
                    _reportsimilar(ui.warn, sim)
                    suggested = True
            if not suggested:
                ui.pager('help')
                ui.warn(nocmdmsg)
                commands.help_(ui, 'shortlist')
    except IOError:
        raise
    except KeyboardInterrupt:
        raise
    except: # probably re-raises
        if not handlecommandexception(ui):
            raise

    return -1

def aliasargs(fn, givenargs):
    args = []
    # only care about alias 'args', ignore 'args' set by extensions.wrapfunction
    if not util.safehasattr(fn, '_origfunc'):
        args = getattr(fn, 'args', args)
    if args:
        cmd = ' '.join(map(procutil.shellquote, args))

        nums = []
        def replacer(m):
            num = int(m.group(1)) - 1
            nums.append(num)
            if num < len(givenargs):
                return givenargs[num]
            raise error.Abort(_('too few arguments for command alias'))
        cmd = re.sub(br'\$(\d+|\$)', replacer, cmd)
        givenargs = [x for i, x in enumerate(givenargs)
                     if i not in nums]
        args = pycompat.shlexsplit(cmd)
    return args + givenargs

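# An illustration, not part of the original file: positional substitution in
# aliasargs for a hypothetical alias whose stored args are ['log', '-r', '$1'].
def _aliasargsexample():
    class fakefn(object):
        args = ['log', '-r', '$1']
    # $1 is replaced by 'tip', the consumed positional is dropped, and the
    # leftover '-v' is appended -> ['log', '-r', 'tip', '-v']
    return aliasargs(fakefn(), ['tip', '-v'])
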
def aliasinterpolate(name, args, cmd):
    '''interpolate args into cmd for shell aliases

    This also handles $0, $@ and "$@".
    '''
    # util.interpolate can't deal with "$@" (with quotes) because it's only
    # built to match prefix + patterns.
    replacemap = dict(('$%d' % (i + 1), arg) for i, arg in enumerate(args))
    replacemap['$0'] = name
    replacemap['$$'] = '$'
    replacemap['$@'] = ' '.join(args)
    # Typical Unix shells interpolate "$@" (with quotes) as all the positional
    # parameters, separated out into words. Emulate the same behavior here by
    # quoting the arguments individually. POSIX shells will then typically
    # tokenize each argument into exactly one word.
    replacemap['"$@"'] = ' '.join(procutil.shellquote(arg) for arg in args)
    # escape '\$' for regex
    regex = '|'.join(replacemap.keys()).replace('$', br'\$')
    r = re.compile(regex)
    return r.sub(lambda x: replacemap[x.group()], cmd)

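# An illustration, not part of the original file: interpolation for a
# hypothetical shell alias body. Quoting shown assumes POSIX-style
# procutil.shellquote; Windows quoting differs.
def _aliasinterpolateexample():
    # $1 -> first argument; "$@" -> every argument individually quoted:
    # -> "echo a and 'a' 'b c'"
    return aliasinterpolate('echoargs', ['a', 'b c'], 'echo $1 and "$@"')
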
class cmdalias(object):
    def __init__(self, ui, name, definition, cmdtable, source):
        self.name = self.cmd = name
        self.cmdname = ''
        self.definition = definition
        self.fn = None
        self.givenargs = []
        self.opts = []
        self.help = ''
        self.badalias = None
        self.unknowncmd = False
        self.source = source

        try:
            aliases, entry = cmdutil.findcmd(self.name, cmdtable)
            for alias, e in cmdtable.iteritems():
                if e is entry:
                    self.cmd = alias
                    break
            self.shadows = True
        except error.UnknownCommand:
            self.shadows = False

        if not self.definition:
            self.badalias = _("no definition for alias '%s'") % self.name
            return

        if self.definition.startswith('!'):
            shdef = self.definition[1:]
            self.shell = True
            def fn(ui, *args):
                env = {'HG_ARGS': ' '.join((self.name,) + args)}
                def _checkvar(m):
                    if m.groups()[0] == '$':
                        return m.group()
                    elif int(m.groups()[0]) <= len(args):
                        return m.group()
                    else:
                        ui.debug("No argument found for substitution "
                                 "of %i variable in alias '%s' definition.\n"
                                 % (int(m.groups()[0]), self.name))
                        return ''
                cmd = re.sub(br'\$(\d+|\$)', _checkvar, shdef)
                cmd = aliasinterpolate(self.name, args, cmd)
                return ui.system(cmd, environ=env,
                                 blockedtag='alias_%s' % self.name)
            self.fn = fn
            self._populatehelp(ui, name, shdef, self.fn)
            return

        try:
            args = pycompat.shlexsplit(self.definition)
        except ValueError as inst:
            self.badalias = (_("error in definition for alias '%s': %s")
                             % (self.name, stringutil.forcebytestr(inst)))
            return
        earlyopts, args = _earlysplitopts(args)
        if earlyopts:
            self.badalias = (_("error in definition for alias '%s': %s may "
                               "only be given on the command line")
                             % (self.name, '/'.join(pycompat.ziplist(*earlyopts)
                                                    [0])))
            return
        self.cmdname = cmd = args.pop(0)
        self.givenargs = args

        try:
            tableentry = cmdutil.findcmd(cmd, cmdtable, False)[1]
            if len(tableentry) > 2:
                self.fn, self.opts, cmdhelp = tableentry
            else:
                self.fn, self.opts = tableentry
                cmdhelp = None

            self._populatehelp(ui, name, cmd, self.fn, cmdhelp)

        except error.UnknownCommand:
            self.badalias = (_("alias '%s' resolves to unknown command '%s'")
                             % (self.name, cmd))
            self.unknowncmd = True
        except error.AmbiguousCommand:
            self.badalias = (_("alias '%s' resolves to ambiguous command '%s'")
                             % (self.name, cmd))

    def _populatehelp(self, ui, name, cmd, fn, defaulthelp=None):
        # confine strings to be passed to i18n.gettext()
        cfg = {}
        for k in ('doc', 'help'):
            v = ui.config('alias', '%s:%s' % (name, k), None)
            if v is None:
                continue
            if not encoding.isasciistr(v):
                self.badalias = (_("non-ASCII character in alias definition "
                                   "'%s:%s'") % (name, k))
                return
            cfg[k] = v

        self.help = cfg.get('help', defaulthelp or '')
        if self.help and self.help.startswith("hg " + cmd):
            # drop prefix in old-style help lines so hg shows the alias
            self.help = self.help[4 + len(cmd):]

        doc = cfg.get('doc', pycompat.getdoc(fn))
        if doc is not None:
            doc = pycompat.sysstr(doc)
        self.__doc__ = doc

    @property
    def args(self):
        args = pycompat.maplist(util.expandpath, self.givenargs)
        return aliasargs(self.fn, args)

    def __getattr__(self, name):
        adefaults = {r'norepo': True, r'intents': set(),
                     r'optionalrepo': False, r'inferrepo': False}
        if name not in adefaults:
            raise AttributeError(name)
        if self.badalias or util.safehasattr(self, 'shell'):
            return adefaults[name]
        return getattr(self.fn, name)

    def __call__(self, ui, *args, **opts):
        if self.badalias:
            hint = None
            if self.unknowncmd:
                try:
                    # check if the command is in a disabled extension
                    cmd, ext = extensions.disabledcmd(ui, self.cmdname)[:2]
                    hint = _("'%s' is provided by '%s' extension") % (cmd, ext)
                except error.UnknownCommand:
                    pass
            raise error.Abort(self.badalias, hint=hint)
        if self.shadows:
            ui.debug("alias '%s' shadows command '%s'\n" %
                     (self.name, self.cmdname))

        ui.log('commandalias', "alias '%s' expands to '%s'\n",
               self.name, self.definition)
        if util.safehasattr(self, 'shell'):
            return self.fn(ui, *args, **opts)
        else:
            try:
                return util.checksignature(self.fn)(ui, *args, **opts)
            except error.SignatureError:
                args = ' '.join([self.cmdname] + self.args)
                ui.debug("alias '%s' expands to '%s'\n" % (self.name, args))
                raise

class lazyaliasentry(object):
    """like a typical command entry (func, opts, help), but is lazy"""

    def __init__(self, ui, name, definition, cmdtable, source):
        self.ui = ui
        self.name = name
        self.definition = definition
        self.cmdtable = cmdtable.copy()
        self.source = source

    @util.propertycache
    def _aliasdef(self):
        return cmdalias(self.ui, self.name, self.definition, self.cmdtable,
                        self.source)

    def __getitem__(self, n):
        aliasdef = self._aliasdef
        if n == 0:
            return aliasdef
        elif n == 1:
            return aliasdef.opts
        elif n == 2:
            return aliasdef.help
        else:
            raise IndexError

    def __iter__(self):
        for i in range(3):
            yield self[i]

    def __len__(self):
        return 3

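# An illustration, not part of the original file: the entry quacks like the
# usual (func, opts, help) triple, but the cmdalias behind it is built only
# on first use. The alias name and definition are hypothetical.
def _lazyaliasexample(ui):
    entry = lazyaliasentry(ui, 'nlog', 'log --limit 5', commands.table, 'test')
    # nothing parsed yet; unpacking triggers cmdalias construction exactly once
    fn, opts, helptext = entry
    return fn
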
def addaliases(ui, cmdtable):
    # aliases are processed after extensions have been loaded, so they
    # may use extension commands. Aliases can also use other alias definitions,
    # but only if they have been defined prior to the current definition.
    for alias, definition in ui.configitems('alias', ignoresub=True):
        try:
            if cmdtable[alias].definition == definition:
                continue
        except (KeyError, AttributeError):
            # definition might not exist or it might not be a cmdalias
            pass

        source = ui.configsource('alias', alias)
        entry = lazyaliasentry(ui, alias, definition, cmdtable, source)
        cmdtable[alias] = entry

def _parse(ui, args):
    options = {}
    cmdoptions = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except getopt.GetoptError as inst:
        raise error.CommandError(None, stringutil.forcebytestr(inst))

    if args:
        cmd, args = args[0], args[1:]
        aliases, entry = cmdutil.findcmd(cmd, commands.table,
                                         ui.configbool("ui", "strict"))
        cmd = aliases[0]
        args = aliasargs(entry[0], args)
        defaults = ui.config("defaults", cmd)
        if defaults:
            args = pycompat.maplist(
                util.expandpath, pycompat.shlexsplit(defaults)) + args
        c = list(entry[1])
    else:
        cmd = None
        c = []

    # combine global options into local
    for o in commands.globalopts:
        c.append((o[0], o[1], options[o[1]], o[3]))

    try:
        args = fancyopts.fancyopts(args, c, cmdoptions, gnu=True)
    except getopt.GetoptError as inst:
        raise error.CommandError(cmd, stringutil.forcebytestr(inst))

    # separate global options back out
    for o in commands.globalopts:
        n = o[1]
        options[n] = cmdoptions[n]
        del cmdoptions[n]

    return (cmd, cmd and entry[0] or None, args, options, cmdoptions)

def _parseconfig(ui, config):
    """parse the --config options from the command line"""
    configs = []

    for cfg in config:
        try:
            name, value = [cfgelem.strip()
                           for cfgelem in cfg.split('=', 1)]
            section, name = name.split('.', 1)
            if not section or not name:
                raise IndexError
            ui.setconfig(section, name, value, '--config')
            configs.append((section, name, value))
        except (IndexError, ValueError):
            raise error.Abort(_('malformed --config option: %r '
                                '(use --config section.name=value)')
                              % pycompat.bytestr(cfg))

    return configs

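# An illustration, not part of the original file: '--config ui.verbose=true'
# splits on the first '=' and then the first '.', giving section 'ui', name
# 'verbose', value 'true'; a missing '=' or an empty section or name raises
# the "malformed --config option" abort.
def _parseconfigexample(ui):
    # -> [('ui', 'verbose', 'true')]; ui has the override applied as a side
    # effect, attributed to source '--config'
    return _parseconfig(ui, ['ui.verbose=true'])
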
def _earlyparseopts(ui, args):
    options = {}
    fancyopts.fancyopts(args, commands.globalopts, options,
                        gnu=not ui.plain('strictflags'), early=True,
                        optaliases={'repository': ['repo']})
    return options

def _earlysplitopts(args):
    """Split args into a list of possible early options and remainder args"""
    shortoptions = 'R:'
    # TODO: perhaps 'debugger' should be included
    longoptions = ['cwd=', 'repository=', 'repo=', 'config=']
    return fancyopts.earlygetopt(args, shortoptions, longoptions,
                                 gnu=True, keepsep=True)

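# An illustration, not part of the original file, and the exact return shape
# is an assumption: for arguments like ['--cwd', '/tmp', 'log'],
# _earlysplitopts is expected to peel the early option off the front and
# return roughly ([['--cwd', '/tmp']], ['log']). cmdalias uses the first
# element to reject alias definitions that try to embed -R/--repository/
# --cwd/--config, which only make sense on the real command line.
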
def runcommand(lui, repo, cmd, fullargs, ui, options, d, cmdpats, cmdoptions):
    # run pre-hook, and abort if it fails
    hook.hook(lui, repo, "pre-%s" % cmd, True, args=" ".join(fullargs),
              pats=cmdpats, opts=cmdoptions)
    try:
        ret = _runcommand(ui, options, cmd, d)
        # run post-hook, passing command result
        hook.hook(lui, repo, "post-%s" % cmd, False, args=" ".join(fullargs),
                  result=ret, pats=cmdpats, opts=cmdoptions)
    except Exception:
        # run failure hook and re-raise
        hook.hook(lui, repo, "fail-%s" % cmd, False, args=" ".join(fullargs),
                  pats=cmdpats, opts=cmdoptions)
        raise
    return ret

def _getlocal(ui, rpath, wd=None):
    """Return (path, local ui object) for the given target path.

    Takes paths in [cwd]/.hg/hgrc into account.
    """
    if wd is None:
        try:
            wd = pycompat.getcwd()
        except OSError as e:
            raise error.Abort(_("error getting current working directory: %s") %
                              encoding.strtolocal(e.strerror))
    path = cmdutil.findrepo(wd) or ""
    if not path:
        lui = ui
    else:
        lui = ui.copy()
        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)

    if rpath:
        path = lui.expandpath(rpath)
        lui = ui.copy()
        lui.readconfig(os.path.join(path, ".hg", "hgrc"), path)

    return path, lui

def _checkshellalias(lui, ui, args):
    """Return the function to run the shell alias, if it is required"""
    options = {}

    try:
        args = fancyopts.fancyopts(args, commands.globalopts, options)
    except getopt.GetoptError:
        return

    if not args:
        return

    cmdtable = commands.table

    cmd = args[0]
    try:
        strict = ui.configbool("ui", "strict")
        aliases, entry = cmdutil.findcmd(cmd, cmdtable, strict)
    except (error.AmbiguousCommand, error.UnknownCommand):
        return

    cmd = aliases[0]
    fn = entry[0]

    if cmd and util.safehasattr(fn, 'shell'):
        # shell alias shouldn't receive early options which are consumed by hg
        _earlyopts, args = _earlysplitopts(args)
        d = lambda: fn(ui, *args[1:])
        return lambda: runcommand(lui, None, cmd, args[:1], ui, options, d,
                                  [], {})

def _dispatch(req):
    args = req.args
    ui = req.ui

    # check for cwd
    cwd = req.earlyoptions['cwd']
    if cwd:
        os.chdir(cwd)

    rpath = req.earlyoptions['repository']
    path, lui = _getlocal(ui, rpath)

    uis = {ui, lui}

    if req.repo:
        uis.add(req.repo.ui)

    if req.earlyoptions['profile']:
        for ui_ in uis:
            ui_.setconfig('profiling', 'enabled', 'true', '--profile')

    profile = lui.configbool('profiling', 'enabled')
    with profiling.profile(lui, enabled=profile) as profiler:
        # Configure extensions in phases: uisetup, extsetup, cmdtable, and
        # reposetup
        extensions.loadall(lui)
        # Propagate any changes to lui.__class__ by extensions
        ui.__class__ = lui.__class__

        # (uisetup and extsetup are handled in extensions.loadall)

        # (reposetup is handled in hg.repository)

        addaliases(lui, commands.table)

        # All aliases and commands are completely defined, now.
        # Check abbreviation/ambiguity of shell alias.
        shellaliasfn = _checkshellalias(lui, ui, args)
        if shellaliasfn:
            return shellaliasfn()

        # check for fallback encoding
        fallback = lui.config('ui', 'fallbackencoding')
        if fallback:
            encoding.fallbackencoding = fallback

        fullargs = args
        cmd, func, args, options, cmdoptions = _parse(lui, args)

        if options["config"] != req.earlyoptions["config"]:
            raise error.Abort(_("option --config may not be abbreviated!"))
        if options["cwd"] != req.earlyoptions["cwd"]:
            raise error.Abort(_("option --cwd may not be abbreviated!"))
        if options["repository"] != req.earlyoptions["repository"]:
            raise error.Abort(_(
                "option -R has to be separated from other options (e.g. not "
                "-qR) and --repository may only be abbreviated as --repo!"))
        if options["debugger"] != req.earlyoptions["debugger"]:
            raise error.Abort(_("option --debugger may not be abbreviated!"))
        # don't validate --profile/--traceback, which can be enabled from now

        if options["encoding"]:
            encoding.encoding = options["encoding"]
        if options["encodingmode"]:
            encoding.encodingmode = options["encodingmode"]
        if options["time"]:
            def get_times():
                t = os.times()
                if t[4] == 0.0:
                    # Windows leaves this as zero, so use time.clock()
                    t = (t[0], t[1], t[2], t[3], time.clock())
                return t
            s = get_times()
            def print_time():
                t = get_times()
                ui.warn(
                    _("time: real %.3f secs (user %.3f+%.3f sys %.3f+%.3f)\n") %
                    (t[4]-s[4], t[0]-s[0], t[2]-s[2], t[1]-s[1], t[3]-s[3]))
            ui.atexit(print_time)
        if options["profile"]:
            profiler.start()

        if options['verbose'] or options['debug'] or options['quiet']:
            for opt in ('verbose', 'debug', 'quiet'):
                val = pycompat.bytestr(bool(options[opt]))
                for ui_ in uis:
                    ui_.setconfig('ui', opt, val, '--' + opt)

        if options['traceback']:
            for ui_ in uis:
                ui_.setconfig('ui', 'traceback', 'on', '--traceback')

        if options['noninteractive']:
            for ui_ in uis:
                ui_.setconfig('ui', 'interactive', 'off', '-y')

        if cmdoptions.get('insecure', False):
            for ui_ in uis:
                ui_.insecureconnections = True

        # setup color handling before pager, because setting up pager
        # might cause incorrect console information
        coloropt = options['color']
        for ui_ in uis:
            if coloropt:
                ui_.setconfig('ui', 'color', coloropt, '--color')
            color.setup(ui_)

        if stringutil.parsebool(options['pager']):
            # ui.pager() expects 'internal-always-' prefix in this case
            ui.pager('internal-always-' + cmd)
        elif options['pager'] != 'auto':
            for ui_ in uis:
                ui_.disablepager()

        if options['version']:
            return commands.version_(ui)
        if options['help']:
            return commands.help_(ui, cmd, command=cmd is not None)
        elif not cmd:
            return commands.help_(ui, 'shortlist')

        repo = None
        cmdpats = args[:]
        if not func.norepo:
            # use the repo from the request only if we don't have -R
            if not rpath and not cwd:
                repo = req.repo

            if repo:
                # set the descriptors of the repo ui to those of ui
                repo.ui.fin = ui.fin
                repo.ui.fout = ui.fout
                repo.ui.ferr = ui.ferr
            else:
                try:
                    repo = hg.repository(ui, path=path,
-                                        presetupfuncs=req.prereposetups)
+                                        presetupfuncs=req.prereposetups,
+                                        intents=func.intents)
                    if not repo.local():
                        raise error.Abort(_("repository '%s' is not local")
                                          % path)
                    repo.ui.setconfig("bundle", "mainreporoot", repo.root,
                                      'repo')
                except error.RequirementError:
                    raise
                except error.RepoError:
                    if rpath: # invalid -R path
                        raise
                    if not func.optionalrepo:
                        if func.inferrepo and args and not path:
                            # try to infer -R from command args
                            repos = pycompat.maplist(cmdutil.findrepo, args)
                            guess = repos[0]
                            if guess and repos.count(guess) == len(repos):
                                req.args = ['--repository', guess] + fullargs
                                req.earlyoptions['repository'] = guess
                                return _dispatch(req)
                        if not path:
                            raise error.RepoError(_("no repository found in"
                                                    " '%s' (.hg not found)")
                                                  % pycompat.getcwd())
                        raise
            if repo:
                ui = repo.ui
                if options['hidden']:
                    repo = repo.unfiltered()
            args.insert(0, repo)
        elif rpath:
            ui.warn(_("warning: --repository ignored\n"))

        msg = _formatargs(fullargs)
        ui.log("command", '%s\n', msg)
        strcmdopt = pycompat.strkwargs(cmdoptions)
        d = lambda: util.checksignature(func)(ui, *args, **strcmdopt)
        try:
            return runcommand(lui, repo, cmd, fullargs, ui, options, d,
                              cmdpats, cmdoptions)
        finally:
            if repo and repo != req.repo:
                repo.close()

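# An illustration, not part of the original file: with this change a command
# can advertise intents when it is registered, and _dispatch above forwards
# func.intents (an empty set by default, see cmdalias.__getattr__) into
# hg.repository() so repo/peer factories may choose a cheaper representation
# for, say, read-only commands. A hypothetical registration; the intent name
# is assumed for illustration only:
#
#   @command(b'mystats', [], b'hg mystats', intents={b'readonly'})
#   def mystats(ui, repo):
#       ui.write(b'%d heads\n' % len(repo.heads()))
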
def _runcommand(ui, options, cmd, cmdfunc):
    """Run a command function, possibly with profiling enabled."""
    try:
        return cmdfunc()
    except error.SignatureError:
        raise error.CommandError(cmd, _('invalid arguments'))

def _exceptionwarning(ui):
    """Produce a warning message for the current active exception"""

    # For compatibility checking, we discard the portion of the hg
    # version after the + on the assumption that if a "normal
    # user" is running a build with a + in it the packager
    # probably built from fairly close to a tag and anyone with a
    # 'make local' copy of hg (where the version number can be out
    # of date) will be clueful enough to notice the implausible
    # version number and try updating.
    ct = util.versiontuple(n=2)
    worst = None, ct, ''
    if ui.config('ui', 'supportcontact') is None:
        for name, mod in extensions.extensions():
            # 'testedwith' should be bytes, but not all extensions are ported
            # to py3 and we don't want UnicodeException because of that.
            testedwith = stringutil.forcebytestr(getattr(mod, 'testedwith', ''))
            report = getattr(mod, 'buglink', _('the extension author.'))
            if not testedwith.strip():
                # We found an untested extension. It's likely the culprit.
                worst = name, 'unknown', report
                break

            # Never blame on extensions bundled with Mercurial.
            if extensions.ismoduleinternal(mod):
                continue

            tested = [util.versiontuple(t, 2) for t in testedwith.split()]
            if ct in tested:
                continue

            lower = [t for t in tested if t < ct]
            nearest = max(lower or tested)
            if worst[0] is None or nearest < worst[1]:
                worst = name, nearest, report
    if worst[0] is not None:
        name, testedwith, report = worst
        if not isinstance(testedwith, (bytes, str)):
            testedwith = '.'.join([stringutil.forcebytestr(c)
                                   for c in testedwith])
        warning = (_('** Unknown exception encountered with '
                     'possibly-broken third-party extension %s\n'
                     '** which supports versions %s of Mercurial.\n'
                     '** Please disable %s and try your action again.\n'
                     '** If that fixes the bug please report it to %s\n')
                   % (name, testedwith, name, report))
    else:
        bugtracker = ui.config('ui', 'supportcontact')
        if bugtracker is None:
            bugtracker = _("https://mercurial-scm.org/wiki/BugTracker")
        warning = (_("** unknown exception encountered, "
                     "please report by visiting\n** ") + bugtracker + '\n')
    sysversion = pycompat.sysbytes(sys.version).replace('\n', '')
    warning += ((_("** Python %s\n") % sysversion) +
                (_("** Mercurial Distributed SCM (version %s)\n") %
                 util.version()) +
                (_("** Extensions loaded: %s\n") %
                 ", ".join([x[0] for x in extensions.extensions()])))
    return warning

def handlecommandexception(ui):
    """Produce a warning message for broken commands

    Called when handling an exception; the exception is reraised if
    this function returns False, ignored otherwise.
    """
    warning = _exceptionwarning(ui)
    ui.log("commandexception", "%s\n%s\n", warning,
           pycompat.sysbytes(traceback.format_exc()))
    ui.warn(warning)
    return False # re-raise the exception
--- a/mercurial/hg.py
+++ b/mercurial/hg.py
@@ -1,1168 +1,1170 @@
# hg.py - repository classes for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import shutil
import stat

from .i18n import _
from .node import (
    nullid,
)

from . import (
    bookmarks,
    bundlerepo,
    cacheutil,
    cmdutil,
    destutil,
    discovery,
    error,
    exchange,
    extensions,
    httppeer,
    localrepo,
    lock,
    logcmdutil,
    logexchange,
    merge as mergemod,
    node,
    phases,
    scmutil,
    sshpeer,
    statichttprepo,
    ui as uimod,
    unionrepo,
    url,
    util,
    verify as verifymod,
    vfs as vfsmod,
)

from .utils import (
    stringutil,
)

release = lock.release

# shared features
sharedbookmarks = 'bookmarks'

def _local(path):
    path = util.expandpath(util.urllocalpath(path))
    return (os.path.isfile(path) and bundlerepo or localrepo)

64 def addbranchrevs(lrepo, other, branches, revs):
64 def addbranchrevs(lrepo, other, branches, revs):
65 peer = other.peer() # a courtesy to callers using a localrepo for other
65 peer = other.peer() # a courtesy to callers using a localrepo for other
66 hashbranch, branches = branches
66 hashbranch, branches = branches
67 if not hashbranch and not branches:
67 if not hashbranch and not branches:
68 x = revs or None
68 x = revs or None
69 if revs:
69 if revs:
70 y = revs[0]
70 y = revs[0]
71 else:
71 else:
72 y = None
72 y = None
73 return x, y
73 return x, y
74 if revs:
74 if revs:
75 revs = list(revs)
75 revs = list(revs)
76 else:
76 else:
77 revs = []
77 revs = []
78
78
79 if not peer.capable('branchmap'):
79 if not peer.capable('branchmap'):
80 if branches:
80 if branches:
81 raise error.Abort(_("remote branch lookup not supported"))
81 raise error.Abort(_("remote branch lookup not supported"))
82 revs.append(hashbranch)
82 revs.append(hashbranch)
83 return revs, revs[0]
83 return revs, revs[0]
84
84
85 with peer.commandexecutor() as e:
85 with peer.commandexecutor() as e:
86 branchmap = e.callcommand('branchmap', {}).result()
86 branchmap = e.callcommand('branchmap', {}).result()
87
87
88 def primary(branch):
88 def primary(branch):
89 if branch == '.':
89 if branch == '.':
90 if not lrepo:
90 if not lrepo:
91 raise error.Abort(_("dirstate branch not accessible"))
91 raise error.Abort(_("dirstate branch not accessible"))
92 branch = lrepo.dirstate.branch()
92 branch = lrepo.dirstate.branch()
93 if branch in branchmap:
93 if branch in branchmap:
94 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
94 revs.extend(node.hex(r) for r in reversed(branchmap[branch]))
95 return True
95 return True
96 else:
96 else:
97 return False
97 return False
98
98
99 for branch in branches:
99 for branch in branches:
100 if not primary(branch):
100 if not primary(branch):
101 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
101 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
102 if hashbranch:
102 if hashbranch:
103 if not primary(hashbranch):
103 if not primary(hashbranch):
104 revs.append(hashbranch)
104 revs.append(hashbranch)
105 return revs, revs[0]
105 return revs, revs[0]
106
106
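# Illustrative sketch (not part of the original file): given "url#branch"
# syntax, callers usually combine parseurl() (defined below) with
# addbranchrevs() along these lines:
#
#   src, branches = parseurl(b'http://example.com/repo#stable')
#   other = peer(ui, {}, src)
#   revs, checkout = addbranchrevs(repo, other, branches, None)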
def parseurl(path, branches=None):
    '''parse url#branch, returning (url, (branch, branches))'''

    u = util.url(path)
    branch = None
    if u.fragment:
        branch = u.fragment
        u.fragment = None
    return bytes(u), (branch, branches or [])

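# Illustrative sketch (not part of the original file): a '#fragment' is
# split off as the branch name:
#
#   url, (branch, branches) = parseurl(b'https://example.com/repo#stable')
#   # url == b'https://example.com/repo'; branch is the 'stable' fragment;
#   # branches == []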
schemes = {
    'bundle': bundlerepo,
    'union': unionrepo,
    'file': _local,
    'http': httppeer,
    'https': httppeer,
    'ssh': sshpeer,
    'static-http': statichttprepo,
}

def _peerlookup(path):
    u = util.url(path)
    scheme = u.scheme or 'file'
    thing = schemes.get(scheme) or schemes['file']
    try:
        return thing(path)
    except TypeError:
        # we can't test callable(thing) because 'thing' can be an unloaded
        # module that implements __call__
        if not util.safehasattr(thing, 'instance'):
            raise
        return thing

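# Illustrative sketch (not part of the original file): _peerlookup() maps a
# URL scheme to the module that can open it, falling back to 'file':
#
#   _peerlookup(b'ssh://host/repo')  # -> sshpeer
#   _peerlookup(b'/some/local/path') # -> bundlerepo or localrepo, depending
#                                    #    on whether the path is a file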
def islocal(repo):
    '''return true if repo (or path pointing to repo) is local'''
    if isinstance(repo, bytes):
        try:
            return _peerlookup(repo).islocal(repo)
        except AttributeError:
            return False
    return repo.local()

def openpath(ui, path):
    '''open path with open if local, url.open if remote'''
    pathurl = util.url(path, parsequery=False, parsefragment=False)
    if pathurl.islocal():
        return util.posixfile(pathurl.localpath(), 'rb')
    else:
        return url.open(ui, path)

# a list of (ui, repo) functions called for wire peer initialization
wirepeersetupfuncs = []

def _peerorrepo(ui, path, create=False, presetupfuncs=None,
                intents=None):
    """return a repository object for the specified path"""
    obj = _peerlookup(path).instance(ui, path, create, intents=intents)
    ui = getattr(obj, "ui", ui)
    for f in presetupfuncs or []:
        f(ui, obj)
    for name, module in extensions.extensions(ui):
        hook = getattr(module, 'reposetup', None)
        if hook:
            hook(ui, obj)
    if not obj.local():
        for f in wirepeersetupfuncs:
            f(ui, obj)
    return obj

def repository(ui, path='', create=False, presetupfuncs=None, intents=None):
    """return a repository object for the specified path"""
    peer = _peerorrepo(ui, path, create, presetupfuncs=presetupfuncs,
                       intents=intents)
    repo = peer.local()
    if not repo:
        raise error.Abort(_("repository '%s' is not local") %
                          (path or peer.url()))
    return repo.filtered('visible')

def peer(uiorrepo, opts, path, create=False, intents=None):
    '''return a repository peer for the specified path'''
    rui = remoteui(uiorrepo, opts)
    return _peerorrepo(rui, path, create, intents=intents).peer()

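# Illustrative sketch (not part of the original file): callers that know in
# advance how the repository will be used can forward that hint at open
# time; the intent value shown here ('readonly') is an assumed example, not
# something defined in this file:
#
#   repo = repository(ui, path, intents={'readonly'})
#   remote = peer(repo, {}, b'ssh://host/repo', intents={'readonly'})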
def defaultdest(source):
    '''return default destination of clone if none is given

    >>> defaultdest(b'foo')
    'foo'
    >>> defaultdest(b'/foo/bar')
    'bar'
    >>> defaultdest(b'/')
    ''
    >>> defaultdest(b'')
    ''
    >>> defaultdest(b'http://example.org/')
    ''
    >>> defaultdest(b'http://example.org/foo/')
    'foo'
    '''
    path = util.url(source).path
    if not path:
        return ''
    return os.path.basename(os.path.normpath(path))

def sharedreposource(repo):
    """Returns repository object for source repository of a shared repo.

    If repo is not a shared repository, returns None.
    """
    if repo.sharedpath == repo.path:
        return None

    if util.safehasattr(repo, 'srcrepo') and repo.srcrepo:
        return repo.srcrepo

    # the sharedpath always ends in the .hg; we want the path to the repo
    source = repo.vfs.split(repo.sharedpath)[0]
    srcurl, branches = parseurl(source)
    srcrepo = repository(repo.ui, srcurl)
    repo.srcrepo = srcrepo
    return srcrepo

def share(ui, source, dest=None, update=True, bookmarks=True, defaultpath=None,
          relative=False):
    '''create a shared repository'''

    if not islocal(source):
        raise error.Abort(_('can only share local repositories'))

    if not dest:
        dest = defaultdest(source)
    else:
        dest = ui.expandpath(dest)

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource)
        srcrepo = repository(ui, source)
        rev, checkout = addbranchrevs(srcrepo, srcrepo, branches, None)
    else:
        srcrepo = source.local()
        origsource = source = srcrepo.url()
        checkout = None

    sharedpath = srcrepo.sharedpath # if our source is already sharing

    destwvfs = vfsmod.vfs(dest, realpath=True)
    destvfs = vfsmod.vfs(os.path.join(destwvfs.base, '.hg'), realpath=True)

    if destvfs.lexists():
        raise error.Abort(_('destination already exists'))

    if not destwvfs.isdir():
        destwvfs.mkdir()
    destvfs.makedir()

    requirements = ''
    try:
        requirements = srcrepo.vfs.read('requires')
    except IOError as inst:
        if inst.errno != errno.ENOENT:
            raise

    if relative:
        try:
            sharedpath = os.path.relpath(sharedpath, destvfs.base)
            requirements += 'relshared\n'
        except (IOError, ValueError) as e:
            # ValueError is raised on Windows if the drive letters differ on
            # each path
            raise error.Abort(_('cannot calculate relative path'),
                              hint=stringutil.forcebytestr(e))
    else:
        requirements += 'shared\n'

    destvfs.write('requires', requirements)
    destvfs.write('sharedpath', sharedpath)

    r = repository(ui, destwvfs.base)
    postshare(srcrepo, r, bookmarks=bookmarks, defaultpath=defaultpath)
    _postshareupdate(r, update, checkout=checkout)
    return r

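# Illustrative sketch (not part of the original file): share() creates a
# lightweight checkout whose .hg/sharedpath points back at the source
# repository's store; the paths here are assumed examples:
#
#   r = share(ui, b'/path/to/srcrepo', dest=b'/path/to/worktree',
#             relative=True)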
def unshare(ui, repo):
    """convert a shared repository to a normal one

    Copy the store data to the repo and remove the sharedpath data.
    """

    destlock = lock = None
    lock = repo.lock()
    try:
        # we use locks here because if we race with commit, we
        # can end up with extra data in the cloned revlogs that's
        # not pointed to by changesets, thus causing verify to
        # fail

        destlock = copystore(ui, repo, repo.path)

        sharefile = repo.vfs.join('sharedpath')
        util.rename(sharefile, sharefile + '.old')

        repo.requirements.discard('shared')
        repo.requirements.discard('relshared')
        repo._writerequirements()
    finally:
        destlock and destlock.release()
        lock and lock.release()

    # update store, spath, svfs and sjoin of repo
    repo.unfiltered().__init__(repo.baseui, repo.root)

    # TODO: figure out how to access subrepos that exist, but were previously
    # removed from .hgsub
    c = repo['.']
    subs = c.substate
    for s in sorted(subs):
        c.sub(s).unshare()

def postshare(sourcerepo, destrepo, bookmarks=True, defaultpath=None):
    """Called after a new shared repo is created.

    The new repo only has a requirements file and pointer to the source.
    This function configures additional shared data.

    Extensions can wrap this function and write additional entries to
    destrepo/.hg/shared to indicate additional pieces of data to be shared.
    """
    default = defaultpath or sourcerepo.ui.config('paths', 'default')
    if default:
        template = ('[paths]\n'
                    'default = %s\n')
        destrepo.vfs.write('hgrc', util.tonativeeol(template % default))

    with destrepo.wlock():
        if bookmarks:
            destrepo.vfs.write('shared', sharedbookmarks + '\n')

def _postshareupdate(repo, update, checkout=None):
    """Maybe perform a working directory update after a shared repo is created.

    ``update`` can be a boolean or a revision to update to.
    """
    if not update:
        return

    repo.ui.status(_("updating working directory\n"))
    if update is not True:
        checkout = update
    for test in (checkout, 'default', 'tip'):
        if test is None:
            continue
        try:
            uprev = repo.lookup(test)
            break
        except error.RepoLookupError:
            continue
    _update(repo, uprev)

def copystore(ui, srcrepo, destpath):
    '''copy files from store of srcrepo in destpath

    returns destlock
    '''
    destlock = None
    try:
        hardlink = None
        num = 0
        closetopic = [None]
        def prog(topic, pos):
            if pos is None:
                closetopic[0] = topic
            else:
                ui.progress(topic, pos + num)
        srcpublishing = srcrepo.publishing()
        srcvfs = vfsmod.vfs(srcrepo.sharedpath)
        dstvfs = vfsmod.vfs(destpath)
        for f in srcrepo.store.copylist():
            if srcpublishing and f.endswith('phaseroots'):
                continue
            dstbase = os.path.dirname(f)
            if dstbase and not dstvfs.exists(dstbase):
                dstvfs.mkdir(dstbase)
            if srcvfs.exists(f):
                if f.endswith('data'):
                    # 'dstbase' may be empty (e.g. revlog format 0)
                    lockfile = os.path.join(dstbase, "lock")
                    # lock to avoid premature writing to the target
                    destlock = lock.lock(dstvfs, lockfile)
                hardlink, n = util.copyfiles(srcvfs.join(f), dstvfs.join(f),
                                             hardlink, progress=prog)
                num += n
        if hardlink:
            ui.debug("linked %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        else:
            ui.debug("copied %d files\n" % num)
            if closetopic[0]:
                ui.progress(closetopic[0], None)
        return destlock
    except: # re-raises
        release(destlock)
        raise

def clonewithshare(ui, peeropts, sharepath, source, srcpeer, dest, pull=False,
                   rev=None, update=True, stream=False):
    """Perform a clone using a shared repo.

    The store for the repository will be located at <sharepath>/.hg. The
    specified revisions will be cloned or pulled from "source". A shared repo
    will be created at "dest" and a working copy will be created if "update" is
    True.
    """
    revs = None
    if rev:
        if not srcpeer.capable('lookup'):
            raise error.Abort(_("src repository does not support "
                                "revision lookup and so doesn't "
                                "support clone by revision"))

        # TODO this is batchable.
        remoterevs = []
        for r in rev:
            with srcpeer.commandexecutor() as e:
                remoterevs.append(e.callcommand('lookup', {
                    'key': r,
                }).result())
        revs = remoterevs

    # Obtain a lock before checking for or cloning the pooled repo otherwise
    # 2 clients may race creating or populating it.
    pooldir = os.path.dirname(sharepath)
    # lock class requires the directory to exist.
    try:
        util.makedir(pooldir, False)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    poolvfs = vfsmod.vfs(pooldir)
    basename = os.path.basename(sharepath)

    with lock.lock(poolvfs, '%s.lock' % basename):
        if os.path.exists(sharepath):
            ui.status(_('(sharing from existing pooled repository %s)\n') %
                      basename)
        else:
            ui.status(_('(sharing from new pooled repository %s)\n') % basename)
            # Always use pull mode because hardlinks in share mode don't work
            # well. Never update because working copies aren't necessary in
            # share mode.
            clone(ui, peeropts, source, dest=sharepath, pull=True,
                  revs=rev, update=False, stream=stream)

    # Resolve the value to put in [paths] section for the source.
    if islocal(source):
        defaultpath = os.path.abspath(util.urllocalpath(source))
    else:
        defaultpath = source

    sharerepo = repository(ui, path=sharepath)
    share(ui, sharerepo, dest=dest, update=False, bookmarks=False,
          defaultpath=defaultpath)

    # We need to perform a pull against the dest repo to fetch bookmarks
    # and other non-store data that isn't shared by default. In the case of
    # non-existing shared repo, this means we pull from the remote twice. This
    # is a bit weird. But at the time it was implemented, there wasn't an easy
    # way to pull just non-changegroup data.
    destrepo = repository(ui, path=dest)
    exchange.pull(destrepo, srcpeer, heads=revs)

    _postshareupdate(destrepo, update)

    return srcpeer, peer(ui, peeropts, dest)

# Recomputing branch cache might be slow on big repos,
# so just copy it
def _copycache(srcrepo, dstcachedir, fname):
    """copy a cache from srcrepo to destcachedir (if it exists)"""
    srcbranchcache = srcrepo.vfs.join('cache/%s' % fname)
    dstbranchcache = os.path.join(dstcachedir, fname)
    if os.path.exists(srcbranchcache):
        if not os.path.exists(dstcachedir):
            os.mkdir(dstcachedir)
        util.copyfile(srcbranchcache, dstbranchcache)

def clone(ui, peeropts, source, dest=None, pull=False, revs=None,
          update=True, stream=False, branch=None, shareopts=None):
    """Make a copy of an existing repository.

    Create a copy of an existing repository in a new directory. The
    source and destination are URLs, as passed to the repository
    function. Returns a pair of repository peers, the source and
    newly created destination.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls and
    pushes.

    If an exception is raised, the partly cloned/updated destination
    repository will be deleted.

    Arguments:

    source: repository object or URL

    dest: URL of destination repository to create (defaults to base
    name of source repository)

    pull: always pull from source repository, even in local case or if the
    server prefers streaming

    stream: stream raw data uncompressed from repository (fast over
    LAN, slow over WAN)

    revs: revision to clone up to (implies pull=True)

    update: update working directory after clone completes, if
    destination is local repository (True means update to default rev,
    anything else is treated as a revision)

    branch: branches to clone

    shareopts: dict of options to control auto sharing behavior. The "pool" key
    activates auto sharing mode and defines the directory for stores. The
    "mode" key determines how to construct the directory name of the shared
    repository. "identity" means the name is derived from the node of the first
    changeset in the repository. "remote" means the name is derived from the
    remote's path/URL. Defaults to "identity."
    """

    if isinstance(source, bytes):
        origsource = ui.expandpath(source)
        source, branches = parseurl(origsource, branch)
        srcpeer = peer(ui, peeropts, source)
    else:
        srcpeer = source.peer() # in case we were called with a localrepo
        branches = (None, branch or [])
        origsource = source = srcpeer.url()
    revs, checkout = addbranchrevs(srcpeer, srcpeer, branches, revs)

    if dest is None:
        dest = defaultdest(source)
        if dest:
            ui.status(_("destination directory: %s\n") % dest)
    else:
        dest = ui.expandpath(dest)

    dest = util.urllocalpath(dest)
    source = util.urllocalpath(source)

    if not dest:
        raise error.Abort(_("empty destination path is not valid"))

    destvfs = vfsmod.vfs(dest, expandpath=True)
    if destvfs.lexists():
        if not destvfs.isdir():
            raise error.Abort(_("destination '%s' already exists") % dest)
        elif destvfs.listdir():
            raise error.Abort(_("destination '%s' is not empty") % dest)

    shareopts = shareopts or {}
    sharepool = shareopts.get('pool')
    sharenamemode = shareopts.get('mode')
    if sharepool and islocal(dest):
        sharepath = None
        if sharenamemode == 'identity':
            # Resolve the name from the initial changeset in the remote
            # repository. This returns nullid when the remote is empty. It
            # raises RepoLookupError if revision 0 is filtered or otherwise
            # not available. If we fail to resolve, sharing is not enabled.
            try:
                with srcpeer.commandexecutor() as e:
                    rootnode = e.callcommand('lookup', {
                        'key': '0',
                    }).result()

                if rootnode != node.nullid:
                    sharepath = os.path.join(sharepool, node.hex(rootnode))
                else:
                    ui.status(_('(not using pooled storage: '
                                'remote appears to be empty)\n'))
            except error.RepoLookupError:
                ui.status(_('(not using pooled storage: '
                            'unable to resolve identity of remote)\n'))
        elif sharenamemode == 'remote':
            sharepath = os.path.join(
                sharepool, node.hex(hashlib.sha1(source).digest()))
        else:
            raise error.Abort(_('unknown share naming mode: %s') %
                              sharenamemode)

        if sharepath:
            return clonewithshare(ui, peeropts, sharepath, source, srcpeer,
                                  dest, pull=pull, rev=revs, update=update,
                                  stream=stream)

    srclock = destlock = cleandir = None
    srcrepo = srcpeer.local()
    try:
        abspath = origsource
        if islocal(origsource):
            abspath = os.path.abspath(util.urllocalpath(origsource))

        if islocal(dest):
            cleandir = dest

        copy = False
        if (srcrepo and srcrepo.cancopy() and islocal(dest)
            and not phases.hassecret(srcrepo)):
            copy = not pull and not revs

        if copy:
            try:
                # we use a lock here because if we race with commit, we
                # can end up with extra data in the cloned revlogs that's
                # not pointed to by changesets, thus causing verify to
                # fail
                srclock = srcrepo.lock(wait=False)
            except error.LockError:
                copy = False

        if copy:
            srcrepo.hook('preoutgoing', throw=True, source='clone')
            hgdir = os.path.realpath(os.path.join(dest, ".hg"))
            if not os.path.exists(dest):
                os.mkdir(dest)
            else:
                # only clean up directories we create ourselves
                cleandir = hgdir
            try:
                destpath = hgdir
                util.makedir(destpath, notindexed=True)
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            destlock = copystore(ui, srcrepo, destpath)
            # copy bookmarks over
            srcbookmarks = srcrepo.vfs.join('bookmarks')
            dstbookmarks = os.path.join(destpath, 'bookmarks')
            if os.path.exists(srcbookmarks):
                util.copyfile(srcbookmarks, dstbookmarks)

            dstcachedir = os.path.join(destpath, 'cache')
            for cache in cacheutil.cachetocopy(srcrepo):
                _copycache(srcrepo, dstcachedir, cache)

            # we need to re-init the repo after manually copying the data
            # into it
            destpeer = peer(srcrepo, peeropts, dest)
            srcrepo.hook('outgoing', source='clone',
                         node=node.hex(node.nullid))
        else:
            try:
                destpeer = peer(srcrepo or ui, peeropts, dest, create=True)
                # only pass ui when no srcrepo
            except OSError as inst:
                if inst.errno == errno.EEXIST:
                    cleandir = None
                    raise error.Abort(_("destination '%s' already exists")
                                      % dest)
                raise

            if revs:
                if not srcpeer.capable('lookup'):
                    raise error.Abort(_("src repository does not support "
                                        "revision lookup and so doesn't "
                                        "support clone by revision"))

                # TODO this is batchable.
                remoterevs = []
                for rev in revs:
                    with srcpeer.commandexecutor() as e:
                        remoterevs.append(e.callcommand('lookup', {
                            'key': rev,
                        }).result())
                revs = remoterevs

                checkout = revs[0]
            else:
                revs = None
            local = destpeer.local()
            if local:
                u = util.url(abspath)
                defaulturl = bytes(u)
                local.ui.setconfig('paths', 'default', defaulturl, 'clone')
                if not stream:
                    if pull:
                        stream = False
                    else:
                        stream = None
                # internal config: ui.quietbookmarkmove
                overrides = {('ui', 'quietbookmarkmove'): True}
                with local.ui.configoverride(overrides, 'clone'):
                    exchange.pull(local, srcpeer, revs,
                                  streamclonerequested=stream)
            elif srcrepo:
                exchange.push(srcrepo, destpeer, revs=revs,
                              bookmarks=srcrepo._bookmarks.keys())
            else:
                raise error.Abort(_("clone from remote to remote not supported")
                                  )

        cleandir = None

        destrepo = destpeer.local()
        if destrepo:
            template = uimod.samplehgrcs['cloned']
            u = util.url(abspath)
            u.passwd = None
            defaulturl = bytes(u)
            destrepo.vfs.write('hgrc', util.tonativeeol(template % defaulturl))
            destrepo.ui.setconfig('paths', 'default', defaulturl, 'clone')

            if ui.configbool('experimental', 'remotenames'):
                logexchange.pullremotenames(destrepo, srcpeer)

            if update:
                if update is not True:
                    with srcpeer.commandexecutor() as e:
                        checkout = e.callcommand('lookup', {
                            'key': update,
                        }).result()

                uprev = None
                status = None
                if checkout is not None:
                    if checkout in destrepo:
                        uprev = checkout
                    else:
                        if update is not True:
                            try:
                                uprev = destrepo.lookup(update)
                            except error.RepoLookupError:
                                pass
                if uprev is None:
                    try:
                        uprev = destrepo._bookmarks['@']
                        update = '@'
                        bn = destrepo[uprev].branch()
                        if bn == 'default':
                            status = _("updating to bookmark @\n")
                        else:
                            status = (_("updating to bookmark @ on branch %s\n")
                                      % bn)
                    except KeyError:
                        try:
                            uprev = destrepo.branchtip('default')
                        except error.RepoLookupError:
                            uprev = destrepo.lookup('tip')
                if not status:
                    bn = destrepo[uprev].branch()
                    status = _("updating to branch %s\n") % bn
                destrepo.ui.status(status)
                _update(destrepo, uprev)
                if update in destrepo._bookmarks:
                    bookmarks.activate(destrepo, update)
    finally:
        release(srclock, destlock)
        if cleandir is not None:
            shutil.rmtree(cleandir, True)
        if srcpeer is not None:
            srcpeer.close()
    return srcpeer, destpeer

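# Illustrative sketch (not part of the original file): a pooled clone keyed
# on the remote's root changeset; the pool directory is an assumed path:
#
#   srcpeer, destpeer = clone(ui, {}, b'https://example.com/repo',
#                             dest=b'repo',
#                             shareopts={'pool': b'/home/me/.hgpool',
#                                        'mode': 'identity'})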
def _showstats(repo, stats, quietempty=False):
    if quietempty and stats.isempty():
        return
    repo.ui.status(_("%d files updated, %d files merged, "
                     "%d files removed, %d files unresolved\n") % (
                   stats.updatedcount, stats.mergedcount,
                   stats.removedcount, stats.unresolvedcount))

def updaterepo(repo, node, overwrite, updatecheck=None):
    """Update the working directory to node.

    When overwrite is set, working directory changes are clobbered;
    otherwise they are merged into the destination.

    returns stats (see pydoc mercurial.merge.applyupdates)"""
    return mergemod.update(repo, node, False, overwrite,
                           labels=['working copy', 'destination'],
                           updatecheck=updatecheck)

def update(repo, node, quietempty=False, updatecheck=None):
    """update the working directory to node"""
    stats = updaterepo(repo, node, False, updatecheck=updatecheck)
    _showstats(repo, stats, quietempty)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges\n"))
    return stats.unresolvedcount > 0

# naming conflict in clone()
_update = update

def clean(repo, node, show_stats=True, quietempty=False):
    """forcibly switch the working directory to node, clobbering changes"""
    stats = updaterepo(repo, node, True)
    repo.vfs.unlinkpath('graftstate', ignoremissing=True)
    if show_stats:
        _showstats(repo, stats, quietempty)
    return stats.unresolvedcount > 0

# naming conflict in updatetotally()
_clean = clean

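# Illustrative sketch (not part of the original file): update() merges
# local changes into the target revision, while clean() discards them:
#
#   hadconflicts = update(repo, repo.lookup(b'default'))
#   clean(repo, repo.lookup(b'tip'))  # clobbers working directory changes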
def updatetotally(ui, repo, checkout, brev, clean=False, updatecheck=None):
    """Update the working directory with extra care for non-file components

    This takes care of non-file components below:

    :bookmark: might be advanced or (in)activated

    This takes arguments below:

    :checkout: to which revision the working directory is updated
    :brev: a name, which might be a bookmark to be activated after updating
    :clean: whether changes in the working directory can be discarded
    :updatecheck: how to deal with a dirty working directory

    Valid values for updatecheck are (None => linear):

    * abort: abort if the working directory is dirty
    * none: don't check (merge working directory changes into destination)
    * linear: check that update is linear before merging working directory
      changes into destination
    * noconflict: check that the update does not result in file merges

    Returns whether a conflict was detected during the update.
    """
    if updatecheck is None:
        updatecheck = ui.config('commands', 'update.check')
        if updatecheck not in ('abort', 'none', 'linear', 'noconflict'):
            # If not configured, or invalid value configured
            updatecheck = 'linear'
    with repo.wlock():
        movemarkfrom = None
        warndest = False
        if checkout is None:
            updata = destutil.destupdate(repo, clean=clean)
            checkout, movemarkfrom, brev = updata
            warndest = True

        if clean:
            ret = _clean(repo, checkout)
        else:
            if updatecheck == 'abort':
                cmdutil.bailifchanged(repo, merge=False)
                updatecheck = 'none'
            ret = _update(repo, checkout, updatecheck=updatecheck)

        if not ret and movemarkfrom:
            if movemarkfrom == repo['.'].node():
                pass # no-op update
            elif bookmarks.update(repo, [movemarkfrom], repo['.'].node()):
                b = ui.label(repo._activebookmark, 'bookmarks.active')
                ui.status(_("updating bookmark %s\n") % b)
            else:
                # this can happen with a non-linear update
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
                bookmarks.deactivate(repo)
        elif brev in repo._bookmarks:
            if brev != repo._activebookmark:
                b = ui.label(brev, 'bookmarks.active')
                ui.status(_("(activating bookmark %s)\n") % b)
            bookmarks.activate(repo, brev)
        elif brev:
            if repo._activebookmark:
                b = ui.label(repo._activebookmark, 'bookmarks')
                ui.status(_("(leaving bookmark %s)\n") % b)
            bookmarks.deactivate(repo)

        if warndest:
            destutil.statusotherdests(ui, repo)

    return ret

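# Illustrative sketch (not part of the original file): the updatecheck
# values above correspond to the 'commands.update.check' config knob read
# in the function body, e.g. in an hgrc:
#
#   [commands]
#   update.check = noconflict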
def merge(repo, node, force=None, remind=True, mergeforce=False, labels=None,
          abort=False):
    """Branch merge with node, resolving changes. Return True if any
    conflicts remain unresolved."""
    if not abort:
        stats = mergemod.update(repo, node, True, force, mergeforce=mergeforce,
                                labels=labels)
    else:
        ms = mergemod.mergestate.read(repo)
        if ms.active():
            # there were conflicts
            node = ms.localctx.hex()
        else:
            # there were no conflicts, mergestate was not stored
            node = repo['.'].hex()

        repo.ui.status(_("aborting the merge, updating back to"
                         " %s\n") % node[:12])
        stats = mergemod.update(repo, node, branchmerge=False, force=True,
                                labels=labels)

    _showstats(repo, stats)
    if stats.unresolvedcount:
        repo.ui.status(_("use 'hg resolve' to retry unresolved file merges "
                         "or 'hg merge --abort' to abandon\n"))
    elif remind and not abort:
        repo.ui.status(_("(branch merge, don't forget to commit)\n"))
    return stats.unresolvedcount > 0

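# Illustrative sketch (not part of the original file): merging a named
# branch head and, if conflicts remain, abandoning the attempt (the node
# argument is unused on the abort path):
#
#   if merge(repo, repo.lookup(b'stable')):
#       merge(repo, None, abort=True)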
def _incoming(displaychlist, subreporecurse, ui, repo, source,
              opts, buffered=False):
    """
    Helper for incoming / gincoming.
    displaychlist gets called with
        (remoterepo, incomingchangesetlist, displayer) parameters,
    and is supposed to contain only code that can't be unified.
    """
    source, branches = parseurl(ui.expandpath(source), opts.get('branch'))
    other = peer(repo, opts, source)
    ui.status(_('comparing with %s\n') % util.hidepassword(source))
    revs, checkout = addbranchrevs(repo, other, branches, opts.get('rev'))

    if revs:
        revs = [other.lookup(rev) for rev in revs]
    other, chlist, cleanupfn = bundlerepo.getremotechanges(ui, repo, other,
                                revs, opts["bundle"], opts["force"])
    try:
        if not chlist:
            ui.status(_("no changes found\n"))
            return subreporecurse()
        ui.pager('incoming')
        displayer = logcmdutil.changesetdisplayer(ui, other, opts,
                                                  buffered=buffered)
        displaychlist(other, chlist, displayer)
        displayer.close()
    finally:
        cleanupfn()
    subreporecurse()
    return 0 # exit code is zero since we found incoming changes

949 def incoming(ui, repo, source, opts):
951 def incoming(ui, repo, source, opts):
950 def subreporecurse():
952 def subreporecurse():
951 ret = 1
953 ret = 1
952 if opts.get('subrepos'):
954 if opts.get('subrepos'):
953 ctx = repo[None]
955 ctx = repo[None]
954 for subpath in sorted(ctx.substate):
956 for subpath in sorted(ctx.substate):
955 sub = ctx.sub(subpath)
957 sub = ctx.sub(subpath)
956 ret = min(ret, sub.incoming(ui, source, opts))
958 ret = min(ret, sub.incoming(ui, source, opts))
957 return ret
959 return ret
958
960
959 def display(other, chlist, displayer):
961 def display(other, chlist, displayer):
960 limit = logcmdutil.getlimit(opts)
962 limit = logcmdutil.getlimit(opts)
961 if opts.get('newest_first'):
963 if opts.get('newest_first'):
962 chlist.reverse()
964 chlist.reverse()
963 count = 0
965 count = 0
964 for n in chlist:
966 for n in chlist:
965 if limit is not None and count >= limit:
967 if limit is not None and count >= limit:
966 break
968 break
967 parents = [p for p in other.changelog.parents(n) if p != nullid]
969 parents = [p for p in other.changelog.parents(n) if p != nullid]
968 if opts.get('no_merges') and len(parents) == 2:
970 if opts.get('no_merges') and len(parents) == 2:
969 continue
971 continue
970 count += 1
972 count += 1
971 displayer.show(other[n])
973 displayer.show(other[n])
972 return _incoming(display, subreporecurse, ui, repo, source, opts)
974 return _incoming(display, subreporecurse, ui, repo, source, opts)
973
975
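A hedged usage sketch for incoming(): the opts dictionary mirrors the command-line flags the command layer normally passes through (note that _incoming() requires the 'bundle' and 'force' keys); the source URL is a placeholder.

from mercurial import hg, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, '.')
opts = {'bundle': '', 'force': False, 'branch': None, 'rev': [],
        'newest_first': False, 'no_merges': False, 'limit': None,
        'subrepos': False}
# Returns 0 when incoming changesets were found, 1 otherwise.
ret = hg.incoming(u, repo, 'https://example.com/repo', opts)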
974 def _outgoing(ui, repo, dest, opts):
976 def _outgoing(ui, repo, dest, opts):
975 path = ui.paths.getpath(dest, default=('default-push', 'default'))
977 path = ui.paths.getpath(dest, default=('default-push', 'default'))
976 if not path:
978 if not path:
977 raise error.Abort(_('default repository not configured!'),
979 raise error.Abort(_('default repository not configured!'),
978 hint=_("see 'hg help config.paths'"))
980 hint=_("see 'hg help config.paths'"))
979 dest = path.pushloc or path.loc
981 dest = path.pushloc or path.loc
980 branches = path.branch, opts.get('branch') or []
982 branches = path.branch, opts.get('branch') or []
981
983
982 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
984 ui.status(_('comparing with %s\n') % util.hidepassword(dest))
983 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
985 revs, checkout = addbranchrevs(repo, repo, branches, opts.get('rev'))
984 if revs:
986 if revs:
985 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
987 revs = [repo[rev].node() for rev in scmutil.revrange(repo, revs)]
986
988
987 other = peer(repo, opts, dest)
989 other = peer(repo, opts, dest)
988 outgoing = discovery.findcommonoutgoing(repo, other, revs,
990 outgoing = discovery.findcommonoutgoing(repo, other, revs,
989 force=opts.get('force'))
991 force=opts.get('force'))
990 o = outgoing.missing
992 o = outgoing.missing
991 if not o:
993 if not o:
992 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
994 scmutil.nochangesfound(repo.ui, repo, outgoing.excluded)
993 return o, other
995 return o, other
994
996
995 def outgoing(ui, repo, dest, opts):
997 def outgoing(ui, repo, dest, opts):
996 def recurse():
998 def recurse():
997 ret = 1
999 ret = 1
998 if opts.get('subrepos'):
1000 if opts.get('subrepos'):
999 ctx = repo[None]
1001 ctx = repo[None]
1000 for subpath in sorted(ctx.substate):
1002 for subpath in sorted(ctx.substate):
1001 sub = ctx.sub(subpath)
1003 sub = ctx.sub(subpath)
1002 ret = min(ret, sub.outgoing(ui, dest, opts))
1004 ret = min(ret, sub.outgoing(ui, dest, opts))
1003 return ret
1005 return ret
1004
1006
1005 limit = logcmdutil.getlimit(opts)
1007 limit = logcmdutil.getlimit(opts)
1006 o, other = _outgoing(ui, repo, dest, opts)
1008 o, other = _outgoing(ui, repo, dest, opts)
1007 if not o:
1009 if not o:
1008 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1010 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1009 return recurse()
1011 return recurse()
1010
1012
1011 if opts.get('newest_first'):
1013 if opts.get('newest_first'):
1012 o.reverse()
1014 o.reverse()
1013 ui.pager('outgoing')
1015 ui.pager('outgoing')
1014 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1016 displayer = logcmdutil.changesetdisplayer(ui, repo, opts)
1015 count = 0
1017 count = 0
1016 for n in o:
1018 for n in o:
1017 if limit is not None and count >= limit:
1019 if limit is not None and count >= limit:
1018 break
1020 break
1019 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1021 parents = [p for p in repo.changelog.parents(n) if p != nullid]
1020 if opts.get('no_merges') and len(parents) == 2:
1022 if opts.get('no_merges') and len(parents) == 2:
1021 continue
1023 continue
1022 count += 1
1024 count += 1
1023 displayer.show(repo[n])
1025 displayer.show(repo[n])
1024 displayer.close()
1026 displayer.close()
1025 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1027 cmdutil.outgoinghooks(ui, repo, other, opts, o)
1026 recurse()
1028 recurse()
1027 return 0 # exit code is zero since we found outgoing changes
1029 return 0 # exit code is zero since we found outgoing changes
1028
1030
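The symmetric sketch for outgoing(); passing None as dest lets _outgoing() fall back to the configured default-push/default path, aborting if neither is set. The opts keys again mirror the command-line flags.

from mercurial import hg, ui as uimod

u = uimod.ui.load()
repo = hg.repository(u, '.')
opts = {'branch': None, 'rev': [], 'newest_first': False,
        'no_merges': False, 'limit': None, 'force': False,
        'subrepos': False}
# Returns 0 when outgoing changesets were found, 1 otherwise.
ret = hg.outgoing(u, repo, None, opts)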
1029 def verify(repo):
1031 def verify(repo):
1030 """verify the consistency of a repository"""
1032 """verify the consistency of a repository"""
1031 ret = verifymod.verify(repo)
1033 ret = verifymod.verify(repo)
1032
1034
1033 # Broken subrepo references in hidden csets don't seem worth worrying about,
1035 # Broken subrepo references in hidden csets don't seem worth worrying about,
1034 # since they can't be pushed/pulled, and --hidden can be used if they are a
1036 # since they can't be pushed/pulled, and --hidden can be used if they are a
1035 # concern.
1037 # concern.
1036
1038
1037 # pathto() is needed for -R case
1039 # pathto() is needed for -R case
1038 revs = repo.revs("filelog(%s)",
1040 revs = repo.revs("filelog(%s)",
1039 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1041 util.pathto(repo.root, repo.getcwd(), '.hgsubstate'))
1040
1042
1041 if revs:
1043 if revs:
1042 repo.ui.status(_('checking subrepo links\n'))
1044 repo.ui.status(_('checking subrepo links\n'))
1043 for rev in revs:
1045 for rev in revs:
1044 ctx = repo[rev]
1046 ctx = repo[rev]
1045 try:
1047 try:
1046 for subpath in ctx.substate:
1048 for subpath in ctx.substate:
1047 try:
1049 try:
1048 ret = (ctx.sub(subpath, allowcreate=False).verify()
1050 ret = (ctx.sub(subpath, allowcreate=False).verify()
1049 or ret)
1051 or ret)
1050 except error.RepoError as e:
1052 except error.RepoError as e:
1051 repo.ui.warn(('%d: %s\n') % (rev, e))
1053 repo.ui.warn(('%d: %s\n') % (rev, e))
1052 except Exception:
1054 except Exception:
1053 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1055 repo.ui.warn(_('.hgsubstate is corrupt in revision %s\n') %
1054 node.short(ctx.node()))
1056 node.short(ctx.node()))
1055
1057
1056 return ret
1058 return ret
1057
1059
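A short sketch of calling verify(); the return value follows the usual hg convention of 0 for success and nonzero for problems.

from mercurial import hg, ui as uimod

repo = hg.repository(uimod.ui.load(), '.')
if hg.verify(repo):
    repo.ui.warn('repository verification reported problems\n')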
1058 def remoteui(src, opts):
1060 def remoteui(src, opts):
1059 'build a remote ui from ui or repo and opts'
1061 'build a remote ui from ui or repo and opts'
1060 if util.safehasattr(src, 'baseui'): # looks like a repository
1062 if util.safehasattr(src, 'baseui'): # looks like a repository
1061 dst = src.baseui.copy() # drop repo-specific config
1063 dst = src.baseui.copy() # drop repo-specific config
1062 src = src.ui # copy target options from repo
1064 src = src.ui # copy target options from repo
1063 else: # assume it's a global ui object
1065 else: # assume it's a global ui object
1064 dst = src.copy() # keep all global options
1066 dst = src.copy() # keep all global options
1065
1067
1066 # copy ssh-specific options
1068 # copy ssh-specific options
1067 for o in 'ssh', 'remotecmd':
1069 for o in 'ssh', 'remotecmd':
1068 v = opts.get(o) or src.config('ui', o)
1070 v = opts.get(o) or src.config('ui', o)
1069 if v:
1071 if v:
1070 dst.setconfig("ui", o, v, 'copied')
1072 dst.setconfig("ui", o, v, 'copied')
1071
1073
1072 # copy bundle-specific options
1074 # copy bundle-specific options
1073 r = src.config('bundle', 'mainreporoot')
1075 r = src.config('bundle', 'mainreporoot')
1074 if r:
1076 if r:
1075 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1077 dst.setconfig('bundle', 'mainreporoot', r, 'copied')
1076
1078
1077 # copy selected local settings to the remote ui
1079 # copy selected local settings to the remote ui
1078 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1080 for sect in ('auth', 'hostfingerprints', 'hostsecurity', 'http_proxy'):
1079 for key, val in src.configitems(sect):
1081 for key, val in src.configitems(sect):
1080 dst.setconfig(sect, key, val, 'copied')
1082 dst.setconfig(sect, key, val, 'copied')
1081 v = src.config('web', 'cacerts')
1083 v = src.config('web', 'cacerts')
1082 if v:
1084 if v:
1083 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1085 dst.setconfig('web', 'cacerts', util.expandpath(v), 'copied')
1084
1086
1085 return dst
1087 return dst
1086
1088
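A quick illustration (with hypothetical option values) of how remoteui() copies command-line overrides into the ui used for the remote side:

from mercurial import hg, ui as uimod

u = uimod.ui.load()
# opts as the command layer would build them from --ssh
remote = hg.remoteui(u, {'ssh': 'ssh -C'})
assert remote.config('ui', 'ssh') == 'ssh -C'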
1087 # Files of interest
1089 # Files of interest
1088 # Used to check if the repository has changed, by looking at the mtime and
1090 # Used to check if the repository has changed, by looking at the mtime and
1089 # size of these files.
1091 # size of these files.
1090 foi = [('spath', '00changelog.i'),
1092 foi = [('spath', '00changelog.i'),
1091 ('spath', 'phaseroots'), # ! phase can change content at the same size
1093 ('spath', 'phaseroots'), # ! phase can change content at the same size
1092 ('spath', 'obsstore'),
1094 ('spath', 'obsstore'),
1093 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1095 ('path', 'bookmarks'), # ! bookmark can change content at the same size
1094 ]
1096 ]
1095
1097
1096 class cachedlocalrepo(object):
1098 class cachedlocalrepo(object):
1097 """Holds a localrepository that can be cached and reused."""
1099 """Holds a localrepository that can be cached and reused."""
1098
1100
1099 def __init__(self, repo):
1101 def __init__(self, repo):
1100 """Create a new cached repo from an existing repo.
1102 """Create a new cached repo from an existing repo.
1101
1103
1102 We assume the passed-in repo was recently created. If the
1104 We assume the passed-in repo was recently created. If the
1103 repo has changed between when it was created and when it was
1105 repo has changed between when it was created and when it was
1104 turned into a cache, it may not refresh properly.
1106 turned into a cache, it may not refresh properly.
1105 """
1107 """
1106 assert isinstance(repo, localrepo.localrepository)
1108 assert isinstance(repo, localrepo.localrepository)
1107 self._repo = repo
1109 self._repo = repo
1108 self._state, self.mtime = self._repostate()
1110 self._state, self.mtime = self._repostate()
1109 self._filtername = repo.filtername
1111 self._filtername = repo.filtername
1110
1112
1111 def fetch(self):
1113 def fetch(self):
1112 """Refresh (if necessary) and return a repository.
1114 """Refresh (if necessary) and return a repository.
1113
1115
1114 If the cached instance is out of date, it will be recreated
1116 If the cached instance is out of date, it will be recreated
1115 automatically and returned.
1117 automatically and returned.
1116
1118
1117 Returns a tuple of the repo and a boolean indicating whether a new
1119 Returns a tuple of the repo and a boolean indicating whether a new
1118 repo instance was created.
1120 repo instance was created.
1119 """
1121 """
1120 # We compare the mtimes and sizes of some well-known files to
1122 # We compare the mtimes and sizes of some well-known files to
1121 # determine if the repo changed. This is not precise, as mtimes
1123 # determine if the repo changed. This is not precise, as mtimes
1122 # are susceptible to clock skew and imprecise filesystems and
1124 # are susceptible to clock skew and imprecise filesystems and
1123 # file content can change while maintaining the same size.
1125 # file content can change while maintaining the same size.
1124
1126
1125 state, mtime = self._repostate()
1127 state, mtime = self._repostate()
1126 if state == self._state:
1128 if state == self._state:
1127 return self._repo, False
1129 return self._repo, False
1128
1130
1129 repo = repository(self._repo.baseui, self._repo.url())
1131 repo = repository(self._repo.baseui, self._repo.url())
1130 if self._filtername:
1132 if self._filtername:
1131 self._repo = repo.filtered(self._filtername)
1133 self._repo = repo.filtered(self._filtername)
1132 else:
1134 else:
1133 self._repo = repo.unfiltered()
1135 self._repo = repo.unfiltered()
1134 self._state = state
1136 self._state = state
1135 self.mtime = mtime
1137 self.mtime = mtime
1136
1138
1137 return self._repo, True
1139 return self._repo, True
1138
1140
1139 def _repostate(self):
1141 def _repostate(self):
1140 state = []
1142 state = []
1141 maxmtime = -1
1143 maxmtime = -1
1142 for attr, fname in foi:
1144 for attr, fname in foi:
1143 prefix = getattr(self._repo, attr)
1145 prefix = getattr(self._repo, attr)
1144 p = os.path.join(prefix, fname)
1146 p = os.path.join(prefix, fname)
1145 try:
1147 try:
1146 st = os.stat(p)
1148 st = os.stat(p)
1147 except OSError:
1149 except OSError:
1148 st = os.stat(prefix)
1150 st = os.stat(prefix)
1149 state.append((st[stat.ST_MTIME], st.st_size))
1151 state.append((st[stat.ST_MTIME], st.st_size))
1150 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1152 maxmtime = max(maxmtime, st[stat.ST_MTIME])
1151
1153
1152 return tuple(state), maxmtime
1154 return tuple(state), maxmtime
1153
1155
1154 def copy(self):
1156 def copy(self):
1155 """Obtain a copy of this class instance.
1157 """Obtain a copy of this class instance.
1156
1158
1157 A new localrepository instance is obtained. The new instance should be
1159 A new localrepository instance is obtained. The new instance should be
1158 completely independent of the original.
1160 completely independent of the original.
1159 """
1161 """
1160 repo = repository(self._repo.baseui, self._repo.origroot)
1162 repo = repository(self._repo.baseui, self._repo.origroot)
1161 if self._filtername:
1163 if self._filtername:
1162 repo = repo.filtered(self._filtername)
1164 repo = repo.filtered(self._filtername)
1163 else:
1165 else:
1164 repo = repo.unfiltered()
1166 repo = repo.unfiltered()
1165 c = cachedlocalrepo(repo)
1167 c = cachedlocalrepo(repo)
1166 c._state = self._state
1168 c._state = self._state
1167 c.mtime = self.mtime
1169 c.mtime = self.mtime
1168 return c
1170 return c
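cachedlocalrepo is how long-running servers such as hgweb keep repository objects warm between requests. A hedged sketch of the fetch-and-reuse pattern, with handle_request() standing in for a real request handler:

from mercurial import hg as hgmod, ui as uimod

cached = hgmod.cachedlocalrepo(hgmod.repository(uimod.ui.load(), '.'))

def handle_request():
    # Re-stats the files of interest and rebuilds the repo only if
    # one of them changed since the last call.
    repo, fresh = cached.fetch()
    if fresh:
        repo.ui.debug('repository changed on disk; reloaded\n')
    return repo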
@@ -1,1010 +1,1010 b''
1 # httppeer.py - HTTP repository proxy classes for mercurial
1 # httppeer.py - HTTP repository proxy classes for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import errno
11 import errno
12 import io
12 import io
13 import os
13 import os
14 import socket
14 import socket
15 import struct
15 import struct
16 import sys
16 import sys
17 import tempfile
17 import tempfile
18 import weakref
18 import weakref
19
19
20 from .i18n import _
20 from .i18n import _
21 from .thirdparty import (
21 from .thirdparty import (
22 cbor,
22 cbor,
23 )
23 )
24 from .thirdparty.zope import (
24 from .thirdparty.zope import (
25 interface as zi,
25 interface as zi,
26 )
26 )
27 from . import (
27 from . import (
28 bundle2,
28 bundle2,
29 error,
29 error,
30 httpconnection,
30 httpconnection,
31 pycompat,
31 pycompat,
32 repository,
32 repository,
33 statichttprepo,
33 statichttprepo,
34 url as urlmod,
34 url as urlmod,
35 util,
35 util,
36 wireprotoframing,
36 wireprotoframing,
37 wireprototypes,
37 wireprototypes,
38 wireprotov1peer,
38 wireprotov1peer,
39 wireprotov2server,
39 wireprotov2server,
40 )
40 )
41
41
42 httplib = util.httplib
42 httplib = util.httplib
43 urlerr = util.urlerr
43 urlerr = util.urlerr
44 urlreq = util.urlreq
44 urlreq = util.urlreq
45
45
46 def encodevalueinheaders(value, header, limit):
46 def encodevalueinheaders(value, header, limit):
47 """Encode a string value into multiple HTTP headers.
47 """Encode a string value into multiple HTTP headers.
48
48
49 ``value`` will be encoded into 1 or more HTTP headers with the names
49 ``value`` will be encoded into 1 or more HTTP headers with the names
50 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
50 ``header-<N>`` where ``<N>`` is an integer starting at 1. Each header
51 name + value will be at most ``limit`` bytes long.
51 name + value will be at most ``limit`` bytes long.
52
52
53 Returns an iterable of 2-tuples consisting of header names and
53 Returns an iterable of 2-tuples consisting of header names and
54 values as native strings.
54 values as native strings.
55 """
55 """
56 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
56 # HTTP Headers are ASCII. Python 3 requires them to be unicodes,
57 # not bytes. This function always takes bytes in as arguments.
57 # not bytes. This function always takes bytes in as arguments.
58 fmt = pycompat.strurl(header) + r'-%s'
58 fmt = pycompat.strurl(header) + r'-%s'
59 # Note: it is *NOT* a bug that the last bit here is a bytestring
59 # Note: it is *NOT* a bug that the last bit here is a bytestring
60 # and not a unicode: we're just getting the encoded length anyway,
60 # and not a unicode: we're just getting the encoded length anyway,
61 # and using an r-string to make it portable between Python 2 and 3
61 # and using an r-string to make it portable between Python 2 and 3
62 # doesn't work because then the \r is a literal backslash-r
62 # doesn't work because then the \r is a literal backslash-r
63 # instead of a carriage return.
63 # instead of a carriage return.
64 valuelen = limit - len(fmt % r'000') - len(': \r\n')
64 valuelen = limit - len(fmt % r'000') - len(': \r\n')
65 result = []
65 result = []
66
66
67 n = 0
67 n = 0
68 for i in xrange(0, len(value), valuelen):
68 for i in xrange(0, len(value), valuelen):
69 n += 1
69 n += 1
70 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
70 result.append((fmt % str(n), pycompat.strurl(value[i:i + valuelen])))
71
71
72 return result
72 return result
73
73
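An illustrative call, assuming the function above is importable from mercurial.httppeer. With a 100-byte budget, 'X-HgArg-000' (11 bytes) and ': \r\n' (4 bytes) are reserved, leaving 85 bytes of value per header:

from mercurial.httppeer import encodevalueinheaders

encoded = 'cmd=heads&' + 'x' * 290        # a 300-byte argument blob
headers = encodevalueinheaders(encoded, 'X-HgArg', 100)
# -> four headers X-HgArg-1 .. X-HgArg-4 carrying chunks of
#    85, 85, 85 and 45 bytes respectively.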
74 def _wraphttpresponse(resp):
74 def _wraphttpresponse(resp):
75 """Wrap an HTTPResponse with common error handlers.
75 """Wrap an HTTPResponse with common error handlers.
76
76
77 This ensures that any I/O from any consumer raises the appropriate
77 This ensures that any I/O from any consumer raises the appropriate
78 error with the appropriate messaging.
78 error with the appropriate messaging.
79 """
79 """
80 origread = resp.read
80 origread = resp.read
81
81
82 class readerproxy(resp.__class__):
82 class readerproxy(resp.__class__):
83 def read(self, size=None):
83 def read(self, size=None):
84 try:
84 try:
85 return origread(size)
85 return origread(size)
86 except httplib.IncompleteRead as e:
86 except httplib.IncompleteRead as e:
87 # e.expected is an integer if length known or None otherwise.
87 # e.expected is an integer if length known or None otherwise.
88 if e.expected:
88 if e.expected:
89 msg = _('HTTP request error (incomplete response; '
89 msg = _('HTTP request error (incomplete response; '
90 'expected %d bytes got %d)') % (e.expected,
90 'expected %d bytes got %d)') % (e.expected,
91 len(e.partial))
91 len(e.partial))
92 else:
92 else:
93 msg = _('HTTP request error (incomplete response)')
93 msg = _('HTTP request error (incomplete response)')
94
94
95 raise error.PeerTransportError(
95 raise error.PeerTransportError(
96 msg,
96 msg,
97 hint=_('this may be an intermittent network failure; '
97 hint=_('this may be an intermittent network failure; '
98 'if the error persists, consider contacting the '
98 'if the error persists, consider contacting the '
99 'network or server operator'))
99 'network or server operator'))
100 except httplib.HTTPException as e:
100 except httplib.HTTPException as e:
101 raise error.PeerTransportError(
101 raise error.PeerTransportError(
102 _('HTTP request error (%s)') % e,
102 _('HTTP request error (%s)') % e,
103 hint=_('this may be an intermittent network failure; '
103 hint=_('this may be an intermittent network failure; '
104 'if the error persists, consider contacting the '
104 'if the error persists, consider contacting the '
105 'network or server operator'))
105 'network or server operator'))
106
106
107 resp.__class__ = readerproxy
107 resp.__class__ = readerproxy
108
108
109 class _multifile(object):
109 class _multifile(object):
110 def __init__(self, *fileobjs):
110 def __init__(self, *fileobjs):
111 for f in fileobjs:
111 for f in fileobjs:
112 if not util.safehasattr(f, 'length'):
112 if not util.safehasattr(f, 'length'):
113 raise ValueError(
113 raise ValueError(
114 '_multifile only supports file objects that '
114 '_multifile only supports file objects that '
115 'have a length but this one does not:', type(f), f)
115 'have a length but this one does not:', type(f), f)
116 self._fileobjs = fileobjs
116 self._fileobjs = fileobjs
117 self._index = 0
117 self._index = 0
118
118
119 @property
119 @property
120 def length(self):
120 def length(self):
121 return sum(f.length for f in self._fileobjs)
121 return sum(f.length for f in self._fileobjs)
122
122
123 def read(self, amt=None):
123 def read(self, amt=None):
124 if amt <= 0:
124 if amt <= 0:
125 return ''.join(f.read() for f in self._fileobjs)
125 return ''.join(f.read() for f in self._fileobjs)
126 parts = []
126 parts = []
127 while amt and self._index < len(self._fileobjs):
127 while amt and self._index < len(self._fileobjs):
128 parts.append(self._fileobjs[self._index].read(amt))
128 parts.append(self._fileobjs[self._index].read(amt))
129 got = len(parts[-1])
129 got = len(parts[-1])
130 if got < amt:
130 if got < amt:
131 self._index += 1
131 self._index += 1
132 amt -= got
132 amt -= got
133 return ''.join(parts)
133 return ''.join(parts)
134
134
135 def seek(self, offset, whence=os.SEEK_SET):
135 def seek(self, offset, whence=os.SEEK_SET):
136 if whence != os.SEEK_SET:
136 if whence != os.SEEK_SET:
137 raise NotImplementedError(
137 raise NotImplementedError(
138 '_multifile does not support anything other'
138 '_multifile does not support anything other'
139 ' than os.SEEK_SET for whence on seek()')
139 ' than os.SEEK_SET for whence on seek()')
140 if offset != 0:
140 if offset != 0:
141 raise NotImplementedError(
141 raise NotImplementedError(
142 '_multifile only supports seeking to start, but that '
142 '_multifile only supports seeking to start, but that '
143 'could be fixed if you need it')
143 'could be fixed if you need it')
144 for f in self._fileobjs:
144 for f in self._fileobjs:
145 f.seek(0)
145 f.seek(0)
146 self._index = 0
146 self._index = 0
147
147
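A quick self-contained check of _multifile's concatenation behaviour; note that the helper requires each file object to expose a length attribute:

import io

from mercurial.httppeer import _multifile

a = io.BytesIO(b'abc'); a.length = 3
b = io.BytesIO(b'defg'); b.length = 4
mf = _multifile(a, b)
assert mf.length == 7
assert mf.read(5) == b'abcde'   # a read can span both underlying files
assert mf.read(5) == b'fg'      # drains the remainder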
148 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
148 def makev1commandrequest(ui, requestbuilder, caps, capablefn,
149 repobaseurl, cmd, args):
149 repobaseurl, cmd, args):
150 """Make an HTTP request to run a command for a version 1 client.
150 """Make an HTTP request to run a command for a version 1 client.
151
151
152 ``caps`` is a set of known server capabilities. The value may be
152 ``caps`` is a set of known server capabilities. The value may be
153 None if capabilities are not yet known.
153 None if capabilities are not yet known.
154
154
155 ``capablefn`` is a function to evaluate a capability.
155 ``capablefn`` is a function to evaluate a capability.
156
156
157 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
157 ``cmd``, ``args``, and ``data`` define the command, its arguments, and
158 raw data to pass to it.
158 raw data to pass to it.
159 """
159 """
160 if cmd == 'pushkey':
160 if cmd == 'pushkey':
161 args['data'] = ''
161 args['data'] = ''
162 data = args.pop('data', None)
162 data = args.pop('data', None)
163 headers = args.pop('headers', {})
163 headers = args.pop('headers', {})
164
164
165 ui.debug("sending %s command\n" % cmd)
165 ui.debug("sending %s command\n" % cmd)
166 q = [('cmd', cmd)]
166 q = [('cmd', cmd)]
167 headersize = 0
167 headersize = 0
168 # Important: don't use self.capable() here or else you end up
168 # Important: don't use self.capable() here or else you end up
169 # with infinite recursion when trying to look up capabilities
169 # with infinite recursion when trying to look up capabilities
170 # for the first time.
170 # for the first time.
171 postargsok = caps is not None and 'httppostargs' in caps
171 postargsok = caps is not None and 'httppostargs' in caps
172
172
173 # Send arguments via POST.
173 # Send arguments via POST.
174 if postargsok and args:
174 if postargsok and args:
175 strargs = urlreq.urlencode(sorted(args.items()))
175 strargs = urlreq.urlencode(sorted(args.items()))
176 if not data:
176 if not data:
177 data = strargs
177 data = strargs
178 else:
178 else:
179 if isinstance(data, bytes):
179 if isinstance(data, bytes):
180 i = io.BytesIO(data)
180 i = io.BytesIO(data)
181 i.length = len(data)
181 i.length = len(data)
182 data = i
182 data = i
183 argsio = io.BytesIO(strargs)
183 argsio = io.BytesIO(strargs)
184 argsio.length = len(strargs)
184 argsio.length = len(strargs)
185 data = _multifile(argsio, data)
185 data = _multifile(argsio, data)
186 headers[r'X-HgArgs-Post'] = len(strargs)
186 headers[r'X-HgArgs-Post'] = len(strargs)
187 elif args:
187 elif args:
188 # Calling self.capable() can recurse infinitely if we are calling
190 # Calling self.capable() can recurse infinitely if we are calling
189 # "capabilities". But that command should never accept wire
189 # "capabilities". But that command should never accept wire
190 # protocol arguments. So this should never happen.
190 # protocol arguments. So this should never happen.
191 assert cmd != 'capabilities'
191 assert cmd != 'capabilities'
192 httpheader = capablefn('httpheader')
192 httpheader = capablefn('httpheader')
193 if httpheader:
193 if httpheader:
194 headersize = int(httpheader.split(',', 1)[0])
194 headersize = int(httpheader.split(',', 1)[0])
195
195
196 # Send arguments via HTTP headers.
196 # Send arguments via HTTP headers.
197 if headersize > 0:
197 if headersize > 0:
198 # The headers can typically carry more data than the URL.
198 # The headers can typically carry more data than the URL.
199 encargs = urlreq.urlencode(sorted(args.items()))
199 encargs = urlreq.urlencode(sorted(args.items()))
200 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
200 for header, value in encodevalueinheaders(encargs, 'X-HgArg',
201 headersize):
201 headersize):
202 headers[header] = value
202 headers[header] = value
203 # Send arguments via query string (Mercurial <1.9).
203 # Send arguments via query string (Mercurial <1.9).
204 else:
204 else:
205 q += sorted(args.items())
205 q += sorted(args.items())
206
206
207 qs = '?%s' % urlreq.urlencode(q)
207 qs = '?%s' % urlreq.urlencode(q)
208 cu = "%s%s" % (repobaseurl, qs)
208 cu = "%s%s" % (repobaseurl, qs)
209 size = 0
209 size = 0
210 if util.safehasattr(data, 'length'):
210 if util.safehasattr(data, 'length'):
211 size = data.length
211 size = data.length
212 elif data is not None:
212 elif data is not None:
213 size = len(data)
213 size = len(data)
214 if data is not None and r'Content-Type' not in headers:
214 if data is not None and r'Content-Type' not in headers:
215 headers[r'Content-Type'] = r'application/mercurial-0.1'
215 headers[r'Content-Type'] = r'application/mercurial-0.1'
216
216
217 # Tell the server we accept application/mercurial-0.2 and multiple
217 # Tell the server we accept application/mercurial-0.2 and multiple
218 # compression formats if the server is capable of emitting those
218 # compression formats if the server is capable of emitting those
219 # payloads.
219 # payloads.
220 # Note: Keep this set empty by default, as client advertisement of
220 # Note: Keep this set empty by default, as client advertisement of
221 # protocol parameters should only occur after the handshake.
221 # protocol parameters should only occur after the handshake.
222 protoparams = set()
222 protoparams = set()
223
223
224 mediatypes = set()
224 mediatypes = set()
225 if caps is not None:
225 if caps is not None:
226 mt = capablefn('httpmediatype')
226 mt = capablefn('httpmediatype')
227 if mt:
227 if mt:
228 protoparams.add('0.1')
228 protoparams.add('0.1')
229 mediatypes = set(mt.split(','))
229 mediatypes = set(mt.split(','))
230
230
231 protoparams.add('partial-pull')
231 protoparams.add('partial-pull')
232
232
233 if '0.2tx' in mediatypes:
233 if '0.2tx' in mediatypes:
234 protoparams.add('0.2')
234 protoparams.add('0.2')
235
235
236 if '0.2tx' in mediatypes and capablefn('compression'):
236 if '0.2tx' in mediatypes and capablefn('compression'):
237 # We /could/ compare supported compression formats and prune
237 # We /could/ compare supported compression formats and prune
238 # non-mutually supported or error if nothing is mutually supported.
238 # non-mutually supported or error if nothing is mutually supported.
239 # For now, send the full list to the server and have it error.
239 # For now, send the full list to the server and have it error.
240 comps = [e.wireprotosupport().name for e in
240 comps = [e.wireprotosupport().name for e in
241 util.compengines.supportedwireengines(util.CLIENTROLE)]
241 util.compengines.supportedwireengines(util.CLIENTROLE)]
242 protoparams.add('comp=%s' % ','.join(comps))
242 protoparams.add('comp=%s' % ','.join(comps))
243
243
244 if protoparams:
244 if protoparams:
245 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
245 protoheaders = encodevalueinheaders(' '.join(sorted(protoparams)),
246 'X-HgProto',
246 'X-HgProto',
247 headersize or 1024)
247 headersize or 1024)
248 for header, value in protoheaders:
248 for header, value in protoheaders:
249 headers[header] = value
249 headers[header] = value
250
250
251 varyheaders = []
251 varyheaders = []
252 for header in headers:
252 for header in headers:
253 if header.lower().startswith(r'x-hg'):
253 if header.lower().startswith(r'x-hg'):
254 varyheaders.append(header)
254 varyheaders.append(header)
255
255
256 if varyheaders:
256 if varyheaders:
257 headers[r'Vary'] = r','.join(sorted(varyheaders))
257 headers[r'Vary'] = r','.join(sorted(varyheaders))
258
258
259 req = requestbuilder(pycompat.strurl(cu), data, headers)
259 req = requestbuilder(pycompat.strurl(cu), data, headers)
260
260
261 if data is not None:
261 if data is not None:
262 ui.debug("sending %d bytes\n" % size)
262 ui.debug("sending %d bytes\n" % size)
263 req.add_unredirected_header(r'Content-Length', r'%d' % size)
263 req.add_unredirected_header(r'Content-Length', r'%d' % size)
264
264
265 return req, cu, qs
265 return req, cu, qs
266
266
267 def sendrequest(ui, opener, req):
267 def sendrequest(ui, opener, req):
268 """Send a prepared HTTP request.
268 """Send a prepared HTTP request.
269
269
270 Returns the response object.
270 Returns the response object.
271 """
271 """
272 if (ui.debugflag
272 if (ui.debugflag
273 and ui.configbool('devel', 'debug.peer-request')):
273 and ui.configbool('devel', 'debug.peer-request')):
274 dbg = ui.debug
274 dbg = ui.debug
275 line = 'devel-peer-request: %s\n'
275 line = 'devel-peer-request: %s\n'
276 dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
276 dbg(line % '%s %s' % (req.get_method(), req.get_full_url()))
277 hgargssize = None
277 hgargssize = None
278
278
279 for header, value in sorted(req.header_items()):
279 for header, value in sorted(req.header_items()):
280 if header.startswith('X-hgarg-'):
280 if header.startswith('X-hgarg-'):
281 if hgargssize is None:
281 if hgargssize is None:
282 hgargssize = 0
282 hgargssize = 0
283 hgargssize += len(value)
283 hgargssize += len(value)
284 else:
284 else:
285 dbg(line % ' %s %s' % (header, value))
285 dbg(line % ' %s %s' % (header, value))
286
286
287 if hgargssize is not None:
287 if hgargssize is not None:
288 dbg(line % ' %d bytes of command arguments in headers'
290 dbg(line % ' %d bytes of command arguments in headers'
289 % hgargssize)
289 % hgargssize)
290
290
291 if req.has_data():
291 if req.has_data():
292 data = req.get_data()
292 data = req.get_data()
293 length = getattr(data, 'length', None)
293 length = getattr(data, 'length', None)
294 if length is None:
294 if length is None:
295 length = len(data)
295 length = len(data)
296 dbg(line % ' %d bytes of data' % length)
296 dbg(line % ' %d bytes of data' % length)
297
297
298 start = util.timer()
298 start = util.timer()
299
299
300 try:
300 try:
301 res = opener.open(req)
301 res = opener.open(req)
302 except urlerr.httperror as inst:
302 except urlerr.httperror as inst:
303 if inst.code == 401:
303 if inst.code == 401:
304 raise error.Abort(_('authorization failed'))
304 raise error.Abort(_('authorization failed'))
305 raise
305 raise
306 except httplib.HTTPException as inst:
306 except httplib.HTTPException as inst:
307 ui.debug('http error requesting %s\n' %
307 ui.debug('http error requesting %s\n' %
308 util.hidepassword(req.get_full_url()))
308 util.hidepassword(req.get_full_url()))
309 ui.traceback()
309 ui.traceback()
310 raise IOError(None, inst)
310 raise IOError(None, inst)
311 finally:
311 finally:
312 if ui.configbool('devel', 'debug.peer-request'):
312 if ui.configbool('devel', 'debug.peer-request'):
313 dbg(line % ' finished in %.4f seconds (%s)'
313 dbg(line % ' finished in %.4f seconds (%s)'
314 % (util.timer() - start, res.code))
314 % (util.timer() - start, res.code))
315
315
316 # Insert error handlers for common I/O failures.
316 # Insert error handlers for common I/O failures.
317 _wraphttpresponse(res)
317 _wraphttpresponse(res)
318
318
319 return res
319 return res
320
320
321 def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
321 def parsev1commandresponse(ui, baseurl, requrl, qs, resp, compressible,
322 allowcbor=False):
322 allowcbor=False):
323 # record the url we got redirected to
323 # record the url we got redirected to
324 respurl = pycompat.bytesurl(resp.geturl())
324 respurl = pycompat.bytesurl(resp.geturl())
325 if respurl.endswith(qs):
325 if respurl.endswith(qs):
326 respurl = respurl[:-len(qs)]
326 respurl = respurl[:-len(qs)]
327 if baseurl.rstrip('/') != respurl.rstrip('/'):
327 if baseurl.rstrip('/') != respurl.rstrip('/'):
328 if not ui.quiet:
328 if not ui.quiet:
329 ui.warn(_('real URL is %s\n') % respurl)
329 ui.warn(_('real URL is %s\n') % respurl)
330
330
331 try:
331 try:
332 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
332 proto = pycompat.bytesurl(resp.getheader(r'content-type', r''))
333 except AttributeError:
333 except AttributeError:
334 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
334 proto = pycompat.bytesurl(resp.headers.get(r'content-type', r''))
335
335
336 safeurl = util.hidepassword(baseurl)
336 safeurl = util.hidepassword(baseurl)
337 if proto.startswith('application/hg-error'):
337 if proto.startswith('application/hg-error'):
338 raise error.OutOfBandError(resp.read())
338 raise error.OutOfBandError(resp.read())
339
339
340 # Pre 1.0 versions of Mercurial used text/plain and
340 # Pre 1.0 versions of Mercurial used text/plain and
341 # application/hg-changegroup. We don't support such old servers.
341 # application/hg-changegroup. We don't support such old servers.
342 if not proto.startswith('application/mercurial-'):
342 if not proto.startswith('application/mercurial-'):
343 ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
343 ui.debug("requested URL: '%s'\n" % util.hidepassword(requrl))
344 raise error.RepoError(
344 raise error.RepoError(
345 _("'%s' does not appear to be an hg repository:\n"
345 _("'%s' does not appear to be an hg repository:\n"
346 "---%%<--- (%s)\n%s\n---%%<---\n")
346 "---%%<--- (%s)\n%s\n---%%<---\n")
347 % (safeurl, proto or 'no content-type', resp.read(1024)))
347 % (safeurl, proto or 'no content-type', resp.read(1024)))
348
348
349 try:
349 try:
350 subtype = proto.split('-', 1)[1]
350 subtype = proto.split('-', 1)[1]
351
351
352 # Unless we end up supporting CBOR in the legacy wire protocol,
352 # Unless we end up supporting CBOR in the legacy wire protocol,
353 # this should ONLY be encountered for the initial capabilities
353 # this should ONLY be encountered for the initial capabilities
354 # request during handshake.
354 # request during handshake.
355 if subtype == 'cbor':
355 if subtype == 'cbor':
356 if allowcbor:
356 if allowcbor:
357 return respurl, proto, resp
357 return respurl, proto, resp
358 else:
358 else:
359 raise error.RepoError(_('unexpected CBOR response from '
359 raise error.RepoError(_('unexpected CBOR response from '
360 'server'))
360 'server'))
361
361
362 version_info = tuple([int(n) for n in subtype.split('.')])
362 version_info = tuple([int(n) for n in subtype.split('.')])
363 except ValueError:
363 except ValueError:
364 raise error.RepoError(_("'%s' sent a broken Content-Type "
364 raise error.RepoError(_("'%s' sent a broken Content-Type "
365 "header (%s)") % (safeurl, proto))
365 "header (%s)") % (safeurl, proto))
366
366
367 # TODO consider switching to a decompression reader that uses
367 # TODO consider switching to a decompression reader that uses
368 # generators.
368 # generators.
369 if version_info == (0, 1):
369 if version_info == (0, 1):
370 if compressible:
370 if compressible:
371 resp = util.compengines['zlib'].decompressorreader(resp)
371 resp = util.compengines['zlib'].decompressorreader(resp)
372
372
373 elif version_info == (0, 2):
373 elif version_info == (0, 2):
374 # application/mercurial-0.2 always identifies the compression
374 # application/mercurial-0.2 always identifies the compression
375 # engine in the payload header.
375 # engine in the payload header.
376 elen = struct.unpack('B', resp.read(1))[0]
376 elen = struct.unpack('B', resp.read(1))[0]
377 ename = resp.read(elen)
377 ename = resp.read(elen)
378 engine = util.compengines.forwiretype(ename)
378 engine = util.compengines.forwiretype(ename)
379
379
380 resp = engine.decompressorreader(resp)
380 resp = engine.decompressorreader(resp)
381 else:
381 else:
382 raise error.RepoError(_("'%s' uses newer protocol %s") %
382 raise error.RepoError(_("'%s' uses newer protocol %s") %
383 (safeurl, subtype))
383 (safeurl, subtype))
384
384
385 return respurl, proto, resp
385 return respurl, proto, resp
386
386
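A hedged sketch of a complete v1 'capabilities' round trip built from the three helpers above; the opener construction and the URL are assumptions about the surrounding setup, not fixed API.

from mercurial import ui as uimod, url as urlmod, util
from mercurial.httppeer import (makev1commandrequest, sendrequest,
                                parsev1commandresponse)

u = uimod.ui.load()
baseurl = 'https://example.com/repo'              # placeholder
opener = urlmod.opener(u)
req, cu, qs = makev1commandrequest(u, util.urlreq.request, None,
                                   lambda cap: False, baseurl,
                                   'capabilities', {})
res = sendrequest(u, opener, req)
respurl, ct, res = parsev1commandresponse(u, baseurl, cu, qs, res, False)
caps = set(res.read().split())                    # space-delimited list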
387 class httppeer(wireprotov1peer.wirepeer):
387 class httppeer(wireprotov1peer.wirepeer):
388 def __init__(self, ui, path, url, opener, requestbuilder, caps):
388 def __init__(self, ui, path, url, opener, requestbuilder, caps):
389 self.ui = ui
389 self.ui = ui
390 self._path = path
390 self._path = path
391 self._url = url
391 self._url = url
392 self._caps = caps
392 self._caps = caps
393 self._urlopener = opener
393 self._urlopener = opener
394 self._requestbuilder = requestbuilder
394 self._requestbuilder = requestbuilder
395
395
396 def __del__(self):
396 def __del__(self):
397 for h in self._urlopener.handlers:
397 for h in self._urlopener.handlers:
398 h.close()
398 h.close()
399 getattr(h, "close_all", lambda: None)()
399 getattr(h, "close_all", lambda: None)()
400
400
401 # Start of ipeerconnection interface.
401 # Start of ipeerconnection interface.
402
402
403 def url(self):
403 def url(self):
404 return self._path
404 return self._path
405
405
406 def local(self):
406 def local(self):
407 return None
407 return None
408
408
409 def peer(self):
409 def peer(self):
410 return self
410 return self
411
411
412 def canpush(self):
412 def canpush(self):
413 return True
413 return True
414
414
415 def close(self):
415 def close(self):
416 pass
416 pass
417
417
418 # End of ipeerconnection interface.
418 # End of ipeerconnection interface.
419
419
420 # Start of ipeercommands interface.
420 # Start of ipeercommands interface.
421
421
422 def capabilities(self):
422 def capabilities(self):
423 return self._caps
423 return self._caps
424
424
425 # End of ipeercommands interface.
425 # End of ipeercommands interface.
426
426
427 # look up capabilities only when needed
427 # look up capabilities only when needed
428
428
429 def _callstream(self, cmd, _compressible=False, **args):
429 def _callstream(self, cmd, _compressible=False, **args):
430 args = pycompat.byteskwargs(args)
430 args = pycompat.byteskwargs(args)
431
431
432 req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
432 req, cu, qs = makev1commandrequest(self.ui, self._requestbuilder,
433 self._caps, self.capable,
433 self._caps, self.capable,
434 self._url, cmd, args)
434 self._url, cmd, args)
435
435
436 resp = sendrequest(self.ui, self._urlopener, req)
436 resp = sendrequest(self.ui, self._urlopener, req)
437
437
438 self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
438 self._url, ct, resp = parsev1commandresponse(self.ui, self._url, cu, qs,
439 resp, _compressible)
439 resp, _compressible)
440
440
441 return resp
441 return resp
442
442
443 def _call(self, cmd, **args):
443 def _call(self, cmd, **args):
444 fp = self._callstream(cmd, **args)
444 fp = self._callstream(cmd, **args)
445 try:
445 try:
446 return fp.read()
446 return fp.read()
447 finally:
447 finally:
448 # if using keepalive, allow connection to be reused
448 # if using keepalive, allow connection to be reused
449 fp.close()
449 fp.close()
450
450
451 def _callpush(self, cmd, cg, **args):
451 def _callpush(self, cmd, cg, **args):
452 # have to stream bundle to a temp file because we do not have
452 # have to stream bundle to a temp file because we do not have
453 # http 1.1 chunked transfer.
453 # http 1.1 chunked transfer.
454
454
455 types = self.capable('unbundle')
455 types = self.capable('unbundle')
456 try:
456 try:
457 types = types.split(',')
457 types = types.split(',')
458 except AttributeError:
458 except AttributeError:
459 # servers older than d1b16a746db6 will send 'unbundle' as a
459 # servers older than d1b16a746db6 will send 'unbundle' as a
460 # boolean capability. They only support headerless/uncompressed
460 # boolean capability. They only support headerless/uncompressed
461 # bundles.
461 # bundles.
462 types = [""]
462 types = [""]
463 for x in types:
463 for x in types:
464 if x in bundle2.bundletypes:
464 if x in bundle2.bundletypes:
465 type = x
465 type = x
466 break
466 break
467
467
468 tempname = bundle2.writebundle(self.ui, cg, None, type)
468 tempname = bundle2.writebundle(self.ui, cg, None, type)
469 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
469 fp = httpconnection.httpsendfile(self.ui, tempname, "rb")
470 headers = {r'Content-Type': r'application/mercurial-0.1'}
470 headers = {r'Content-Type': r'application/mercurial-0.1'}
471
471
472 try:
472 try:
473 r = self._call(cmd, data=fp, headers=headers, **args)
473 r = self._call(cmd, data=fp, headers=headers, **args)
474 vals = r.split('\n', 1)
474 vals = r.split('\n', 1)
475 if len(vals) < 2:
475 if len(vals) < 2:
476 raise error.ResponseError(_("unexpected response:"), r)
476 raise error.ResponseError(_("unexpected response:"), r)
477 return vals
477 return vals
478 except urlerr.httperror:
478 except urlerr.httperror:
479 # Catch and re-raise these so we don't try and treat them
479 # Catch and re-raise these so we don't try and treat them
480 # like generic socket errors. They lack any values in
480 # like generic socket errors. They lack any values in
481 # .args on Python 3 which breaks our socket.error block.
481 # .args on Python 3 which breaks our socket.error block.
482 raise
482 raise
483 except socket.error as err:
483 except socket.error as err:
484 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
484 if err.args[0] in (errno.ECONNRESET, errno.EPIPE):
485 raise error.Abort(_('push failed: %s') % err.args[1])
485 raise error.Abort(_('push failed: %s') % err.args[1])
486 raise error.Abort(err.args[1])
486 raise error.Abort(err.args[1])
487 finally:
487 finally:
488 fp.close()
488 fp.close()
489 os.unlink(tempname)
489 os.unlink(tempname)
490
490
491 def _calltwowaystream(self, cmd, fp, **args):
491 def _calltwowaystream(self, cmd, fp, **args):
492 fh = None
492 fh = None
493 fp_ = None
493 fp_ = None
494 filename = None
494 filename = None
495 try:
495 try:
496 # dump bundle to disk
496 # dump bundle to disk
497 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
497 fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg")
498 fh = os.fdopen(fd, r"wb")
498 fh = os.fdopen(fd, r"wb")
499 d = fp.read(4096)
499 d = fp.read(4096)
500 while d:
500 while d:
501 fh.write(d)
501 fh.write(d)
502 d = fp.read(4096)
502 d = fp.read(4096)
503 fh.close()
503 fh.close()
504 # start http push
504 # start http push
505 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
505 fp_ = httpconnection.httpsendfile(self.ui, filename, "rb")
506 headers = {r'Content-Type': r'application/mercurial-0.1'}
506 headers = {r'Content-Type': r'application/mercurial-0.1'}
507 return self._callstream(cmd, data=fp_, headers=headers, **args)
507 return self._callstream(cmd, data=fp_, headers=headers, **args)
508 finally:
508 finally:
509 if fp_ is not None:
509 if fp_ is not None:
510 fp_.close()
510 fp_.close()
511 if fh is not None:
511 if fh is not None:
512 fh.close()
512 fh.close()
513 os.unlink(filename)
513 os.unlink(filename)
514
514
515 def _callcompressable(self, cmd, **args):
515 def _callcompressable(self, cmd, **args):
516 return self._callstream(cmd, _compressible=True, **args)
516 return self._callstream(cmd, _compressible=True, **args)
517
517
518 def _abort(self, exception):
518 def _abort(self, exception):
519 raise exception
519 raise exception
520
520
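httppeer is not normally constructed by hand; hg.peer() performs the handshake and supplies the opener, request builder, and negotiated capabilities. A sketch (the URL is a placeholder):

from mercurial import hg, ui as uimod

u = uimod.ui.load()
peer = hg.peer(u, {}, 'https://example.com/repo')
if peer.capable('unbundle'):
    u.status('server accepts pushes via unbundle\n')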
521 def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
521 def sendv2request(ui, opener, requestbuilder, apiurl, permission, requests):
522 reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
522 reactor = wireprotoframing.clientreactor(hasmultiplesend=False,
523 buffersends=True)
523 buffersends=True)
524
524
525 url = '%s/%s' % (apiurl, permission)
525 url = '%s/%s' % (apiurl, permission)
526
526
527 if len(requests) > 1:
527 if len(requests) > 1:
528 url += '/multirequest'
528 url += '/multirequest'
529 else:
529 else:
530 url += '/%s' % requests[0][0]
530 url += '/%s' % requests[0][0]
531
531
532 # Request ID to (request, future)
532 # Request ID to (request, future)
533 requestmap = {}
533 requestmap = {}
534
534
535 for command, args, f in requests:
535 for command, args, f in requests:
536 request, action, meta = reactor.callcommand(command, args)
536 request, action, meta = reactor.callcommand(command, args)
537 assert action == 'noop'
537 assert action == 'noop'
538
538
539 requestmap[request.requestid] = (request, f)
539 requestmap[request.requestid] = (request, f)
540
540
541 action, meta = reactor.flushcommands()
541 action, meta = reactor.flushcommands()
542 assert action == 'sendframes'
542 assert action == 'sendframes'
543
543
544 # TODO stream this.
544 # TODO stream this.
545 body = b''.join(map(bytes, meta['framegen']))
545 body = b''.join(map(bytes, meta['framegen']))
546
546
547 # TODO modify user-agent to reflect v2
547 # TODO modify user-agent to reflect v2
548 headers = {
548 headers = {
549 r'Accept': wireprotov2server.FRAMINGTYPE,
549 r'Accept': wireprotov2server.FRAMINGTYPE,
550 r'Content-Type': wireprotov2server.FRAMINGTYPE,
550 r'Content-Type': wireprotov2server.FRAMINGTYPE,
551 }
551 }
552
552
553 req = requestbuilder(pycompat.strurl(url), body, headers)
553 req = requestbuilder(pycompat.strurl(url), body, headers)
554 req.add_unredirected_header(r'Content-Length', r'%d' % len(body))
554 req.add_unredirected_header(r'Content-Length', r'%d' % len(body))
555
555
556 try:
556 try:
557 res = opener.open(req)
557 res = opener.open(req)
558 except urlerr.httperror as e:
558 except urlerr.httperror as e:
559 if e.code == 401:
559 if e.code == 401:
560 raise error.Abort(_('authorization failed'))
560 raise error.Abort(_('authorization failed'))
561
561
562 raise
562 raise
563 except httplib.HTTPException as e:
563 except httplib.HTTPException as e:
564 ui.traceback()
564 ui.traceback()
565 raise IOError(None, e)
565 raise IOError(None, e)
566
566
567 return reactor, requestmap, res
567 return reactor, requestmap, res
568
568
569 class queuedcommandfuture(pycompat.futures.Future):
569 class queuedcommandfuture(pycompat.futures.Future):
570 """Wraps result() on command futures to trigger submission on call."""
570 """Wraps result() on command futures to trigger submission on call."""
571
571
572 def result(self, timeout=None):
572 def result(self, timeout=None):
573 if self.done():
573 if self.done():
574 return pycompat.futures.Future.result(self, timeout)
574 return pycompat.futures.Future.result(self, timeout)
575
575
576 self._peerexecutor.sendcommands()
576 self._peerexecutor.sendcommands()
577
577
578 # sendcommands() will restore the original __class__ and self.result
578 # sendcommands() will restore the original __class__ and self.result
579 # will resolve to Future.result.
579 # will resolve to Future.result.
580 return self.result(timeout)
580 return self.result(timeout)
581
581
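The __class__ swap used by queuedcommandfuture is ordinary Python; a self-contained demonstration of the mechanism with hypothetical class names:

class Plain(object):
    def result(self):
        return 'direct'

class Hooked(Plain):
    def result(self):
        # run the hook once, restore the original class, then defer
        self.__class__ = Plain
        return Plain.result(self)

f = Plain()
f.__class__ = Hooked        # instance now routes through Hooked.result
assert f.result() == 'direct'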
582 @zi.implementer(repository.ipeercommandexecutor)
582 @zi.implementer(repository.ipeercommandexecutor)
583 class httpv2executor(object):
583 class httpv2executor(object):
584 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
584 def __init__(self, ui, opener, requestbuilder, apiurl, descriptor):
585 self._ui = ui
585 self._ui = ui
586 self._opener = opener
586 self._opener = opener
587 self._requestbuilder = requestbuilder
587 self._requestbuilder = requestbuilder
588 self._apiurl = apiurl
588 self._apiurl = apiurl
589 self._descriptor = descriptor
589 self._descriptor = descriptor
590 self._sent = False
590 self._sent = False
591 self._closed = False
591 self._closed = False
592 self._neededpermissions = set()
592 self._neededpermissions = set()
593 self._calls = []
593 self._calls = []
594 self._futures = weakref.WeakSet()
594 self._futures = weakref.WeakSet()
595 self._responseexecutor = None
595 self._responseexecutor = None
596 self._responsef = None
596 self._responsef = None
597
597
598 def __enter__(self):
598 def __enter__(self):
599 return self
599 return self
600
600
601 def __exit__(self, exctype, excvalue, exctb):
601 def __exit__(self, exctype, excvalue, exctb):
602 self.close()
602 self.close()
603
603
604 def callcommand(self, command, args):
604 def callcommand(self, command, args):
605 if self._sent:
605 if self._sent:
606 raise error.ProgrammingError('callcommand() cannot be used after '
606 raise error.ProgrammingError('callcommand() cannot be used after '
607 'commands are sent')
607 'commands are sent')
608
608
609 if self._closed:
609 if self._closed:
610 raise error.ProgrammingError('callcommand() cannot be used after '
610 raise error.ProgrammingError('callcommand() cannot be used after '
611 'close()')
611 'close()')
612
612
613 # The service advertises which commands are available. So if we attempt
613 # The service advertises which commands are available. So if we attempt
614 # to call an unknown command or pass an unknown argument, we can screen
614 # to call an unknown command or pass an unknown argument, we can screen
615 # for this.
615 # for this.
616 if command not in self._descriptor['commands']:
616 if command not in self._descriptor['commands']:
617 raise error.ProgrammingError(
617 raise error.ProgrammingError(
618 'wire protocol command %s is not available' % command)
618 'wire protocol command %s is not available' % command)
619
619
620 cmdinfo = self._descriptor['commands'][command]
620 cmdinfo = self._descriptor['commands'][command]
621 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
621 unknownargs = set(args.keys()) - set(cmdinfo.get('args', {}))
622
622
623 if unknownargs:
623 if unknownargs:
624 raise error.ProgrammingError(
624 raise error.ProgrammingError(
625 'wire protocol command %s does not accept argument: %s' % (
625 'wire protocol command %s does not accept argument: %s' % (
626 command, ', '.join(sorted(unknownargs))))
626 command, ', '.join(sorted(unknownargs))))
627
627
628 self._neededpermissions |= set(cmdinfo['permissions'])
628 self._neededpermissions |= set(cmdinfo['permissions'])
629
629
630 # TODO we /could/ also validate types here, since the API descriptor
630 # TODO we /could/ also validate types here, since the API descriptor
631 # includes types...
631 # includes types...
632
632
633 f = pycompat.futures.Future()
633 f = pycompat.futures.Future()
634
634
635 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
635 # Monkeypatch it so result() triggers sendcommands(), otherwise result()
636 # could deadlock.
636 # could deadlock.
637 f.__class__ = queuedcommandfuture
637 f.__class__ = queuedcommandfuture
638 f._peerexecutor = self
638 f._peerexecutor = self
639
639
640 self._futures.add(f)
640 self._futures.add(f)
641 self._calls.append((command, args, f))
641 self._calls.append((command, args, f))
642
642
643 return f
643 return f
644
644
645 def sendcommands(self):
645 def sendcommands(self):
646 if self._sent:
646 if self._sent:
647 return
647 return
648
648
649 if not self._calls:
649 if not self._calls:
650 return
650 return
651
651
652 self._sent = True
652 self._sent = True
653
653
654 # Unhack any future types so the caller sees a clean type and so we
654 # Unhack any future types so the caller sees a clean type and so we
655 # break the reference cycle.
655 # break the reference cycle.
656 for f in self._futures:
656 for f in self._futures:
657 if isinstance(f, queuedcommandfuture):
657 if isinstance(f, queuedcommandfuture):
658 f.__class__ = pycompat.futures.Future
658 f.__class__ = pycompat.futures.Future
659 f._peerexecutor = None
659 f._peerexecutor = None
660
660
661 # Mark the future as running and filter out cancelled futures.
661 # Mark the future as running and filter out cancelled futures.
662 calls = [(command, args, f)
662 calls = [(command, args, f)
663 for command, args, f in self._calls
663 for command, args, f in self._calls
664 if f.set_running_or_notify_cancel()]
664 if f.set_running_or_notify_cancel()]
665
665
666 # Clear out references to prevent improper object usage.
666 # Clear out references to prevent improper object usage.
667 self._calls = None
667 self._calls = None
668
668
669 if not calls:
669 if not calls:
670 return
670 return
671
671
672 permissions = set(self._neededpermissions)
672 permissions = set(self._neededpermissions)
673
673
674 if 'push' in permissions and 'pull' in permissions:
674 if 'push' in permissions and 'pull' in permissions:
675 permissions.remove('pull')
675 permissions.remove('pull')
676
676
677 if len(permissions) > 1:
677 if len(permissions) > 1:
678 raise error.RepoError(_('cannot make request requiring multiple '
678 raise error.RepoError(_('cannot make request requiring multiple '
679 'permissions: %s') %
679 'permissions: %s') %
680 _(', ').join(sorted(permissions)))
680 _(', ').join(sorted(permissions)))
681
681
682 permission = {
682 permission = {
683 'push': 'rw',
683 'push': 'rw',
684 'pull': 'ro',
684 'pull': 'ro',
685 }[permissions.pop()]
685 }[permissions.pop()]
686
686
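        # For example, a batch mixing a read-only command with a write
        # command needs both 'pull' and 'push'; the reduction above sends the
        # whole request with 'rw' permission, since push access subsumes pull.
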
        reactor, requests, resp = sendv2request(
            self._ui, self._opener, self._requestbuilder, self._apiurl,
            permission, calls)

        # TODO we probably want to validate the HTTP code, media type, etc.

        self._responseexecutor = pycompat.futures.ThreadPoolExecutor(1)
        self._responsef = self._responseexecutor.submit(self._handleresponse,
                                                        reactor,
                                                        requests,
                                                        resp)

    def close(self):
        if self._closed:
            return

        self.sendcommands()

        self._closed = True

        if not self._responsef:
            return

        try:
            self._responsef.result()
        finally:
            self._responseexecutor.shutdown(wait=True)
            self._responsef = None
            self._responseexecutor = None

            # If any of our futures are still in progress, mark them as
            # errored, otherwise a result() could wait indefinitely.
            for f in self._futures:
                if not f.done():
                    f.set_exception(error.ResponseError(
                        _('unfulfilled command response')))

            self._futures = None

    def _handleresponse(self, reactor, requests, resp):
        # Called in a thread to read the response.

        results = {k: [] for k in requests}

        while True:
            frame = wireprotoframing.readframe(resp)
            if frame is None:
                break

            self._ui.note(_('received %r\n') % frame)

            # Guard against receiving a frame with a request ID that we
            # didn't issue. This should never happen.
            request, f = requests.get(frame.requestid, [None, None])

            action, meta = reactor.onframerecv(frame)

            if action == 'responsedata':
                assert request.requestid == meta['request'].requestid

                result = results[request.requestid]

                if meta['cbor']:
                    payload = util.bytesio(meta['data'])

                    decoder = cbor.CBORDecoder(payload)
                    while payload.tell() + 1 < len(meta['data']):
                        try:
                            result.append(decoder.decode())
                        except Exception:
                            pycompat.future_set_exception_info(
                                f, sys.exc_info()[1:])
                            continue
                else:
                    result.append(meta['data'])

                if meta['eos']:
                    f.set_result(result)
                    del results[request.requestid]

            elif action == 'error':
                e = error.RepoError(meta['message'])

                if f:
                    f.set_exception(e)
                else:
                    raise e

            else:
                e = error.ProgrammingError('unhandled action: %s' % action)

                if f:
                    f.set_exception(e)
                else:
                    raise e

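# Illustrative sketch (hedged, standalone; not part of this module): the CBOR
# branch in _handleresponse() drains a buffer that may hold several
# concatenated CBOR values, mirroring the decoder API used above and assuming
# the same third-party ``cbor`` module:
#
#   import io
#   import cbor
#
#   data = cbor.dumps(b'foo') + cbor.dumps(b'bar')
#   payload = io.BytesIO(data)
#   decoder = cbor.CBORDecoder(payload)
#   values = []
#   while payload.tell() + 1 < len(data):
#       values.append(decoder.decode())
#   # values == [b'foo', b'bar']
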
# TODO implement interface for version 2 peers
@zi.implementer(repository.ipeerconnection, repository.ipeercapabilities,
                repository.ipeerrequests)
class httpv2peer(object):
    def __init__(self, ui, repourl, apipath, opener, requestbuilder,
                 apidescriptor):
        self.ui = ui

        if repourl.endswith('/'):
            repourl = repourl[:-1]

        self._url = repourl
        self._apipath = apipath
        self._apiurl = '%s/%s' % (repourl, apipath)
        self._opener = opener
        self._requestbuilder = requestbuilder
        self._descriptor = apidescriptor

    # Start of ipeerconnection.

    def url(self):
        return self._url

    def local(self):
        return None

    def peer(self):
        return self

    def canpush(self):
        # TODO change once implemented.
        return False

    def close(self):
        pass

    # End of ipeerconnection.

    # Start of ipeercapabilities.

    def capable(self, name):
        # The capabilities used internally historically map to capabilities
        # advertised from the "capabilities" wire protocol command. However,
        # version 2 of that command works differently.

        # Maps to commands that are available.
        if name in ('branchmap', 'getbundle', 'known', 'lookup', 'pushkey'):
            return True

        # Other concepts.
        if name in ('bundle2',):
            return True

        return False

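    # For example, ``peer.capable('known')`` returns True above, while an
    # unadvertised capability such as ``peer.capable('unbundle')`` currently
    # returns False.
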
    def requirecap(self, name, purpose):
        if self.capable(name):
            return

        raise error.CapabilityError(
            _('cannot %s; client or remote repository does not support the %r '
              'capability') % (purpose, name))

    # End of ipeercapabilities.

    def _call(self, name, **args):
        with self.commandexecutor() as e:
            return e.callcommand(name, args).result()

    def commandexecutor(self):
        return httpv2executor(self.ui, self._opener, self._requestbuilder,
                              self._apiurl, self._descriptor)

# Registry of API service names to metadata about peers that handle them.
#
# The following keys are meaningful:
#
# init
#    Callable receiving (ui, repourl, servicepath, opener, requestbuilder,
#    apidescriptor) to create a peer.
#
# priority
#    Integer priority for the service. If we could choose from multiple
#    services, we choose the one with the highest priority.
API_PEERS = {
    wireprototypes.HTTP_WIREPROTO_V2: {
        'init': httpv2peer,
        'priority': 50,
    },
}

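# Illustrative sketch (hypothetical; ``customservicepeer`` and the service
# name are assumptions, not part of this change): an extension providing its
# own service could register it following the contract documented above:
#
#   API_PEERS['exp-custom-service'] = {
#       'init': customservicepeer,
#       'priority': 10,
#   }
#
# A priority lower than 50 means the built-in HTTP v2 peer is preferred when
# both services are advertised by a server.
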
def performhandshake(ui, url, opener, requestbuilder):
    # The handshake is a request to the capabilities command.

    caps = None
    def capable(x):
        raise error.ProgrammingError('should not be called')

    args = {}

    # The client advertises support for newer protocols by adding an
    # X-HgUpgrade-* header with a list of supported APIs and an
    # X-HgProto-* header advertising which serializing formats it supports.
    # We only support the HTTP version 2 transport and CBOR responses for
    # now.
    advertisev2 = ui.configbool('experimental', 'httppeer.advertise-v2')

    if advertisev2:
        args['headers'] = {
            r'X-HgProto-1': r'cbor',
        }

        args['headers'].update(
            encodevalueinheaders(' '.join(sorted(API_PEERS)),
                                 'X-HgUpgrade',
                                 # We don't know the header limit this early.
                                 # So make it small.
                                 1024))

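    # With the single built-in service registered above, the advertisement
    # amounts to request headers roughly like the following (illustrative;
    # the upgrade token is whatever wireprototypes.HTTP_WIREPROTO_V2 names):
    #
    #   X-HgProto-1: cbor
    #   X-HgUpgrade-1: <HTTP_WIREPROTO_V2 token>
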
    req, requrl, qs = makev1commandrequest(ui, requestbuilder, caps,
                                           capable, url, 'capabilities',
                                           args)

    resp = sendrequest(ui, opener, req)

    respurl, ct, resp = parsev1commandresponse(ui, url, requrl, qs, resp,
                                               compressible=False,
                                               allowcbor=advertisev2)

    try:
        rawdata = resp.read()
    finally:
        resp.close()

    if not ct.startswith('application/mercurial-'):
        raise error.ProgrammingError('unexpected content-type: %s' % ct)

    if advertisev2:
        if ct == 'application/mercurial-cbor':
            try:
                info = cbor.loads(rawdata)
            except cbor.CBORDecodeError:
                raise error.Abort(_('error decoding CBOR from remote server'),
                                  hint=_('try again and consider contacting '
                                         'the server operator'))

        # We got a legacy response. That's fine.
        elif ct in ('application/mercurial-0.1', 'application/mercurial-0.2'):
            info = {
                'v1capabilities': set(rawdata.split())
            }

        else:
            raise error.RepoError(
                _('unexpected response type from server: %s') % ct)
    else:
        info = {
            'v1capabilities': set(rawdata.split())
        }

    return respurl, info

def makepeer(ui, path, opener=None, requestbuilder=urlreq.request):
    """Construct an appropriate HTTP peer instance.

    ``opener`` is an ``url.opener`` that should be used to establish
    connections and perform HTTP requests.

    ``requestbuilder`` is the type used for constructing HTTP requests.
    It exists as an argument so extensions can override the default.
    """
    u = util.url(path)
    if u.query or u.fragment:
        raise error.Abort(_('unsupported URL component: "%s"') %
                          (u.query or u.fragment))

    # urllib cannot handle URLs with embedded user or passwd.
    url, authinfo = u.authinfo()
    ui.debug('using %s\n' % url)

    opener = opener or urlmod.opener(ui, authinfo)

    respurl, info = performhandshake(ui, url, opener, requestbuilder)

    # Given the intersection of APIs that both we and the server support,
    # sort by their advertised priority and pick the first one.
    #
    # TODO consider making this request-based and interface driven. For
    # example, the caller could say "I want a peer that does X." It's quite
    # possible that not all peers would do that. Since we know the service
    # capabilities, we could filter out services not meeting the
    # requirements. Possibly by consulting the interfaces defined by the
    # peer type.
    apipeerchoices = set(info.get('apis', {}).keys()) & set(API_PEERS.keys())

    preferredchoices = sorted(apipeerchoices,
                              key=lambda x: API_PEERS[x]['priority'],
                              reverse=True)

    for service in preferredchoices:
        apipath = '%s/%s' % (info['apibase'].rstrip('/'), service)

        return API_PEERS[service]['init'](ui, respurl, apipath, opener,
                                          requestbuilder,
                                          info['apis'][service])

    # Failed to construct an API peer. Fall back to legacy.
    return httppeer(ui, path, respurl, opener, requestbuilder,
                    info['v1capabilities'])

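# Hedged usage sketch (caller and URL are assumptions):
# ``makepeer(ui, 'https://example.com/repo')`` performs the capabilities
# handshake and returns an httpv2peer when the server advertises a supported
# API service, falling back to the legacy httppeer otherwise.
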
def instance(ui, path, create, intents=None):
    if create:
        raise error.Abort(_('cannot create new http repository'))
    try:
        if path.startswith('https:') and not urlmod.has_https:
            raise error.Abort(_('Python support for SSL and HTTPS '
                                'is not installed'))

        inst = makepeer(ui, path)

        return inst
    except error.RepoError as httpexception:
        try:
            r = statichttprepo.instance(ui, "static-" + path, create)
            ui.note(_('(falling back to static-http)\n'))
            return r
        except error.RepoError:
            raise httpexception # use the original http RepoError instead
@@ -1,2380 +1,2381 b''
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import hashlib
import os
import random
import sys
import time
import weakref

from .i18n import _
from .node import (
    hex,
    nullid,
    short,
)
from .thirdparty.zope import (
    interface as zi,
)
from . import (
    bookmarks,
    branchmap,
    bundle2,
    changegroup,
    changelog,
    color,
    context,
    dirstate,
    dirstateguard,
    discovery,
    encoding,
    error,
    exchange,
    extensions,
    filelog,
    hook,
    lock as lockmod,
    manifest,
    match as matchmod,
    merge as mergemod,
    mergeutil,
    namespaces,
    narrowspec,
    obsolete,
    pathutil,
    phases,
    pushkey,
    pycompat,
    repository,
    repoview,
    revset,
    revsetlang,
    scmutil,
    sparse,
    store,
    subrepoutil,
    tags as tagsmod,
    transaction,
    txnutil,
    util,
    vfs as vfsmod,
)
from .utils import (
    procutil,
    stringutil,
)

release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq

# set of (path, vfs-location) tuples. vfs-location is:
# - 'plain' for vfs relative paths
# - '' for svfs relative paths
_cachedfiles = set()

class _basefilecache(scmutil.filecache):
    """All filecache usage on a repo is done for logic that should be
    unfiltered
    """
    def __get__(self, repo, type=None):
        if repo is None:
            return self
        return super(_basefilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(_basefilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(_basefilecache, self).__delete__(repo.unfiltered())

class repofilecache(_basefilecache):
    """filecache for files in .hg but outside of .hg/store"""
    def __init__(self, *paths):
        super(repofilecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, 'plain'))

    def join(self, obj, fname):
        return obj.vfs.join(fname)

class storecache(_basefilecache):
    """filecache for files in the store"""
    def __init__(self, *paths):
        super(storecache, self).__init__(*paths)
        for path in paths:
            _cachedfiles.add((path, ''))

    def join(self, obj, fname):
        return obj.sjoin(fname)

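# Illustrative sketch (hedged, mirroring how these classes are used elsewhere
# in this module): the subclasses above act as property decorators, e.g.
#
#   @repofilecache('bookmarks')
#   def _bookmarks(self):
#       return bookmarks.bmstore(self)
#
# so the property is recomputed only when the backing file under .hg (here
# .hg/bookmarks) changes on disk.
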
def isfilecached(repo, name):
    """check if a repo has already cached the "name" filecache-ed property

    This returns a (cachedobj-or-None, iscached) tuple.
    """
    cacheentry = repo.unfiltered()._filecache.get(name, None)
    if not cacheentry:
        return None, False
    return cacheentry.obj, True

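# Example (hedged): ``isfilecached(repo, 'changelog')`` returns
# ``(clobj, True)`` once the changelog filecache property has been
# materialized, and ``(None, False)`` before that.
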
class unfilteredpropertycache(util.propertycache):
    """propertycache that applies to the unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            return super(unfilteredpropertycache, self).__get__(unfi)
        return getattr(unfi, self.name)

class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering into account"""

    def cachevalue(self, obj, value):
        object.__setattr__(obj, self.name, value)


def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    return name in vars(repo.unfiltered())

def unfilteredmethod(orig):
    """decorate a method that always needs to be run on the unfiltered
    version"""
    def wrapper(repo, *args, **kwargs):
        return orig(repo.unfiltered(), *args, **kwargs)
    return wrapper

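# Example (hedged): typically applied as a decorator to localrepository
# methods that must see all revisions regardless of the active repo filter,
# e.g.
#
#   @unfilteredmethod
#   def destroyed(self):
#       ...
#
# which re-dispatches the call onto repo.unfiltered().
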
moderncaps = {'lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
              'unbundle'}
legacycaps = moderncaps.union({'changegroupsubset'})

@zi.implementer(repository.ipeercommandexecutor)
class localcommandexecutor(object):
    def __init__(self, peer):
        self._peer = peer
        self._sent = False
        self._closed = False

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        self.close()

    def callcommand(self, command, args):
        if self._sent:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'sendcommands()')

        if self._closed:
            raise error.ProgrammingError('callcommand() cannot be used after '
                                         'close()')

        # We don't need to support anything fancy. Just call the named
        # method on the peer and return a resolved future.
        fn = getattr(self._peer, pycompat.sysstr(command))

        f = pycompat.futures.Future()

        try:
            result = fn(**pycompat.strkwargs(args))
        except Exception:
            pycompat.future_set_exception_info(f, sys.exc_info()[1:])
        else:
            f.set_result(result)

        return f

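    # Note (illustrative; hypothetical caller): because the command above ran
    # synchronously, the returned future is already resolved, e.g.
    #
    #   with localcommandexecutor(peer) as e:
    #       e.callcommand('heads', {}).result()  # never blocks
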
    def sendcommands(self):
        self._sent = True

    def close(self):
        self._closed = True

@zi.implementer(repository.ipeercommands)
class localpeer(repository.peer):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        super(localpeer, self).__init__()

        if caps is None:
            caps = moderncaps.copy()
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)

    # Begin of _basepeer interface.

    def url(self):
        return self._repo.url()

    def local(self):
        return self._repo

    def peer(self):
        return self

    def canpush(self):
        return True

    def close(self):
        self._repo.close()

    # End of _basepeer interface.

    # Begin of _basewirecommands interface.

    def branchmap(self):
        return self._repo.branchmap()

    def capabilities(self):
        return self._caps

    def clonebundles(self):
        return self._repo.tryread('clonebundles.manifest')

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        """Used to test argument passing over the wire"""
        return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
                                   pycompat.bytestr(four),
                                   pycompat.bytestr(five))

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)[1]
        cb = util.chunkbuffer(chunks)

        if exchange.bundle2requested(bundlecaps):
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def lookup(self, key):
        return self._repo.lookup(key)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def stream_out(self):
        raise error.Abort(_('cannot perform stream clone against local '
                            'peer'))

    def unbundle(self, bundle, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                bundle = exchange.readbundle(self.ui, bundle, None)
                ret = exchange.unbundle(self._repo, bundle, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'),
                                      stringutil.forcebytestr(exc))

    # End of _basewirecommands interface.

    # Begin of peer interface.

    def commandexecutor(self):
        return localcommandexecutor(self)

    # End of peer interface.

@zi.implementer(repository.ipeerlegacycommands)
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        super(locallegacypeer, self).__init__(repo, caps=legacycaps)

    # Begin of baselegacywirecommands interface.

    def between(self, pairs):
        return self._repo.between(pairs)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def changegroup(self, nodes, source):
        outgoing = discovery.outgoing(self._repo, missingroots=nodes,
                                      missingheads=self._repo.heads())
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    def changegroupsubset(self, bases, heads, source):
        outgoing = discovery.outgoing(self._repo, missingroots=bases,
                                      missingheads=heads)
        return changegroup.makechangegroup(self._repo, outgoing, '01', source)

    # End of baselegacywirecommands interface.

# Increment the sub-version when the revlog v2 format changes to lock out old
# clients.
REVLOGV2_REQUIREMENT = 'exp-revlogv2.0'

# Functions receiving (ui, features) that extensions can register to impact
# the ability to load repositories with custom requirements. Only
# functions defined in loaded extensions are called.
#
# The function receives a set of requirement strings that the repository
# is capable of opening. Functions will typically add elements to the
# set to reflect that the extension knows how to handle those requirements.
featuresetupfuncs = set()

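# Illustrative sketch (hypothetical extension code; names are assumptions):
# an extension declares support for a custom requirement by registering a
# callback, e.g. in its uisetup():
#
#   def featuresetup(ui, supported):
#       supported |= {'exp-myfeature'}
#
#   def uisetup(ui):
#       localrepo.featuresetupfuncs.add(featuresetup)
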
368 @zi.implementer(repository.completelocalrepository)
368 @zi.implementer(repository.completelocalrepository)
369 class localrepository(object):
369 class localrepository(object):
370
370
371 # obsolete experimental requirements:
371 # obsolete experimental requirements:
372 # - manifestv2: An experimental new manifest format that allowed
372 # - manifestv2: An experimental new manifest format that allowed
373 # for stem compression of long paths. Experiment ended up not
373 # for stem compression of long paths. Experiment ended up not
374 # being successful (repository sizes went up due to worse delta
374 # being successful (repository sizes went up due to worse delta
375 # chains), and the code was deleted in 4.6.
375 # chains), and the code was deleted in 4.6.
376 supportedformats = {
376 supportedformats = {
377 'revlogv1',
377 'revlogv1',
378 'generaldelta',
378 'generaldelta',
379 'treemanifest',
379 'treemanifest',
380 REVLOGV2_REQUIREMENT,
380 REVLOGV2_REQUIREMENT,
381 }
381 }
382 _basesupported = supportedformats | {
382 _basesupported = supportedformats | {
383 'store',
383 'store',
384 'fncache',
384 'fncache',
385 'shared',
385 'shared',
386 'relshared',
386 'relshared',
387 'dotencode',
387 'dotencode',
388 'exp-sparse',
388 'exp-sparse',
389 }
389 }
390 openerreqs = {
390 openerreqs = {
391 'revlogv1',
391 'revlogv1',
392 'generaldelta',
392 'generaldelta',
393 'treemanifest',
393 'treemanifest',
394 }
394 }
395
395
396 # list of prefix for file which can be written without 'wlock'
396 # list of prefix for file which can be written without 'wlock'
397 # Extensions should extend this list when needed
397 # Extensions should extend this list when needed
398 _wlockfreeprefix = {
398 _wlockfreeprefix = {
399 # We migh consider requiring 'wlock' for the next
399 # We migh consider requiring 'wlock' for the next
400 # two, but pretty much all the existing code assume
400 # two, but pretty much all the existing code assume
401 # wlock is not needed so we keep them excluded for
401 # wlock is not needed so we keep them excluded for
402 # now.
402 # now.
403 'hgrc',
403 'hgrc',
404 'requires',
404 'requires',
405 # XXX cache is a complicatged business someone
405 # XXX cache is a complicatged business someone
406 # should investigate this in depth at some point
406 # should investigate this in depth at some point
407 'cache/',
407 'cache/',
408 # XXX shouldn't be dirstate covered by the wlock?
408 # XXX shouldn't be dirstate covered by the wlock?
409 'dirstate',
409 'dirstate',
410 # XXX bisect was still a bit too messy at the time
410 # XXX bisect was still a bit too messy at the time
411 # this changeset was introduced. Someone should fix
411 # this changeset was introduced. Someone should fix
412 # the remainig bit and drop this line
412 # the remainig bit and drop this line
413 'bisect.state',
413 'bisect.state',
414 }
414 }
415
415
416 def __init__(self, baseui, path, create=False):
416 def __init__(self, baseui, path, create=False, intents=None):
417 self.requirements = set()
417 self.requirements = set()
418 self.filtername = None
418 self.filtername = None
419 # wvfs: rooted at the repository root, used to access the working copy
419 # wvfs: rooted at the repository root, used to access the working copy
420 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
420 self.wvfs = vfsmod.vfs(path, expandpath=True, realpath=True)
421 # vfs: rooted at .hg, used to access repo files outside of .hg/store
421 # vfs: rooted at .hg, used to access repo files outside of .hg/store
422 self.vfs = None
422 self.vfs = None
423 # svfs: usually rooted at .hg/store, used to access repository history
423 # svfs: usually rooted at .hg/store, used to access repository history
424 # If this is a shared repository, this vfs may point to another
424 # If this is a shared repository, this vfs may point to another
425 # repository's .hg/store directory.
425 # repository's .hg/store directory.
426 self.svfs = None
426 self.svfs = None
427 self.root = self.wvfs.base
427 self.root = self.wvfs.base
428 self.path = self.wvfs.join(".hg")
428 self.path = self.wvfs.join(".hg")
429 self.origroot = path
429 self.origroot = path
430 # This is only used by context.workingctx.match in order to
430 # This is only used by context.workingctx.match in order to
431 # detect files in subrepos.
431 # detect files in subrepos.
432 self.auditor = pathutil.pathauditor(
432 self.auditor = pathutil.pathauditor(
433 self.root, callback=self._checknested)
433 self.root, callback=self._checknested)
434 # This is only used by context.basectx.match in order to detect
434 # This is only used by context.basectx.match in order to detect
435 # files in subrepos.
435 # files in subrepos.
436 self.nofsauditor = pathutil.pathauditor(
436 self.nofsauditor = pathutil.pathauditor(
437 self.root, callback=self._checknested, realfs=False, cached=True)
437 self.root, callback=self._checknested, realfs=False, cached=True)
438 self.baseui = baseui
438 self.baseui = baseui
439 self.ui = baseui.copy()
439 self.ui = baseui.copy()
440 self.ui.copy = baseui.copy # prevent copying repo configuration
440 self.ui.copy = baseui.copy # prevent copying repo configuration
441 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
441 self.vfs = vfsmod.vfs(self.path, cacheaudited=True)
442 if (self.ui.configbool('devel', 'all-warnings') or
442 if (self.ui.configbool('devel', 'all-warnings') or
443 self.ui.configbool('devel', 'check-locks')):
443 self.ui.configbool('devel', 'check-locks')):
444 self.vfs.audit = self._getvfsward(self.vfs.audit)
444 self.vfs.audit = self._getvfsward(self.vfs.audit)
445 # A list of callback to shape the phase if no data were found.
445 # A list of callback to shape the phase if no data were found.
446 # Callback are in the form: func(repo, roots) --> processed root.
446 # Callback are in the form: func(repo, roots) --> processed root.
447 # This list it to be filled by extension during repo setup
447 # This list it to be filled by extension during repo setup
448 self._phasedefaults = []
448 self._phasedefaults = []
449 try:
449 try:
450 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
450 self.ui.readconfig(self.vfs.join("hgrc"), self.root)
451 self._loadextensions()
451 self._loadextensions()
452 except IOError:
452 except IOError:
453 pass
453 pass
454
454
455 if featuresetupfuncs:
455 if featuresetupfuncs:
456 self.supported = set(self._basesupported) # use private copy
456 self.supported = set(self._basesupported) # use private copy
457 extmods = set(m.__name__ for n, m
457 extmods = set(m.__name__ for n, m
458 in extensions.extensions(self.ui))
458 in extensions.extensions(self.ui))
459 for setupfunc in featuresetupfuncs:
459 for setupfunc in featuresetupfuncs:
460 if setupfunc.__module__ in extmods:
460 if setupfunc.__module__ in extmods:
461 setupfunc(self.ui, self.supported)
461 setupfunc(self.ui, self.supported)
462 else:
462 else:
463 self.supported = self._basesupported
463 self.supported = self._basesupported
464 color.setup(self.ui)
464 color.setup(self.ui)
465
465
466 # Add compression engines.
466 # Add compression engines.
467 for name in util.compengines:
467 for name in util.compengines:
468 engine = util.compengines[name]
468 engine = util.compengines[name]
469 if engine.revlogheader():
469 if engine.revlogheader():
470 self.supported.add('exp-compression-%s' % name)
470 self.supported.add('exp-compression-%s' % name)
471
471
472 if not self.vfs.isdir():
472 if not self.vfs.isdir():
473 if create:
473 if create:
474 self.requirements = newreporequirements(self)
474 self.requirements = newreporequirements(self)
475
475
476 if not self.wvfs.exists():
476 if not self.wvfs.exists():
477 self.wvfs.makedirs()
477 self.wvfs.makedirs()
478 self.vfs.makedir(notindexed=True)
478 self.vfs.makedir(notindexed=True)
479
479
480 if 'store' in self.requirements:
480 if 'store' in self.requirements:
481 self.vfs.mkdir("store")
481 self.vfs.mkdir("store")
482
482
483 # create an invalid changelog
483 # create an invalid changelog
484 self.vfs.append(
484 self.vfs.append(
485 "00changelog.i",
485 "00changelog.i",
486 '\0\0\0\2' # represents revlogv2
486 '\0\0\0\2' # represents revlogv2
487 ' dummy changelog to prevent using the old repo layout'
487 ' dummy changelog to prevent using the old repo layout'
488 )
488 )
489 else:
489 else:
490 raise error.RepoError(_("repository %s not found") % path)
490 raise error.RepoError(_("repository %s not found") % path)
491 elif create:
491 elif create:
492 raise error.RepoError(_("repository %s already exists") % path)
492 raise error.RepoError(_("repository %s already exists") % path)
493 else:
493 else:
494 try:
494 try:
495 self.requirements = scmutil.readrequires(
495 self.requirements = scmutil.readrequires(
496 self.vfs, self.supported)
496 self.vfs, self.supported)
497 except IOError as inst:
497 except IOError as inst:
498 if inst.errno != errno.ENOENT:
498 if inst.errno != errno.ENOENT:
499 raise
499 raise
500
500
501 cachepath = self.vfs.join('cache')
501 cachepath = self.vfs.join('cache')
502 self.sharedpath = self.path
502 self.sharedpath = self.path
503 try:
503 try:
504 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
504 sharedpath = self.vfs.read("sharedpath").rstrip('\n')
505 if 'relshared' in self.requirements:
505 if 'relshared' in self.requirements:
506 sharedpath = self.vfs.join(sharedpath)
506 sharedpath = self.vfs.join(sharedpath)
507 vfs = vfsmod.vfs(sharedpath, realpath=True)
507 vfs = vfsmod.vfs(sharedpath, realpath=True)
508 cachepath = vfs.join('cache')
508 cachepath = vfs.join('cache')
509 s = vfs.base
509 s = vfs.base
510 if not vfs.exists():
510 if not vfs.exists():
511 raise error.RepoError(
511 raise error.RepoError(
512 _('.hg/sharedpath points to nonexistent directory %s') % s)
512 _('.hg/sharedpath points to nonexistent directory %s') % s)
513 self.sharedpath = s
513 self.sharedpath = s
514 except IOError as inst:
514 except IOError as inst:
515 if inst.errno != errno.ENOENT:
515 if inst.errno != errno.ENOENT:
516 raise
516 raise
517
517
518 if 'exp-sparse' in self.requirements and not sparse.enabled:
518 if 'exp-sparse' in self.requirements and not sparse.enabled:
519 raise error.RepoError(_('repository is using sparse feature but '
519 raise error.RepoError(_('repository is using sparse feature but '
520 'sparse is not enabled; enable the '
520 'sparse is not enabled; enable the '
521 '"sparse" extensions to access'))
521 '"sparse" extensions to access'))
522
522
523 self.store = store.store(
523 self.store = store.store(
524 self.requirements, self.sharedpath,
524 self.requirements, self.sharedpath,
525 lambda base: vfsmod.vfs(base, cacheaudited=True))
525 lambda base: vfsmod.vfs(base, cacheaudited=True))
526 self.spath = self.store.path
526 self.spath = self.store.path
527 self.svfs = self.store.vfs
527 self.svfs = self.store.vfs
528 self.sjoin = self.store.join
528 self.sjoin = self.store.join
529 self.vfs.createmode = self.store.createmode
529 self.vfs.createmode = self.store.createmode
530 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
530 self.cachevfs = vfsmod.vfs(cachepath, cacheaudited=True)
531 self.cachevfs.createmode = self.store.createmode
531 self.cachevfs.createmode = self.store.createmode
532 if (self.ui.configbool('devel', 'all-warnings') or
532 if (self.ui.configbool('devel', 'all-warnings') or
533 self.ui.configbool('devel', 'check-locks')):
533 self.ui.configbool('devel', 'check-locks')):
534 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
534 if util.safehasattr(self.svfs, 'vfs'): # this is filtervfs
535 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
535 self.svfs.vfs.audit = self._getsvfsward(self.svfs.vfs.audit)
536 else: # standard vfs
536 else: # standard vfs
537 self.svfs.audit = self._getsvfsward(self.svfs.audit)
537 self.svfs.audit = self._getsvfsward(self.svfs.audit)
538 self._applyopenerreqs()
538 self._applyopenerreqs()
539 if create:
539 if create:
540 self._writerequirements()
540 self._writerequirements()
541
541
542 self._dirstatevalidatewarned = False
542 self._dirstatevalidatewarned = False
543
543
544 self._branchcaches = {}
544 self._branchcaches = {}
545 self._revbranchcache = None
545 self._revbranchcache = None
546 self._filterpats = {}
546 self._filterpats = {}
547 self._datafilters = {}
547 self._datafilters = {}
548 self._transref = self._lockref = self._wlockref = None
548 self._transref = self._lockref = self._wlockref = None
549
549
550 # A cache for various files under .hg/ that tracks file changes,
550 # A cache for various files under .hg/ that tracks file changes,
551 # (used by the filecache decorator)
551 # (used by the filecache decorator)
552 #
552 #
553 # Maps a property name to its util.filecacheentry
553 # Maps a property name to its util.filecacheentry
554 self._filecache = {}
554 self._filecache = {}
555
555
556 # hold sets of revision to be filtered
556 # hold sets of revision to be filtered
557 # should be cleared when something might have changed the filter value:
557 # should be cleared when something might have changed the filter value:
558 # - new changesets,
558 # - new changesets,
559 # - phase change,
559 # - phase change,
560 # - new obsolescence marker,
560 # - new obsolescence marker,
561 # - working directory parent change,
561 # - working directory parent change,
562 # - bookmark changes
562 # - bookmark changes
563 self.filteredrevcache = {}
563 self.filteredrevcache = {}
564
564
565 # post-dirstate-status hooks
565 # post-dirstate-status hooks
566 self._postdsstatus = []
566 self._postdsstatus = []
567
567
568 # generic mapping between names and nodes
568 # generic mapping between names and nodes
569 self.names = namespaces.namespaces()
569 self.names = namespaces.namespaces()
570
570
571 # Key to signature value.
571 # Key to signature value.
572 self._sparsesignaturecache = {}
572 self._sparsesignaturecache = {}
573 # Signature to cached matcher instance.
573 # Signature to cached matcher instance.
574 self._sparsematchercache = {}
574 self._sparsematchercache = {}
575
575
576 def _getvfsward(self, origfunc):
576 def _getvfsward(self, origfunc):
577 """build a ward for self.vfs"""
577 """build a ward for self.vfs"""
578 rref = weakref.ref(self)
578 rref = weakref.ref(self)
579 def checkvfs(path, mode=None):
579 def checkvfs(path, mode=None):
580 ret = origfunc(path, mode=mode)
580 ret = origfunc(path, mode=mode)
581 repo = rref()
581 repo = rref()
582 if (repo is None
582 if (repo is None
583 or not util.safehasattr(repo, '_wlockref')
583 or not util.safehasattr(repo, '_wlockref')
584 or not util.safehasattr(repo, '_lockref')):
584 or not util.safehasattr(repo, '_lockref')):
585 return
585 return
586 if mode in (None, 'r', 'rb'):
586 if mode in (None, 'r', 'rb'):
587 return
587 return
588 if path.startswith(repo.path):
588 if path.startswith(repo.path):
589 # truncate name relative to the repository (.hg)
589 # truncate name relative to the repository (.hg)
590 path = path[len(repo.path) + 1:]
590 path = path[len(repo.path) + 1:]
591 if path.startswith('cache/'):
591 if path.startswith('cache/'):
592 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
592 msg = 'accessing cache with vfs instead of cachevfs: "%s"'
593 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
593 repo.ui.develwarn(msg % path, stacklevel=2, config="cache-vfs")
594 if path.startswith('journal.'):
594 if path.startswith('journal.'):
595 # journal is covered by 'lock'
595 # journal is covered by 'lock'
596 if repo._currentlock(repo._lockref) is None:
596 if repo._currentlock(repo._lockref) is None:
597 repo.ui.develwarn('write with no lock: "%s"' % path,
597 repo.ui.develwarn('write with no lock: "%s"' % path,
598 stacklevel=2, config='check-locks')
598 stacklevel=2, config='check-locks')
599 elif repo._currentlock(repo._wlockref) is None:
599 elif repo._currentlock(repo._wlockref) is None:
600 # rest of vfs files are covered by 'wlock'
600 # rest of vfs files are covered by 'wlock'
601 #
601 #
602 # exclude special files
602 # exclude special files
603 for prefix in self._wlockfreeprefix:
603 for prefix in self._wlockfreeprefix:
604 if path.startswith(prefix):
604 if path.startswith(prefix):
605 return
605 return
606 repo.ui.develwarn('write with no wlock: "%s"' % path,
606 repo.ui.develwarn('write with no wlock: "%s"' % path,
607 stacklevel=2, config='check-locks')
607 stacklevel=2, config='check-locks')
608 return ret
608 return ret
609 return checkvfs
609 return checkvfs
610
610
    def _getsvfsward(self, origfunc):
        """build a ward for self.svfs"""
        rref = weakref.ref(self)
        def checksvfs(path, mode=None):
            ret = origfunc(path, mode=mode)
            repo = rref()
            if repo is None or not util.safehasattr(repo, '_lockref'):
                return
            if mode in (None, 'r', 'rb'):
                return
            if path.startswith(repo.sharedpath):
                # truncate name relative to the repository (.hg)
                path = path[len(repo.sharedpath) + 1:]
            if repo._currentlock(repo._lockref) is None:
                repo.ui.develwarn('write with no lock: "%s"' % path,
                                  stacklevel=3)
            return ret
        return checksvfs

    def close(self):
        self._writecaches()

    def _loadextensions(self):
        extensions.loadall(self.ui)

    def _writecaches(self):
        if self._revbranchcache:
            self._revbranchcache.write()

    def _restrictcapabilities(self, caps):
        if self.ui.configbool('experimental', 'bundle2-advertise'):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self,
                                                              role='client'))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps

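    # Editor's sketch (assumed shape, not from the original source): the
    # advertised capability is the URL-quoted bundle2 capabilities blob
    # appended after 'bundle2=', roughly:
    #
    #     bundle2=HG20%0Achangegroup%3D01%2C02%0A...
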
    def _applyopenerreqs(self):
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas')
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
        chainspan = self.ui.configbytes('experimental', 'maxdeltachainspan')
        if 0 <= chainspan:
            self.svfs.options['maxdeltachainspan'] = chainspan
        mmapindexthreshold = self.ui.configbytes('experimental',
                                                 'mmapindexthreshold')
        if mmapindexthreshold is not None:
            self.svfs.options['mmapindexthreshold'] = mmapindexthreshold
        withsparseread = self.ui.configbool('experimental', 'sparse-read')
        srdensitythres = float(self.ui.config('experimental',
                                              'sparse-read.density-threshold'))
        srmingapsize = self.ui.configbytes('experimental',
                                           'sparse-read.min-gap-size')
        self.svfs.options['with-sparse-read'] = withsparseread
        self.svfs.options['sparse-read-density-threshold'] = srdensitythres
        self.svfs.options['sparse-read-min-gap-size'] = srmingapsize

        for r in self.requirements:
            if r.startswith('exp-compression-'):
                self.svfs.options['compengine'] = r[len('exp-compression-'):]

        # TODO move "revlogv2" to openerreqs once finalized.
        if REVLOGV2_REQUIREMENT in self.requirements:
            self.svfs.options['revlogv2'] = True

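    # Editor's sketch (hypothetical configuration, not from the original
    # source): the method above maps [format]/[experimental] settings onto
    # self.svfs.options, e.g.:
    #
    #     [format]
    #     chunkcachesize = 65536
    #     maxchainlen = 1000
    #
    #     [experimental]
    #     sparse-read = yes
    #
    # would set the 'chunkcachesize', 'maxchainlen' and 'with-sparse-read'
    # options consumed by the revlog layer.
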
    def _writerequirements(self):
        scmutil.writerequires(self.vfs, self.requirements)

    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

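    # Editor's note (illustrative, not from the original source): given a
    # subrepository entry 'sub' in ctx.substate, _checknested(root + '/sub')
    # returns True, while _checknested(root + '/sub/nested') is delegated to
    # the subrepository's own checknested('nested').
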
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name, visibilityexceptions=None):
        """Return a filtered version of a repository"""
        cls = repoview.newtype(self.unfiltered().__class__)
        return cls(self, name, visibilityexceptions)

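    # Editor's note (illustrative): repoview-based filtering backs calls
    # such as self.filtered('visible') used elsewhere in this class; the
    # view hides filtered revisions from the changelog without mutating the
    # underlying repository.
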
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        return bookmarks.bmstore(self)

    @property
    def _activebookmark(self):
        return self._bookmarks.active

    # _phasesets depend on the changelog. What we need is to call
    # _phasecache.invalidate() if '00changelog.i' was changed, but that
    # can't be easily expressed in the filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        return obsolete.makestore(self.ui, self)

    @storecache('00changelog.i')
    def changelog(self):
        return changelog.changelog(self.svfs,
                                   trypending=txnutil.mayhavepending(self.root))

    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifestrevlog(self.svfs)

    @storecache('00manifest.i')
    def manifestlog(self):
        return manifest.manifestlog(self.svfs, self)

    @repofilecache('dirstate')
    def dirstate(self):
        sparsematchfn = lambda: sparse.matcher(self)

        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate, sparsematchfn)

    def _dirstatevalidate(self, node):
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid

    @repofilecache(narrowspec.FILENAME)
    def narrowpats(self):
        """matcher patterns for this repository's narrowspec

        A tuple of (includes, excludes).
        """
        source = self
        if self.shared():
            from . import hg
            source = hg.sharedreposource(self)
        return narrowspec.load(source)

    @repofilecache(narrowspec.FILENAME)
    def _narrowmatch(self):
        if changegroup.NARROW_REQUIREMENT not in self.requirements:
            return matchmod.always(self.root, '')
        include, exclude = self.narrowpats
        return narrowspec.match(self.root, include=include, exclude=exclude)

    # TODO(martinvonz): make this property-like instead?
    def narrowmatch(self):
        return self._narrowmatch

    def setnarrowpats(self, newincludes, newexcludes):
        target = self
        if self.shared():
            from . import hg
            target = hg.sharedreposource(self)
        narrowspec.save(target, newincludes, newexcludes)
        self.invalidate(clearfilecache=True)

    def __getitem__(self, changeid):
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, context.basectx):
            return changeid
        if isinstance(changeid, slice):
            # wdirrev isn't contiguous so the slice shouldn't include it
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        try:
            return context.changectx(self, changeid)
        except error.WdirUnsupported:
            return context.workingctx(self)

    def __contains__(self, changeid):
        """True if the given changeid exists

        error.LookupError is raised if an ambiguous node is specified.
        """
        try:
            self[changeid]
            return True
        except (error.RepoLookupError, error.FilteredIndexError,
                error.FilteredLookupError):
            return False

    def __nonzero__(self):
        return True

    __bool__ = __nonzero__

    def __len__(self):
        # no need to pay the cost of repoview.changelog
        unfi = self.unfiltered()
        return len(unfi.changelog)

    def __iter__(self):
        return iter(self.changelog)

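    # Editor's sketch (illustrative usage, not from the original source):
    #
    #     repo[None]    # workingctx for the working directory
    #     repo['tip']   # changectx looked up by symbol
    #     repo[0:5]     # list of changectxs, skipping filtered revisions
    #     len(repo)     # revision count of the unfiltered changelog
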
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revsetlang.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()`` or
        ``repo.anyrevs([expr], user=True)``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revsetlang.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)

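    # Editor's sketch (illustrative usage, not from the original source):
    # the %-formatting escapes arguments per revsetlang.formatspec, e.g.:
    #
    #     repo.revs('branch(%s)', 'default')   # %s escapes a string
    #     repo.revs('heads(%ld)', [1, 2, 3])   # %ld escapes a list of ints
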
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def anyrevs(self, specs, user=False, localalias=None):
        '''Find revisions matching one of the given revsets.

        Revset aliases from the configuration are not expanded by default.
        To expand user aliases, specify ``user=True``. To provide some local
        definitions overriding user aliases, set ``localalias`` to
        ``{name: definitionstring}``.
        '''
        if user:
            m = revset.matchany(self.ui, specs,
                                lookup=revset.lookupfn(self),
                                localalias=localalias)
        else:
            m = revset.matchany(None, specs, localalias=localalias)
        return m(self)

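    # Editor's sketch (hypothetical alias, not from the original source):
    #
    #     repo.anyrevs(['mine()'], user=True,
    #                  localalias={'mine': 'heads(default)'})
    #
    # resolves 'mine()' through the local definition ahead of user aliases.
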
    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This is a convenience method to aid invoking hooks. Extensions
        likely won't call this unless they have registered a custom hook or
        are replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags-related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global'
                # or 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache

    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t

    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        # map tag name to (node, hist)
        alltags = tagsmod.findglobaltags(self.ui, self)
        # map tag name to tag type
        tagtypes = dict((tag, 'global') for tag in alltags)

        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])

    def nodebookmarks(self, node):
        """return the list of bookmarks pointing to the specified node"""
        marks = []
        for bookmark, n in self._bookmarks.iteritems():
            if n == node:
                marks.append(bookmark)
        return sorted(marks)

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    @unfilteredmethod
    def revbranchcache(self):
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing
        branch (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass

    def lookup(self, key):
        return scmutil.revsymbol(self, key).node()

    def lookupbranch(self, key):
        if key in self.branchmap():
            return key

        return scmutil.revsymbol(self, key).branch()

    def known(self, nodes):
        cl = self.changelog
        nm = cl.nodemap
        filtered = cl.filteredrevs
        result = []
        for n in nodes:
            r = nm.get(n)
            resp = not (r is None or r in filtered)
            result.append(resp)
        return result

    def local(self):
        return self

    def publishing(self):
        # it's safe (and desirable) to trust the publish flag
        # unconditionally so that we don't finalize changes shared between
        # users via ssh or nfs
        return self.ui.configbool('phases', 'publish', untrusted=True)

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs

    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None

    def wjoin(self, f, *insidef):
        return self.vfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def setparents(self, p1, p2=nullid):
        with self.dirstate.parentchange():
            copies = self.dirstate.setparents(p1, p2)
            pctx = self[p1]
            if copies:
                # Adjust copy records; the dirstate cannot do it, as it
                # requires access to the parents' manifests. Preserve them
                # only for entries added to the first parent.
                for f in copies:
                    if f not in pctx and copies[f] in pctx:
                        self.dirstate.copy(copies[f], f)
            if p2 == nullid:
                for f, s in sorted(self.dirstate.copies().items()):
                    if f not in pctx and s not in pctx:
                        self.dirstate.copy(None, f)

    def filectx(self, path, changeid=None, fileid=None, changectx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid,
                               changectx=changectx)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def _loadfilter(self, filter):
        if filter not in self._filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: procutil.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not pycompat.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self._filterpats[filter] = l
        return self._filterpats[filter]

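    # Editor's sketch (hypothetical hgrc, not from the original source):
    # filter patterns come from config sections such as [encode]/[decode],
    # e.g.:
    #
    #     [encode]
    #     **.txt = pipe: dos2unix
    #
    # A command of '!' disables the pattern, and a command starting with a
    # registered data filter name dispatches to that Python function
    # instead of an external program.
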
    def _filter(self, filterpats, filename, data):
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self.wvfs.islink(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags, backgroundclose=False, **kwargs):
        """write ``data`` into ``filename`` in the working directory

        Returns the length of the written (possibly decoded) data.
        """
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data, backgroundclose=backgroundclose,
                            **kwargs)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)
            else:
                self.wvfs.setflags(filename, False, False)
        return len(data)

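    # Editor's note (illustrative, not from the original source): 'l' in
    # ``flags`` writes a symlink whose target is ``data``; 'x' marks the
    # file executable. A hypothetical call:
    #
    #     repo.wwrite('build.sh', b'#!/bin/sh\n', 'x')
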
    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)

    def currenttransaction(self):
        """return the current transaction or None if none exists"""
        if self._transref:
            tr = self._transref()
        else:
            tr = None

        if tr and tr.running():
            return tr
        return None

    def transaction(self, desc, report=None):
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise error.ProgrammingError('transaction requires locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest(name=desc)

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        idbase = "%.40f#%f" % (random.random(), time.time())
        ha = hex(hashlib.sha1(idbase).digest())
        txnid = 'TXN:' + ha
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        # Code to track tag movement
        #
        # Since tags are all handled as file content, it is actually quite
        # hard to track these movements from the code's perspective, so we
        # fall back to tracking at the repository level. One could envision
        # tracking changes to the '.hgtags' file through changegroup
        # application, but that fails to cope with cases where a transaction
        # exposes new heads without a changegroup being involved (e.g. phase
        # movement).
        #
        # For now, we gate the feature behind a flag since it likely comes
        # with a performance impact. The current code runs more often than
        # needed and does not use caches as much as it could. The current
        # focus is on the behavior of the feature, so we disable it by
        # default. The flag will be removed when we are happy with the
        # performance impact.
        #
        # Once this feature is no longer experimental, move the following
        # documentation to the appropriate help section:
        #
        # The ``HG_TAG_MOVED`` variable will be set if the transaction
        # touched tags (new, changed or deleted tags). In addition, the
        # details of these changes are made available in a file at:
        #     ``REPOROOT/.hg/changes/tags.changes``.
        # Make sure you check for HG_TAG_MOVED before reading that file, as
        # it might exist from a previous transaction even if no tags were
        # touched in this one. Changes are recorded in a line-based format::
        #
        #   <action> <hex-node> <tag-name>\n
        #
        # Actions are defined as follows:
        #   "-R": tag is removed,
        #   "+A": tag is added,
        #   "-M": tag is moved (old value),
        #   "+M": tag is moved (new value),
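        #
        # Editor's sketch (hypothetical file content, not from the source):
        # a transaction that adds tag v1.0 and moves tag v0.9 would leave
        # .hg/changes/tags.changes looking roughly like:
        #
        #   +A 0123456789abcdef0123456789abcdef01234567 v1.0
        #   -M 89abcdef0123456789abcdef0123456789abcdef v0.9
        #   +M fedcba9876543210fedcba9876543210fedcba98 v0.9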
        tracktags = lambda x: None
        # experimental config: experimental.hook-track-tags
        shouldtracktags = self.ui.configbool('experimental', 'hook-track-tags')
        if desc != 'strip' and shouldtracktags:
            oldheads = self.changelog.headrevs()
            def tracktags(tr2):
                repo = reporef()
                oldfnodes = tagsmod.fnoderevs(repo.ui, repo, oldheads)
                newheads = repo.changelog.headrevs()
                newfnodes = tagsmod.fnoderevs(repo.ui, repo, newheads)
                # notes: we compare lists here.
                # As we do it only once, building a set would not be cheaper.
                changes = tagsmod.difftags(repo.ui, repo, oldfnodes, newfnodes)
                if changes:
                    tr2.hookargs['tag_moved'] = '1'
                    with repo.vfs('changes/tags.changes', 'w',
                                  atomictemp=True) as changesfile:
                        # note: we do not register the file to the
                        # transaction because we need it to still exist
                        # when the transaction is closed (for txnclose
                        # hooks)
                        tagsmod.writediff(changesfile, changes)
        def validate(tr2):
            """will run pre-closing hooks"""
            # XXX the transaction API is a bit lacking here so we take a
            # hacky path for now
            #
            # We cannot add this as a "pending" hook, since the
            # 'tr.hookargs' dict is copied before these run. In addition,
            # we need the data available to in-memory hooks too.
            #
            # Moreover, we also need to make sure this runs before txnclose
            # hooks and there is no "pending" mechanism that would execute
            # logic only if hooks are about to run.
            #
            # Fixing this limitation of the transaction is also needed to
            # track other families of changes (bookmarks, phases,
            # obsolescence).
            #
            # This will have to be fixed before we remove the experimental
            # gating.
            tracktags(tr2)
            repo = reporef()
            if repo.ui.configbool('experimental', 'single-head-per-branch'):
                scmutil.enforcesinglehead(repo, tr2, desc)
            if hook.hashook(repo.ui, 'pretxnclose-bookmark'):
                for name, (old, new) in sorted(tr.changes['bookmarks'].items()):
                    args = tr.hookargs.copy()
                    args.update(bookmarks.preparehookargs(name, old, new))
                    repo.hook('pretxnclose-bookmark', throw=True,
                              txnname=desc,
                              **pycompat.strkwargs(args))
            if hook.hashook(repo.ui, 'pretxnclose-phase'):
                cl = repo.unfiltered().changelog
                for rev, (old, new) in tr.changes['phases'].items():
                    args = tr.hookargs.copy()
                    node = hex(cl.node(rev))
                    args.update(phases.preparehookargs(node, old, new))
                    repo.hook('pretxnclose-phase', throw=True, txnname=desc,
                              **pycompat.strkwargs(args))

            repo.hook('pretxnclose', throw=True,
                      txnname=desc, **pycompat.strkwargs(tr.hookargs))
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because in-memory
                # changes aren't written out when closing the transaction
                # if tr.addfilegenerator (via dirstate.write or so) wasn't
                # invoked while the transaction was running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, 'journal.dirstate')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn,
                                     checkambigfiles=_cachedfiles,
                                     name=desc)
        tr.changes['revs'] = xrange(0, 0)
        tr.changes['obsmarkers'] = set()
        tr.changes['phases'] = {}
        tr.changes['bookmarks'] = {}

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize means that the
        # file is outdated when running hooks. As fncache is used for
        # streaming clones, this is not expected to break anything that
        # happens during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if the transaction is successful; will schedule a
            hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hookfunc():
                repo = reporef()
                if hook.hashook(repo.ui, 'txnclose-bookmark'):
                    bmchanges = sorted(tr.changes['bookmarks'].items())
                    for name, (old, new) in bmchanges:
                        args = tr.hookargs.copy()
                        args.update(bookmarks.preparehookargs(name, old, new))
                        repo.hook('txnclose-bookmark', throw=False,
                                  txnname=desc, **pycompat.strkwargs(args))

                if hook.hashook(repo.ui, 'txnclose-phase'):
                    cl = repo.unfiltered().changelog
                    phasemv = sorted(tr.changes['phases'].items())
                    for rev, (old, new) in phasemv:
                        args = tr.hookargs.copy()
                        node = hex(cl.node(rev))
                        args.update(phases.preparehookargs(node, old, new))
                        repo.hook('txnclose-phase', throw=False, txnname=desc,
                                  **pycompat.strkwargs(args))

                repo.hook('txnclose', throw=False, txnname=desc,
                          **pycompat.strkwargs(hookargs))
            reporef()._afterlock(hookfunc)
        tr.addfinalize('txnclose-hook', txnclosehook)
        # Include a leading "-" to make it happen before the transaction
        # summary reports registered via scmutil.registersummarycallback()
        # whose names are 00-txnreport etc. That way, the caches will be warm
        # when the callbacks run.
        tr.addpostclose('-warm-cache', self._buildcacheupdater(tr))
        def txnaborthook(tr2):
            """To be run if the transaction is aborted.
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **pycompat.strkwargs(tr2.hookargs))
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if the transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        scmutil.registersummarycallback(self, tr, desc)
        return tr

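    # Illustrative hgrc sketch: because txnclosehook defers the real hook
    # invocation through _afterlock(), a 'txnclose' hook configured like
    #
    #   [hooks]
    #   txnclose = echo "transaction $HG_TXNNAME closed"
    #
    # fires only once the outermost lock is released; $HG_TXNNAME carries
    # the 'desc' the transaction was opened with.
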
    def _journalfiles(self):
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))

    def undofiles(self):
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
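
    # For illustration: undoname() (defined at module level in this file)
    # maps each journal file to its post-transaction counterpart, e.g.
    # 'journal.dirstate' -> 'undo.dirstate', which is what _rollback()
    # below reads back.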

    @unfilteredmethod
    def _writejournal(self, desc):
        self.dirstate.savebackup(None, 'journal.dirstate')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))

    def recover(self):
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn,
                                     checkambigfiles=_cachedfiles)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False

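    # Usage note: recover() backs the 'hg recover' command, which replays
    # .hg/store/journal to restore the pre-transaction state, e.g.:
    #
    #   $ hg recover
    #   rolling back interrupted transaction
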
    def rollback(self, dryrun=False, force=False):
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                dsguard = dirstateguard.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)

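    # Usage note: this backs 'hg rollback'. A dry run reports what would be
    # undone without touching the store, and -f skips the safety check in
    # _rollback() below:
    #
    #   $ hg rollback --dry-run
    #   repository tip rolled back to revision 41 (undo commit)
    #
    # ('41' here is only an illustrative revision number.)
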
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        ui = self.ui
        try:
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %d'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn,
                             checkambigfiles=_cachedfiles)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting the already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, 'undo.dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0

    def _buildcacheupdater(self, newtransaction):
        """called during a transaction to build the callback updating caches

        Lives on the repository to help extensions that might want to augment
        this logic. For this purpose, the created transaction is passed to the
        method.
        """
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def updater(tr):
            repo = reporef()
            repo.updatecaches(tr)
        return updater

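    # Illustrative extension hook (warmmycache is a hypothetical helper):
    # an extension can augment cache warming by wrapping this method:
    #
    #   from mercurial import extensions, localrepo
    #
    #   def _wrapupdater(orig, repo, tr):
    #       updater = orig(repo, tr)
    #       def run(tr):
    #           updater(tr)
    #           warmmycache(repo, tr)  # hypothetical extension cache
    #       return run
    #
    #   extensions.wrapfunction(localrepo.localrepository,
    #                           '_buildcacheupdater', _wrapupdater)
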
    @unfilteredmethod
    def updatecaches(self, tr=None, full=False):
        """warm appropriate caches

        If this function is called after a transaction closed, the transaction
        will be available in the 'tr' argument. This can be used to selectively
        update caches relevant to the changes in that transaction.

        If 'full' is set, make sure all caches the function knows about have
        up-to-date data, even the ones usually loaded more lazily.
        """
        if tr is not None and tr.hookargs.get('source') == 'strip':
            # During strip, many caches are invalid but
            # a later call to `destroyed` will refresh them.
            return

        if tr is None or tr.changes['revs']:
            # updating the unfiltered branchmap should refresh all the others
            self.ui.debug('updating the branch cache\n')
            branchmap.updatecache(self.filtered('served'))

        if full:
            rbc = self.revbranchcache()
            for r in self.changelog:
                rbc.branchinfo(r)
            rbc.write()

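    # Note: the full=True path above is what 'hg debugupdatecaches' requests
    # from the command line.
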
    def invalidatecaches(self):

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
        self._sparsesignaturecache.clear()

    def invalidatevolatilesets(self):
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)

    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't always
        reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')

    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. an incomplete fncache causes unintentional failure, but
        a redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in list(self._filecache.keys()):
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue
            if (k == 'changelog' and
                self.currenttransaction() and
                self.changelog._delayed):
                # The changelog object may store unwritten revisions. We don't
                # want to lose them.
                # TODO: Solve the problem instead of working around it.
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()

    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extensions should hook this to invalidate their caches
        self.invalidate()
        self.invalidatedirstate()

    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            k = pycompat.sysstr(k)
            if k == r'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()

    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = encoding.environ.get(parentenvvar)

        timeout = 0
        warntimeout = 0
        if wait:
            timeout = self.ui.configint("ui", "timeout")
            warntimeout = self.ui.configint("ui", "timeout.warn")

        l = lockmod.trylock(self.ui, vfs, lockname, timeout, warntimeout,
                            releasefn=releasefn,
                            acquirefn=acquirefn, desc=desc,
                            inheritchecker=inheritchecker,
                            parentlock=parentlock)
        return l

    def _afterlock(self, callback):
        """add a callback to be run when the repository is fully unlocked

        The callback will be executed when the outermost lock is released
        (with wlock being higher level than 'lock')."""
        for ref in (self._wlockref, self._lockref):
            l = ref and ref()
            if l and l.held:
                l.postrelease.append(callback)
                break
        else: # no lock has been found.
            callback()

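    # Usage sketch, mirroring how commit() queues its 'commit' hook below:
    #
    #   def callback():
    #       ...  # runs once the outermost lock is released
    #   repo._afterlock(callback)
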
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def _wlockchecktransaction(self):
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')

    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquire
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause a dead-lock as it would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l

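    # Minimal ordering sketch for the docstrings above (wlock before lock;
    # both objects support the context-manager protocol):
    #
    #   with repo.wlock():
    #       with repo.lock():
    #           with repo.transaction('example') as tr:
    #               ...  # mutate the store and working copy here
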
    def _currentlock(self, lockref):
        """Returns the lock if it's held, or None if it's not."""
        if lockref is None:
            return None
        l = lockref()
        if l is None or not l.held:
            return None
        return l

    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        return self._currentlock(self._wlockref)

    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense
            # to do (what does a copy from something not in your working copy
            # even mean?) and it causes bugs (eg, issue4476). Instead, we will
            # warn the user that copy information was dropped, so if they
            # didn't expect this outcome it can be fixed, but this is the
            # correct behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

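    # Concretely: when the copy branch above fires, the new filelog entry
    # carries metadata like {'copy': 'foo', 'copyrev': '<40-hex node>'} and
    # fparent1 is reset to nullid, matching the "look up the copy data"
    # convention described in the comment.
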
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable"""
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))

    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and not match.always():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs, commitsubs, newstate = subrepoutil.precommit(
                self.ui, wctx, status, match, force=force)

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            mergeutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepoutil.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepoutil.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for commands that use a temporary commit (eg: histedit);
            # the temporary commit may have been stripped before the hook runs
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret

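    # Minimal usage sketch: commit() takes its own wlock/lock, so callers
    # can simply do
    #
    #   node = repo.commit(text='a message', user='alice <a@example.com>')
    #
    # and get the new changeset node back, or None when there is nothing to
    # commit and ui.allowemptycommit is unset.
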
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.manifestnode():
                # reuse an existing manifest revision
                mn = ctx.manifestnode()
                files = ctx.files()
            elif ctx.files():
                m1ctx = p1.manifestctx()
                m2ctx = p2.manifestctx()
                mctx = m1ctx.copy()

                m = mctx.read()
                m1 = m1ctx.read()
                m2 = m2ctx.read()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                        raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = mctx.write(trp, linkrev,
                                p1.manifestnode(), p2.manifestnode(),
                                added, drop)
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit in its proper phase
            targetphase = subrepoutil.newcommitphase(self.ui, ctx)
            if targetphase:
                # retracting the boundary does not alter parent changesets;
                # if a parent has a higher phase, the resulting phase will
                # be compliant anyway
                #
                # if the minimal phase was 0 we don't need to retract anything
                phases.registernew(self, tr, targetphase, [n])
            tr.close()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            self._phasecache.write()

    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # refresh all repository caches
        self.updatecaches()

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()

    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        '''a convenience method that calls node1.status(node2)'''
        return self[node1].status(node2, match, ignored, clean, unknown,
                                  listsubrepos)

    def addpostdsstatus(self, ps):
        """Add a callback to run within the wlock, at the point at which status
        fixups happen.

        On status completion, callback(wctx, status) will be called with the
        wlock held, unless the dirstate has changed from underneath or the
        wlock couldn't be grabbed.

        Callbacks should not capture and use a cached copy of the dirstate --
        it might change in the meanwhile. Instead, they should access the
        dirstate via wctx.repo().dirstate.

        This list is emptied out after each status run -- extensions should
        make sure they add to this list each time dirstate.status is called.
        Extensions should also make sure they don't call this for statuses
        that don't involve the dirstate.
        """

        # The list is located here for uniqueness reasons -- it is actually
        # managed by the workingctx, but that isn't unique per-repo.
        self._postdsstatus.append(ps)

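    # Illustrative callback (the fixup body is hypothetical); per the
    # docstring, reach the dirstate through wctx rather than a captured
    # reference:
    #
    #   def fixup(wctx, status):
    #       dirstate = wctx.repo().dirstate
    #       ...  # adjust flagged files here
    #
    #   repo.addpostdsstatus(fixup)
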
2184 def postdsstatus(self):
2184 def postdsstatus(self):
2185 """Used by workingctx to get the list of post-dirstate-status hooks."""
2185 """Used by workingctx to get the list of post-dirstate-status hooks."""
2186 return self._postdsstatus
2186 return self._postdsstatus
2187
2187
2188 def clearpostdsstatus(self):
2188 def clearpostdsstatus(self):
2189 """Used by workingctx to clear post-dirstate-status hooks."""
2189 """Used by workingctx to clear post-dirstate-status hooks."""
2190 del self._postdsstatus[:]
2190 del self._postdsstatus[:]
2191
2191
2192 def heads(self, start=None):
2192 def heads(self, start=None):
2193 if start is None:
2193 if start is None:
2194 cl = self.changelog
2194 cl = self.changelog
2195 headrevs = reversed(cl.headrevs())
2195 headrevs = reversed(cl.headrevs())
2196 return [cl.node(rev) for rev in headrevs]
2196 return [cl.node(rev) for rev in headrevs]
2197
2197
2198 heads = self.changelog.heads(start)
2198 heads = self.changelog.heads(start)
2199 # sort the output in rev descending order
2199 # sort the output in rev descending order
2200 return sorted(heads, key=self.changelog.rev, reverse=True)
2200 return sorted(heads, key=self.changelog.rev, reverse=True)
2201
2201
2202 def branchheads(self, branch=None, start=None, closed=False):
2202 def branchheads(self, branch=None, start=None, closed=False):
2203 '''return a (possibly filtered) list of heads for the given branch
2203 '''return a (possibly filtered) list of heads for the given branch
2204
2204
2205 Heads are returned in topological order, from newest to oldest.
2205 Heads are returned in topological order, from newest to oldest.
2206 If branch is None, use the dirstate branch.
2206 If branch is None, use the dirstate branch.
2207 If start is not None, return only heads reachable from start.
2207 If start is not None, return only heads reachable from start.
2208 If closed is True, return heads that are marked as closed as well.
2208 If closed is True, return heads that are marked as closed as well.
2209 '''
2209 '''
2210 if branch is None:
2210 if branch is None:
2211 branch = self[None].branch()
2211 branch = self[None].branch()
2212 branches = self.branchmap()
2212 branches = self.branchmap()
2213 if branch not in branches:
2213 if branch not in branches:
2214 return []
2214 return []
2215 # the cache returns heads ordered lowest to highest
2215 # the cache returns heads ordered lowest to highest
2216 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2216 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
2217 if start is not None:
2217 if start is not None:
2218 # filter out the heads that cannot be reached from startrev
2218 # filter out the heads that cannot be reached from startrev
2219 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2219 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
2220 bheads = [h for h in bheads if h in fbheads]
2220 bheads = [h for h in bheads if h in fbheads]
2221 return bheads
2221 return bheads
2222
2222
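For context, a short usage sketch of this API (hedged: the repository path and branch name are placeholders, and the byte strings follow this codebase's conventions)::

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')
    # Head nodes of the named branch, newest first, including closed heads.
    for node in repo.branchheads(b'default', closed=True):
        print(repo[node].rev())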
2223 def branches(self, nodes):
2223 def branches(self, nodes):
2224 if not nodes:
2224 if not nodes:
2225 nodes = [self.changelog.tip()]
2225 nodes = [self.changelog.tip()]
2226 b = []
2226 b = []
2227 for n in nodes:
2227 for n in nodes:
2228 t = n
2228 t = n
2229 while True:
2229 while True:
2230 p = self.changelog.parents(n)
2230 p = self.changelog.parents(n)
2231 if p[1] != nullid or p[0] == nullid:
2231 if p[1] != nullid or p[0] == nullid:
2232 b.append((t, n, p[0], p[1]))
2232 b.append((t, n, p[0], p[1]))
2233 break
2233 break
2234 n = p[0]
2234 n = p[0]
2235 return b
2235 return b
2236
2236
2237 def between(self, pairs):
2237 def between(self, pairs):
2238 r = []
2238 r = []
2239
2239
2240 for top, bottom in pairs:
2240 for top, bottom in pairs:
2241 n, l, i = top, [], 0
2241 n, l, i = top, [], 0
2242 f = 1
2242 f = 1
2243
2243
2244 while n != bottom and n != nullid:
2244 while n != bottom and n != nullid:
2245 p = self.changelog.parents(n)[0]
2245 p = self.changelog.parents(n)[0]
2246 if i == f:
2246 if i == f:
2247 l.append(n)
2247 l.append(n)
2248 f = f * 2
2248 f = f * 2
2249 n = p
2249 n = p
2250 i += 1
2250 i += 1
2251
2251
2252 r.append(l)
2252 r.append(l)
2253
2253
2254 return r
2254 return r
2255
2255
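The loop above samples nodes at exponentially growing distances (1, 2, 4, 8, ...) from ``top`` along the first-parent chain. A standalone sketch of the same scheme, independent of Mercurial (``parent`` is a stand-in for ``changelog.parents(n)[0]``; the real code also stops at ``nullid``)::

    def sample_between(parent, top, bottom):
        # parent: callable mapping a node to its first parent
        samples, n, i, f = [], top, 0, 1
        while n != bottom:
            p = parent(n)
            if i == f:              # distances 1, 2, 4, 8, ... from top
                samples.append(n)
                f *= 2
            n = p
            i += 1
        return samples

    # With a linear chain 10 -> 9 -> ... -> 0:
    assert sample_between(lambda n: n - 1, 10, 0) == [9, 8, 6, 2]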
2256 def checkpush(self, pushop):
2256 def checkpush(self, pushop):
2257 """Extensions can override this function if additional checks have
2257 """Extensions can override this function if additional checks have
2258 to be performed before pushing, or call it if they override push
2258 to be performed before pushing, or call it if they override push
2259 command.
2259 command.
2260 """
2260 """
2261
2261
2262 @unfilteredpropertycache
2262 @unfilteredpropertycache
2263 def prepushoutgoinghooks(self):
2263 def prepushoutgoinghooks(self):
2264 """Return util.hooks consists of a pushop with repo, remote, outgoing
2264 """Return util.hooks consists of a pushop with repo, remote, outgoing
2265 methods, which are called before pushing changesets.
2265 methods, which are called before pushing changesets.
2266 """
2266 """
2267 return util.hooks()
2267 return util.hooks()
2268
2268
2269 def pushkey(self, namespace, key, old, new):
2269 def pushkey(self, namespace, key, old, new):
2270 try:
2270 try:
2271 tr = self.currenttransaction()
2271 tr = self.currenttransaction()
2272 hookargs = {}
2272 hookargs = {}
2273 if tr is not None:
2273 if tr is not None:
2274 hookargs.update(tr.hookargs)
2274 hookargs.update(tr.hookargs)
2275 hookargs = pycompat.strkwargs(hookargs)
2275 hookargs = pycompat.strkwargs(hookargs)
2276 hookargs[r'namespace'] = namespace
2276 hookargs[r'namespace'] = namespace
2277 hookargs[r'key'] = key
2277 hookargs[r'key'] = key
2278 hookargs[r'old'] = old
2278 hookargs[r'old'] = old
2279 hookargs[r'new'] = new
2279 hookargs[r'new'] = new
2280 self.hook('prepushkey', throw=True, **hookargs)
2280 self.hook('prepushkey', throw=True, **hookargs)
2281 except error.HookAbort as exc:
2281 except error.HookAbort as exc:
2282 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2282 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
2283 if exc.hint:
2283 if exc.hint:
2284 self.ui.write_err(_("(%s)\n") % exc.hint)
2284 self.ui.write_err(_("(%s)\n") % exc.hint)
2285 return False
2285 return False
2286 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2286 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
2287 ret = pushkey.push(self, namespace, key, old, new)
2287 ret = pushkey.push(self, namespace, key, old, new)
2288 def runhook():
2288 def runhook():
2289 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2289 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2290 ret=ret)
2290 ret=ret)
2291 self._afterlock(runhook)
2291 self._afterlock(runhook)
2292 return ret
2292 return ret
2293
2293
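A hedged usage sketch of the pushkey flow above; the repository path, bookmark name, and old value are placeholders, not taken from this changeset::

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    repo = hg.repository(ui, b'/path/to/repo')
    # Fires the prepushkey hook, performs the update, then schedules the
    # pushkey hook via _afterlock; returns False if a hook aborts.
    ok = repo.pushkey(b'bookmarks', b'feature',
                      b'',                   # old value: bookmark is new
                      repo[b'tip'].hex())    # new value: hex node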
2294 def listkeys(self, namespace):
2294 def listkeys(self, namespace):
2295 self.hook('prelistkeys', throw=True, namespace=namespace)
2295 self.hook('prelistkeys', throw=True, namespace=namespace)
2296 self.ui.debug('listing keys for "%s"\n' % namespace)
2296 self.ui.debug('listing keys for "%s"\n' % namespace)
2297 values = pushkey.list(self, namespace)
2297 values = pushkey.list(self, namespace)
2298 self.hook('listkeys', namespace=namespace, values=values)
2298 self.hook('listkeys', namespace=namespace, values=values)
2299 return values
2299 return values
2300
2300
2301 def debugwireargs(self, one, two, three=None, four=None, five=None):
2301 def debugwireargs(self, one, two, three=None, four=None, five=None):
2302 '''used to test argument passing over the wire'''
2302 '''used to test argument passing over the wire'''
2303 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2303 return "%s %s %s %s %s" % (one, two, pycompat.bytestr(three),
2304 pycompat.bytestr(four),
2304 pycompat.bytestr(four),
2305 pycompat.bytestr(five))
2305 pycompat.bytestr(five))
2306
2306
2307 def savecommitmessage(self, text):
2307 def savecommitmessage(self, text):
2308 fp = self.vfs('last-message.txt', 'wb')
2308 fp = self.vfs('last-message.txt', 'wb')
2309 try:
2309 try:
2310 fp.write(text)
2310 fp.write(text)
2311 finally:
2311 finally:
2312 fp.close()
2312 fp.close()
2313 return self.pathto(fp.name[len(self.root) + 1:])
2313 return self.pathto(fp.name[len(self.root) + 1:])
2314
2314
2315 # used to avoid circular references so destructors work
2315 # used to avoid circular references so destructors work
2316 def aftertrans(files):
2316 def aftertrans(files):
2317 renamefiles = [tuple(t) for t in files]
2317 renamefiles = [tuple(t) for t in files]
2318 def a():
2318 def a():
2319 for vfs, src, dest in renamefiles:
2319 for vfs, src, dest in renamefiles:
2320 # if src and dest refer to the same file, vfs.rename is a no-op,
2320 # if src and dest refer to the same file, vfs.rename is a no-op,
2321 # leaving both src and dest on disk. delete dest to make sure
2321 # leaving both src and dest on disk. delete dest to make sure
2322 # the rename can't be such a no-op.
2322 # the rename can't be such a no-op.
2323 vfs.tryunlink(dest)
2323 vfs.tryunlink(dest)
2324 try:
2324 try:
2325 vfs.rename(src, dest)
2325 vfs.rename(src, dest)
2326 except OSError: # journal file does not yet exist
2326 except OSError: # journal file does not yet exist
2327 pass
2327 pass
2328 return a
2328 return a
2329
2329
2330 def undoname(fn):
2330 def undoname(fn):
2331 base, name = os.path.split(fn)
2331 base, name = os.path.split(fn)
2332 assert name.startswith('journal')
2332 assert name.startswith('journal')
2333 return os.path.join(base, name.replace('journal', 'undo', 1))
2333 return os.path.join(base, name.replace('journal', 'undo', 1))
2334
2334
2335 def instance(ui, path, create):
2335 def instance(ui, path, create, intents=None):
2336 return localrepository(ui, util.urllocalpath(path), create)
2336 return localrepository(ui, util.urllocalpath(path), create,
2337 intents=intents)
2337
2338
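This is the hunk that threads the new ``intents`` argument through to ``localrepository``. A minimal caller-side sketch (hedged: the ``b'readonly'`` value is illustrative; the recognized intent strings are defined elsewhere in this series)::

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    # The dispatcher can now forward a command's declared intents down to
    # repo construction so the repository can specialize itself.
    repo = hg.repository(ui, b'/path/to/repo', intents={b'readonly'})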
2338 def islocal(path):
2339 def islocal(path):
2339 return True
2340 return True
2340
2341
2341 def newreporequirements(repo):
2342 def newreporequirements(repo):
2342 """Determine the set of requirements for a new local repository.
2343 """Determine the set of requirements for a new local repository.
2343
2344
2344 Extensions can wrap this function to specify custom requirements for
2345 Extensions can wrap this function to specify custom requirements for
2345 new repositories.
2346 new repositories.
2346 """
2347 """
2347 ui = repo.ui
2348 ui = repo.ui
2348 requirements = {'revlogv1'}
2349 requirements = {'revlogv1'}
2349 if ui.configbool('format', 'usestore'):
2350 if ui.configbool('format', 'usestore'):
2350 requirements.add('store')
2351 requirements.add('store')
2351 if ui.configbool('format', 'usefncache'):
2352 if ui.configbool('format', 'usefncache'):
2352 requirements.add('fncache')
2353 requirements.add('fncache')
2353 if ui.configbool('format', 'dotencode'):
2354 if ui.configbool('format', 'dotencode'):
2354 requirements.add('dotencode')
2355 requirements.add('dotencode')
2355
2356
2356 compengine = ui.config('experimental', 'format.compression')
2357 compengine = ui.config('experimental', 'format.compression')
2357 if compengine not in util.compengines:
2358 if compengine not in util.compengines:
2358 raise error.Abort(_('compression engine %s defined by '
2359 raise error.Abort(_('compression engine %s defined by '
2359 'experimental.format.compression not available') %
2360 'experimental.format.compression not available') %
2360 compengine,
2361 compengine,
2361 hint=_('run "hg debuginstall" to list available '
2362 hint=_('run "hg debuginstall" to list available '
2362 'compression engines'))
2363 'compression engines'))
2363
2364
2364 # zlib is the historical default and doesn't need an explicit requirement.
2365 # zlib is the historical default and doesn't need an explicit requirement.
2365 if compengine != 'zlib':
2366 if compengine != 'zlib':
2366 requirements.add('exp-compression-%s' % compengine)
2367 requirements.add('exp-compression-%s' % compengine)
2367
2368
2368 if scmutil.gdinitconfig(ui):
2369 if scmutil.gdinitconfig(ui):
2369 requirements.add('generaldelta')
2370 requirements.add('generaldelta')
2370 if ui.configbool('experimental', 'treemanifest'):
2371 if ui.configbool('experimental', 'treemanifest'):
2371 requirements.add('treemanifest')
2372 requirements.add('treemanifest')
2372
2373
2373 revlogv2 = ui.config('experimental', 'revlogv2')
2374 revlogv2 = ui.config('experimental', 'revlogv2')
2374 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2375 if revlogv2 == 'enable-unstable-format-and-corrupt-my-data':
2375 requirements.remove('revlogv1')
2376 requirements.remove('revlogv1')
2376 # generaldelta is implied by revlogv2.
2377 # generaldelta is implied by revlogv2.
2377 requirements.discard('generaldelta')
2378 requirements.discard('generaldelta')
2378 requirements.add(REVLOGV2_REQUIREMENT)
2379 requirements.add(REVLOGV2_REQUIREMENT)
2379
2380
2380 return requirements
2381 return requirements
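The knobs read above correspond to hgrc settings; a sample configuration that exercises them (hedged: the zstd engine must be compiled into the build, and revlogv2 remains experimental and unsafe, as its own config value warns)::

    [format]
    usestore = True
    usefncache = True
    dotencode = True

    [experimental]
    format.compression = zstd
    treemanifest = True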
@@ -1,636 +1,636 b''
1 # sshpeer.py - ssh repository proxy class for mercurial
1 # sshpeer.py - ssh repository proxy class for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import re
10 import re
11 import uuid
11 import uuid
12
12
13 from .i18n import _
13 from .i18n import _
14 from . import (
14 from . import (
15 error,
15 error,
16 pycompat,
16 pycompat,
17 util,
17 util,
18 wireproto,
18 wireproto,
19 wireprotoserver,
19 wireprotoserver,
20 wireprototypes,
20 wireprototypes,
21 wireprotov1peer,
21 wireprotov1peer,
22 )
22 )
23 from .utils import (
23 from .utils import (
24 procutil,
24 procutil,
25 )
25 )
26
26
27 def _serverquote(s):
27 def _serverquote(s):
28 """quote a string for the remote shell ... which we assume is sh"""
28 """quote a string for the remote shell ... which we assume is sh"""
29 if not s:
29 if not s:
30 return s
30 return s
31 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
31 if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
32 return s
32 return s
33 return "'%s'" % s.replace("'", "'\\''")
33 return "'%s'" % s.replace("'", "'\\''")
34
34
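The quoting rule above in isolation: strings made only of safe characters pass through untouched; anything else is wrapped in single quotes with embedded quotes escaped as ``'\''``. A standalone restatement::

    import re

    def serverquote(s):
        if not s:
            return s
        if re.match('[a-zA-Z0-9@%_+=:,./-]*$', s):
            return s                # only safe characters: no quoting
        return "'%s'" % s.replace("'", "'\\''")

    assert serverquote('repo/path') == 'repo/path'
    assert serverquote("it's") == "'it'\\''s'"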
35 def _forwardoutput(ui, pipe):
35 def _forwardoutput(ui, pipe):
36 """display all data currently available on pipe as remote output.
36 """display all data currently available on pipe as remote output.
37
37
38 This is non blocking."""
38 This is non blocking."""
39 if pipe:
39 if pipe:
40 s = procutil.readpipe(pipe)
40 s = procutil.readpipe(pipe)
41 if s:
41 if s:
42 for l in s.splitlines():
42 for l in s.splitlines():
43 ui.status(_("remote: "), l, '\n')
43 ui.status(_("remote: "), l, '\n')
44
44
45 class doublepipe(object):
45 class doublepipe(object):
46 """Operate a side-channel pipe in addition of a main one
46 """Operate a side-channel pipe in addition of a main one
47
47
48 The side-channel pipe carries server output that must be forwarded to the
48 The side-channel pipe carries server output that must be forwarded to the
49 user. The double pipe behaves as the "main" pipe, but ensures the
49 user. The double pipe behaves as the "main" pipe, but ensures the
50 content of the "side" pipe is properly processed while we wait for a
50 content of the "side" pipe is properly processed while we wait for a
51 blocking call on the "main" pipe.
51 blocking call on the "main" pipe.
52
52
53 If large amounts of data are read from "main", the forward will cease after
53 If large amounts of data are read from "main", the forward will cease after
54 the first bytes start to appear. This simplifies the implementation
54 the first bytes start to appear. This simplifies the implementation
55 without affecting actual output of sshpeer too much as we rarely issue
55 without affecting actual output of sshpeer too much as we rarely issue
56 large reads for data not yet emitted by the server.
56 large reads for data not yet emitted by the server.
57
57
58 The main pipe is expected to be a 'bufferedinputpipe' from the util module
58 The main pipe is expected to be a 'bufferedinputpipe' from the util module
59 that handles all the OS-specific bits. This class lives in this module
59 that handles all the OS-specific bits. This class lives in this module
60 because it focuses on behavior specific to the SSH protocol."""
60 because it focuses on behavior specific to the SSH protocol."""
61
61
62 def __init__(self, ui, main, side):
62 def __init__(self, ui, main, side):
63 self._ui = ui
63 self._ui = ui
64 self._main = main
64 self._main = main
65 self._side = side
65 self._side = side
66
66
67 def _wait(self):
67 def _wait(self):
68 """wait until some data are available on main or side
68 """wait until some data are available on main or side
69
69
70 return a pair of booleans (ismainready, issideready)
70 return a pair of booleans (ismainready, issideready)
71
71
72 (This will only wait for data if the setup is supported by `util.poll`)
72 (This will only wait for data if the setup is supported by `util.poll`)
73 """
73 """
74 if (isinstance(self._main, util.bufferedinputpipe) and
74 if (isinstance(self._main, util.bufferedinputpipe) and
75 self._main.hasbuffer):
75 self._main.hasbuffer):
76 # Main has data. Assume side is worth poking at.
76 # Main has data. Assume side is worth poking at.
77 return True, True
77 return True, True
78
78
79 fds = [self._main.fileno(), self._side.fileno()]
79 fds = [self._main.fileno(), self._side.fileno()]
80 try:
80 try:
81 act = util.poll(fds)
81 act = util.poll(fds)
82 except NotImplementedError:
82 except NotImplementedError:
83 # not supported yet; assume all have data.
83 # not supported yet; assume all have data.
84 act = fds
84 act = fds
85 return (self._main.fileno() in act, self._side.fileno() in act)
85 return (self._main.fileno() in act, self._side.fileno() in act)
86
86
87 def write(self, data):
87 def write(self, data):
88 return self._call('write', data)
88 return self._call('write', data)
89
89
90 def read(self, size):
90 def read(self, size):
91 r = self._call('read', size)
91 r = self._call('read', size)
92 if size != 0 and not r:
92 if size != 0 and not r:
93 # We've observed a condition that indicates the
93 # We've observed a condition that indicates the
94 # stdout closed unexpectedly. Check stderr one
94 # stdout closed unexpectedly. Check stderr one
95 # more time and snag anything that's there before
95 # more time and snag anything that's there before
96 # letting anyone know the main part of the pipe
96 # letting anyone know the main part of the pipe
97 # closed prematurely.
97 # closed prematurely.
98 _forwardoutput(self._ui, self._side)
98 _forwardoutput(self._ui, self._side)
99 return r
99 return r
100
100
101 def readline(self):
101 def readline(self):
102 return self._call('readline')
102 return self._call('readline')
103
103
104 def _call(self, methname, data=None):
104 def _call(self, methname, data=None):
105 """call <methname> on "main", forward output of "side" while blocking
105 """call <methname> on "main", forward output of "side" while blocking
106 """
106 """
107 # data can be '' or 0
107 # data can be '' or 0
108 if (data is not None and not data) or self._main.closed:
108 if (data is not None and not data) or self._main.closed:
109 _forwardoutput(self._ui, self._side)
109 _forwardoutput(self._ui, self._side)
110 return ''
110 return ''
111 while True:
111 while True:
112 mainready, sideready = self._wait()
112 mainready, sideready = self._wait()
113 if sideready:
113 if sideready:
114 _forwardoutput(self._ui, self._side)
114 _forwardoutput(self._ui, self._side)
115 if mainready:
115 if mainready:
116 meth = getattr(self._main, methname)
116 meth = getattr(self._main, methname)
117 if data is None:
117 if data is None:
118 return meth()
118 return meth()
119 else:
119 else:
120 return meth(data)
120 return meth(data)
121
121
122 def close(self):
122 def close(self):
123 return self._main.close()
123 return self._main.close()
124
124
125 def flush(self):
125 def flush(self):
126 return self._main.flush()
126 return self._main.flush()
127
127
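The _wait() logic generalizes to any pair of readable pipes; a standalone sketch of the same idea using select.select directly (hedged: util.poll is assumed here to be a thin portability wrapper over this)::

    import select

    def wait_readable(mainfd, sidefd):
        # Block until at least one descriptor is readable, then report both.
        ready, _, _ = select.select([mainfd, sidefd], [], [])
        return mainfd in ready, sidefd in ready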
128 def _cleanuppipes(ui, pipei, pipeo, pipee):
128 def _cleanuppipes(ui, pipei, pipeo, pipee):
129 """Clean up pipes used by an SSH connection."""
129 """Clean up pipes used by an SSH connection."""
130 if pipeo:
130 if pipeo:
131 pipeo.close()
131 pipeo.close()
132 if pipei:
132 if pipei:
133 pipei.close()
133 pipei.close()
134
134
135 if pipee:
135 if pipee:
136 # Try to read from the err descriptor until EOF.
136 # Try to read from the err descriptor until EOF.
137 try:
137 try:
138 for l in pipee:
138 for l in pipee:
139 ui.status(_('remote: '), l)
139 ui.status(_('remote: '), l)
140 except (IOError, ValueError):
140 except (IOError, ValueError):
141 pass
141 pass
142
142
143 pipee.close()
143 pipee.close()
144
144
145 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
145 def _makeconnection(ui, sshcmd, args, remotecmd, path, sshenv=None):
146 """Create an SSH connection to a server.
146 """Create an SSH connection to a server.
147
147
148 Returns a tuple of (process, stdin, stdout, stderr) for the
148 Returns a tuple of (process, stdin, stdout, stderr) for the
149 spawned process.
149 spawned process.
150 """
150 """
151 cmd = '%s %s %s' % (
151 cmd = '%s %s %s' % (
152 sshcmd,
152 sshcmd,
153 args,
153 args,
154 procutil.shellquote('%s -R %s serve --stdio' % (
154 procutil.shellquote('%s -R %s serve --stdio' % (
155 _serverquote(remotecmd), _serverquote(path))))
155 _serverquote(remotecmd), _serverquote(path))))
156
156
157 ui.debug('running %s\n' % cmd)
157 ui.debug('running %s\n' % cmd)
158 cmd = procutil.quotecommand(cmd)
158 cmd = procutil.quotecommand(cmd)
159
159
160 # no buffering allows the use of 'select'
160 # no buffering allows the use of 'select'
161 # feel free to remove buffering and select usage when we ultimately
161 # feel free to remove buffering and select usage when we ultimately
162 # move to threading.
162 # move to threading.
163 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
163 stdin, stdout, stderr, proc = procutil.popen4(cmd, bufsize=0, env=sshenv)
164
164
165 return proc, stdin, stdout, stderr
165 return proc, stdin, stdout, stderr
166
166
167 def _clientcapabilities():
167 def _clientcapabilities():
168 """Return list of capabilities of this client.
168 """Return list of capabilities of this client.
169
169
170 Returns a list of capabilities that are supported by this client.
170 Returns a list of capabilities that are supported by this client.
171 """
171 """
172 protoparams = {'partial-pull'}
172 protoparams = {'partial-pull'}
173 comps = [e.wireprotosupport().name for e in
173 comps = [e.wireprotosupport().name for e in
174 util.compengines.supportedwireengines(util.CLIENTROLE)]
174 util.compengines.supportedwireengines(util.CLIENTROLE)]
175 protoparams.add('comp=%s' % ','.join(comps))
175 protoparams.add('comp=%s' % ','.join(comps))
176 return protoparams
176 return protoparams
177
177
178 def _performhandshake(ui, stdin, stdout, stderr):
178 def _performhandshake(ui, stdin, stdout, stderr):
179 def badresponse():
179 def badresponse():
180 # Flush any output on stderr.
180 # Flush any output on stderr.
181 _forwardoutput(ui, stderr)
181 _forwardoutput(ui, stderr)
182
182
183 msg = _('no suitable response from remote hg')
183 msg = _('no suitable response from remote hg')
184 hint = ui.config('ui', 'ssherrorhint')
184 hint = ui.config('ui', 'ssherrorhint')
185 raise error.RepoError(msg, hint=hint)
185 raise error.RepoError(msg, hint=hint)
186
186
187 # The handshake consists of sending wire protocol commands in reverse
187 # The handshake consists of sending wire protocol commands in reverse
188 # order of protocol implementation and then sniffing for a response
188 # order of protocol implementation and then sniffing for a response
189 # to one of them.
189 # to one of them.
190 #
190 #
191 # Those commands (from oldest to newest) are:
191 # Those commands (from oldest to newest) are:
192 #
192 #
193 # ``between``
193 # ``between``
194 # Asks for the set of revisions between a pair of revisions. Command
194 # Asks for the set of revisions between a pair of revisions. Command
195 # present in all Mercurial server implementations.
195 # present in all Mercurial server implementations.
196 #
196 #
197 # ``hello``
197 # ``hello``
198 # Instructs the server to advertise its capabilities. Introduced in
198 # Instructs the server to advertise its capabilities. Introduced in
199 # Mercurial 0.9.1.
199 # Mercurial 0.9.1.
200 #
200 #
201 # ``upgrade``
201 # ``upgrade``
202 # Requests upgrade from default transport protocol version 1 to
202 # Requests upgrade from default transport protocol version 1 to
203 # a newer version. Introduced in Mercurial 4.6 as an experimental
203 # a newer version. Introduced in Mercurial 4.6 as an experimental
204 # feature.
204 # feature.
205 #
205 #
206 # The ``between`` command is issued with a request for the null
206 # The ``between`` command is issued with a request for the null
207 # range. If the remote is a Mercurial server, this request will
207 # range. If the remote is a Mercurial server, this request will
208 # generate a specific response: ``1\n\n``. This represents the
208 # generate a specific response: ``1\n\n``. This represents the
209 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
209 # wire protocol encoded value for ``\n``. We look for ``1\n\n``
210 # in the output stream and know this is the response to ``between``
210 # in the output stream and know this is the response to ``between``
211 # and we're at the end of our handshake reply.
211 # and we're at the end of our handshake reply.
212 #
212 #
213 # The response to the ``hello`` command will be a line with the
213 # The response to the ``hello`` command will be a line with the
214 # length of the value returned by that command followed by that
214 # length of the value returned by that command followed by that
215 # value. If the server doesn't support ``hello`` (which should be
215 # value. If the server doesn't support ``hello`` (which should be
216 # rare), that line will be ``0\n``. Otherwise, the value will contain
216 # rare), that line will be ``0\n``. Otherwise, the value will contain
217 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
217 # RFC 822 like lines. Of these, the ``capabilities:`` line contains
218 # the capabilities of the server.
218 # the capabilities of the server.
219 #
219 #
220 # The ``upgrade`` command isn't really a command in the traditional
220 # The ``upgrade`` command isn't really a command in the traditional
221 # sense of version 1 of the transport because it isn't using the
221 # sense of version 1 of the transport because it isn't using the
222 # proper mechanism for formatting arguments: instead, it just encodes
222 # proper mechanism for formatting arguments: instead, it just encodes
223 # arguments on the line, delimited by spaces.
223 # arguments on the line, delimited by spaces.
224 #
224 #
225 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
225 # The ``upgrade`` line looks like ``upgrade <token> <capabilities>``.
226 # If the server doesn't support protocol upgrades, it will reply to
226 # If the server doesn't support protocol upgrades, it will reply to
227 # this line with ``0\n``. Otherwise, it emits an
227 # this line with ``0\n``. Otherwise, it emits an
228 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
228 # ``upgraded <token> <protocol>`` line to both stdout and stderr.
229 # Content immediately following this line describes additional
229 # Content immediately following this line describes additional
230 # protocol and server state.
230 # protocol and server state.
231 #
231 #
232 # In addition to the responses to our command requests, the server
232 # In addition to the responses to our command requests, the server
233 # may emit "banner" output on stdout. SSH servers are allowed to
233 # may emit "banner" output on stdout. SSH servers are allowed to
234 # print messages to stdout on login. Issuing commands on connection
234 # print messages to stdout on login. Issuing commands on connection
235 # allows us to flush this banner output from the server by scanning
235 # allows us to flush this banner output from the server by scanning
236 # for output to our well-known ``between`` command. Of course, if
236 # for output to our well-known ``between`` command. Of course, if
237 # the banner contains ``1\n\n``, this will throw off our detection.
237 # the banner contains ``1\n\n``, this will throw off our detection.
238
238
239 requestlog = ui.configbool('devel', 'debug.peer-request')
239 requestlog = ui.configbool('devel', 'debug.peer-request')
240
240
241 # Generate a random token to help identify responses to version 2
241 # Generate a random token to help identify responses to version 2
242 # upgrade request.
242 # upgrade request.
243 token = pycompat.sysbytes(str(uuid.uuid4()))
243 token = pycompat.sysbytes(str(uuid.uuid4()))
244 upgradecaps = [
244 upgradecaps = [
245 ('proto', wireprotoserver.SSHV2),
245 ('proto', wireprotoserver.SSHV2),
246 ]
246 ]
247 upgradecaps = util.urlreq.urlencode(upgradecaps)
247 upgradecaps = util.urlreq.urlencode(upgradecaps)
248
248
249 try:
249 try:
250 pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
250 pairsarg = '%s-%s' % ('0' * 40, '0' * 40)
251 handshake = [
251 handshake = [
252 'hello\n',
252 'hello\n',
253 'between\n',
253 'between\n',
254 'pairs %d\n' % len(pairsarg),
254 'pairs %d\n' % len(pairsarg),
255 pairsarg,
255 pairsarg,
256 ]
256 ]
257
257
258 # Request upgrade to version 2 if configured.
258 # Request upgrade to version 2 if configured.
259 if ui.configbool('experimental', 'sshpeer.advertise-v2'):
259 if ui.configbool('experimental', 'sshpeer.advertise-v2'):
260 ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
260 ui.debug('sending upgrade request: %s %s\n' % (token, upgradecaps))
261 handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
261 handshake.insert(0, 'upgrade %s %s\n' % (token, upgradecaps))
262
262
263 if requestlog:
263 if requestlog:
264 ui.debug('devel-peer-request: hello\n')
264 ui.debug('devel-peer-request: hello\n')
265 ui.debug('sending hello command\n')
265 ui.debug('sending hello command\n')
266 if requestlog:
266 if requestlog:
267 ui.debug('devel-peer-request: between\n')
267 ui.debug('devel-peer-request: between\n')
268 ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
268 ui.debug('devel-peer-request: pairs: %d bytes\n' % len(pairsarg))
269 ui.debug('sending between command\n')
269 ui.debug('sending between command\n')
270
270
271 stdin.write(''.join(handshake))
271 stdin.write(''.join(handshake))
272 stdin.flush()
272 stdin.flush()
273 except IOError:
273 except IOError:
274 badresponse()
274 badresponse()
275
275
276 # Assume version 1 of wire protocol by default.
276 # Assume version 1 of wire protocol by default.
277 protoname = wireprototypes.SSHV1
277 protoname = wireprototypes.SSHV1
278 reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
278 reupgraded = re.compile(b'^upgraded %s (.*)$' % re.escape(token))
279
279
280 lines = ['', 'dummy']
280 lines = ['', 'dummy']
281 max_noise = 500
281 max_noise = 500
282 while lines[-1] and max_noise:
282 while lines[-1] and max_noise:
283 try:
283 try:
284 l = stdout.readline()
284 l = stdout.readline()
285 _forwardoutput(ui, stderr)
285 _forwardoutput(ui, stderr)
286
286
287 # Look for reply to protocol upgrade request. It has a token
287 # Look for reply to protocol upgrade request. It has a token
288 # in it, so there should be no false positives.
288 # in it, so there should be no false positives.
289 m = reupgraded.match(l)
289 m = reupgraded.match(l)
290 if m:
290 if m:
291 protoname = m.group(1)
291 protoname = m.group(1)
292 ui.debug('protocol upgraded to %s\n' % protoname)
292 ui.debug('protocol upgraded to %s\n' % protoname)
293 # If an upgrade was handled, the ``hello`` and ``between``
293 # If an upgrade was handled, the ``hello`` and ``between``
294 # requests are ignored. The next output belongs to the
294 # requests are ignored. The next output belongs to the
295 # protocol, so stop scanning lines.
295 # protocol, so stop scanning lines.
296 break
296 break
297
297
298 # Otherwise it could be a banner, or a ``0\n`` response if the server
298 # Otherwise it could be a banner, or a ``0\n`` response if the server
299 # doesn't support upgrade.
299 # doesn't support upgrade.
300
300
301 if lines[-1] == '1\n' and l == '\n':
301 if lines[-1] == '1\n' and l == '\n':
302 break
302 break
303 if l:
303 if l:
304 ui.debug('remote: ', l)
304 ui.debug('remote: ', l)
305 lines.append(l)
305 lines.append(l)
306 max_noise -= 1
306 max_noise -= 1
307 except IOError:
307 except IOError:
308 badresponse()
308 badresponse()
309 else:
309 else:
310 badresponse()
310 badresponse()
311
311
312 caps = set()
312 caps = set()
313
313
314 # For version 1, we should see a ``capabilities`` line in response to the
314 # For version 1, we should see a ``capabilities`` line in response to the
315 # ``hello`` command.
315 # ``hello`` command.
316 if protoname == wireprototypes.SSHV1:
316 if protoname == wireprototypes.SSHV1:
317 for l in reversed(lines):
317 for l in reversed(lines):
318 # Look for response to ``hello`` command. Scan from the back so
318 # Look for response to ``hello`` command. Scan from the back so
319 # we don't misinterpret banner output as the command reply.
319 # we don't misinterpret banner output as the command reply.
320 if l.startswith('capabilities:'):
320 if l.startswith('capabilities:'):
321 caps.update(l[:-1].split(':')[1].split())
321 caps.update(l[:-1].split(':')[1].split())
322 break
322 break
323 elif protoname == wireprotoserver.SSHV2:
323 elif protoname == wireprotoserver.SSHV2:
324 # We see a line with number of bytes to follow and then a value
324 # We see a line with number of bytes to follow and then a value
325 # looking like ``capabilities: *``.
325 # looking like ``capabilities: *``.
326 line = stdout.readline()
326 line = stdout.readline()
327 try:
327 try:
328 valuelen = int(line)
328 valuelen = int(line)
329 except ValueError:
329 except ValueError:
330 badresponse()
330 badresponse()
331
331
332 capsline = stdout.read(valuelen)
332 capsline = stdout.read(valuelen)
333 if not capsline.startswith('capabilities: '):
333 if not capsline.startswith('capabilities: '):
334 badresponse()
334 badresponse()
335
335
336 ui.debug('remote: %s\n' % capsline)
336 ui.debug('remote: %s\n' % capsline)
337
337
338 caps.update(capsline.split(':')[1].split())
338 caps.update(capsline.split(':')[1].split())
339 # Trailing newline.
339 # Trailing newline.
340 stdout.read(1)
340 stdout.read(1)
341
341
342 # Error if we couldn't find capabilities, this means:
342 # Error if we couldn't find capabilities, this means:
343 #
343 #
344 # 1. Remote isn't a Mercurial server
344 # 1. Remote isn't a Mercurial server
345 # 2. Remote is a <0.9.1 Mercurial server
345 # 2. Remote is a <0.9.1 Mercurial server
346 # 3. Remote is a future Mercurial server that dropped ``hello``
346 # 3. Remote is a future Mercurial server that dropped ``hello``
347 # and other attempted handshake mechanisms.
347 # and other attempted handshake mechanisms.
348 if not caps:
348 if not caps:
349 badresponse()
349 badresponse()
350
350
351 # Flush any output on stderr before proceeding.
351 # Flush any output on stderr before proceeding.
352 _forwardoutput(ui, stderr)
352 _forwardoutput(ui, stderr)
353
353
354 return protoname, caps
354 return protoname, caps
355
355
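Making the handshake description concrete, these are the exact bytes the version 1 probe writes, reconstructed from the code above (the capabilities payload shown in the trailing comment is illustrative)::

    pairsarg = '%s-%s' % ('0' * 40, '0' * 40)      # the null range, 81 bytes
    handshake = ''.join([
        'hello\n',                    # answered with a capabilities line
        'between\n',
        'pairs %d\n' % len(pairsarg),
        pairsarg,                     # answered with '1\n\n'
    ])
    # A Mercurial server replies along the lines of:
    #   <len>\ncapabilities: lookup branchmap pushkey ... unbundle=HG10GZ\n
    #   1\n\n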
356 class sshv1peer(wireprotov1peer.wirepeer):
356 class sshv1peer(wireprotov1peer.wirepeer):
357 def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
357 def __init__(self, ui, url, proc, stdin, stdout, stderr, caps,
358 autoreadstderr=True):
358 autoreadstderr=True):
359 """Create a peer from an existing SSH connection.
359 """Create a peer from an existing SSH connection.
360
360
361 ``proc`` is a handle on the underlying SSH process.
361 ``proc`` is a handle on the underlying SSH process.
362 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
362 ``stdin``, ``stdout``, and ``stderr`` are handles on the stdio
363 pipes for that process.
363 pipes for that process.
364 ``caps`` is a set of capabilities supported by the remote.
364 ``caps`` is a set of capabilities supported by the remote.
365 ``autoreadstderr`` denotes whether to automatically read from
365 ``autoreadstderr`` denotes whether to automatically read from
366 stderr and to forward its output.
366 stderr and to forward its output.
367 """
367 """
368 self._url = url
368 self._url = url
369 self.ui = ui
369 self.ui = ui
370 # self._subprocess is unused. Keeping a handle on the process
370 # self._subprocess is unused. Keeping a handle on the process
371 # holds a reference and prevents it from being garbage collected.
371 # holds a reference and prevents it from being garbage collected.
372 self._subprocess = proc
372 self._subprocess = proc
373
373
374 # And we hook up our "doublepipe" wrapper to allow querying
374 # And we hook up our "doublepipe" wrapper to allow querying
375 # stderr any time we perform I/O.
375 # stderr any time we perform I/O.
376 if autoreadstderr:
376 if autoreadstderr:
377 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
377 stdout = doublepipe(ui, util.bufferedinputpipe(stdout), stderr)
378 stdin = doublepipe(ui, stdin, stderr)
378 stdin = doublepipe(ui, stdin, stderr)
379
379
380 self._pipeo = stdin
380 self._pipeo = stdin
381 self._pipei = stdout
381 self._pipei = stdout
382 self._pipee = stderr
382 self._pipee = stderr
383 self._caps = caps
383 self._caps = caps
384 self._autoreadstderr = autoreadstderr
384 self._autoreadstderr = autoreadstderr
385
385
386 # Commands that have a "framed" response where the first line of the
386 # Commands that have a "framed" response where the first line of the
387 # response contains the length of that response.
387 # response contains the length of that response.
388 _FRAMED_COMMANDS = {
388 _FRAMED_COMMANDS = {
389 'batch',
389 'batch',
390 }
390 }
391
391
392 # Start of ipeerconnection interface.
392 # Start of ipeerconnection interface.
393
393
394 def url(self):
394 def url(self):
395 return self._url
395 return self._url
396
396
397 def local(self):
397 def local(self):
398 return None
398 return None
399
399
400 def peer(self):
400 def peer(self):
401 return self
401 return self
402
402
403 def canpush(self):
403 def canpush(self):
404 return True
404 return True
405
405
406 def close(self):
406 def close(self):
407 pass
407 pass
408
408
409 # End of ipeerconnection interface.
409 # End of ipeerconnection interface.
410
410
411 # Start of ipeercommands interface.
411 # Start of ipeercommands interface.
412
412
413 def capabilities(self):
413 def capabilities(self):
414 return self._caps
414 return self._caps
415
415
416 # End of ipeercommands interface.
416 # End of ipeercommands interface.
417
417
418 def _readerr(self):
418 def _readerr(self):
419 _forwardoutput(self.ui, self._pipee)
419 _forwardoutput(self.ui, self._pipee)
420
420
421 def _abort(self, exception):
421 def _abort(self, exception):
422 self._cleanup()
422 self._cleanup()
423 raise exception
423 raise exception
424
424
425 def _cleanup(self):
425 def _cleanup(self):
426 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
426 _cleanuppipes(self.ui, self._pipei, self._pipeo, self._pipee)
427
427
428 __del__ = _cleanup
428 __del__ = _cleanup
429
429
430 def _sendrequest(self, cmd, args, framed=False):
430 def _sendrequest(self, cmd, args, framed=False):
431 if (self.ui.debugflag
431 if (self.ui.debugflag
432 and self.ui.configbool('devel', 'debug.peer-request')):
432 and self.ui.configbool('devel', 'debug.peer-request')):
433 dbg = self.ui.debug
433 dbg = self.ui.debug
434 line = 'devel-peer-request: %s\n'
434 line = 'devel-peer-request: %s\n'
435 dbg(line % cmd)
435 dbg(line % cmd)
436 for key, value in sorted(args.items()):
436 for key, value in sorted(args.items()):
437 if not isinstance(value, dict):
437 if not isinstance(value, dict):
438 dbg(line % ' %s: %d bytes' % (key, len(value)))
438 dbg(line % ' %s: %d bytes' % (key, len(value)))
439 else:
439 else:
440 for dk, dv in sorted(value.items()):
440 for dk, dv in sorted(value.items()):
441 dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
441 dbg(line % ' %s-%s: %d' % (key, dk, len(dv)))
442 self.ui.debug("sending %s command\n" % cmd)
442 self.ui.debug("sending %s command\n" % cmd)
443 self._pipeo.write("%s\n" % cmd)
443 self._pipeo.write("%s\n" % cmd)
444 _func, names = wireproto.commands[cmd]
444 _func, names = wireproto.commands[cmd]
445 keys = names.split()
445 keys = names.split()
446 wireargs = {}
446 wireargs = {}
447 for k in keys:
447 for k in keys:
448 if k == '*':
448 if k == '*':
449 wireargs['*'] = args
449 wireargs['*'] = args
450 break
450 break
451 else:
451 else:
452 wireargs[k] = args[k]
452 wireargs[k] = args[k]
453 del args[k]
453 del args[k]
454 for k, v in sorted(wireargs.iteritems()):
454 for k, v in sorted(wireargs.iteritems()):
455 self._pipeo.write("%s %d\n" % (k, len(v)))
455 self._pipeo.write("%s %d\n" % (k, len(v)))
456 if isinstance(v, dict):
456 if isinstance(v, dict):
457 for dk, dv in v.iteritems():
457 for dk, dv in v.iteritems():
458 self._pipeo.write("%s %d\n" % (dk, len(dv)))
458 self._pipeo.write("%s %d\n" % (dk, len(dv)))
459 self._pipeo.write(dv)
459 self._pipeo.write(dv)
460 else:
460 else:
461 self._pipeo.write(v)
461 self._pipeo.write(v)
462 self._pipeo.flush()
462 self._pipeo.flush()
463
463
464 # We know exactly how many bytes are in the response. So return a proxy
464 # We know exactly how many bytes are in the response. So return a proxy
465 # around the raw output stream that allows reading exactly this many
465 # around the raw output stream that allows reading exactly this many
466 # bytes. Callers then can read() without fear of overrunning the
466 # bytes. Callers then can read() without fear of overrunning the
467 # response.
467 # response.
468 if framed:
468 if framed:
469 amount = self._getamount()
469 amount = self._getamount()
470 return util.cappedreader(self._pipei, amount)
470 return util.cappedreader(self._pipei, amount)
471
471
472 return self._pipei
472 return self._pipei
473
473
474 def _callstream(self, cmd, **args):
474 def _callstream(self, cmd, **args):
475 args = pycompat.byteskwargs(args)
475 args = pycompat.byteskwargs(args)
476 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
476 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
477
477
478 def _callcompressable(self, cmd, **args):
478 def _callcompressable(self, cmd, **args):
479 args = pycompat.byteskwargs(args)
479 args = pycompat.byteskwargs(args)
480 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
480 return self._sendrequest(cmd, args, framed=cmd in self._FRAMED_COMMANDS)
481
481
482 def _call(self, cmd, **args):
482 def _call(self, cmd, **args):
483 args = pycompat.byteskwargs(args)
483 args = pycompat.byteskwargs(args)
484 return self._sendrequest(cmd, args, framed=True).read()
484 return self._sendrequest(cmd, args, framed=True).read()
485
485
486 def _callpush(self, cmd, fp, **args):
486 def _callpush(self, cmd, fp, **args):
487 # The server responds with an empty frame if the client should
487 # The server responds with an empty frame if the client should
488 # continue submitting the payload.
488 # continue submitting the payload.
489 r = self._call(cmd, **args)
489 r = self._call(cmd, **args)
490 if r:
490 if r:
491 return '', r
491 return '', r
492
492
493 # The payload consists of frames with content followed by an empty
493 # The payload consists of frames with content followed by an empty
494 # frame.
494 # frame.
495 for d in iter(lambda: fp.read(4096), ''):
495 for d in iter(lambda: fp.read(4096), ''):
496 self._writeframed(d)
496 self._writeframed(d)
497 self._writeframed("", flush=True)
497 self._writeframed("", flush=True)
498
498
499 # In case of success, there is an empty frame and a frame containing
499 # In case of success, there is an empty frame and a frame containing
500 # the integer result (as a string).
500 # the integer result (as a string).
501 # In case of error, there is a non-empty frame containing the error.
501 # In case of error, there is a non-empty frame containing the error.
502 r = self._readframed()
502 r = self._readframed()
503 if r:
503 if r:
504 return '', r
504 return '', r
505 return self._readframed(), ''
505 return self._readframed(), ''
506
506
507 def _calltwowaystream(self, cmd, fp, **args):
507 def _calltwowaystream(self, cmd, fp, **args):
508 # The server responds with an empty frame if the client should
508 # The server responds with an empty frame if the client should
509 # continue submitting the payload.
509 # continue submitting the payload.
510 r = self._call(cmd, **args)
510 r = self._call(cmd, **args)
511 if r:
511 if r:
512 # XXX needs to be made better
512 # XXX needs to be made better
513 raise error.Abort(_('unexpected remote reply: %s') % r)
513 raise error.Abort(_('unexpected remote reply: %s') % r)
514
514
515 # The payload consists of frames with content followed by an empty
515 # The payload consists of frames with content followed by an empty
516 # frame.
516 # frame.
517 for d in iter(lambda: fp.read(4096), ''):
517 for d in iter(lambda: fp.read(4096), ''):
518 self._writeframed(d)
518 self._writeframed(d)
519 self._writeframed("", flush=True)
519 self._writeframed("", flush=True)
520
520
521 return self._pipei
521 return self._pipei
522
522
523 def _getamount(self):
523 def _getamount(self):
524 l = self._pipei.readline()
524 l = self._pipei.readline()
525 if l == '\n':
525 if l == '\n':
526 if self._autoreadstderr:
526 if self._autoreadstderr:
527 self._readerr()
527 self._readerr()
528 msg = _('check previous remote output')
528 msg = _('check previous remote output')
529 self._abort(error.OutOfBandError(hint=msg))
529 self._abort(error.OutOfBandError(hint=msg))
530 if self._autoreadstderr:
530 if self._autoreadstderr:
531 self._readerr()
531 self._readerr()
532 try:
532 try:
533 return int(l)
533 return int(l)
534 except ValueError:
534 except ValueError:
535 self._abort(error.ResponseError(_("unexpected response:"), l))
535 self._abort(error.ResponseError(_("unexpected response:"), l))
536
536
537 def _readframed(self):
537 def _readframed(self):
538 size = self._getamount()
538 size = self._getamount()
539 if not size:
539 if not size:
540 return b''
540 return b''
541
541
542 return self._pipei.read(size)
542 return self._pipei.read(size)
543
543
544 def _writeframed(self, data, flush=False):
544 def _writeframed(self, data, flush=False):
545 self._pipeo.write("%d\n" % len(data))
545 self._pipeo.write("%d\n" % len(data))
546 if data:
546 if data:
547 self._pipeo.write(data)
547 self._pipeo.write(data)
548 if flush:
548 if flush:
549 self._pipeo.flush()
549 self._pipeo.flush()
550 if self._autoreadstderr:
550 if self._autoreadstderr:
551 self._readerr()
551 self._readerr()
552
552
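The framing used by _readframed/_writeframed is simply a decimal length line followed by that many payload bytes. A standalone round-trip sketch::

    import io

    def writeframe(pipe, data):
        pipe.write(b'%d\n' % len(data))    # length header
        if data:
            pipe.write(data)

    def readframe(pipe):
        size = int(pipe.readline())        # ValueError on a garbled header
        return pipe.read(size) if size else b''

    buf = io.BytesIO()
    writeframe(buf, b'hello')
    buf.seek(0)
    assert readframe(buf) == b'hello'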
553 class sshv2peer(sshv1peer):
553 class sshv2peer(sshv1peer):
554 """A peer that speakers version 2 of the transport protocol."""
554 """A peer that speakers version 2 of the transport protocol."""
555 # Currently version 2 is identical to version 1 post handshake.
555 # Currently version 2 is identical to version 1 post handshake.
556 # And handshake is performed before the peer is instantiated. So
556 # And handshake is performed before the peer is instantiated. So
557 # we need no custom code.
557 # we need no custom code.
558
558
559 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
559 def makepeer(ui, path, proc, stdin, stdout, stderr, autoreadstderr=True):
560 """Make a peer instance from existing pipes.
560 """Make a peer instance from existing pipes.
561
561
562 ``path`` and ``proc`` are stored on the eventual peer instance and may
562 ``path`` and ``proc`` are stored on the eventual peer instance and may
563 not be used for anything meaningful.
563 not be used for anything meaningful.
564
564
565 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
565 ``stdin``, ``stdout``, and ``stderr`` are the pipes connected to the
566 SSH server's stdio handles.
566 SSH server's stdio handles.
567
567
568 This function is factored out to allow creating peers that don't
568 This function is factored out to allow creating peers that don't
569 actually spawn a new process. It is useful for starting SSH protocol
569 actually spawn a new process. It is useful for starting SSH protocol
570 servers and clients via non-standard means, which is handy for
570 servers and clients via non-standard means, which is handy for
571 testing.
571 testing.
572 """
572 """
573 try:
573 try:
574 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
574 protoname, caps = _performhandshake(ui, stdin, stdout, stderr)
575 except Exception:
575 except Exception:
576 _cleanuppipes(ui, stdout, stdin, stderr)
576 _cleanuppipes(ui, stdout, stdin, stderr)
577 raise
577 raise
578
578
579 if protoname == wireprototypes.SSHV1:
579 if protoname == wireprototypes.SSHV1:
580 return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
580 return sshv1peer(ui, path, proc, stdin, stdout, stderr, caps,
581 autoreadstderr=autoreadstderr)
581 autoreadstderr=autoreadstderr)
582 elif protoname == wireprototypes.SSHV2:
582 elif protoname == wireprototypes.SSHV2:
583 return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
583 return sshv2peer(ui, path, proc, stdin, stdout, stderr, caps,
584 autoreadstderr=autoreadstderr)
584 autoreadstderr=autoreadstderr)
585 else:
585 else:
586 _cleanuppipes(ui, stdout, stdin, stderr)
586 _cleanuppipes(ui, stdout, stdin, stderr)
587 raise error.RepoError(_('unknown version of SSH protocol: %s') %
587 raise error.RepoError(_('unknown version of SSH protocol: %s') %
588 protoname)
588 protoname)
589
589
590 def instance(ui, path, create):
590 def instance(ui, path, create, intents=None):
591 """Create an SSH peer.
591 """Create an SSH peer.
592
592
593 The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
593 The returned object conforms to the ``wireprotov1peer.wirepeer`` interface.
594 """
594 """
595 u = util.url(path, parsequery=False, parsefragment=False)
595 u = util.url(path, parsequery=False, parsefragment=False)
596 if u.scheme != 'ssh' or not u.host or u.path is None:
596 if u.scheme != 'ssh' or not u.host or u.path is None:
597 raise error.RepoError(_("couldn't parse location %s") % path)
597 raise error.RepoError(_("couldn't parse location %s") % path)
598
598
599 util.checksafessh(path)
599 util.checksafessh(path)
600
600
601 if u.passwd is not None:
601 if u.passwd is not None:
602 raise error.RepoError(_('password in URL not supported'))
602 raise error.RepoError(_('password in URL not supported'))
603
603
604 sshcmd = ui.config('ui', 'ssh')
604 sshcmd = ui.config('ui', 'ssh')
605 remotecmd = ui.config('ui', 'remotecmd')
605 remotecmd = ui.config('ui', 'remotecmd')
606 sshaddenv = dict(ui.configitems('sshenv'))
606 sshaddenv = dict(ui.configitems('sshenv'))
607 sshenv = procutil.shellenviron(sshaddenv)
607 sshenv = procutil.shellenviron(sshaddenv)
608 remotepath = u.path or '.'
608 remotepath = u.path or '.'
609
609
610 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
610 args = procutil.sshargs(sshcmd, u.host, u.user, u.port)
611
611
612 if create:
612 if create:
613 cmd = '%s %s %s' % (sshcmd, args,
613 cmd = '%s %s %s' % (sshcmd, args,
614 procutil.shellquote('%s init %s' %
614 procutil.shellquote('%s init %s' %
615 (_serverquote(remotecmd), _serverquote(remotepath))))
615 (_serverquote(remotecmd), _serverquote(remotepath))))
616 ui.debug('running %s\n' % cmd)
616 ui.debug('running %s\n' % cmd)
617 res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
617 res = ui.system(cmd, blockedtag='sshpeer', environ=sshenv)
618 if res != 0:
618 if res != 0:
619 raise error.RepoError(_('could not create remote repo'))
619 raise error.RepoError(_('could not create remote repo'))
620
620
621 proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
621 proc, stdin, stdout, stderr = _makeconnection(ui, sshcmd, args, remotecmd,
622 remotepath, sshenv)
622 remotepath, sshenv)
623
623
624 peer = makepeer(ui, path, proc, stdin, stdout, stderr)
624 peer = makepeer(ui, path, proc, stdin, stdout, stderr)
625
625
626 # Finally, if supported by the server, notify it about our own
626 # Finally, if supported by the server, notify it about our own
627 # capabilities.
627 # capabilities.
628 if 'protocaps' in peer.capabilities():
628 if 'protocaps' in peer.capabilities():
629 try:
629 try:
630 peer._call("protocaps",
630 peer._call("protocaps",
631 caps=' '.join(sorted(_clientcapabilities())))
631 caps=' '.join(sorted(_clientcapabilities())))
632 except IOError:
632 except IOError:
633 peer._cleanup()
633 peer._cleanup()
634 raise error.RepoError(_('capability exchange failed'))
634 raise error.RepoError(_('capability exchange failed'))
635
635
636 return peer
636 return peer
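A hedged end-to-end sketch of obtaining one of these peers through the normal front door (the URL is a placeholder; hg.peer() routes ssh:// paths to this module's instance() and triggers the handshake described above)::

    from mercurial import hg, ui as uimod

    ui = uimod.ui.load()
    peer = hg.peer(ui, {}, b'ssh://hg@example.com/repo')
    try:
        print(sorted(peer.capabilities()))
    finally:
        peer.close()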
@@ -1,221 +1,221 b''
1 # statichttprepo.py - simple http repository class for mercurial
1 # statichttprepo.py - simple http repository class for mercurial
2 #
2 #
3 # This provides read-only repo access to repositories exported via static http
3 # This provides read-only repo access to repositories exported via static http
4 #
4 #
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import errno
12 import errno
13
13
14 from .i18n import _
14 from .i18n import _
15 from . import (
15 from . import (
16 changelog,
16 changelog,
17 error,
17 error,
18 localrepo,
18 localrepo,
19 manifest,
19 manifest,
20 namespaces,
20 namespaces,
21 pathutil,
21 pathutil,
22 scmutil,
22 scmutil,
23 store,
23 store,
24 url,
24 url,
25 util,
25 util,
26 vfs as vfsmod,
26 vfs as vfsmod,
27 )
27 )
28
28
29 urlerr = util.urlerr
29 urlerr = util.urlerr
30 urlreq = util.urlreq
30 urlreq = util.urlreq
31
31
32 class httprangereader(object):
32 class httprangereader(object):
33 def __init__(self, url, opener):
33 def __init__(self, url, opener):
34 # we assume opener has HTTPRangeHandler
34 # we assume opener has HTTPRangeHandler
35 self.url = url
35 self.url = url
36 self.pos = 0
36 self.pos = 0
37 self.opener = opener
37 self.opener = opener
38 self.name = url
38 self.name = url
39
39
40 def __enter__(self):
40 def __enter__(self):
41 return self
41 return self
42
42
43 def __exit__(self, exc_type, exc_value, traceback):
43 def __exit__(self, exc_type, exc_value, traceback):
44 self.close()
44 self.close()
45
45
46 def seek(self, pos):
46 def seek(self, pos):
47 self.pos = pos
47 self.pos = pos
48 def read(self, bytes=None):
48 def read(self, bytes=None):
49 req = urlreq.request(self.url)
49 req = urlreq.request(self.url)
50 end = ''
50 end = ''
51 if bytes:
51 if bytes:
52 end = self.pos + bytes - 1
52 end = self.pos + bytes - 1
53 if self.pos or end:
53 if self.pos or end:
54 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
54 req.add_header('Range', 'bytes=%d-%s' % (self.pos, end))
55
55
56 try:
56 try:
57 f = self.opener.open(req)
57 f = self.opener.open(req)
58 data = f.read()
58 data = f.read()
59 code = f.code
59 code = f.code
60 except urlerr.httperror as inst:
60 except urlerr.httperror as inst:
61 num = inst.code == 404 and errno.ENOENT or None
61 num = inst.code == 404 and errno.ENOENT or None
62 raise IOError(num, inst)
62 raise IOError(num, inst)
63 except urlerr.urlerror as inst:
63 except urlerr.urlerror as inst:
64 raise IOError(None, inst.reason[1])
64 raise IOError(None, inst.reason[1])
65
65
66 if code == 200:
66 if code == 200:
67 # HTTPRangeHandler does nothing if remote does not support
67 # HTTPRangeHandler does nothing if remote does not support
68 # Range headers and returns the full entity. Let's slice it.
68 # Range headers and returns the full entity. Let's slice it.
69 if bytes:
69 if bytes:
70 data = data[self.pos:self.pos + bytes]
70 data = data[self.pos:self.pos + bytes]
71 else:
71 else:
72 data = data[self.pos:]
72 data = data[self.pos:]
73 elif bytes:
73 elif bytes:
74 data = data[:bytes]
74 data = data[:bytes]
75 self.pos += len(data)
75 self.pos += len(data)
76 return data
76 return data
77 def readlines(self):
77 def readlines(self):
78 return self.read().splitlines(True)
78 return self.read().splitlines(True)
79 def __iter__(self):
79 def __iter__(self):
80 return iter(self.readlines())
80 return iter(self.readlines())
81 def close(self):
81 def close(self):
82 pass
82 pass
83
83
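httprangereader amounts to issuing HTTP Range requests; a standalone modern-stdlib sketch of the same fetch (the URL is a placeholder; a 200 reply means the server ignored the Range header, so the caller slices manually, mirroring the code above)::

    import urllib.request

    def read_range(url, pos, nbytes):
        req = urllib.request.Request(url)
        req.add_header('Range', 'bytes=%d-%d' % (pos, pos + nbytes - 1))
        with urllib.request.urlopen(req) as f:
            data = f.read()
            if f.status == 200:     # Range ignored: got the full entity
                data = data[pos:pos + nbytes]
        return data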
# _RangeError and _HTTPRangeHandler were originally in byterange.py,
# which was itself extracted from urlgrabber. See the last version of
# byterange.py from history if you need more information.
class _RangeError(IOError):
    """Error raised when an unsatisfiable range is requested."""

class _HTTPRangeHandler(urlreq.basehandler):
    """Handler that enables HTTP Range headers.

    This was extremely simple. The Range header is an HTTP feature to
    begin with so all this class does is tell urllib2 that the
    "206 Partial Content" response from the HTTP server is what we
    expected.
    """

    def http_error_206(self, req, fp, code, msg, hdrs):
        # 206 Partial Content Response
        r = urlreq.addinfourl(fp, hdrs, req.get_full_url())
        r.code = code
        r.msg = msg
        return r

    def http_error_416(self, req, fp, code, msg, hdrs):
        # HTTP's Range Not Satisfiable error
        raise _RangeError('Requested Range Not Satisfiable')

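# Sketch of the handler in action, assuming urlreq exposes buildopener()
# and request() as Mercurial's urllib compatibility layer does elsewhere
# (example.com is illustrative):
#
#     opener = urlreq.buildopener(_HTTPRangeHandler())
#     req = urlreq.request('http://example.com/data.bin')
#     req.add_header('Range', 'bytes=0-99')
#     res = opener.open(req)     # a 206 reply surfaces as a normal
#     first100 = res.read()      # file-like response, not an HTTPError
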
def build_opener(ui, authinfo):
    # urllib cannot handle URLs with embedded user or passwd
    urlopener = url.opener(ui, authinfo)
    urlopener.add_handler(_HTTPRangeHandler())

    class statichttpvfs(vfsmod.abstractvfs):
        def __init__(self, base):
            self.base = base

        def __call__(self, path, mode='r', *args, **kw):
            if mode not in ('r', 'rb'):
                raise IOError('Permission denied')
            f = "/".join((self.base, urlreq.quote(path)))
            return httprangereader(f, urlopener)

        def join(self, path):
            if path:
                return pathutil.join(self.base, path)
            else:
                return self.base

    return statichttpvfs

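# Note that build_opener() returns a vfs *class*, not an instance: every
# statichttpvfs constructed from it shares the authenticated urlopener
# through the enclosing scope. Rough flow (URL illustrative):
#
#     vfsclass = build_opener(ui, authinfo)
#     vfs = vfsclass('http://example.com/repo/.hg')
#     fp = vfs('store/00changelog.i')   # -> httprangereader over HTTP
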
class statichttppeer(localrepo.localpeer):
    def local(self):
        return None
    def canpush(self):
        return False

class statichttprepository(localrepo.localrepository):
    supported = localrepo.localrepository._basesupported

    def __init__(self, ui, path):
        self._url = path
        self.ui = ui

        self.root = path
        u = util.url(path.rstrip('/') + "/.hg")
        self.path, authinfo = u.authinfo()

        vfsclass = build_opener(ui, authinfo)
        self.vfs = vfsclass(self.path)
        self.cachevfs = vfsclass(self.vfs.join('cache'))
        self._phasedefaults = []

        self.names = namespaces.namespaces()
        self.filtername = None

        try:
            requirements = scmutil.readrequires(self.vfs, self.supported)
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            requirements = set()

            # check if it is a non-empty old-style repository
            try:
                fp = self.vfs("00changelog.i")
                fp.read(1)
                fp.close()
            except IOError as inst:
                if inst.errno != errno.ENOENT:
                    raise
                # we do not care about empty old-style repositories here
                msg = _("'%s' does not appear to be an hg repository") % path
                raise error.RepoError(msg)

        # setup store
        self.store = store.store(requirements, self.path, vfsclass)
        self.spath = self.store.path
        self.svfs = self.store.opener
        self.sjoin = self.store.join
        self._filecache = {}
        self.requirements = requirements

        self.manifestlog = manifest.manifestlog(self.svfs, self)
        self.changelog = changelog.changelog(self.svfs)
        self._tags = None
        self.nodetagscache = None
        self._branchcaches = {}
        self._revbranchcache = None
        self.encodepats = None
        self.decodepats = None
        self._transref = None

    def _restrictcapabilities(self, caps):
        caps = super(statichttprepository, self)._restrictcapabilities(caps)
        return caps.difference(["pushkey"])

    def url(self):
        return self._url

    def local(self):
        return False

    def peer(self):
        return statichttppeer(self)

    def wlock(self, wait=True):
        raise error.LockUnavailable(0, _('lock not available'), 'lock',
                                    _('cannot lock static-http repository'))

    def lock(self, wait=True):
        raise error.Abort(_('cannot lock static-http repository'))

    def _writecaches(self):
        pass # statichttprepository are read only

-def instance(ui, path, create):
+def instance(ui, path, create, intents=None):
    if create:
        raise error.Abort(_('cannot create new static-http repository'))
    return statichttprepository(ui, path[7:])
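
# Sketch of the revised entry point (values illustrative): instance() now
# accepts an optional 'intents' keyword, unused here, so callers can pass
# command intents uniformly; path[7:] strips the leading 'static-' to
# leave a plain http(s) URL.
#
#     repo = instance(ui, 'static-http://example.com/repo', create=False,
#                     intents=None)
#     repo.url()   # -> 'http://example.com/repo'
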
@@ -1,261 +1,261 @@
# unionrepo.py - repository class for viewing union of repository changesets
#
# Derived from bundlerepo.py
# Copyright 2006, 2007 Benoit Boissinot <bboissin@gmail.com>
# Copyright 2013 Unity Technologies, Mads Kiilerich <madski@unity3d.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Repository class for "in-memory pull" of one local repository to another,
allowing operations like diff and log with revsets.
"""

from __future__ import absolute_import

from .i18n import _
from .node import nullid

from . import (
    changelog,
    cmdutil,
    error,
    filelog,
    localrepo,
    manifest,
    mdiff,
    pathutil,
    pycompat,
    revlog,
    util,
    vfs as vfsmod,
)

class unionrevlog(revlog.revlog):
    def __init__(self, opener, indexfile, revlog2, linkmapper):
        # How it works:
        # To retrieve a revision, we just need to know the node id so we can
        # look it up in revlog2.
        #
        # To differentiate a rev in the second revlog from a rev in the revlog,
        # we check revision against repotiprev.
        opener = vfsmod.readonlyvfs(opener)
        revlog.revlog.__init__(self, opener, indexfile)
        self.revlog2 = revlog2

        n = len(self)
        self.repotiprev = n - 1
        self.bundlerevs = set() # used by 'bundle()' revset expression
        for rev2 in self.revlog2:
            rev = self.revlog2.index[rev2]
            # rev numbers - in revlog2, very different from self.rev
            _start, _csize, _rsize, base, linkrev, p1rev, p2rev, node = rev
            flags = _start & 0xFFFF

            if linkmapper is None: # link is to same revlog
                assert linkrev == rev2 # we never link back
                link = n
            else: # rev must be mapped from repo2 cl to unified cl by linkmapper
                link = linkmapper(linkrev)

            if linkmapper is not None: # link is to same revlog
                base = linkmapper(base)

            if node in self.nodemap:
                # this happens for the common revlog revisions
                self.bundlerevs.add(self.nodemap[node])
                continue

            p1node = self.revlog2.node(p1rev)
            p2node = self.revlog2.node(p2rev)

            e = (flags, None, None, base,
                 link, self.rev(p1node), self.rev(p2node), node)
            self.index.insert(-1, e)
            self.nodemap[node] = n
            self.bundlerevs.add(n)
            n += 1

    def _chunk(self, rev):
        if rev <= self.repotiprev:
            return revlog.revlog._chunk(self, rev)
        return self.revlog2._chunk(self.node(rev))

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions"""
        if rev1 > self.repotiprev and rev2 > self.repotiprev:
            return self.revlog2.revdiff(
                self.revlog2.rev(self.node(rev1)),
                self.revlog2.rev(self.node(rev2)))
        elif rev1 <= self.repotiprev and rev2 <= self.repotiprev:
            return self.baserevdiff(rev1, rev2)

        return mdiff.textdiff(self.revision(rev1), self.revision(rev2))

    def revision(self, nodeorrev, _df=None, raw=False):
        """return an uncompressed revision of a given node or revision
        number.
        """
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = self.rev(node)

        if node == nullid:
            return ""

        if rev > self.repotiprev:
            text = self.revlog2.revision(node)
            self._cache = (node, rev, text)
        else:
            text = self.baserevision(rev)
            # already cached
        return text

    def baserevision(self, nodeorrev):
        # Revlog subclasses may override 'revision' method to modify format of
        # content retrieved from revlog. To use unionrevlog with such class one
        # needs to override 'baserevision' and make more specific call here.
        return revlog.revlog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        # Exists for the same purpose as baserevision.
        return revlog.revlog.revdiff(self, rev1, rev2)

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        raise NotImplementedError
    def addgroup(self, deltas, transaction, addrevisioncb=None):
        raise NotImplementedError
    def strip(self, rev, minlink):
        raise NotImplementedError
    def checksize(self):
        raise NotImplementedError

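# Sketch of the resulting numbering (values illustrative): revisions
# 0..repotiprev are served by the base revlog directly, while anything
# above repotiprev was appended from revlog2 and is resolved by node.
#
#     u = unionrevlog(opener, indexfile, revlog2, linkmapper)
#     u.revision(u.repotiprev)        # read via revlog.revlog.revision
#     u.revision(u.repotiprev + 1)    # delegated to self.revlog2
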
class unionchangelog(unionrevlog, changelog.changelog):
    def __init__(self, opener, opener2):
        changelog.changelog.__init__(self, opener)
        linkmapper = None
        changelog2 = changelog.changelog(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, changelog2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        # Although changelog doesn't override 'revision' method, some extensions
        # may replace this class with another that does. Same story with
        # manifest and filelog classes.
        return changelog.changelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return changelog.changelog.revdiff(self, rev1, rev2)

class unionmanifest(unionrevlog, manifest.manifestrevlog):
    def __init__(self, opener, opener2, linkmapper):
        manifest.manifestrevlog.__init__(self, opener)
        manifest2 = manifest.manifestrevlog(opener2)
        unionrevlog.__init__(self, opener, self.indexfile, manifest2,
                             linkmapper)

    def baserevision(self, nodeorrev):
        return manifest.manifestrevlog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return manifest.manifestrevlog.revdiff(self, rev1, rev2)

class unionfilelog(filelog.filelog):
    def __init__(self, opener, path, opener2, linkmapper, repo):
        filelog.filelog.__init__(self, opener, path)
        filelog2 = filelog.filelog(opener2, path)
        self._revlog = unionrevlog(opener, self.indexfile,
                                   filelog2._revlog, linkmapper)
        self._repo = repo
        self.repotiprev = self._revlog.repotiprev
        self.revlog2 = self._revlog.revlog2

    def baserevision(self, nodeorrev):
        return filelog.filelog.revision(self, nodeorrev)

    def baserevdiff(self, rev1, rev2):
        return filelog.filelog.revdiff(self, rev1, rev2)

    def iscensored(self, rev):
        """Check if a revision is censored."""
        if rev <= self.repotiprev:
            return filelog.filelog.iscensored(self, rev)
        node = self.node(rev)
        return self.revlog2.iscensored(self.revlog2.rev(node))

class unionpeer(localrepo.localpeer):
    def canpush(self):
        return False

class unionrepository(localrepo.localrepository):
    def __init__(self, ui, path, path2):
        localrepo.localrepository.__init__(self, ui, path)
        self.ui.setconfig('phases', 'publish', False, 'unionrepo')

        self._url = 'union:%s+%s' % (util.expandpath(path),
                                     util.expandpath(path2))
        self.repo2 = localrepo.localrepository(ui, path2)

    @localrepo.unfilteredpropertycache
    def changelog(self):
        return unionchangelog(self.svfs, self.repo2.svfs)

    def _clrev(self, rev2):
        """map from repo2 changelog rev to temporary rev in self.changelog"""
        node = self.repo2.changelog.node(rev2)
        return self.changelog.rev(node)

    def _constructmanifest(self):
        return unionmanifest(self.svfs, self.repo2.svfs,
                             self.unfiltered()._clrev)

    def url(self):
        return self._url

    def file(self, f):
        return unionfilelog(self.svfs, f, self.repo2.svfs,
                            self.unfiltered()._clrev, self)

    def close(self):
        self.repo2.close()

    def cancopy(self):
        return False

    def peer(self):
        return unionpeer(self)

    def getcwd(self):
        return pycompat.getcwd() # always outside the repo

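# Rough usage sketch (paths illustrative): the union repository overlays
# repo2's changesets onto the base repo in memory, so ordinary revset
# queries see both histories without writing to either repository.
#
#     repo = unionrepository(ui, '/path/to/repo1', '/path/to/repo2')
#     for rev in repo.revs('heads()'):
#         ...
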
-def instance(ui, path, create):
+def instance(ui, path, create, intents=None):
    if create:
        raise error.Abort(_('cannot create new union repository'))
    parentpath = ui.config("bundle", "mainreporoot")
    if not parentpath:
        # try to find the correct path to the working directory repo
        parentpath = cmdutil.findrepo(pycompat.getcwd())
        if parentpath is None:
            parentpath = ''
    if parentpath:
        # Try to make the full path relative so we get a nice, short URL.
        # In particular, we don't want temp dir names in test outputs.
        cwd = pycompat.getcwd()
        if parentpath == cwd:
            parentpath = ''
        else:
            cwd = pathutil.normasprefix(cwd)
            if parentpath.startswith(cwd):
                parentpath = parentpath[len(cwd):]
    if path.startswith('union:'):
        s = path.split(":", 1)[1].split("+", 1)
        if len(s) == 1:
            repopath, repopath2 = parentpath, s[0]
        else:
            repopath, repopath2 = s
    else:
        repopath, repopath2 = parentpath, path
    return unionrepository(ui, repopath, repopath2)
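
# Illustrative command lines exercising the parsing above:
#
#     hg log -R union:../repo1+../repo2    # both repositories explicit
#     hg log -R union:../other             # base path defaults to the
#                                          # repo found from the cwd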